4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
43 #include <sys/times.h>
46 #include <sys/statfs.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
109 #include <linux/audit.h>
110 #include "linux_loop.h"
116 #define CLONE_IO 0x80000000 /* Clone io context */
119 /* We can't directly call the host clone syscall, because this will
120 * badly confuse libc (breaking mutexes, for example). So we must
121 * divide clone flags into:
122 * * flag combinations that look like pthread_create()
123 * * flag combinations that look like fork()
124 * * flags we can implement within QEMU itself
125 * * flags we can't support and will return an error for
127 /* For thread creation, all these flags must be present; for
128 * fork, none must be present.
130 #define CLONE_THREAD_FLAGS \
131 (CLONE_VM | CLONE_FS | CLONE_FILES | \
132 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
134 /* These flags are ignored:
135 * CLONE_DETACHED is now ignored by the kernel;
136 * CLONE_IO is just an optimisation hint to the I/O scheduler
138 #define CLONE_IGNORED_FLAGS \
139 (CLONE_DETACHED | CLONE_IO)
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS \
143 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
144 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS \
148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
151 #define CLONE_INVALID_FORK_FLAGS \
152 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
154 #define CLONE_INVALID_THREAD_FLAGS \
155 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
156 CLONE_IGNORED_FLAGS))
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159 * have almost all been allocated. We cannot support any of
160 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162 * The checks against the invalid thread masks above will catch these.
163 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
167 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
168 * once. This exercises the codepaths for restart.
170 //#define DEBUG_ERESTARTSYS
172 //#include <linux/msdos_fs.h>
173 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
174 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
184 #define _syscall0(type,name) \
185 static type name (void) \
187 return syscall(__NR_##name); \
190 #define _syscall1(type,name,type1,arg1) \
191 static type name (type1 arg1) \
193 return syscall(__NR_##name, arg1); \
196 #define _syscall2(type,name,type1,arg1,type2,arg2) \
197 static type name (type1 arg1,type2 arg2) \
199 return syscall(__NR_##name, arg1, arg2); \
202 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
203 static type name (type1 arg1,type2 arg2,type3 arg3) \
205 return syscall(__NR_##name, arg1, arg2, arg3); \
208 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
209 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
211 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
214 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
216 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
218 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
222 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
223 type5,arg5,type6,arg6) \
224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
227 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
231 #define __NR_sys_uname __NR_uname
232 #define __NR_sys_getcwd1 __NR_getcwd
233 #define __NR_sys_getdents __NR_getdents
234 #define __NR_sys_getdents64 __NR_getdents64
235 #define __NR_sys_getpriority __NR_getpriority
236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
237 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
238 #define __NR_sys_syslog __NR_syslog
239 #define __NR_sys_futex __NR_futex
240 #define __NR_sys_inotify_init __NR_inotify_init
241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
244 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
245 #define __NR__llseek __NR_lseek
248 /* Newer kernel ports have llseek() instead of _llseek() */
249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
250 #define TARGET_NR__llseek TARGET_NR_llseek
254 _syscall0(int, gettid)
/* This is a replacement for the host gettid() and must return a host
 * errno. Used when the libc does not provide a gettid() wrapper.
 */
static int gettid(void) {
    return -ENOSYS;
}
262 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
263 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
265 #if !defined(__NR_getdents) || \
266 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
267 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
269 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
270 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
271 loff_t *, res, uint, wh);
273 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
274 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
276 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
277 #ifdef __NR_exit_group
278 _syscall1(int,exit_group,int,error_code)
280 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
281 _syscall1(int,set_tid_address,int *,tidptr)
283 #if defined(TARGET_NR_futex) && defined(__NR_futex)
284 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
285 const struct timespec *,timeout,int *,uaddr2,int,val3)
287 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
288 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
289 unsigned long *, user_mask_ptr);
290 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
291 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
292 unsigned long *, user_mask_ptr);
293 #define __NR_sys_getcpu __NR_getcpu
294 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
295 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
297 _syscall2(int, capget, struct __user_cap_header_struct *, header,
298 struct __user_cap_data_struct *, data);
299 _syscall2(int, capset, struct __user_cap_header_struct *, header,
300 struct __user_cap_data_struct *, data);
301 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
302 _syscall2(int, ioprio_get, int, which, int, who)
304 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
305 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
307 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
308 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
311 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
312 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
313 unsigned long, idx1, unsigned long, idx2)
316 static bitmask_transtbl fcntl_flags_tbl[] = {
317 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
318 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
319 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
320 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
321 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
322 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
323 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
324 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
325 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
326 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
327 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
328 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
329 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
330 #if defined(O_DIRECT)
331 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
333 #if defined(O_NOATIME)
334 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
336 #if defined(O_CLOEXEC)
337 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
340 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
342 #if defined(O_TMPFILE)
343 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
345 /* Don't terminate the list prematurely on 64-bit host+guest. */
346 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
347 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
354 QEMU_IFLA_BR_FORWARD_DELAY,
355 QEMU_IFLA_BR_HELLO_TIME,
356 QEMU_IFLA_BR_MAX_AGE,
357 QEMU_IFLA_BR_AGEING_TIME,
358 QEMU_IFLA_BR_STP_STATE,
359 QEMU_IFLA_BR_PRIORITY,
360 QEMU_IFLA_BR_VLAN_FILTERING,
361 QEMU_IFLA_BR_VLAN_PROTOCOL,
362 QEMU_IFLA_BR_GROUP_FWD_MASK,
363 QEMU_IFLA_BR_ROOT_ID,
364 QEMU_IFLA_BR_BRIDGE_ID,
365 QEMU_IFLA_BR_ROOT_PORT,
366 QEMU_IFLA_BR_ROOT_PATH_COST,
367 QEMU_IFLA_BR_TOPOLOGY_CHANGE,
368 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
369 QEMU_IFLA_BR_HELLO_TIMER,
370 QEMU_IFLA_BR_TCN_TIMER,
371 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
372 QEMU_IFLA_BR_GC_TIMER,
373 QEMU_IFLA_BR_GROUP_ADDR,
374 QEMU_IFLA_BR_FDB_FLUSH,
375 QEMU_IFLA_BR_MCAST_ROUTER,
376 QEMU_IFLA_BR_MCAST_SNOOPING,
377 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
378 QEMU_IFLA_BR_MCAST_QUERIER,
379 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
380 QEMU_IFLA_BR_MCAST_HASH_MAX,
381 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
382 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
383 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
384 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
385 QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
386 QEMU_IFLA_BR_MCAST_QUERY_INTVL,
387 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
388 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
389 QEMU_IFLA_BR_NF_CALL_IPTABLES,
390 QEMU_IFLA_BR_NF_CALL_IP6TABLES,
391 QEMU_IFLA_BR_NF_CALL_ARPTABLES,
392 QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
394 QEMU_IFLA_BR_VLAN_STATS_ENABLED,
395 QEMU_IFLA_BR_MCAST_STATS_ENABLED,
419 QEMU_IFLA_NET_NS_PID,
422 QEMU_IFLA_VFINFO_LIST,
430 QEMU_IFLA_PROMISCUITY,
431 QEMU_IFLA_NUM_TX_QUEUES,
432 QEMU_IFLA_NUM_RX_QUEUES,
434 QEMU_IFLA_PHYS_PORT_ID,
435 QEMU_IFLA_CARRIER_CHANGES,
436 QEMU_IFLA_PHYS_SWITCH_ID,
437 QEMU_IFLA_LINK_NETNSID,
438 QEMU_IFLA_PHYS_PORT_NAME,
439 QEMU_IFLA_PROTO_DOWN,
440 QEMU_IFLA_GSO_MAX_SEGS,
441 QEMU_IFLA_GSO_MAX_SIZE,
448 QEMU_IFLA_BRPORT_UNSPEC,
449 QEMU_IFLA_BRPORT_STATE,
450 QEMU_IFLA_BRPORT_PRIORITY,
451 QEMU_IFLA_BRPORT_COST,
452 QEMU_IFLA_BRPORT_MODE,
453 QEMU_IFLA_BRPORT_GUARD,
454 QEMU_IFLA_BRPORT_PROTECT,
455 QEMU_IFLA_BRPORT_FAST_LEAVE,
456 QEMU_IFLA_BRPORT_LEARNING,
457 QEMU_IFLA_BRPORT_UNICAST_FLOOD,
458 QEMU_IFLA_BRPORT_PROXYARP,
459 QEMU_IFLA_BRPORT_LEARNING_SYNC,
460 QEMU_IFLA_BRPORT_PROXYARP_WIFI,
461 QEMU_IFLA_BRPORT_ROOT_ID,
462 QEMU_IFLA_BRPORT_BRIDGE_ID,
463 QEMU_IFLA_BRPORT_DESIGNATED_PORT,
464 QEMU_IFLA_BRPORT_DESIGNATED_COST,
467 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
468 QEMU_IFLA_BRPORT_CONFIG_PENDING,
469 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
470 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
471 QEMU_IFLA_BRPORT_HOLD_TIMER,
472 QEMU_IFLA_BRPORT_FLUSH,
473 QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
474 QEMU_IFLA_BRPORT_PAD,
475 QEMU___IFLA_BRPORT_MAX
479 QEMU_IFLA_INFO_UNSPEC,
482 QEMU_IFLA_INFO_XSTATS,
483 QEMU_IFLA_INFO_SLAVE_KIND,
484 QEMU_IFLA_INFO_SLAVE_DATA,
485 QEMU___IFLA_INFO_MAX,
489 QEMU_IFLA_INET_UNSPEC,
491 QEMU___IFLA_INET_MAX,
495 QEMU_IFLA_INET6_UNSPEC,
496 QEMU_IFLA_INET6_FLAGS,
497 QEMU_IFLA_INET6_CONF,
498 QEMU_IFLA_INET6_STATS,
499 QEMU_IFLA_INET6_MCAST,
500 QEMU_IFLA_INET6_CACHEINFO,
501 QEMU_IFLA_INET6_ICMP6STATS,
502 QEMU_IFLA_INET6_TOKEN,
503 QEMU_IFLA_INET6_ADDR_GEN_MODE,
504 QEMU___IFLA_INET6_MAX
507 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
508 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
509 typedef struct TargetFdTrans {
510 TargetFdDataFunc host_to_target_data;
511 TargetFdDataFunc target_to_host_data;
512 TargetFdAddrFunc target_to_host_addr;
515 static TargetFdTrans **target_fd_trans;
517 static unsigned int target_fd_max;
519 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
521 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
522 return target_fd_trans[fd]->target_to_host_data;
527 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
529 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
530 return target_fd_trans[fd]->host_to_target_data;
535 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
537 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
538 return target_fd_trans[fd]->target_to_host_addr;
543 static void fd_trans_register(int fd, TargetFdTrans *trans)
547 if (fd >= target_fd_max) {
548 oldmax = target_fd_max;
549 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
550 target_fd_trans = g_renew(TargetFdTrans *,
551 target_fd_trans, target_fd_max);
552 memset((void *)(target_fd_trans + oldmax), 0,
553 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
555 target_fd_trans[fd] = trans;
558 static void fd_trans_unregister(int fd)
560 if (fd >= 0 && fd < target_fd_max) {
561 target_fd_trans[fd] = NULL;
565 static void fd_trans_dup(int oldfd, int newfd)
567 fd_trans_unregister(newfd);
568 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
569 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd() wrapper matching the kernel getcwd syscall convention:
 * returns the length of the path including the trailing NUL on
 * success, or -1 with errno set on failure.
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
582 #ifdef TARGET_NR_utimensat
583 #if defined(__NR_utimensat)
584 #define __NR_sys_utimensat __NR_utimensat
585 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
586 const struct timespec *,tsp,int,flags)
/* Fallback for hosts without the utimensat syscall: always fail with
 * ENOSYS so the caller can report the syscall as unimplemented.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
595 #endif /* TARGET_NR_utimensat */
597 #ifdef TARGET_NR_renameat2
598 #if defined(__NR_renameat2)
599 #define __NR_sys_renameat2 __NR_renameat2
600 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
601 const char *, new, unsigned int, flags)
/* Fallback for hosts without the renameat2 syscall: the flag-less case
 * is equivalent to plain renameat(); any non-zero flags cannot be
 * emulated, so fail with ENOSYS.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
613 #endif /* TARGET_NR_renameat2 */
615 #ifdef CONFIG_INOTIFY
616 #include <sys/inotify.h>
618 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall dispatch table can call inotify_init()
 * uniformly with the other sys_* helpers.
 */
static int sys_inotify_init(void)
{
    return inotify_init();
}
624 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper around inotify_add_watch() for the syscall dispatcher. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
630 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper around inotify_rm_watch() for the syscall dispatcher. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
636 #ifdef CONFIG_INOTIFY1
637 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper around inotify_init1() for the syscall dispatcher. */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
645 /* Userspace can usually survive runtime without inotify */
646 #undef TARGET_NR_inotify_init
647 #undef TARGET_NR_inotify_init1
648 #undef TARGET_NR_inotify_add_watch
649 #undef TARGET_NR_inotify_rm_watch
650 #endif /* CONFIG_INOTIFY */
652 #if defined(TARGET_NR_prlimit64)
653 #ifndef __NR_prlimit64
654 # define __NR_prlimit64 -1
656 #define __NR_sys_prlimit64 __NR_prlimit64
657 /* The glibc rlimit structure may not be that used by the underlying syscall */
658 struct host_rlimit64 {
662 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
663 const struct host_rlimit64 *, new_limit,
664 struct host_rlimit64 *, old_limit)
668 #if defined(TARGET_NR_timer_create)
669 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
670 static timer_t g_posix_timers[32] = { 0, } ;
672 static inline int next_free_host_timer(void)
675 /* FIXME: Does finding the next free slot require a lock? */
676 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
677 if (g_posix_timers[k] == 0) {
678 g_posix_timers[k] = (timer_t) 1;
686 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
688 static inline int regpairs_aligned(void *cpu_env, int num)
690 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
692 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
693 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
694 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
695 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
696 * of registers which translates to the same as ARM/MIPS, because we start with
698 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
699 #elif defined(TARGET_SH4)
700 /* SH4 doesn't align register pairs, except for p{read,write}64 */
701 static inline int regpairs_aligned(void *cpu_env, int num)
704 case TARGET_NR_pread64:
705 case TARGET_NR_pwrite64:
713 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
716 #define ERRNO_TABLE_SIZE 1200
718 /* target_to_host_errno_table[] is initialized from
719 * host_to_target_errno_table[] in syscall_init(). */
720 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
724 * This list is the union of errno values overridden in asm-<arch>/errno.h
725 * minus the errnos that are not actually generic to all archs.
727 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
728 [EAGAIN] = TARGET_EAGAIN,
729 [EIDRM] = TARGET_EIDRM,
730 [ECHRNG] = TARGET_ECHRNG,
731 [EL2NSYNC] = TARGET_EL2NSYNC,
732 [EL3HLT] = TARGET_EL3HLT,
733 [EL3RST] = TARGET_EL3RST,
734 [ELNRNG] = TARGET_ELNRNG,
735 [EUNATCH] = TARGET_EUNATCH,
736 [ENOCSI] = TARGET_ENOCSI,
737 [EL2HLT] = TARGET_EL2HLT,
738 [EDEADLK] = TARGET_EDEADLK,
739 [ENOLCK] = TARGET_ENOLCK,
740 [EBADE] = TARGET_EBADE,
741 [EBADR] = TARGET_EBADR,
742 [EXFULL] = TARGET_EXFULL,
743 [ENOANO] = TARGET_ENOANO,
744 [EBADRQC] = TARGET_EBADRQC,
745 [EBADSLT] = TARGET_EBADSLT,
746 [EBFONT] = TARGET_EBFONT,
747 [ENOSTR] = TARGET_ENOSTR,
748 [ENODATA] = TARGET_ENODATA,
749 [ETIME] = TARGET_ETIME,
750 [ENOSR] = TARGET_ENOSR,
751 [ENONET] = TARGET_ENONET,
752 [ENOPKG] = TARGET_ENOPKG,
753 [EREMOTE] = TARGET_EREMOTE,
754 [ENOLINK] = TARGET_ENOLINK,
755 [EADV] = TARGET_EADV,
756 [ESRMNT] = TARGET_ESRMNT,
757 [ECOMM] = TARGET_ECOMM,
758 [EPROTO] = TARGET_EPROTO,
759 [EDOTDOT] = TARGET_EDOTDOT,
760 [EMULTIHOP] = TARGET_EMULTIHOP,
761 [EBADMSG] = TARGET_EBADMSG,
762 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
763 [EOVERFLOW] = TARGET_EOVERFLOW,
764 [ENOTUNIQ] = TARGET_ENOTUNIQ,
765 [EBADFD] = TARGET_EBADFD,
766 [EREMCHG] = TARGET_EREMCHG,
767 [ELIBACC] = TARGET_ELIBACC,
768 [ELIBBAD] = TARGET_ELIBBAD,
769 [ELIBSCN] = TARGET_ELIBSCN,
770 [ELIBMAX] = TARGET_ELIBMAX,
771 [ELIBEXEC] = TARGET_ELIBEXEC,
772 [EILSEQ] = TARGET_EILSEQ,
773 [ENOSYS] = TARGET_ENOSYS,
774 [ELOOP] = TARGET_ELOOP,
775 [ERESTART] = TARGET_ERESTART,
776 [ESTRPIPE] = TARGET_ESTRPIPE,
777 [ENOTEMPTY] = TARGET_ENOTEMPTY,
778 [EUSERS] = TARGET_EUSERS,
779 [ENOTSOCK] = TARGET_ENOTSOCK,
780 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
781 [EMSGSIZE] = TARGET_EMSGSIZE,
782 [EPROTOTYPE] = TARGET_EPROTOTYPE,
783 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
784 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
785 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
786 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
787 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
788 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
789 [EADDRINUSE] = TARGET_EADDRINUSE,
790 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
791 [ENETDOWN] = TARGET_ENETDOWN,
792 [ENETUNREACH] = TARGET_ENETUNREACH,
793 [ENETRESET] = TARGET_ENETRESET,
794 [ECONNABORTED] = TARGET_ECONNABORTED,
795 [ECONNRESET] = TARGET_ECONNRESET,
796 [ENOBUFS] = TARGET_ENOBUFS,
797 [EISCONN] = TARGET_EISCONN,
798 [ENOTCONN] = TARGET_ENOTCONN,
799 [EUCLEAN] = TARGET_EUCLEAN,
800 [ENOTNAM] = TARGET_ENOTNAM,
801 [ENAVAIL] = TARGET_ENAVAIL,
802 [EISNAM] = TARGET_EISNAM,
803 [EREMOTEIO] = TARGET_EREMOTEIO,
804 [EDQUOT] = TARGET_EDQUOT,
805 [ESHUTDOWN] = TARGET_ESHUTDOWN,
806 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
807 [ETIMEDOUT] = TARGET_ETIMEDOUT,
808 [ECONNREFUSED] = TARGET_ECONNREFUSED,
809 [EHOSTDOWN] = TARGET_EHOSTDOWN,
810 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
811 [EALREADY] = TARGET_EALREADY,
812 [EINPROGRESS] = TARGET_EINPROGRESS,
813 [ESTALE] = TARGET_ESTALE,
814 [ECANCELED] = TARGET_ECANCELED,
815 [ENOMEDIUM] = TARGET_ENOMEDIUM,
816 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
818 [ENOKEY] = TARGET_ENOKEY,
821 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
824 [EKEYREVOKED] = TARGET_EKEYREVOKED,
827 [EKEYREJECTED] = TARGET_EKEYREJECTED,
830 [EOWNERDEAD] = TARGET_EOWNERDEAD,
832 #ifdef ENOTRECOVERABLE
833 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
836 [ENOMSG] = TARGET_ENOMSG,
839 [ERFKILL] = TARGET_ERFKILL,
842 [EHWPOISON] = TARGET_EHWPOISON,
846 static inline int host_to_target_errno(int err)
848 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
849 host_to_target_errno_table[err]) {
850 return host_to_target_errno_table[err];
855 static inline int target_to_host_errno(int err)
857 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
858 target_to_host_errno_table[err]) {
859 return target_to_host_errno_table[err];
864 static inline abi_long get_errno(abi_long ret)
867 return -host_to_target_errno(errno);
872 static inline int is_error(abi_long ret)
874 return (abi_ulong)ret >= (abi_ulong)(-4096);
877 const char *target_strerror(int err)
879 if (err == TARGET_ERESTARTSYS) {
880 return "To be restarted";
882 if (err == TARGET_QEMU_ESIGRETURN) {
883 return "Successful exit from sigreturn";
886 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
889 return strerror(target_to_host_errno(err));
892 #define safe_syscall0(type, name) \
893 static type safe_##name(void) \
895 return safe_syscall(__NR_##name); \
898 #define safe_syscall1(type, name, type1, arg1) \
899 static type safe_##name(type1 arg1) \
901 return safe_syscall(__NR_##name, arg1); \
904 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
905 static type safe_##name(type1 arg1, type2 arg2) \
907 return safe_syscall(__NR_##name, arg1, arg2); \
910 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
911 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
913 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
916 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
918 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
920 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
923 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
924 type4, arg4, type5, arg5) \
925 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
928 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
931 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
932 type4, arg4, type5, arg5, type6, arg6) \
933 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
934 type5 arg5, type6 arg6) \
936 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
939 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
940 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
941 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
942 int, flags, mode_t, mode)
943 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
944 struct rusage *, rusage)
945 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
946 int, options, struct rusage *, rusage)
947 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
948 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
949 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
950 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
951 struct timespec *, tsp, const sigset_t *, sigmask,
953 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
954 int, maxevents, int, timeout, const sigset_t *, sigmask,
956 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
957 const struct timespec *,timeout,int *,uaddr2,int,val3)
958 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
959 safe_syscall2(int, kill, pid_t, pid, int, sig)
960 safe_syscall2(int, tkill, int, tid, int, sig)
961 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
962 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
963 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
964 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
965 unsigned long, pos_l, unsigned long, pos_h)
966 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
967 unsigned long, pos_l, unsigned long, pos_h)
968 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
970 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
971 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
972 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
973 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
974 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
975 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
976 safe_syscall2(int, flock, int, fd, int, operation)
977 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
978 const struct timespec *, uts, size_t, sigsetsize)
979 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
981 safe_syscall2(int, nanosleep, const struct timespec *, req,
982 struct timespec *, rem)
983 #ifdef TARGET_NR_clock_nanosleep
984 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
985 const struct timespec *, req, struct timespec *, rem)
988 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
990 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
991 long, msgtype, int, flags)
992 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
993 unsigned, nsops, const struct timespec *, timeout)
995 /* This host kernel architecture uses a single ipc syscall; fake up
996 * wrappers for the sub-operations to hide this implementation detail.
997 * Annoyingly we can't include linux/ipc.h to get the constant definitions
998 * for the call parameter because some structs in there conflict with the
999 * sys/ipc.h ones. So we just define them here, and rely on them being
1000 * the same for all host architectures.
1002 #define Q_SEMTIMEDOP 4
1005 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1007 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
1008 void *, ptr, long, fifth)
1009 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
1011 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
1013 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
1015 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
1017 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
1018 const struct timespec *timeout)
1020 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
1024 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1025 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
1026 size_t, len, unsigned, prio, const struct timespec *, timeout)
1027 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
1028 size_t, len, unsigned *, prio, const struct timespec *, timeout)
1030 /* We do ioctl like this rather than via safe_syscall3 to preserve the
1031 * "third argument might be integer or pointer or not present" behaviour of
1032 * the libc function.
1034 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1035 /* Similarly for fcntl. Note that callers must always:
1036 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1037 * use the flock64 struct rather than unsuffixed flock
1038 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1041 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1043 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1046 static inline int host_to_target_sock_type(int host_type)
1050 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1052 target_type = TARGET_SOCK_DGRAM;
1055 target_type = TARGET_SOCK_STREAM;
1058 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1062 #if defined(SOCK_CLOEXEC)
1063 if (host_type & SOCK_CLOEXEC) {
1064 target_type |= TARGET_SOCK_CLOEXEC;
1068 #if defined(SOCK_NONBLOCK)
1069 if (host_type & SOCK_NONBLOCK) {
1070 target_type |= TARGET_SOCK_NONBLOCK;
1077 static abi_ulong target_brk;
1078 static abi_ulong target_original_brk;
1079 static abi_ulong brk_page;
1081 void target_set_brk(abi_ulong new_brk)
1083 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1084 brk_page = HOST_PAGE_ALIGN(target_brk);
1087 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1088 #define DEBUGF_BRK(message, args...)
1090 /* do_brk() must return target values and target errnos. */
1091 abi_long do_brk(abi_ulong new_brk)
1093 abi_long mapped_addr;
1094 abi_ulong new_alloc_size;
1096 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
1099 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
     /* Shrinking below the original break is refused; the current
      * break is left unchanged. */
1102 if (new_brk < target_original_brk) {
1103 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1108 /* If the new brk is less than the highest page reserved to the
1109 * target heap allocation, set it and we're almost done... */
1110 if (new_brk <= brk_page) {
1111 /* Heap contents are initialized to zero, as for anonymous
     /* Growing within the already-reserved page: zero the newly
      * exposed bytes so the guest sees fresh memory. */
1113 if (new_brk > target_brk) {
1114 memset(g2h(target_brk), 0, new_brk - target_brk);
1116 target_brk = new_brk;
1117 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1121 /* We need to allocate more memory after the brk... Note that
1122 * we don't use MAP_FIXED because that will map over the top of
1123 * any existing mapping (like the one with the host libc or qemu
1124 * itself); instead we treat "mapped but at wrong address" as
1125 * a failure and unmap again.
1127 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1128 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1129 PROT_READ|PROT_WRITE,
1130 MAP_ANON|MAP_PRIVATE, 0, 0));
1132 if (mapped_addr == brk_page) {
1133 /* Heap contents are initialized to zero, as for anonymous
1134 * mapped pages. Technically the new pages are already
1135 * initialized to zero since they *are* anonymous mapped
1136 * pages, however we have to take care with the contents that
1137 * come from the remaining part of the previous page: it may
1138 * contains garbage data due to a previous heap usage (grown
1139 * then shrunken). */
1140 memset(g2h(target_brk), 0, brk_page - target_brk);
1142 target_brk = new_brk;
1143 brk_page = HOST_PAGE_ALIGN(target_brk);
1144 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1147 } else if (mapped_addr != -1) {
1148 /* Mapped but at wrong address, meaning there wasn't actually
1149 * enough space for this brk.
1151 target_munmap(mapped_addr, new_alloc_size);
1153 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1156 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1159 #if defined(TARGET_ALPHA)
1160 /* We (partially) emulate OSF/1 on Alpha, which requires we
1161 return a proper errno, not an unchanged brk value. */
1162 return -TARGET_ENOMEM;
1164 /* For everything else, return the previous break. */
/* Convert a guest fd_set (an array of abi_ulong words at
 * target_fds_addr, n fds wide) into the host fd_set *fds.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is bad. */
1168 static inline abi_long copy_from_user_fdset(fd_set *fds,
1169 abi_ulong target_fds_addr,
1173 abi_ulong b, *target_fds;
     /* Number of abi_ulong words needed to hold n bits. */
1175 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1176 if (!(target_fds = lock_user(VERIFY_READ,
1178 sizeof(abi_ulong) * nw,
1180 return -TARGET_EFAULT;
1184 for (i = 0; i < nw; i++) {
1185 /* grab the abi_ulong */
1186 __get_user(b, &target_fds[i]);
1187 for (j = 0; j < TARGET_ABI_BITS; j++) {
1188 /* check the bit inside the abi_ulong */
1195 unlock_user(target_fds, target_fds_addr, 0);
/* Like copy_from_user_fdset(), but treats a NULL guest pointer as
 * "no set": *fds_ptr points at *fds when target_fds_addr is non-zero
 * (presumably NULL otherwise -- the else branch is outside this view). */
1200 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1201 abi_ulong target_fds_addr,
1204 if (target_fds_addr) {
1205 if (copy_from_user_fdset(fds, target_fds_addr, n))
1206 return -TARGET_EFAULT;
/* Convert the host fd_set *fds back into the guest representation at
 * target_fds_addr (n fds wide), building each abi_ulong word bit by
 * bit from FD_ISSET.  Returns 0 or -TARGET_EFAULT. */
1214 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1220 abi_ulong *target_fds;
1222 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1223 if (!(target_fds = lock_user(VERIFY_WRITE,
1225 sizeof(abi_ulong) * nw,
1227 return -TARGET_EFAULT;
1230 for (i = 0; i < nw; i++) {
1232 for (j = 0; j < TARGET_ABI_BITS; j++) {
1233 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1236 __put_user(v, &target_fds[i]);
1239 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
/* Host clock-tick rate: Alpha uses 1024 Hz.  NOTE(review): the #else
 * arm (presumably 100 for other hosts) is outside this view. */
1244 #if defined(__alpha__)
1245 #define HOST_HZ 1024
1250 static inline abi_long host_to_target_clock_t(long ticks)
1252 #if HOST_HZ == TARGET_HZ
1255 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
/* Copy a host struct rusage out to the guest struct target_rusage at
 * target_addr, byte-swapping each field with tswapal.
 * Returns -TARGET_EFAULT if the guest address cannot be locked. */
1259 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1260 const struct rusage *rusage)
1262 struct target_rusage *target_rusage;
1264 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1265 return -TARGET_EFAULT;
1266 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1267 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1268 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1269 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1270 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1271 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1272 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1273 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1274 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1275 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1276 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1277 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1278 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1279 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1280 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1281 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1282 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1283 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1284 unlock_user_struct(target_rusage, target_addr, 1);
/* Convert a guest rlimit value to the host rlim_t.  The target's
 * "infinity" sentinel maps to RLIM_INFINITY, as does any value that
 * does not survive the round-trip through rlim_t (i.e. overflows it). */
1289 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1291 abi_ulong target_rlim_swap;
1294 target_rlim_swap = tswapal(target_rlim);
1295 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1296 return RLIM_INFINITY;
1298 result = target_rlim_swap;
     /* Value too large for the host type: saturate to infinity. */
1299 if (target_rlim_swap != (rlim_t)result)
1300 return RLIM_INFINITY;
/* Convert a host rlim_t to the guest representation.  Host infinity,
 * or any value that cannot be represented in abi_long, becomes the
 * target's TARGET_RLIM_INFINITY sentinel. */
1305 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1307 abi_ulong target_rlim_swap;
1310 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1311 target_rlim_swap = TARGET_RLIM_INFINITY;
1313 target_rlim_swap = rlim;
1314 result = tswapal(target_rlim_swap);
/* Map a guest RLIMIT_* resource code to the host's constant for
 * getrlimit/setrlimit.  The two sets of constants differ between
 * architectures, hence the explicit table. */
1319 static inline int target_to_host_resource(int code)
1322 case TARGET_RLIMIT_AS:
1324 case TARGET_RLIMIT_CORE:
1326 case TARGET_RLIMIT_CPU:
1328 case TARGET_RLIMIT_DATA:
1330 case TARGET_RLIMIT_FSIZE:
1331 return RLIMIT_FSIZE;
1332 case TARGET_RLIMIT_LOCKS:
1333 return RLIMIT_LOCKS;
1334 case TARGET_RLIMIT_MEMLOCK:
1335 return RLIMIT_MEMLOCK;
1336 case TARGET_RLIMIT_MSGQUEUE:
1337 return RLIMIT_MSGQUEUE;
1338 case TARGET_RLIMIT_NICE:
1340 case TARGET_RLIMIT_NOFILE:
1341 return RLIMIT_NOFILE;
1342 case TARGET_RLIMIT_NPROC:
1343 return RLIMIT_NPROC;
1344 case TARGET_RLIMIT_RSS:
1346 case TARGET_RLIMIT_RTPRIO:
1347 return RLIMIT_RTPRIO;
1348 case TARGET_RLIMIT_SIGPENDING:
1349 return RLIMIT_SIGPENDING;
1350 case TARGET_RLIMIT_STACK:
1351 return RLIMIT_STACK;
1357 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1358 abi_ulong target_tv_addr)
1360 struct target_timeval *target_tv;
1362 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1363 return -TARGET_EFAULT;
1365 __get_user(tv->tv_sec, &target_tv->tv_sec);
1366 __get_user(tv->tv_usec, &target_tv->tv_usec);
1368 unlock_user_struct(target_tv, target_tv_addr, 0);
1373 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1374 const struct timeval *tv)
1376 struct target_timeval *target_tv;
1378 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1379 return -TARGET_EFAULT;
1381 __put_user(tv->tv_sec, &target_tv->tv_sec);
1382 __put_user(tv->tv_usec, &target_tv->tv_usec);
1384 unlock_user_struct(target_tv, target_tv_addr, 1);
1389 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1390 abi_ulong target_tz_addr)
1392 struct target_timezone *target_tz;
1394 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1395 return -TARGET_EFAULT;
1398 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1399 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1401 unlock_user_struct(target_tz, target_tz_addr, 0);
1406 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/* Read a struct target_mq_attr from guest memory into the host
 * struct mq_attr *attr.  Returns -TARGET_EFAULT on a bad address. */
1409 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1410 abi_ulong target_mq_attr_addr)
1412 struct target_mq_attr *target_mq_attr;
1414 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1415 target_mq_attr_addr, 1))
1416 return -TARGET_EFAULT;
1418 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1419 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1420 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1421 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1423 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
/* Write the host struct mq_attr out to a struct target_mq_attr in
 * guest memory.  Returns -TARGET_EFAULT on a bad address. */
1428 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1429 const struct mq_attr *attr)
1431 struct target_mq_attr *target_mq_attr;
1433 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1434 target_mq_attr_addr, 0))
1435 return -TARGET_EFAULT;
1437 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1438 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1439 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1440 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1442 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1448 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1449 /* do_select() must return target values and target errnos. */
1450 static abi_long do_select(int n,
1451 abi_ulong rfd_addr, abi_ulong wfd_addr,
1452 abi_ulong efd_addr, abi_ulong target_tv_addr)
1454 fd_set rfds, wfds, efds;
1455 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1457 struct timespec ts, *ts_ptr;
     /* Pull each of the three fd sets in from the guest; a zero guest
      * address yields a NULL set pointer. */
1460 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1464 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1468 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
     /* The guest timeval is converted to a timespec because the host
      * call used below is pselect6. */
1473 if (target_tv_addr) {
1474 if (copy_from_user_timeval(&tv, target_tv_addr))
1475 return -TARGET_EFAULT;
1476 ts.tv_sec = tv.tv_sec;
1477 ts.tv_nsec = tv.tv_usec * 1000;
1483 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
     /* On success, copy the (possibly modified) sets and remaining
      * timeout back out to the guest. */
1486 if (!is_error(ret)) {
1487 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1488 return -TARGET_EFAULT;
1489 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1490 return -TARGET_EFAULT;
1491 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1492 return -TARGET_EFAULT;
1494 if (target_tv_addr) {
1495 tv.tv_sec = ts.tv_sec;
1496 tv.tv_usec = ts.tv_nsec / 1000;
1497 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1498 return -TARGET_EFAULT;
1506 #if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Old-style select: the guest passes a single pointer to a
 * target_sel_arg_struct holding all five select arguments, which are
 * unpacked (byte-swapped) and forwarded to do_select(). */
1507 static abi_long do_old_select(abi_ulong arg1)
1509 struct target_sel_arg_struct *sel;
1510 abi_ulong inp, outp, exp, tvp;
1513 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1514 return -TARGET_EFAULT;
1517 nsel = tswapal(sel->n);
1518 inp = tswapal(sel->inp);
1519 outp = tswapal(sel->outp);
1520 exp = tswapal(sel->exp);
1521 tvp = tswapal(sel->tvp);
1523 unlock_user_struct(sel, arg1, 0);
1525 return do_select(nsel, inp, outp, exp, tvp);
/* Thin wrapper around host pipe2().  NOTE(review): presumably guarded
 * by CONFIG_PIPE2 with an ENOSYS fallback arm outside this view. */
1530 static abi_long do_pipe2(int host_pipe[], int flags)
1533 return pipe2(host_pipe, flags);
/* Implement the guest pipe()/pipe2() syscalls.  Uses pipe2() only when
 * flags are given.  On several targets the original pipe syscall
 * returns the second fd in a CPU register instead of through memory. */
1539 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1540 int flags, int is_pipe2)
1544 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1547 return get_errno(ret);
1549 /* Several targets have special calling conventions for the original
1550 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1552 #if defined(TARGET_ALPHA)
1553 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1554 return host_pipe[0];
1555 #elif defined(TARGET_MIPS)
1556 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1557 return host_pipe[0];
1558 #elif defined(TARGET_SH4)
1559 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1560 return host_pipe[0];
1561 #elif defined(TARGET_SPARC)
1562 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1563 return host_pipe[0];
     /* Generic path: store both fds into the guest array at pipedes. */
1567 if (put_user_s32(host_pipe[0], pipedes)
1568 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1569 return -TARGET_EFAULT;
1570 return get_errno(ret);
/* Convert a guest ip_mreq/ip_mreqn (multicast group request) into the
 * host struct ip_mreqn.  The addresses are already network byte order
 * and copied as-is; only imr_ifindex needs swapping, and only when the
 * caller passed the longer mreqn form (detected via len). */
1573 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1574 abi_ulong target_addr,
1577 struct target_ip_mreqn *target_smreqn;
1579 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1581 return -TARGET_EFAULT;
1582 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1583 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1584 if (len == sizeof(struct target_ip_mreqn))
1585 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1586 unlock_user(target_smreqn, target_addr, 0);
/* Convert a guest sockaddr at target_addr into the host struct
 * sockaddr *addr.  Delegates entirely to a per-fd translation hook
 * when one is registered.  Handles the AF_UNIX sun_path termination
 * quirk and byte-swaps the AF_NETLINK / AF_PACKET specific fields. */
1591 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1592 abi_ulong target_addr,
1595 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1596 sa_family_t sa_family;
1597 struct target_sockaddr *target_saddr;
1599 if (fd_trans_target_to_host_addr(fd)) {
1600 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1603 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1605 return -TARGET_EFAULT;
1607 sa_family = tswap16(target_saddr->sa_family);
1609 /* Oops. The caller might send a incomplete sun_path; sun_path
1610 * must be terminated by \0 (see the manual page), but
1611 * unfortunately it is quite common to specify sockaddr_un
1612 * length as "strlen(x->sun_path)" while it should be
1613 * "strlen(...) + 1". We'll fix that here if needed.
1614 * Linux kernel has a similar feature.
1617 if (sa_family == AF_UNIX) {
1618 if (len < unix_maxlen && len > 0) {
1619 char *cp = (char*)target_saddr;
     /* Last counted byte is non-NUL but the next byte is NUL:
      * extend len by one to include the terminator. */
1621 if ( cp[len-1] && !cp[len] )
1624 if (len > unix_maxlen)
1628 memcpy(addr, target_saddr, len);
1629 addr->sa_family = sa_family;
1630 if (sa_family == AF_NETLINK) {
1631 struct sockaddr_nl *nladdr;
1633 nladdr = (struct sockaddr_nl *)addr;
1634 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1635 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1636 } else if (sa_family == AF_PACKET) {
1637 struct target_sockaddr_ll *lladdr;
1639 lladdr = (struct target_sockaddr_ll *)addr;
1640 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1641 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1643 unlock_user(target_saddr, target_addr, 0);
/* Copy a host struct sockaddr out to guest memory, byte-swapping the
 * family field plus the AF_NETLINK / AF_PACKET / AF_INET6 specific
 * fields that are stored in host byte order. */
1648 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1649 struct sockaddr *addr,
1652 struct target_sockaddr *target_saddr;
1659 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1661 return -TARGET_EFAULT;
1662 memcpy(target_saddr, addr, len);
     /* Only touch sa_family if the caller's buffer is long enough to
      * contain it (len may legally truncate the sockaddr). */
1663 if (len >= offsetof(struct target_sockaddr, sa_family) +
1664 sizeof(target_saddr->sa_family)) {
1665 target_saddr->sa_family = tswap16(addr->sa_family);
1667 if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1668 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1669 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1670 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1671 } else if (addr->sa_family == AF_PACKET) {
1672 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1673 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1674 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1675 } else if (addr->sa_family == AF_INET6 &&
1676 len >= sizeof(struct target_sockaddr_in6)) {
1677 struct target_sockaddr_in6 *target_in6 =
1678 (struct target_sockaddr_in6 *)target_saddr;
     /* NOTE(review): sin6_scope_id is a 32-bit field in the kernel's
      * sockaddr_in6; tswap16 here looks suspicious -- verify whether
      * this should be tswap32. */
1679 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1681 unlock_user(target_saddr, target_addr, len);
/* Convert the ancillary-data (control message) chain of a guest msghdr
 * into the host msghdr's control buffer.  Walks both chains in
 * lockstep, translating SCM_RIGHTS fd arrays and SCM_CREDENTIALS;
 * anything else is copied verbatim with a log message. */
1686 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1687 struct target_msghdr *target_msgh)
1689 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1690 abi_long msg_controllen;
1691 abi_ulong target_cmsg_addr;
1692 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1693 socklen_t space = 0;
1695 msg_controllen = tswapal(target_msgh->msg_controllen);
1696 if (msg_controllen < sizeof (struct target_cmsghdr))
1698 target_cmsg_addr = tswapal(target_msgh->msg_control);
1699 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1700 target_cmsg_start = target_cmsg;
1702 return -TARGET_EFAULT;
1704 while (cmsg && target_cmsg) {
1705 void *data = CMSG_DATA(cmsg);
1706 void *target_data = TARGET_CMSG_DATA(target_cmsg);
     /* Payload length of this guest control message. */
1708 int len = tswapal(target_cmsg->cmsg_len)
1709 - sizeof(struct target_cmsghdr);
1711 space += CMSG_SPACE(len);
1712 if (space > msgh->msg_controllen) {
1713 space -= CMSG_SPACE(len);
1714 /* This is a QEMU bug, since we allocated the payload
1715 * area ourselves (unlike overflow in host-to-target
1716 * conversion, which is just the guest giving us a buffer
1717 * that's too small). It can't happen for the payload types
1718 * we currently support; if it becomes an issue in future
1719 * we would need to improve our allocation strategy to
1720 * something more intelligent than "twice the size of the
1721 * target buffer we're reading from".
1723 gemu_log("Host cmsg overflow\n");
1727 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1728 cmsg->cmsg_level = SOL_SOCKET;
1730 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1732 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1733 cmsg->cmsg_len = CMSG_LEN(len);
     /* SCM_RIGHTS: the payload is an array of file descriptors. */
1735 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1736 int *fd = (int *)data;
1737 int *target_fd = (int *)target_data;
1738 int i, numfds = len / sizeof(int);
1740 for (i = 0; i < numfds; i++) {
1741 __get_user(fd[i], target_fd + i);
1743 } else if (cmsg->cmsg_level == SOL_SOCKET
1744 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1745 struct ucred *cred = (struct ucred *)data;
1746 struct target_ucred *target_cred =
1747 (struct target_ucred *)target_data;
1749 __get_user(cred->pid, &target_cred->pid);
1750 __get_user(cred->uid, &target_cred->uid);
1751 __get_user(cred->gid, &target_cred->gid);
1753 gemu_log("Unsupported ancillary data: %d/%d\n",
1754 cmsg->cmsg_level, cmsg->cmsg_type);
1755 memcpy(data, target_data, len);
1758 cmsg = CMSG_NXTHDR(msgh, cmsg);
1759 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1762 unlock_user(target_cmsg, target_cmsg_addr, 0);
1764 msgh->msg_controllen = space;
/* Convert the host msghdr's ancillary-data chain back into the guest
 * msghdr's control buffer.  Handles SCM_RIGHTS, SCM_TIMESTAMP and
 * SCM_CREDENTIALS at SOL_SOCKET, plus IP_TTL/IP_RECVERR and the IPv6
 * equivalents; truncation (guest buffer too small) sets MSG_CTRUNC,
 * matching the kernel's put_cmsg() behaviour. */
1768 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1769 struct msghdr *msgh)
1771 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1772 abi_long msg_controllen;
1773 abi_ulong target_cmsg_addr;
1774 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1775 socklen_t space = 0;
1777 msg_controllen = tswapal(target_msgh->msg_controllen);
1778 if (msg_controllen < sizeof (struct target_cmsghdr))
1780 target_cmsg_addr = tswapal(target_msgh->msg_control);
1781 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1782 target_cmsg_start = target_cmsg;
1784 return -TARGET_EFAULT;
1786 while (cmsg && target_cmsg) {
1787 void *data = CMSG_DATA(cmsg);
1788 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1790 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1791 int tgt_len, tgt_space;
1793 /* We never copy a half-header but may copy half-data;
1794 * this is Linux's behaviour in put_cmsg(). Note that
1795 * truncation here is a guest problem (which we report
1796 * to the guest via the CTRUNC bit), unlike truncation
1797 * in target_to_host_cmsg, which is a QEMU bug.
1799 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1800 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1804 if (cmsg->cmsg_level == SOL_SOCKET) {
1805 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1807 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1809 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1811 /* Payload types which need a different size of payload on
1812 * the target must adjust tgt_len here.
1814 switch (cmsg->cmsg_level) {
1816 switch (cmsg->cmsg_type) {
1818 tgt_len = sizeof(struct target_timeval);
     /* Clamp tgt_len to what remains of the guest buffer and flag
      * the truncation to the guest. */
1828 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1829 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1830 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1833 /* We must now copy-and-convert len bytes of payload
1834 * into tgt_len bytes of destination space. Bear in mind
1835 * that in both source and destination we may be dealing
1836 * with a truncated value!
1838 switch (cmsg->cmsg_level) {
1840 switch (cmsg->cmsg_type) {
1843 int *fd = (int *)data;
1844 int *target_fd = (int *)target_data;
1845 int i, numfds = tgt_len / sizeof(int);
1847 for (i = 0; i < numfds; i++) {
1848 __put_user(fd[i], target_fd + i);
1854 struct timeval *tv = (struct timeval *)data;
1855 struct target_timeval *target_tv =
1856 (struct target_timeval *)target_data;
1858 if (len != sizeof(struct timeval) ||
1859 tgt_len != sizeof(struct target_timeval)) {
1863 /* copy struct timeval to target */
1864 __put_user(tv->tv_sec, &target_tv->tv_sec);
1865 __put_user(tv->tv_usec, &target_tv->tv_usec);
1868 case SCM_CREDENTIALS:
1870 struct ucred *cred = (struct ucred *)data;
1871 struct target_ucred *target_cred =
1872 (struct target_ucred *)target_data;
1874 __put_user(cred->pid, &target_cred->pid);
1875 __put_user(cred->uid, &target_cred->uid);
1876 __put_user(cred->gid, &target_cred->gid);
     /* Presumably the SOL_IP arm of the outer switch. */
1885 switch (cmsg->cmsg_type) {
1888 uint32_t *v = (uint32_t *)data;
1889 uint32_t *t_int = (uint32_t *)target_data;
1891 if (len != sizeof(uint32_t) ||
1892 tgt_len != sizeof(uint32_t)) {
1895 __put_user(*v, t_int);
1901 struct sock_extended_err ee;
1902 struct sockaddr_in offender;
1904 struct errhdr_t *errh = (struct errhdr_t *)data;
1905 struct errhdr_t *target_errh =
1906 (struct errhdr_t *)target_data;
1908 if (len != sizeof(struct errhdr_t) ||
1909 tgt_len != sizeof(struct errhdr_t)) {
1912 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1913 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1914 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1915 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1916 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1917 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1918 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1919 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1920 (void *) &errh->offender, sizeof(errh->offender));
     /* Presumably the SOL_IPV6 arm of the outer switch. */
1929 switch (cmsg->cmsg_type) {
1932 uint32_t *v = (uint32_t *)data;
1933 uint32_t *t_int = (uint32_t *)target_data;
1935 if (len != sizeof(uint32_t) ||
1936 tgt_len != sizeof(uint32_t)) {
1939 __put_user(*v, t_int);
1945 struct sock_extended_err ee;
1946 struct sockaddr_in6 offender;
1948 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1949 struct errhdr6_t *target_errh =
1950 (struct errhdr6_t *)target_data;
1952 if (len != sizeof(struct errhdr6_t) ||
1953 tgt_len != sizeof(struct errhdr6_t)) {
1956 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1957 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1958 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1959 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1960 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1961 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1962 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1963 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1964 (void *) &errh->offender, sizeof(errh->offender));
     /* Default: raw byte copy, zero-padding any extra target space. */
1974 gemu_log("Unsupported ancillary data: %d/%d\n",
1975 cmsg->cmsg_level, cmsg->cmsg_type);
1976 memcpy(target_data, data, MIN(len, tgt_len));
1977 if (tgt_len > len) {
1978 memset(target_data + len, 0, tgt_len - len);
1982 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1983 tgt_space = TARGET_CMSG_SPACE(tgt_len);
1984 if (msg_controllen < tgt_space) {
1985 tgt_space = msg_controllen;
1987 msg_controllen -= tgt_space;
1989 cmsg = CMSG_NXTHDR(msgh, cmsg);
1990 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1993 unlock_user(target_cmsg, target_cmsg_addr, space);
1995 target_msgh->msg_controllen = tswapal(space);
/* Byte-swap every field of a netlink message header in place.  The
 * five assignments are independent, so order does not matter.
 */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
}
/* Walk a buffer of host netlink messages, byte-swapping each header
 * (and nlmsgerr payloads) and invoking the supplied per-message
 * conversion callback; stops on malformed lengths. */
2008 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
2010 abi_long (*host_to_target_nlmsg)
2011 (struct nlmsghdr *))
2016 while (len > sizeof(struct nlmsghdr)) {
     /* Length read before swapping: the header is still host order. */
2018 nlmsg_len = nlh->nlmsg_len;
2019 if (nlmsg_len < sizeof(struct nlmsghdr) ||
2024 switch (nlh->nlmsg_type) {
2026 tswap_nlmsghdr(nlh);
2032 struct nlmsgerr *e = NLMSG_DATA(nlh);
2033 e->error = tswap32(e->error);
2034 tswap_nlmsghdr(&e->msg);
2035 tswap_nlmsghdr(nlh);
2039 ret = host_to_target_nlmsg(nlh);
2041 tswap_nlmsghdr(nlh);
2046 tswap_nlmsghdr(nlh);
2047 len -= NLMSG_ALIGN(nlmsg_len);
2048 nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
/* Walk a buffer of guest netlink messages, validating and swapping
 * each header into host order, then invoking the per-message
 * conversion callback.  Note the length checks use tswap32 because
 * the header is still in guest order at that point. */
2053 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
2055 abi_long (*target_to_host_nlmsg)
2056 (struct nlmsghdr *))
2060 while (len > sizeof(struct nlmsghdr)) {
2061 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
2062 tswap32(nlh->nlmsg_len) > len) {
2065 tswap_nlmsghdr(nlh);
2066 switch (nlh->nlmsg_type) {
2073 struct nlmsgerr *e = NLMSG_DATA(nlh);
2074 e->error = tswap32(e->error);
2075 tswap_nlmsghdr(&e->msg);
2079 ret = target_to_host_nlmsg(nlh);
2084 len -= NLMSG_ALIGN(nlh->nlmsg_len);
2085 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
2090 #ifdef CONFIG_RTNETLINK
/* Iterate over a run of host netlink attributes: convert each payload
 * via the callback (with opaque context), then swap the nla_len /
 * nla_type header fields into guest order. */
2091 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
2092 size_t len, void *context,
2093 abi_long (*host_to_target_nlattr)
2097 unsigned short nla_len;
2100 while (len > sizeof(struct nlattr)) {
2101 nla_len = nlattr->nla_len;
2102 if (nla_len < sizeof(struct nlattr) ||
     /* Convert the payload BEFORE swapping the header, while the
      * attribute type is still readable in host order. */
2106 ret = host_to_target_nlattr(nlattr, context);
2107 nlattr->nla_len = tswap16(nlattr->nla_len);
2108 nlattr->nla_type = tswap16(nlattr->nla_type);
2112 len -= NLA_ALIGN(nla_len);
2113 nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
/* Same walk as host_to_target_for_each_nlattr, but over rtattr-framed
 * attributes (rta_len/rta_type, RTA_ALIGN spacing) and without a
 * context argument. */
2118 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
2120 abi_long (*host_to_target_rtattr)
2123 unsigned short rta_len;
2126 while (len > sizeof(struct rtattr)) {
2127 rta_len = rtattr->rta_len;
2128 if (rta_len < sizeof(struct rtattr) ||
2132 ret = host_to_target_rtattr(rtattr);
2133 rtattr->rta_len = tswap16(rtattr->rta_len);
2134 rtattr->rta_type = tswap16(rtattr->rta_type);
2138 len -= RTA_ALIGN(rta_len);
2139 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
/* Pointer to a netlink attribute's payload: NLA_HDRLEN bytes past the
 * attribute header.  The offset is applied inside the (char *) cast so
 * the arithmetic is standard C char-pointer arithmetic; the original
 * form added NLA_HDRLEN to a void *, which relies on a GNU extension.
 * The resulting address is identical. */
#define NLA_DATA(nla) ((void *)((char *)(nla) + NLA_HDRLEN))
/* Byte-swap the payload of one IFLA_BR_* bridge attribute according
 * to its width (u16/u32/u64); byte-array payloads (bridge ids, MAC
 * addresses) need no conversion, and unknown types are logged. */
2146 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
2153 switch (nlattr->nla_type) {
     /* No payload / byte-sized payloads: nothing to swap. */
2155 case QEMU_IFLA_BR_FDB_FLUSH:
2158 case QEMU_IFLA_BR_GROUP_ADDR:
2161 case QEMU_IFLA_BR_VLAN_FILTERING:
2162 case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
2163 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
2164 case QEMU_IFLA_BR_MCAST_ROUTER:
2165 case QEMU_IFLA_BR_MCAST_SNOOPING:
2166 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
2167 case QEMU_IFLA_BR_MCAST_QUERIER:
2168 case QEMU_IFLA_BR_NF_CALL_IPTABLES:
2169 case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
2170 case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
     /* 16-bit payloads. */
2173 case QEMU_IFLA_BR_PRIORITY:
2174 case QEMU_IFLA_BR_VLAN_PROTOCOL:
2175 case QEMU_IFLA_BR_GROUP_FWD_MASK:
2176 case QEMU_IFLA_BR_ROOT_PORT:
2177 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
2178 u16 = NLA_DATA(nlattr);
2179 *u16 = tswap16(*u16);
     /* 32-bit payloads. */
2182 case QEMU_IFLA_BR_FORWARD_DELAY:
2183 case QEMU_IFLA_BR_HELLO_TIME:
2184 case QEMU_IFLA_BR_MAX_AGE:
2185 case QEMU_IFLA_BR_AGEING_TIME:
2186 case QEMU_IFLA_BR_STP_STATE:
2187 case QEMU_IFLA_BR_ROOT_PATH_COST:
2188 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
2189 case QEMU_IFLA_BR_MCAST_HASH_MAX:
2190 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
2191 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
2192 u32 = NLA_DATA(nlattr);
2193 *u32 = tswap32(*u32);
     /* 64-bit payloads (timer values). */
2196 case QEMU_IFLA_BR_HELLO_TIMER:
2197 case QEMU_IFLA_BR_TCN_TIMER:
2198 case QEMU_IFLA_BR_GC_TIMER:
2199 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
2200 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
2201 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
2202 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
2203 case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
2204 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
2205 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
2206 u64 = NLA_DATA(nlattr);
2207 *u64 = tswap64(*u64);
2209 /* ifla_bridge_id: uin8_t[] */
2210 case QEMU_IFLA_BR_ROOT_ID:
2211 case QEMU_IFLA_BR_BRIDGE_ID:
2214 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
/* Byte-swap the payload of one IFLA_BRPORT_* bridge-port attribute
 * according to its width; byte-array payloads pass through unchanged
 * and unknown types are logged. */
2220 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2227 switch (nlattr->nla_type) {
     /* 8-bit payloads: nothing to swap. */
2229 case QEMU_IFLA_BRPORT_STATE:
2230 case QEMU_IFLA_BRPORT_MODE:
2231 case QEMU_IFLA_BRPORT_GUARD:
2232 case QEMU_IFLA_BRPORT_PROTECT:
2233 case QEMU_IFLA_BRPORT_FAST_LEAVE:
2234 case QEMU_IFLA_BRPORT_LEARNING:
2235 case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2236 case QEMU_IFLA_BRPORT_PROXYARP:
2237 case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2238 case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2239 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2240 case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2241 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
     /* 16-bit payloads. */
2244 case QEMU_IFLA_BRPORT_PRIORITY:
2245 case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2246 case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2247 case QEMU_IFLA_BRPORT_ID:
2248 case QEMU_IFLA_BRPORT_NO:
2249 u16 = NLA_DATA(nlattr);
2250 *u16 = tswap16(*u16);
     /* 32-bit payload. */
2253 case QEMU_IFLA_BRPORT_COST:
2254 u32 = NLA_DATA(nlattr);
2255 *u32 = tswap32(*u32);
     /* 64-bit payloads (timers). */
2258 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2259 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2260 case QEMU_IFLA_BRPORT_HOLD_TIMER:
2261 u64 = NLA_DATA(nlattr);
2262 *u64 = tswap64(*u64);
2264 /* ifla_bridge_id: uint8_t[] */
2265 case QEMU_IFLA_BRPORT_ROOT_ID:
2266 case QEMU_IFLA_BRPORT_BRIDGE_ID:
2269 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
2275 struct linkinfo_context {
/* Convert one IFLA_INFO_* attribute.  KIND/SLAVE_KIND record the
 * driver name in the context for use by the later DATA/SLAVE_DATA
 * attributes, which are dispatched to the matching converter (only
 * "bridge" is handled). */
2282 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2285 struct linkinfo_context *li_context = context;
2287 switch (nlattr->nla_type) {
2289 case QEMU_IFLA_INFO_KIND:
2290 li_context->name = NLA_DATA(nlattr);
2291 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2293 case QEMU_IFLA_INFO_SLAVE_KIND:
2294 li_context->slave_name = NLA_DATA(nlattr);
2295 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2298 case QEMU_IFLA_INFO_XSTATS:
2299 /* FIXME: only used by CAN */
2302 case QEMU_IFLA_INFO_DATA:
2303 if (strncmp(li_context->name, "bridge",
2304 li_context->len) == 0) {
2305 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2308 host_to_target_data_bridge_nlattr);
2310 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2313 case QEMU_IFLA_INFO_SLAVE_DATA:
2314 if (strncmp(li_context->slave_name, "bridge",
2315 li_context->slave_len) == 0) {
2316 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2319 host_to_target_slave_data_bridge_nlattr);
2321 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2322 li_context->slave_name);
2326 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
/* Convert one AF_INET attribute: IFLA_INET_CONF is an array of u32
 * config values, each byte-swapped in place. */
2333 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2339 switch (nlattr->nla_type) {
2340 case QEMU_IFLA_INET_CONF:
2341 u32 = NLA_DATA(nlattr);
2342 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2344 u32[i] = tswap32(u32[i]);
2348 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
/* Convert one AF_INET6 attribute: scalar u32s, u32 config arrays,
 * the ifla_cacheinfo struct, and u64 stats arrays are byte-swapped;
 * token/addr-gen-mode payloads need no conversion. */
2353 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2358 struct ifla_cacheinfo *ci;
2361 switch (nlattr->nla_type) {
2363 case QEMU_IFLA_INET6_TOKEN:
2366 case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2369 case QEMU_IFLA_INET6_FLAGS:
2370 u32 = NLA_DATA(nlattr);
2371 *u32 = tswap32(*u32);
2374 case QEMU_IFLA_INET6_CONF:
2375 u32 = NLA_DATA(nlattr);
2376 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2378 u32[i] = tswap32(u32[i]);
2381 /* ifla_cacheinfo */
2382 case QEMU_IFLA_INET6_CACHEINFO:
2383 ci = NLA_DATA(nlattr);
2384 ci->max_reasm_len = tswap32(ci->max_reasm_len);
2385 ci->tstamp = tswap32(ci->tstamp);
2386 ci->reachable_time = tswap32(ci->reachable_time);
2387 ci->retrans_time = tswap32(ci->retrans_time);
2390 case QEMU_IFLA_INET6_STATS:
2391 case QEMU_IFLA_INET6_ICMP6STATS:
2392 u64 = NLA_DATA(nlattr);
2393 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2395 u64[i] = tswap64(u64[i]);
2399 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
/* Dispatch one IFLA_AF_SPEC sub-attribute to the per-family converter
 * (AF_INET or AF_INET6); other families are logged. */
2404 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2407 switch (nlattr->nla_type) {
2409 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2411 host_to_target_data_inet_nlattr);
2413 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2415 host_to_target_data_inet6_nlattr);
2417 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2423 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2426 struct rtnl_link_stats *st;
2427 struct rtnl_link_stats64 *st64;
2428 struct rtnl_link_ifmap *map;
2429 struct linkinfo_context li_context;
2431 switch (rtattr->rta_type) {
2433 case QEMU_IFLA_ADDRESS:
2434 case QEMU_IFLA_BROADCAST:
2436 case QEMU_IFLA_IFNAME:
2437 case QEMU_IFLA_QDISC:
2440 case QEMU_IFLA_OPERSTATE:
2441 case QEMU_IFLA_LINKMODE:
2442 case QEMU_IFLA_CARRIER:
2443 case QEMU_IFLA_PROTO_DOWN:
2447 case QEMU_IFLA_LINK:
2448 case QEMU_IFLA_WEIGHT:
2449 case QEMU_IFLA_TXQLEN:
2450 case QEMU_IFLA_CARRIER_CHANGES:
2451 case QEMU_IFLA_NUM_RX_QUEUES:
2452 case QEMU_IFLA_NUM_TX_QUEUES:
2453 case QEMU_IFLA_PROMISCUITY:
2454 case QEMU_IFLA_EXT_MASK:
2455 case QEMU_IFLA_LINK_NETNSID:
2456 case QEMU_IFLA_GROUP:
2457 case QEMU_IFLA_MASTER:
2458 case QEMU_IFLA_NUM_VF:
2459 case QEMU_IFLA_GSO_MAX_SEGS:
2460 case QEMU_IFLA_GSO_MAX_SIZE:
2461 u32 = RTA_DATA(rtattr);
2462 *u32 = tswap32(*u32);
2464 /* struct rtnl_link_stats */
2465 case QEMU_IFLA_STATS:
2466 st = RTA_DATA(rtattr);
2467 st->rx_packets = tswap32(st->rx_packets);
2468 st->tx_packets = tswap32(st->tx_packets);
2469 st->rx_bytes = tswap32(st->rx_bytes);
2470 st->tx_bytes = tswap32(st->tx_bytes);
2471 st->rx_errors = tswap32(st->rx_errors);
2472 st->tx_errors = tswap32(st->tx_errors);
2473 st->rx_dropped = tswap32(st->rx_dropped);
2474 st->tx_dropped = tswap32(st->tx_dropped);
2475 st->multicast = tswap32(st->multicast);
2476 st->collisions = tswap32(st->collisions);
2478 /* detailed rx_errors: */
2479 st->rx_length_errors = tswap32(st->rx_length_errors);
2480 st->rx_over_errors = tswap32(st->rx_over_errors);
2481 st->rx_crc_errors = tswap32(st->rx_crc_errors);
2482 st->rx_frame_errors = tswap32(st->rx_frame_errors);
2483 st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2484 st->rx_missed_errors = tswap32(st->rx_missed_errors);
2486 /* detailed tx_errors */
2487 st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2488 st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2489 st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2490 st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2491 st->tx_window_errors = tswap32(st->tx_window_errors);
2494 st->rx_compressed = tswap32(st->rx_compressed);
2495 st->tx_compressed = tswap32(st->tx_compressed);
2497 /* struct rtnl_link_stats64 */
2498 case QEMU_IFLA_STATS64:
2499 st64 = RTA_DATA(rtattr);
2500 st64->rx_packets = tswap64(st64->rx_packets);
2501 st64->tx_packets = tswap64(st64->tx_packets);
2502 st64->rx_bytes = tswap64(st64->rx_bytes);
2503 st64->tx_bytes = tswap64(st64->tx_bytes);
2504 st64->rx_errors = tswap64(st64->rx_errors);
2505 st64->tx_errors = tswap64(st64->tx_errors);
2506 st64->rx_dropped = tswap64(st64->rx_dropped);
2507 st64->tx_dropped = tswap64(st64->tx_dropped);
2508 st64->multicast = tswap64(st64->multicast);
2509 st64->collisions = tswap64(st64->collisions);
2511 /* detailed rx_errors: */
2512 st64->rx_length_errors = tswap64(st64->rx_length_errors);
2513 st64->rx_over_errors = tswap64(st64->rx_over_errors);
2514 st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2515 st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2516 st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2517 st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2519 /* detailed tx_errors */
2520 st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2521 st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2522 st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2523 st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2524 st64->tx_window_errors = tswap64(st64->tx_window_errors);
2527 st64->rx_compressed = tswap64(st64->rx_compressed);
2528 st64->tx_compressed = tswap64(st64->tx_compressed);
2530 /* struct rtnl_link_ifmap */
2532 map = RTA_DATA(rtattr);
2533 map->mem_start = tswap64(map->mem_start);
2534 map->mem_end = tswap64(map->mem_end);
2535 map->base_addr = tswap64(map->base_addr);
2536 map->irq = tswap16(map->irq);
2539 case QEMU_IFLA_LINKINFO:
2540 memset(&li_context, 0, sizeof(li_context));
2541 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2543 host_to_target_data_linkinfo_nlattr);
2544 case QEMU_IFLA_AF_SPEC:
2545 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2547 host_to_target_data_spec_nlattr);
2549 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
2555 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2558 struct ifa_cacheinfo *ci;
2560 switch (rtattr->rta_type) {
2561 /* binary: depends on family type */
2571 u32 = RTA_DATA(rtattr);
2572 *u32 = tswap32(*u32);
2574 /* struct ifa_cacheinfo */
2576 ci = RTA_DATA(rtattr);
2577 ci->ifa_prefered = tswap32(ci->ifa_prefered);
2578 ci->ifa_valid = tswap32(ci->ifa_valid);
2579 ci->cstamp = tswap32(ci->cstamp);
2580 ci->tstamp = tswap32(ci->tstamp);
2583 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2589 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2592 switch (rtattr->rta_type) {
2593 /* binary: depends on family type */
2602 u32 = RTA_DATA(rtattr);
2603 *u32 = tswap32(*u32);
2606 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2612 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2613 uint32_t rtattr_len)
2615 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2616 host_to_target_data_link_rtattr);
2619 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2620 uint32_t rtattr_len)
2622 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2623 host_to_target_data_addr_rtattr);
2626 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2627 uint32_t rtattr_len)
2629 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2630 host_to_target_data_route_rtattr);
2633 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2636 struct ifinfomsg *ifi;
2637 struct ifaddrmsg *ifa;
2640 nlmsg_len = nlh->nlmsg_len;
2641 switch (nlh->nlmsg_type) {
2645 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2646 ifi = NLMSG_DATA(nlh);
2647 ifi->ifi_type = tswap16(ifi->ifi_type);
2648 ifi->ifi_index = tswap32(ifi->ifi_index);
2649 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2650 ifi->ifi_change = tswap32(ifi->ifi_change);
2651 host_to_target_link_rtattr(IFLA_RTA(ifi),
2652 nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
2658 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2659 ifa = NLMSG_DATA(nlh);
2660 ifa->ifa_index = tswap32(ifa->ifa_index);
2661 host_to_target_addr_rtattr(IFA_RTA(ifa),
2662 nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
2668 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2669 rtm = NLMSG_DATA(nlh);
2670 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2671 host_to_target_route_rtattr(RTM_RTA(rtm),
2672 nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2676 return -TARGET_EINVAL;
2681 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2684 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
2687 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2689 abi_long (*target_to_host_rtattr)
2694 while (len >= sizeof(struct rtattr)) {
2695 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2696 tswap16(rtattr->rta_len) > len) {
2699 rtattr->rta_len = tswap16(rtattr->rta_len);
2700 rtattr->rta_type = tswap16(rtattr->rta_type);
2701 ret = target_to_host_rtattr(rtattr);
2705 len -= RTA_ALIGN(rtattr->rta_len);
2706 rtattr = (struct rtattr *)(((char *)rtattr) +
2707 RTA_ALIGN(rtattr->rta_len));
2712 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2714 switch (rtattr->rta_type) {
2716 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2722 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2724 switch (rtattr->rta_type) {
2725 /* binary: depends on family type */
2730 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2736 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2739 switch (rtattr->rta_type) {
2740 /* binary: depends on family type */
2748 u32 = RTA_DATA(rtattr);
2749 *u32 = tswap32(*u32);
2752 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2758 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2759 uint32_t rtattr_len)
2761 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2762 target_to_host_data_link_rtattr);
2765 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2766 uint32_t rtattr_len)
2768 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2769 target_to_host_data_addr_rtattr);
2772 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2773 uint32_t rtattr_len)
2775 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2776 target_to_host_data_route_rtattr);
2779 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2781 struct ifinfomsg *ifi;
2782 struct ifaddrmsg *ifa;
2785 switch (nlh->nlmsg_type) {
2790 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2791 ifi = NLMSG_DATA(nlh);
2792 ifi->ifi_type = tswap16(ifi->ifi_type);
2793 ifi->ifi_index = tswap32(ifi->ifi_index);
2794 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2795 ifi->ifi_change = tswap32(ifi->ifi_change);
2796 target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2797 NLMSG_LENGTH(sizeof(*ifi)));
2803 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2804 ifa = NLMSG_DATA(nlh);
2805 ifa->ifa_index = tswap32(ifa->ifa_index);
2806 target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2807 NLMSG_LENGTH(sizeof(*ifa)));
2814 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2815 rtm = NLMSG_DATA(nlh);
2816 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2817 target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2818 NLMSG_LENGTH(sizeof(*rtm)));
2822 return -TARGET_EOPNOTSUPP;
2827 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2829 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2831 #endif /* CONFIG_RTNETLINK */
2833 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2835 switch (nlh->nlmsg_type) {
2837 gemu_log("Unknown host audit message type %d\n",
2839 return -TARGET_EINVAL;
2844 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2847 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2850 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2852 switch (nlh->nlmsg_type) {
2854 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2855 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2858 gemu_log("Unknown target audit message type %d\n",
2860 return -TARGET_EINVAL;
2866 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2868 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
/* NOTE(review): this block is a lossy listing of QEMU's do_setsockopt():
 * original file line numbers are fused onto each line, indentation was
 * stripped, and many lines (switch headers, case labels, braces) were
 * dropped — see the gaps in the embedded numbering.  Code kept verbatim;
 * restore from upstream QEMU linux-user/syscall.c before compiling.
 */
2871 /* do_setsockopt() Must return target values and target errnos. */
2872 static abi_long do_setsockopt(int sockfd, int level, int optname,
2873 abi_ulong optval_addr, socklen_t optlen)
2877 struct ip_mreqn *ip_mreq;
2878 struct ip_mreq_source *ip_mreq_source;
/* SOL_TCP arm (header lost in the listing): all TCP options take an int. */
2882 /* TCP options all take an 'int' value. */
2883 if (optlen < sizeof(uint32_t))
2884 return -TARGET_EINVAL;
2886 if (get_user_u32(val, optval_addr))
2887 return -TARGET_EFAULT;
2888 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
/* SOL_IP arm: int-valued options accept either a u32 or a single byte. */
2895 case IP_ROUTER_ALERT:
2899 case IP_MTU_DISCOVER:
2906 case IP_MULTICAST_TTL:
2907 case IP_MULTICAST_LOOP:
2909 if (optlen >= sizeof(uint32_t)) {
2910 if (get_user_u32(val, optval_addr))
2911 return -TARGET_EFAULT;
2912 } else if (optlen >= 1) {
2913 if (get_user_u8(val, optval_addr))
2914 return -TARGET_EFAULT;
2916 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2918 case IP_ADD_MEMBERSHIP:
2919 case IP_DROP_MEMBERSHIP:
2920 if (optlen < sizeof (struct target_ip_mreq) ||
2921 optlen > sizeof (struct target_ip_mreqn))
2922 return -TARGET_EINVAL;
2924 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2925 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2926 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2929 case IP_BLOCK_SOURCE:
2930 case IP_UNBLOCK_SOURCE:
2931 case IP_ADD_SOURCE_MEMBERSHIP:
2932 case IP_DROP_SOURCE_MEMBERSHIP:
2933 if (optlen != sizeof (struct target_ip_mreq_source))
2934 return -TARGET_EINVAL;
2936 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2937 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2938 unlock_user (ip_mreq_source, optval_addr, 0);
/* SOL_IPV6 arm: int-valued options, then struct-valued ones. */
2947 case IPV6_MTU_DISCOVER:
2950 case IPV6_RECVPKTINFO:
2951 case IPV6_UNICAST_HOPS:
2953 case IPV6_RECVHOPLIMIT:
2954 case IPV6_2292HOPLIMIT:
2957 if (optlen < sizeof(uint32_t)) {
2958 return -TARGET_EINVAL;
2960 if (get_user_u32(val, optval_addr)) {
2961 return -TARGET_EFAULT;
2963 ret = get_errno(setsockopt(sockfd, level, optname,
2964 &val, sizeof(val)));
/* IPV6_PKTINFO: copy struct in and swap the interface index. */
2968 struct in6_pktinfo pki;
2970 if (optlen < sizeof(pki)) {
2971 return -TARGET_EINVAL;
2974 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2975 return -TARGET_EFAULT;
2978 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2980 ret = get_errno(setsockopt(sockfd, level, optname,
2981 &pki, sizeof(pki)));
/* ICMPV6_FILTER: swap the 8-word bitmap before handing to the host. */
2992 struct icmp6_filter icmp6f;
2994 if (optlen > sizeof(icmp6f)) {
2995 optlen = sizeof(icmp6f);
2998 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2999 return -TARGET_EFAULT;
3002 for (val = 0; val < 8; val++) {
3003 icmp6f.data[val] = tswap32(icmp6f.data[val]);
3006 ret = get_errno(setsockopt(sockfd, level, optname,
3018 /* those take an u32 value */
3019 if (optlen < sizeof(uint32_t)) {
3020 return -TARGET_EINVAL;
3023 if (get_user_u32(val, optval_addr)) {
3024 return -TARGET_EFAULT;
3026 ret = get_errno(setsockopt(sockfd, level, optname,
3027 &val, sizeof(val)));
/* TARGET_SOL_SOCKET arm: translate target SO_* constants to host ones. */
3034 case TARGET_SOL_SOCKET:
3036 case TARGET_SO_RCVTIMEO:
3040 optname = SO_RCVTIMEO;
3043 if (optlen != sizeof(struct target_timeval)) {
3044 return -TARGET_EINVAL;
3047 if (copy_from_user_timeval(&tv, optval_addr)) {
3048 return -TARGET_EFAULT;
3051 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3055 case TARGET_SO_SNDTIMEO:
3056 optname = SO_SNDTIMEO;
3058 case TARGET_SO_ATTACH_FILTER:
3060 struct target_sock_fprog *tfprog;
3061 struct target_sock_filter *tfilter;
3062 struct sock_fprog fprog;
3063 struct sock_filter *filter;
3066 if (optlen != sizeof(*tfprog)) {
3067 return -TARGET_EINVAL;
3069 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
3070 return -TARGET_EFAULT;
3072 if (!lock_user_struct(VERIFY_READ, tfilter,
3073 tswapal(tfprog->filter), 0)) {
3074 unlock_user_struct(tfprog, optval_addr, 1);
3075 return -TARGET_EFAULT;
/* BPF filter program is rebuilt in host byte order element by element. */
3078 fprog.len = tswap16(tfprog->len);
3079 filter = g_try_new(struct sock_filter, fprog.len);
3080 if (filter == NULL) {
3081 unlock_user_struct(tfilter, tfprog->filter, 1);
3082 unlock_user_struct(tfprog, optval_addr, 1);
3083 return -TARGET_ENOMEM;
3085 for (i = 0; i < fprog.len; i++) {
3086 filter[i].code = tswap16(tfilter[i].code);
3087 filter[i].jt = tfilter[i].jt;
3088 filter[i].jf = tfilter[i].jf;
3089 filter[i].k = tswap32(tfilter[i].k);
3091 fprog.filter = filter;
3093 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
3094 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
3097 unlock_user_struct(tfilter, tfprog->filter, 1);
3098 unlock_user_struct(tfprog, optval_addr, 1);
3101 case TARGET_SO_BINDTODEVICE:
3103 char *dev_ifname, *addr_ifname;
3105 if (optlen > IFNAMSIZ - 1) {
3106 optlen = IFNAMSIZ - 1;
3108 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3110 return -TARGET_EFAULT;
3112 optname = SO_BINDTODEVICE;
3113 addr_ifname = alloca(IFNAMSIZ);
3114 memcpy(addr_ifname, dev_ifname, optlen);
3115 addr_ifname[optlen] = 0;
3116 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3117 addr_ifname, optlen));
3118 unlock_user (dev_ifname, optval_addr, 0);
3121 /* Options with 'int' argument. */
3122 case TARGET_SO_DEBUG:
3125 case TARGET_SO_REUSEADDR:
3126 optname = SO_REUSEADDR;
3128 case TARGET_SO_TYPE:
3131 case TARGET_SO_ERROR:
3134 case TARGET_SO_DONTROUTE:
3135 optname = SO_DONTROUTE;
3137 case TARGET_SO_BROADCAST:
3138 optname = SO_BROADCAST;
3140 case TARGET_SO_SNDBUF:
3141 optname = SO_SNDBUF;
3143 case TARGET_SO_SNDBUFFORCE:
3144 optname = SO_SNDBUFFORCE;
3146 case TARGET_SO_RCVBUF:
3147 optname = SO_RCVBUF;
3149 case TARGET_SO_RCVBUFFORCE:
3150 optname = SO_RCVBUFFORCE;
3152 case TARGET_SO_KEEPALIVE:
3153 optname = SO_KEEPALIVE;
3155 case TARGET_SO_OOBINLINE:
3156 optname = SO_OOBINLINE;
3158 case TARGET_SO_NO_CHECK:
3159 optname = SO_NO_CHECK;
3161 case TARGET_SO_PRIORITY:
3162 optname = SO_PRIORITY;
3165 case TARGET_SO_BSDCOMPAT:
3166 optname = SO_BSDCOMPAT;
3169 case TARGET_SO_PASSCRED:
3170 optname = SO_PASSCRED;
3172 case TARGET_SO_PASSSEC:
3173 optname = SO_PASSSEC;
3175 case TARGET_SO_TIMESTAMP:
3176 optname = SO_TIMESTAMP;
3178 case TARGET_SO_RCVLOWAT:
3179 optname = SO_RCVLOWAT;
/* Shared tail for all int-valued SO_* options. */
3184 if (optlen < sizeof(uint32_t))
3185 return -TARGET_EINVAL;
3187 if (get_user_u32(val, optval_addr))
3188 return -TARGET_EFAULT;
3189 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3193 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3194 ret = -TARGET_ENOPROTOOPT;
/* NOTE(review): lossy listing of QEMU's do_getsockopt(): line numbers are
 * fused onto each line, indentation was stripped, and lines are missing
 * (see gaps in the embedded numbering).  Code kept verbatim; restore from
 * upstream QEMU linux-user/syscall.c before compiling.
 */
3199 /* do_getsockopt() Must return target values and target errnos. */
3200 static abi_long do_getsockopt(int sockfd, int level, int optname,
3201 abi_ulong optval_addr, abi_ulong optlen)
3208 case TARGET_SOL_SOCKET:
3211 /* These don't just return a single integer */
3212 case TARGET_SO_LINGER:
3213 case TARGET_SO_RCVTIMEO:
3214 case TARGET_SO_SNDTIMEO:
3215 case TARGET_SO_PEERNAME:
/* SO_PEERCRED: fetch host ucred, copy it field by field to the target. */
3217 case TARGET_SO_PEERCRED: {
3220 struct target_ucred *tcr;
3222 if (get_user_u32(len, optlen)) {
3223 return -TARGET_EFAULT;
3226 return -TARGET_EINVAL;
3230 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
3238 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
3239 return -TARGET_EFAULT;
3241 __put_user(cr.pid, &tcr->pid);
3242 __put_user(cr.uid, &tcr->uid);
3243 __put_user(cr.gid, &tcr->gid);
3244 unlock_user_struct(tcr, optval_addr, 1);
3245 if (put_user_u32(len, optlen)) {
3246 return -TARGET_EFAULT;
3250 /* Options with 'int' argument. */
3251 case TARGET_SO_DEBUG:
3254 case TARGET_SO_REUSEADDR:
3255 optname = SO_REUSEADDR;
3257 case TARGET_SO_TYPE:
3260 case TARGET_SO_ERROR:
3263 case TARGET_SO_DONTROUTE:
3264 optname = SO_DONTROUTE;
3266 case TARGET_SO_BROADCAST:
3267 optname = SO_BROADCAST;
3269 case TARGET_SO_SNDBUF:
3270 optname = SO_SNDBUF;
3272 case TARGET_SO_RCVBUF:
3273 optname = SO_RCVBUF;
3275 case TARGET_SO_KEEPALIVE:
3276 optname = SO_KEEPALIVE;
3278 case TARGET_SO_OOBINLINE:
3279 optname = SO_OOBINLINE;
3281 case TARGET_SO_NO_CHECK:
3282 optname = SO_NO_CHECK;
3284 case TARGET_SO_PRIORITY:
3285 optname = SO_PRIORITY;
3288 case TARGET_SO_BSDCOMPAT:
3289 optname = SO_BSDCOMPAT;
3292 case TARGET_SO_PASSCRED:
3293 optname = SO_PASSCRED;
3295 case TARGET_SO_TIMESTAMP:
3296 optname = SO_TIMESTAMP;
3298 case TARGET_SO_RCVLOWAT:
3299 optname = SO_RCVLOWAT;
3301 case TARGET_SO_ACCEPTCONN:
3302 optname = SO_ACCEPTCONN;
/* Shared int-valued tail; SO_TYPE value needs target-type translation. */
3309 /* TCP options all take an 'int' value. */
3311 if (get_user_u32(len, optlen))
3312 return -TARGET_EFAULT;
3314 return -TARGET_EINVAL;
3316 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3319 if (optname == SO_TYPE) {
3320 val = host_to_target_sock_type(val);
3325 if (put_user_u32(val, optval_addr))
3326 return -TARGET_EFAULT;
3328 if (put_user_u8(val, optval_addr))
3329 return -TARGET_EFAULT;
3331 if (put_user_u32(len, optlen))
3332 return -TARGET_EFAULT;
/* SOL_IP arm: caller may ask for a 1-byte or 4-byte result. */
3339 case IP_ROUTER_ALERT:
3343 case IP_MTU_DISCOVER:
3349 case IP_MULTICAST_TTL:
3350 case IP_MULTICAST_LOOP:
3351 if (get_user_u32(len, optlen))
3352 return -TARGET_EFAULT;
3354 return -TARGET_EINVAL;
3356 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3359 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3361 if (put_user_u32(len, optlen)
3362 || put_user_u8(val, optval_addr))
3363 return -TARGET_EFAULT;
3365 if (len > sizeof(int))
3367 if (put_user_u32(len, optlen)
3368 || put_user_u32(val, optval_addr))
3369 return -TARGET_EFAULT;
3373 ret = -TARGET_ENOPROTOOPT;
3379 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3381 ret = -TARGET_EOPNOTSUPP;
3387 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3388 abi_ulong count, int copy)
3390 struct target_iovec *target_vec;
3392 abi_ulong total_len, max_len;
3395 bool bad_address = false;
3401 if (count > IOV_MAX) {
3406 vec = g_try_new0(struct iovec, count);
3412 target_vec = lock_user(VERIFY_READ, target_addr,
3413 count * sizeof(struct target_iovec), 1);
3414 if (target_vec == NULL) {
3419 /* ??? If host page size > target page size, this will result in a
3420 value larger than what we can actually support. */
3421 max_len = 0x7fffffff & TARGET_PAGE_MASK;
3424 for (i = 0; i < count; i++) {
3425 abi_ulong base = tswapal(target_vec[i].iov_base);
3426 abi_long len = tswapal(target_vec[i].iov_len);
3431 } else if (len == 0) {
3432 /* Zero length pointer is ignored. */
3433 vec[i].iov_base = 0;
3435 vec[i].iov_base = lock_user(type, base, len, copy);
3436 /* If the first buffer pointer is bad, this is a fault. But
3437 * subsequent bad buffers will result in a partial write; this
3438 * is realized by filling the vector with null pointers and
3440 if (!vec[i].iov_base) {
3451 if (len > max_len - total_len) {
3452 len = max_len - total_len;
3455 vec[i].iov_len = len;
3459 unlock_user(target_vec, target_addr, 0);
3464 if (tswapal(target_vec[i].iov_len) > 0) {
3465 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3468 unlock_user(target_vec, target_addr, 0);
3475 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3476 abi_ulong count, int copy)
3478 struct target_iovec *target_vec;
3481 target_vec = lock_user(VERIFY_READ, target_addr,
3482 count * sizeof(struct target_iovec), 1);
3484 for (i = 0; i < count; i++) {
3485 abi_ulong base = tswapal(target_vec[i].iov_base);
3486 abi_long len = tswapal(target_vec[i].iov_len);
3490 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3492 unlock_user(target_vec, target_addr, 0);
3498 static inline int target_to_host_sock_type(int *type)
3501 int target_type = *type;
3503 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3504 case TARGET_SOCK_DGRAM:
3505 host_type = SOCK_DGRAM;
3507 case TARGET_SOCK_STREAM:
3508 host_type = SOCK_STREAM;
3511 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3514 if (target_type & TARGET_SOCK_CLOEXEC) {
3515 #if defined(SOCK_CLOEXEC)
3516 host_type |= SOCK_CLOEXEC;
3518 return -TARGET_EINVAL;
3521 if (target_type & TARGET_SOCK_NONBLOCK) {
3522 #if defined(SOCK_NONBLOCK)
3523 host_type |= SOCK_NONBLOCK;
3524 #elif !defined(O_NONBLOCK)
3525 return -TARGET_EINVAL;
/* Try to emulate socket type flags after socket creation.  On hosts without
 * SOCK_NONBLOCK the flag is applied via fcntl(); the fd is closed and
 * -TARGET_EINVAL returned if that fails.  Returns the fd on success.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
3547 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3548 abi_ulong target_addr,
3551 struct sockaddr *addr = host_addr;
3552 struct target_sockaddr *target_saddr;
3554 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3555 if (!target_saddr) {
3556 return -TARGET_EFAULT;
3559 memcpy(addr, target_saddr, len);
3560 addr->sa_family = tswap16(target_saddr->sa_family);
3561 /* spkt_protocol is big-endian */
3563 unlock_user(target_saddr, target_addr, 0);
3567 static TargetFdTrans target_packet_trans = {
3568 .target_to_host_addr = packet_target_to_host_sockaddr,
#ifdef CONFIG_RTNETLINK
/* fd translator hooks for NETLINK_ROUTE sockets: byteswap the whole netlink
 * buffer in each direction, propagating conversion errors, otherwise
 * reporting the full length as transferred.
 */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    abi_long ret;

    ret = target_to_host_nlmsg_route(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    abi_long ret;

    ret = host_to_target_nlmsg_route(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */
3602 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3606 ret = target_to_host_nlmsg_audit(buf, len);
3614 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3618 ret = host_to_target_nlmsg_audit(buf, len);
3626 static TargetFdTrans target_netlink_audit_trans = {
3627 .target_to_host_data = netlink_audit_target_to_host,
3628 .host_to_target_data = netlink_audit_host_to_target,
3631 /* do_socket() Must return target values and target errnos. */
3632 static abi_long do_socket(int domain, int type, int protocol)
3634 int target_type = type;
3637 ret = target_to_host_sock_type(&type);
3642 if (domain == PF_NETLINK && !(
3643 #ifdef CONFIG_RTNETLINK
3644 protocol == NETLINK_ROUTE ||
3646 protocol == NETLINK_KOBJECT_UEVENT ||
3647 protocol == NETLINK_AUDIT)) {
3648 return -EPFNOSUPPORT;
3651 if (domain == AF_PACKET ||
3652 (domain == AF_INET && type == SOCK_PACKET)) {
3653 protocol = tswap16(protocol);
3656 ret = get_errno(socket(domain, type, protocol));
3658 ret = sock_flags_fixup(ret, target_type);
3659 if (type == SOCK_PACKET) {
3660 /* Manage an obsolete case :
3661 * if socket type is SOCK_PACKET, bind by name
3663 fd_trans_register(ret, &target_packet_trans);
3664 } else if (domain == PF_NETLINK) {
3666 #ifdef CONFIG_RTNETLINK
3668 fd_trans_register(ret, &target_netlink_route_trans);
3671 case NETLINK_KOBJECT_UEVENT:
3672 /* nothing to do: messages are strings */
3675 fd_trans_register(ret, &target_netlink_audit_trans);
3678 g_assert_not_reached();
3685 /* do_bind() Must return target values and target errnos. */
3686 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3692 if ((int)addrlen < 0) {
3693 return -TARGET_EINVAL;
3696 addr = alloca(addrlen+1);
3698 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3702 return get_errno(bind(sockfd, addr, addrlen));
3705 /* do_connect() Must return target values and target errnos. */
3706 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3712 if ((int)addrlen < 0) {
3713 return -TARGET_EINVAL;
3716 addr = alloca(addrlen+1);
3718 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3722 return get_errno(safe_connect(sockfd, addr, addrlen));
3725 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3726 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3727 int flags, int send)
3733 abi_ulong target_vec;
3735 if (msgp->msg_name) {
3736 msg.msg_namelen = tswap32(msgp->msg_namelen);
3737 msg.msg_name = alloca(msg.msg_namelen+1);
3738 ret = target_to_host_sockaddr(fd, msg.msg_name,
3739 tswapal(msgp->msg_name),
3741 if (ret == -TARGET_EFAULT) {
3742 /* For connected sockets msg_name and msg_namelen must
3743 * be ignored, so returning EFAULT immediately is wrong.
3744 * Instead, pass a bad msg_name to the host kernel, and
3745 * let it decide whether to return EFAULT or not.
3747 msg.msg_name = (void *)-1;
3752 msg.msg_name = NULL;
3753 msg.msg_namelen = 0;
3755 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3756 msg.msg_control = alloca(msg.msg_controllen);
3757 msg.msg_flags = tswap32(msgp->msg_flags);
3759 count = tswapal(msgp->msg_iovlen);
3760 target_vec = tswapal(msgp->msg_iov);
3762 if (count > IOV_MAX) {
3763 /* sendrcvmsg returns a different errno for this condition than
3764 * readv/writev, so we must catch it here before lock_iovec() does.
3766 ret = -TARGET_EMSGSIZE;
3770 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3771 target_vec, count, send);
3773 ret = -host_to_target_errno(errno);
3776 msg.msg_iovlen = count;
3780 if (fd_trans_target_to_host_data(fd)) {
3783 host_msg = g_malloc(msg.msg_iov->iov_len);
3784 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3785 ret = fd_trans_target_to_host_data(fd)(host_msg,
3786 msg.msg_iov->iov_len);
3788 msg.msg_iov->iov_base = host_msg;
3789 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3793 ret = target_to_host_cmsg(&msg, msgp);
3795 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3799 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3800 if (!is_error(ret)) {
3802 if (fd_trans_host_to_target_data(fd)) {
3803 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3806 ret = host_to_target_cmsg(msgp, &msg);
3808 if (!is_error(ret)) {
3809 msgp->msg_namelen = tswap32(msg.msg_namelen);
3810 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3811 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3812 msg.msg_name, msg.msg_namelen);
3824 unlock_iovec(vec, target_vec, count, !send);
3829 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3830 int flags, int send)
3833 struct target_msghdr *msgp;
3835 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3839 return -TARGET_EFAULT;
3841 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3842 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3846 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3847 * so it might not have this *mmsg-specific flag either.
3849 #ifndef MSG_WAITFORONE
3850 #define MSG_WAITFORONE 0x10000
3853 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3854 unsigned int vlen, unsigned int flags,
3857 struct target_mmsghdr *mmsgp;
3861 if (vlen > UIO_MAXIOV) {
3865 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3867 return -TARGET_EFAULT;
3870 for (i = 0; i < vlen; i++) {
3871 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3872 if (is_error(ret)) {
3875 mmsgp[i].msg_len = tswap32(ret);
3876 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3877 if (flags & MSG_WAITFORONE) {
3878 flags |= MSG_DONTWAIT;
3882 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3884 /* Return number of datagrams sent if we sent any at all;
3885 * otherwise return the error.
3893 /* do_accept4() Must return target values and target errnos. */
3894 static abi_long do_accept4(int fd, abi_ulong target_addr,
3895 abi_ulong target_addrlen_addr, int flags)
3902 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3904 if (target_addr == 0) {
3905 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3908 /* linux returns EINVAL if addrlen pointer is invalid */
3909 if (get_user_u32(addrlen, target_addrlen_addr))
3910 return -TARGET_EINVAL;
3912 if ((int)addrlen < 0) {
3913 return -TARGET_EINVAL;
3916 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3917 return -TARGET_EINVAL;
3919 addr = alloca(addrlen);
3921 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3922 if (!is_error(ret)) {
3923 host_to_target_sockaddr(target_addr, addr, addrlen);
3924 if (put_user_u32(addrlen, target_addrlen_addr))
3925 ret = -TARGET_EFAULT;
3930 /* do_getpeername() Must return target values and target errnos. */
3931 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3932 abi_ulong target_addrlen_addr)
3938 if (get_user_u32(addrlen, target_addrlen_addr))
3939 return -TARGET_EFAULT;
3941 if ((int)addrlen < 0) {
3942 return -TARGET_EINVAL;
3945 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3946 return -TARGET_EFAULT;
3948 addr = alloca(addrlen);
3950 ret = get_errno(getpeername(fd, addr, &addrlen));
3951 if (!is_error(ret)) {
3952 host_to_target_sockaddr(target_addr, addr, addrlen);
3953 if (put_user_u32(addrlen, target_addrlen_addr))
3954 ret = -TARGET_EFAULT;
3959 /* do_getsockname() Must return target values and target errnos. */
3960 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3961 abi_ulong target_addrlen_addr)
3967 if (get_user_u32(addrlen, target_addrlen_addr))
3968 return -TARGET_EFAULT;
3970 if ((int)addrlen < 0) {
3971 return -TARGET_EINVAL;
3974 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3975 return -TARGET_EFAULT;
3977 addr = alloca(addrlen);
3979 ret = get_errno(getsockname(fd, addr, &addrlen));
3980 if (!is_error(ret)) {
3981 host_to_target_sockaddr(target_addr, addr, addrlen);
3982 if (put_user_u32(addrlen, target_addrlen_addr))
3983 ret = -TARGET_EFAULT;
3988 /* do_socketpair() Must return target values and target errnos. */
3989 static abi_long do_socketpair(int domain, int type, int protocol,
3990 abi_ulong target_tab_addr)
3995 target_to_host_sock_type(&type);
3997 ret = get_errno(socketpair(domain, type, protocol, tab));
3998 if (!is_error(ret)) {
3999 if (put_user_s32(tab[0], target_tab_addr)
4000 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
4001 ret = -TARGET_EFAULT;
4006 /* do_sendto() Must return target values and target errnos. */
4007 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
4008 abi_ulong target_addr, socklen_t addrlen)
4012 void *copy_msg = NULL;
4015 if ((int)addrlen < 0) {
4016 return -TARGET_EINVAL;
4019 host_msg = lock_user(VERIFY_READ, msg, len, 1);
4021 return -TARGET_EFAULT;
4022 if (fd_trans_target_to_host_data(fd)) {
4023 copy_msg = host_msg;
4024 host_msg = g_malloc(len);
4025 memcpy(host_msg, copy_msg, len);
4026 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
4032 addr = alloca(addrlen+1);
4033 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
4037 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
4039 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
4044 host_msg = copy_msg;
4046 unlock_user(host_msg, msg, 0);
4050 /* do_recvfrom() Must return target values and target errnos. */
4051 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
4052 abi_ulong target_addr,
4053 abi_ulong target_addrlen)
4060 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
4062 return -TARGET_EFAULT;
4064 if (get_user_u32(addrlen, target_addrlen)) {
4065 ret = -TARGET_EFAULT;
4068 if ((int)addrlen < 0) {
4069 ret = -TARGET_EINVAL;
4072 addr = alloca(addrlen);
4073 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
4076 addr = NULL; /* To keep compiler quiet. */
4077 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
4079 if (!is_error(ret)) {
4080 if (fd_trans_host_to_target_data(fd)) {
4081 ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
4084 host_to_target_sockaddr(target_addr, addr, addrlen);
4085 if (put_user_u32(addrlen, target_addrlen)) {
4086 ret = -TARGET_EFAULT;
4090 unlock_user(host_msg, msg, len);
4093 unlock_user(host_msg, msg, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
4191 #define N_SHM_REGIONS 32
4193 static struct shm_region {
4197 } shm_regions[N_SHM_REGIONS];
4199 #ifndef TARGET_SEMID64_DS
4200 /* asm-generic version of this struct */
4201 struct target_semid64_ds
4203 struct target_ipc_perm sem_perm;
4204 abi_ulong sem_otime;
4205 #if TARGET_ABI_BITS == 32
4206 abi_ulong __unused1;
4208 abi_ulong sem_ctime;
4209 #if TARGET_ABI_BITS == 32
4210 abi_ulong __unused2;
4212 abi_ulong sem_nsems;
4213 abi_ulong __unused3;
4214 abi_ulong __unused4;
4218 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4219 abi_ulong target_addr)
4221 struct target_ipc_perm *target_ip;
4222 struct target_semid64_ds *target_sd;
4224 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4225 return -TARGET_EFAULT;
4226 target_ip = &(target_sd->sem_perm);
4227 host_ip->__key = tswap32(target_ip->__key);
4228 host_ip->uid = tswap32(target_ip->uid);
4229 host_ip->gid = tswap32(target_ip->gid);
4230 host_ip->cuid = tswap32(target_ip->cuid);
4231 host_ip->cgid = tswap32(target_ip->cgid);
4232 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4233 host_ip->mode = tswap32(target_ip->mode);
4235 host_ip->mode = tswap16(target_ip->mode);
4237 #if defined(TARGET_PPC)
4238 host_ip->__seq = tswap32(target_ip->__seq);
4240 host_ip->__seq = tswap16(target_ip->__seq);
4242 unlock_user_struct(target_sd, target_addr, 0);
4246 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4247 struct ipc_perm *host_ip)
4249 struct target_ipc_perm *target_ip;
4250 struct target_semid64_ds *target_sd;
4252 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4253 return -TARGET_EFAULT;
4254 target_ip = &(target_sd->sem_perm);
4255 target_ip->__key = tswap32(host_ip->__key);
4256 target_ip->uid = tswap32(host_ip->uid);
4257 target_ip->gid = tswap32(host_ip->gid);
4258 target_ip->cuid = tswap32(host_ip->cuid);
4259 target_ip->cgid = tswap32(host_ip->cgid);
4260 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4261 target_ip->mode = tswap32(host_ip->mode);
4263 target_ip->mode = tswap16(host_ip->mode);
4265 #if defined(TARGET_PPC)
4266 target_ip->__seq = tswap32(host_ip->__seq);
4268 target_ip->__seq = tswap16(host_ip->__seq);
4270 unlock_user_struct(target_sd, target_addr, 1);
4274 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4275 abi_ulong target_addr)
4277 struct target_semid64_ds *target_sd;
4279 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4280 return -TARGET_EFAULT;
4281 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4282 return -TARGET_EFAULT;
4283 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4284 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4285 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4286 unlock_user_struct(target_sd, target_addr, 0);
4290 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4291 struct semid_ds *host_sd)
4293 struct target_semid64_ds *target_sd;
4295 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4296 return -TARGET_EFAULT;
4297 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4298 return -TARGET_EFAULT;
4299 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4300 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4301 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4302 unlock_user_struct(target_sd, target_addr, 1);
/* Guest layout of struct seminfo (IPC_INFO/SEM_INFO results). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
4319 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4320 struct seminfo *host_seminfo)
4322 struct target_seminfo *target_seminfo;
4323 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4324 return -TARGET_EFAULT;
4325 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4326 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4327 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4328 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4329 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4330 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4331 __put_user(host_seminfo->semume, &target_seminfo->semume);
4332 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4333 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4334 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4335 unlock_user_struct(target_seminfo, target_addr, 1);
4341 struct semid_ds *buf;
4342 unsigned short *array;
4343 struct seminfo *__buf;
4346 union target_semun {
4353 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4354 abi_ulong target_addr)
4357 unsigned short *array;
4359 struct semid_ds semid_ds;
4362 semun.buf = &semid_ds;
4364 ret = semctl(semid, 0, IPC_STAT, semun);
4366 return get_errno(ret);
4368 nsems = semid_ds.sem_nsems;
4370 *host_array = g_try_new(unsigned short, nsems);
4372 return -TARGET_ENOMEM;
4374 array = lock_user(VERIFY_READ, target_addr,
4375 nsems*sizeof(unsigned short), 1);
4377 g_free(*host_array);
4378 return -TARGET_EFAULT;
4381 for(i=0; i<nsems; i++) {
4382 __get_user((*host_array)[i], &array[i]);
4384 unlock_user(array, target_addr, 0);
4389 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4390 unsigned short **host_array)
4393 unsigned short *array;
4395 struct semid_ds semid_ds;
4398 semun.buf = &semid_ds;
4400 ret = semctl(semid, 0, IPC_STAT, semun);
4402 return get_errno(ret);
4404 nsems = semid_ds.sem_nsems;
4406 array = lock_user(VERIFY_WRITE, target_addr,
4407 nsems*sizeof(unsigned short), 0);
4409 return -TARGET_EFAULT;
4411 for(i=0; i<nsems; i++) {
4412 __put_user((*host_array)[i], &array[i]);
4414 g_free(*host_array);
4415 unlock_user(array, target_addr, 1);
4420 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4421 abi_ulong target_arg)
4423 union target_semun target_su = { .buf = target_arg };
4425 struct semid_ds dsarg;
4426 unsigned short *array = NULL;
4427 struct seminfo seminfo;
4428 abi_long ret = -TARGET_EINVAL;
4435 /* In 64 bit cross-endian situations, we will erroneously pick up
4436 * the wrong half of the union for the "val" element. To rectify
4437 * this, the entire 8-byte structure is byteswapped, followed by
4438 * a swap of the 4 byte val field. In other cases, the data is
4439 * already in proper host byte order. */
4440 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4441 target_su.buf = tswapal(target_su.buf);
4442 arg.val = tswap32(target_su.val);
4444 arg.val = target_su.val;
4446 ret = get_errno(semctl(semid, semnum, cmd, arg));
4450 err = target_to_host_semarray(semid, &array, target_su.array);
4454 ret = get_errno(semctl(semid, semnum, cmd, arg));
4455 err = host_to_target_semarray(semid, target_su.array, &array);
4462 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4466 ret = get_errno(semctl(semid, semnum, cmd, arg));
4467 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4473 arg.__buf = &seminfo;
4474 ret = get_errno(semctl(semid, semnum, cmd, arg));
4475 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4483 ret = get_errno(semctl(semid, semnum, cmd, NULL));
/* Guest layout of struct sembuf for semop(2). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4496 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4497 abi_ulong target_addr,
4500 struct target_sembuf *target_sembuf;
4503 target_sembuf = lock_user(VERIFY_READ, target_addr,
4504 nsops*sizeof(struct target_sembuf), 1);
4506 return -TARGET_EFAULT;
4508 for(i=0; i<nsops; i++) {
4509 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4510 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4511 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4514 unlock_user(target_sembuf, target_addr, 0);
4519 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4521 struct sembuf sops[nsops];
4523 if (target_to_host_sembuf(sops, ptr, nsops))
4524 return -TARGET_EFAULT;
4526 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
4529 struct target_msqid_ds
4531 struct target_ipc_perm msg_perm;
4532 abi_ulong msg_stime;
4533 #if TARGET_ABI_BITS == 32
4534 abi_ulong __unused1;
4536 abi_ulong msg_rtime;
4537 #if TARGET_ABI_BITS == 32
4538 abi_ulong __unused2;
4540 abi_ulong msg_ctime;
4541 #if TARGET_ABI_BITS == 32
4542 abi_ulong __unused3;
4544 abi_ulong __msg_cbytes;
4546 abi_ulong msg_qbytes;
4547 abi_ulong msg_lspid;
4548 abi_ulong msg_lrpid;
4549 abi_ulong __unused4;
4550 abi_ulong __unused5;
4553 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4554 abi_ulong target_addr)
4556 struct target_msqid_ds *target_md;
4558 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4559 return -TARGET_EFAULT;
4560 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4561 return -TARGET_EFAULT;
4562 host_md->msg_stime = tswapal(target_md->msg_stime);
4563 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4564 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4565 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4566 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4567 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4568 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4569 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4570 unlock_user_struct(target_md, target_addr, 0);
4574 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4575 struct msqid_ds *host_md)
4577 struct target_msqid_ds *target_md;
4579 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4580 return -TARGET_EFAULT;
4581 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4582 return -TARGET_EFAULT;
4583 target_md->msg_stime = tswapal(host_md->msg_stime);
4584 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4585 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4586 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4587 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4588 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4589 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4590 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4591 unlock_user_struct(target_md, target_addr, 1);
/* Guest layout of struct msginfo (IPC_INFO/MSG_INFO results). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4606 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4607 struct msginfo *host_msginfo)
4609 struct target_msginfo *target_msginfo;
4610 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4611 return -TARGET_EFAULT;
4612 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4613 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4614 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4615 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4616 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4617 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4618 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4619 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4620 unlock_user_struct(target_msginfo, target_addr, 1);
4624 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4626 struct msqid_ds dsarg;
4627 struct msginfo msginfo;
4628 abi_long ret = -TARGET_EINVAL;
4636 if (target_to_host_msqid_ds(&dsarg,ptr))
4637 return -TARGET_EFAULT;
4638 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4639 if (host_to_target_msqid_ds(ptr,&dsarg))
4640 return -TARGET_EFAULT;
4643 ret = get_errno(msgctl(msgid, cmd, NULL));
4647 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4648 if (host_to_target_msginfo(ptr, &msginfo))
4649 return -TARGET_EFAULT;
4656 struct target_msgbuf {
4661 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4662 ssize_t msgsz, int msgflg)
4664 struct target_msgbuf *target_mb;
4665 struct msgbuf *host_mb;
4669 return -TARGET_EINVAL;
4672 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4673 return -TARGET_EFAULT;
4674 host_mb = g_try_malloc(msgsz + sizeof(long));
4676 unlock_user_struct(target_mb, msgp, 0);
4677 return -TARGET_ENOMEM;
4679 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4680 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4681 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4683 unlock_user_struct(target_mb, msgp, 0);
4688 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4689 ssize_t msgsz, abi_long msgtyp,
4692 struct target_msgbuf *target_mb;
4694 struct msgbuf *host_mb;
4698 return -TARGET_EINVAL;
4701 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4702 return -TARGET_EFAULT;
4704 host_mb = g_try_malloc(msgsz + sizeof(long));
4706 ret = -TARGET_ENOMEM;
4709 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4712 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4713 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4714 if (!target_mtext) {
4715 ret = -TARGET_EFAULT;
4718 memcpy(target_mb->mtext, host_mb->mtext, ret);
4719 unlock_user(target_mtext, target_mtext_addr, ret);
4722 target_mb->mtype = tswapal(host_mb->mtype);
4726 unlock_user_struct(target_mb, msgp, 1);
4731 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4732 abi_ulong target_addr)
4734 struct target_shmid_ds *target_sd;
4736 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4737 return -TARGET_EFAULT;
4738 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4739 return -TARGET_EFAULT;
4740 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4741 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4742 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4743 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4744 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4745 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4746 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4747 unlock_user_struct(target_sd, target_addr, 0);
4751 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4752 struct shmid_ds *host_sd)
4754 struct target_shmid_ds *target_sd;
4756 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4757 return -TARGET_EFAULT;
4758 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4759 return -TARGET_EFAULT;
4760 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4761 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4762 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4763 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4764 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4765 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4766 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4767 unlock_user_struct(target_sd, target_addr, 1);
4771 struct target_shminfo {
4779 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4780 struct shminfo *host_shminfo)
4782 struct target_shminfo *target_shminfo;
4783 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4784 return -TARGET_EFAULT;
4785 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4786 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4787 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4788 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4789 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4790 unlock_user_struct(target_shminfo, target_addr, 1);
4794 struct target_shm_info {
4799 abi_ulong swap_attempts;
4800 abi_ulong swap_successes;
4803 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4804 struct shm_info *host_shm_info)
4806 struct target_shm_info *target_shm_info;
4807 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4808 return -TARGET_EFAULT;
4809 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4810 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4811 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4812 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4813 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4814 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4815 unlock_user_struct(target_shm_info, target_addr, 1);
4819 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4821 struct shmid_ds dsarg;
4822 struct shminfo shminfo;
4823 struct shm_info shm_info;
4824 abi_long ret = -TARGET_EINVAL;
4832 if (target_to_host_shmid_ds(&dsarg, buf))
4833 return -TARGET_EFAULT;
4834 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4835 if (host_to_target_shmid_ds(buf, &dsarg))
4836 return -TARGET_EFAULT;
4839 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4840 if (host_to_target_shminfo(buf, &shminfo))
4841 return -TARGET_EFAULT;
4844 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4845 if (host_to_target_shm_info(buf, &shm_info))
4846 return -TARGET_EFAULT;
4851 ret = get_errno(shmctl(shmid, cmd, NULL));
4858 #ifndef TARGET_FORCE_SHMLBA
4859 /* For most architectures, SHMLBA is the same as the page size;
4860 * some architectures have larger values, in which case they should
4861 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4862 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4863 * and defining its own value for SHMLBA.
4865 * The kernel also permits SHMLBA to be set by the architecture to a
4866 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4867 * this means that addresses are rounded to the large size if
4868 * SHM_RND is set but addresses not aligned to that size are not rejected
4869 * as long as they are at least page-aligned. Since the only architecture
4870 * which uses this is ia64 this code doesn't provide for that oddity.
4872 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4874 return TARGET_PAGE_SIZE;
4878 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4879 int shmid, abi_ulong shmaddr, int shmflg)
4883 struct shmid_ds shm_info;
4887 /* find out the length of the shared memory segment */
4888 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4889 if (is_error(ret)) {
4890 /* can't get length, bail out */
4894 shmlba = target_shmlba(cpu_env);
4896 if (shmaddr & (shmlba - 1)) {
4897 if (shmflg & SHM_RND) {
4898 shmaddr &= ~(shmlba - 1);
4900 return -TARGET_EINVAL;
4903 if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4904 return -TARGET_EINVAL;
4910 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4912 abi_ulong mmap_start;
4914 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
4916 if (mmap_start == -1) {
4918 host_raddr = (void *)-1;
4920 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4923 if (host_raddr == (void *)-1) {
4925 return get_errno((long)host_raddr);
4927 raddr=h2g((unsigned long)host_raddr);
4929 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4930 PAGE_VALID | PAGE_READ |
4931 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4933 for (i = 0; i < N_SHM_REGIONS; i++) {
4934 if (!shm_regions[i].in_use) {
4935 shm_regions[i].in_use = true;
4936 shm_regions[i].start = raddr;
4937 shm_regions[i].size = shm_info.shm_segsz;
4947 static inline abi_long do_shmdt(abi_ulong shmaddr)
4954 for (i = 0; i < N_SHM_REGIONS; ++i) {
4955 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4956 shm_regions[i].in_use = false;
4957 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4961 rv = get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.  */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    /* top 16 bits of call encode the IPC "version" variant */
    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* old-style msgrcv packs msgp and msgtyp in a struct */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
5075 /* kernel structure types definitions */
5077 #define STRUCT(name, ...) STRUCT_ ## name,
5078 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5080 #include "syscall_types.h"
5084 #undef STRUCT_SPECIAL
5086 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
5087 #define STRUCT_SPECIAL(name)
5088 #include "syscall_types.h"
5090 #undef STRUCT_SPECIAL
5092 typedef struct IOCTLEntry IOCTLEntry;
5094 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
5095 int fd, int cmd, abi_long arg);
5099 unsigned int host_cmd;
5102 do_ioctl_fn *do_ioctl;
5103 const argtype arg_type[5];
5106 #define IOC_R 0x0001
5107 #define IOC_W 0x0002
5108 #define IOC_RW (IOC_R | IOC_W)
5110 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
/*
 * Handler for the SIOCGIFCONF-style ioctl: converts the guest's struct
 * ifconf (plus the trailing array of ifreq records it points to) to host
 * layout, issues the host ioctl, and converts the results back.
 * NOTE(review): this excerpt elides some original source lines (gaps in
 * the embedded line numbers); comments describe only the visible code.
 */
5201 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5202 int fd, int cmd, abi_long arg)
5204 const argtype *arg_type = ie->arg_type;
5208 struct ifconf *host_ifconf;
5210 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5211 int target_ifreq_size;
5216 abi_long target_ifc_buf;
/* This handler only supports a read/write pointer argument. */
5220 assert(arg_type[0] == TYPE_PTR);
5221 assert(ie->access == IOC_RW);
/* Copy the guest struct ifconf into buf_temp in host layout. */
5224 target_size = thunk_type_size(arg_type, 0);
5226 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5228 return -TARGET_EFAULT;
5229 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5230 unlock_user(argptr, arg, 0);
5232 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5233 target_ifc_len = host_ifconf->ifc_len;
/* Remember the guest's ifc_buf pointer so it can be restored later. */
5234 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
/* Size the host buffer from the number of target-side ifreq records. */
5236 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5237 nb_ifreq = target_ifc_len / target_ifreq_size;
5238 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5240 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5241 if (outbufsz > MAX_STRUCT_SIZE) {
5242 /* We can't fit all the extents into the fixed size buffer.
5243 * Allocate one that is large enough and use it instead.
/* NOTE(review): bare malloc() here while sibling handlers use
 * g_try_malloc(); the matching free is not visible in this excerpt —
 * confirm it is released on all paths. */
5245 host_ifconf = malloc(outbufsz);
5247 return -TARGET_ENOMEM;
5249 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
/* Point ifc_buf at the space immediately after the ifconf header. */
5252 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5254 host_ifconf->ifc_len = host_ifc_len;
5255 host_ifconf->ifc_buf = host_ifc_buf;
5257 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5258 if (!is_error(ret)) {
5259 /* convert host ifc_len to target ifc_len */
5261 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5262 target_ifc_len = nb_ifreq * target_ifreq_size;
5263 host_ifconf->ifc_len = target_ifc_len;
5265 /* restore target ifc_buf */
5267 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5269 /* copy struct ifconf to target user */
5271 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5273 return -TARGET_EFAULT;
5274 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5275 unlock_user(argptr, arg, target_size);
5277 /* copy ifreq[] to target user */
5279 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5280 for (i = 0; i < nb_ifreq ; i++) {
5281 thunk_convert(argptr + i * target_ifreq_size,
5282 host_ifc_buf + i * sizeof(struct ifreq),
5283 ifreq_arg_type, THUNK_TARGET);
5285 unlock_user(argptr, target_ifc_buf, target_ifc_len);
/*
 * Handler for device-mapper (DM_*) ioctls.  These carry a struct dm_ioctl
 * header followed by a command-specific variable-length payload, so the
 * payload must be converted field-by-field in both directions.
 * NOTE(review): this excerpt elides some original source lines (gaps in
 * the embedded line numbers); comments describe only the visible code.
 */
5295 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5296 int cmd, abi_long arg)
5299 struct dm_ioctl *host_dm;
5300 abi_long guest_data;
5301 uint32_t guest_data_size;
5303 const argtype *arg_type = ie->arg_type;
5305 void *big_buf = NULL;
/* Convert the fixed dm_ioctl header from guest to host layout. */
5309 target_size = thunk_type_size(arg_type, 0);
5310 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5312 ret = -TARGET_EFAULT;
5315 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5316 unlock_user(argptr, arg, 0);
5318 /* buf_temp is too small, so fetch things into a bigger buffer */
/* data_size covers header + payload; doubled here (presumably headroom
 * for the kernel's reply — confirm against full source). */
5319 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5320 memcpy(big_buf, buf_temp, target_size);
/* Guest payload starts data_start bytes past the guest header address. */
5324 guest_data = arg + host_dm->data_start;
5325 if ((guest_data - arg) < 0) {
5326 ret = -TARGET_EINVAL;
5329 guest_data_size = host_dm->data_size - host_dm->data_start;
5330 host_data = (char*)host_dm + host_dm->data_start;
5332 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5334 ret = -TARGET_EFAULT;
/* Convert the inbound payload, per command. */
5338 switch (ie->host_cmd) {
5340 case DM_LIST_DEVICES:
5343 case DM_DEV_SUSPEND:
5346 case DM_TABLE_STATUS:
5347 case DM_TABLE_CLEAR:
5349 case DM_LIST_VERSIONS:
5353 case DM_DEV_SET_GEOMETRY:
5354 /* data contains only strings */
5355 memcpy(host_data, argptr, guest_data_size);
/* NOTE(review): case label for this branch is elided; payload begins
 * with a 64-bit value that needs byte-swapping. */
5358 memcpy(host_data, argptr, guest_data_size);
5359 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
/* Table-load style payload: an array of dm_target_spec records, each
 * followed by a NUL-terminated parameter string. */
5363 void *gspec = argptr;
5364 void *cur_data = host_data;
5365 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5366 int spec_size = thunk_type_size(arg_type, 0);
5369 for (i = 0; i < host_dm->target_count; i++) {
5370 struct dm_target_spec *spec = cur_data;
5374 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5375 slen = strlen((char*)gspec + spec_size) + 1;
/* 'next' is the offset to the following spec record. */
5377 spec->next = sizeof(*spec) + slen;
5378 strcpy((char*)&spec[1], gspec + spec_size);
5380 cur_data += spec->next;
/* Unknown dm command: reject. */
5385 ret = -TARGET_EINVAL;
5386 unlock_user(argptr, guest_data, 0);
5389 unlock_user(argptr, guest_data, 0);
5391 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5392 if (!is_error(ret)) {
/* Convert the outbound payload back to guest layout, per command. */
5393 guest_data = arg + host_dm->data_start;
5394 guest_data_size = host_dm->data_size - host_dm->data_start;
5395 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5396 switch (ie->host_cmd) {
5401 case DM_DEV_SUSPEND:
5404 case DM_TABLE_CLEAR:
5406 case DM_DEV_SET_GEOMETRY:
5407 /* no return data */
5409 case DM_LIST_DEVICES:
/* Reply is a chain of dm_name_list records linked by 'next' offsets. */
5411 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5412 uint32_t remaining_data = guest_data_size;
5413 void *cur_data = argptr;
5414 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5415 int nl_size = 12; /* can't use thunk_size due to alignment */
5418 uint32_t next = nl->next;
/* Recompute 'next' for the guest: fixed header + name string. */
5420 nl->next = nl_size + (strlen(nl->name) + 1);
5422 if (remaining_data < nl->next) {
5423 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5426 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5427 strcpy(cur_data + nl_size, nl->name);
5428 cur_data += nl->next;
5429 remaining_data -= nl->next;
5433 nl = (void*)nl + next;
5438 case DM_TABLE_STATUS:
/* Reply is a chain of dm_target_spec records + status strings. */
5440 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5441 void *cur_data = argptr;
5442 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5443 int spec_size = thunk_type_size(arg_type, 0);
5446 for (i = 0; i < host_dm->target_count; i++) {
5447 uint32_t next = spec->next;
5448 int slen = strlen((char*)&spec[1]) + 1;
/* For the guest, 'next' is an absolute offset from the payload start. */
5449 spec->next = (cur_data - argptr) + spec_size + slen;
5450 if (guest_data_size < spec->next) {
5451 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5454 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5455 strcpy(cur_data + spec_size, (char*)&spec[1]);
5456 cur_data = argptr + spec->next;
5457 spec = (void*)host_dm + host_dm->data_start + next;
/* NOTE(review): case label elided; reply is a count followed at offset 8
 * by an array of 64-bit device numbers (DM_DEV_WAIT/DM_TARGET_MSG style). */
5463 void *hdata = (void*)host_dm + host_dm->data_start;
5464 int count = *(uint32_t*)hdata;
5465 uint64_t *hdev = hdata + 8;
5466 uint64_t *gdev = argptr + 8;
5469 *(uint32_t*)argptr = tswap32(count);
5470 for (i = 0; i < count; i++) {
5471 *gdev = tswap64(*hdev);
5477 case DM_LIST_VERSIONS:
/* Reply is a chain of dm_target_versions records + name strings. */
5479 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5480 uint32_t remaining_data = guest_data_size;
5481 void *cur_data = argptr;
5482 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5483 int vers_size = thunk_type_size(arg_type, 0);
5486 uint32_t next = vers->next;
5488 vers->next = vers_size + (strlen(vers->name) + 1);
5490 if (remaining_data < vers->next) {
5491 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5494 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5495 strcpy(cur_data + vers_size, vers->name);
5496 cur_data += vers->next;
5497 remaining_data -= vers->next;
5501 vers = (void*)vers + next;
5506 unlock_user(argptr, guest_data, 0);
5507 ret = -TARGET_EINVAL;
5510 unlock_user(argptr, guest_data, guest_data_size);
/* Finally convert the (possibly kernel-updated) header back to guest. */
5512 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5514 ret = -TARGET_EFAULT;
5517 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5518 unlock_user(argptr, arg, target_size);
/*
 * Handler for the BLKPG ioctl: struct blkpg_ioctl_arg embeds a pointer to
 * a struct blkpg_partition payload, so the payload must be fetched and
 * converted separately and the pointer swizzled to a local host copy.
 * NOTE(review): this excerpt elides some original source lines (gaps in
 * the embedded line numbers); comments describe only the visible code.
 */
5525 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5526 int cmd, abi_long arg)
5530 const argtype *arg_type = ie->arg_type;
5531 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5534 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5535 struct blkpg_partition host_part;
5537 /* Read and convert blkpg */
5539 target_size = thunk_type_size(arg_type, 0);
5540 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5542 ret = -TARGET_EFAULT;
5545 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5546 unlock_user(argptr, arg, 0);
/* Only opcodes whose payload layout we know are accepted. */
5548 switch (host_blkpg->op) {
5549 case BLKPG_ADD_PARTITION:
5550 case BLKPG_DEL_PARTITION:
5551 /* payload is struct blkpg_partition */
5554 /* Unknown opcode */
5555 ret = -TARGET_EINVAL;
5559 /* Read and convert blkpg->data */
5560 arg = (abi_long)(uintptr_t)host_blkpg->data;
5561 target_size = thunk_type_size(part_arg_type, 0);
5562 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5564 ret = -TARGET_EFAULT;
5567 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5568 unlock_user(argptr, arg, 0);
5570 /* Swizzle the data pointer to our local copy and call! */
5571 host_blkpg->data = &host_part;
5572 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
/*
 * Handler for routing-table ioctls (struct rtentry argument).  The struct
 * contains an embedded rt_dev string pointer that the generic thunk
 * machinery cannot convert, so the struct is converted field-by-field and
 * the rt_dev guest pointer is replaced with a locked host string.
 * NOTE(review): this excerpt elides some original source lines (gaps in
 * the embedded line numbers); comments describe only the visible code.
 */
5578 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5579 int fd, int cmd, abi_long arg)
5581 const argtype *arg_type = ie->arg_type;
5582 const StructEntry *se;
5583 const argtype *field_types;
5584 const int *dst_offsets, *src_offsets;
5587 abi_ulong *target_rt_dev_ptr;
5588 unsigned long *host_rt_dev_ptr;
/* Only a write-only pointer-to-rtentry argument is supported. */
5592 assert(ie->access == IOC_W);
5593 assert(*arg_type == TYPE_PTR);
5595 assert(*arg_type == TYPE_STRUCT);
5596 target_size = thunk_type_size(arg_type, 0);
5597 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5599 return -TARGET_EFAULT;
5602 assert(*arg_type == (int)STRUCT_rtentry);
5603 se = struct_entries + *arg_type++;
5604 assert(se->convert[0] == NULL);
5605 /* convert struct here to be able to catch rt_dev string */
5606 field_types = se->field_types;
5607 dst_offsets = se->field_offsets[THUNK_HOST];
5608 src_offsets = se->field_offsets[THUNK_TARGET];
5609 for (i = 0; i < se->nb_fields; i++) {
5610 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5611 assert(*field_types == TYPE_PTRVOID);
5612 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5613 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5614 if (*target_rt_dev_ptr != 0) {
/* Lock the guest device-name string and store its host address
 * directly into the host rtentry. */
5615 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5616 tswapal(*target_rt_dev_ptr));
5617 if (!*host_rt_dev_ptr) {
5618 unlock_user(argptr, arg, 0);
5619 return -TARGET_EFAULT;
5622 *host_rt_dev_ptr = 0;
/* All other fields go through the generic per-field conversion. */
5627 field_types = thunk_convert(buf_temp + dst_offsets[i],
5628 argptr + src_offsets[i],
5629 field_types, THUNK_HOST);
5631 unlock_user(argptr, arg, 0);
5633 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
/* NOTE(review): host_rt_dev_ptr/target_rt_dev_ptr are only assigned when
 * a field at the rt_dev offset is found in the loop above — verify in the
 * full source that this is guaranteed, otherwise they are read
 * uninitialized here. */
5634 if (*host_rt_dev_ptr != 0) {
5635 unlock_user((void *)*host_rt_dev_ptr,
5636 *target_rt_dev_ptr, 0);
/*
 * Handler for KDSIGACCEPT: the ioctl argument is a signal number, which
 * must be translated from the target's numbering to the host's before
 * being passed through.
 */
5641 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5642 int fd, int cmd, abi_long arg)
5644 int sig = target_to_host_signal(arg);
5645 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
/*
 * Handler for TIOCGPTPEER: the ioctl argument is a set of open(2)-style
 * flags, translated from target to host encoding via fcntl_flags_tbl.
 */
5649 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5650 int fd, int cmd, abi_long arg)
5652 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5653 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
/*
 * Table of supported ioctls.  Each entry maps a target command number to a
 * host command, an access mode, an optional custom handler (IOCTL_SPECIAL),
 * and the argument type description used by the thunk converter.
 * IOCTL_IGNORE entries record a command with host_cmd 0 (rejected later).
 * NOTE(review): the table contents are elided in this excerpt — presumably
 * populated from an included list; confirm against the full source.
 */
5657 static IOCTLEntry ioctl_entries[] = {
5658 #define IOCTL(cmd, access, ...) \
5659 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5660 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5661 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5662 #define IOCTL_IGNORE(cmd) \
5663 { TARGET_ ## cmd, 0, #cmd },
5668 /* ??? Implement proper locking for ioctls. */
5669 /* do_ioctl() Must return target values and target errnos. */
/*
 * Central ioctl dispatcher: looks the command up in ioctl_entries, then
 * either delegates to a custom handler or performs generic argument
 * conversion based on the entry's argtype and access mode.
 * NOTE(review): this excerpt elides some original source lines (gaps in
 * the embedded line numbers); comments describe only the visible code.
 */
5670 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5672 const IOCTLEntry *ie;
5673 const argtype *arg_type;
5675 uint8_t buf_temp[MAX_STRUCT_SIZE];
/* A zero target_cmd marks the end of the table: command not found. */
5681 if (ie->target_cmd == 0) {
5682 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5683 return -TARGET_ENOSYS;
5685 if (ie->target_cmd == cmd)
5689 arg_type = ie->arg_type;
5691 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
/* Entries with a custom handler bypass the generic conversion below. */
5694 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5695 } else if (!ie->host_cmd) {
5696 /* Some architectures define BSD ioctls in their headers
5697 that are not implemented in Linux. */
5698 return -TARGET_ENOSYS;
/* Generic path: dispatch on the declared argument type. */
5701 switch(arg_type[0]) {
/* No argument. */
5704 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
/* Plain integer argument: passed through unchanged. */
5708 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
/* Pointer-to-struct argument: convert according to access mode. */
5712 target_size = thunk_type_size(arg_type, 0);
5713 switch(ie->access) {
/* Read-only for the guest: kernel writes, we copy result out. */
5715 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5716 if (!is_error(ret)) {
5717 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5719 return -TARGET_EFAULT;
5720 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5721 unlock_user(argptr, arg, target_size);
/* Write-only: copy guest data in, no result conversion. */
5725 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5727 return -TARGET_EFAULT;
5728 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5729 unlock_user(argptr, arg, 0);
5730 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
/* Read/write: copy in, call, copy back out on success. */
5734 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5736 return -TARGET_EFAULT;
5737 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5738 unlock_user(argptr, arg, 0);
5739 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5740 if (!is_error(ret)) {
5741 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5743 return -TARGET_EFAULT;
5744 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5745 unlock_user(argptr, arg, target_size);
5751 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5752 (long)cmd, arg_type[0]);
5753 ret = -TARGET_ENOSYS;
/*
 * termios c_iflag translation table: maps each target input-mode bit to
 * the corresponding host bit (mask/bits pairs for both directions).
 */
5759 static const bitmask_transtbl iflag_tbl[] = {
5760 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5761 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5762 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5763 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5764 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5765 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5766 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5767 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5768 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5769 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5770 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5771 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5772 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5773 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
/*
 * termios c_oflag translation table.  Multi-bit delay fields (NLDLY,
 * CRDLY, TABDLY, ...) need one entry per value within the field's mask.
 */
5777 static const bitmask_transtbl oflag_tbl[] = {
5778 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5779 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5780 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5781 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5782 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5783 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5784 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5785 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5786 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5787 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5788 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5789 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5790 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5791 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5792 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5793 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5794 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5795 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5796 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5797 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5798 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5799 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5800 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5801 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/*
 * termios c_cflag translation table: baud rates (one entry per value
 * within the CBAUD mask), character sizes (CSIZE), and control bits.
 */
5805 static const bitmask_transtbl cflag_tbl[] = {
5806 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5807 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5808 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5809 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5810 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5811 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5812 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5813 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5814 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5815 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5816 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5817 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5818 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5819 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5820 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5821 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5822 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5823 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5824 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5825 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5826 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5827 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5828 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5829 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5830 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5831 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5832 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5833 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5834 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5835 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5836 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/*
 * termios c_lflag translation table: local-mode bits (echo, canonical
 * processing, signal generation, ...).
 */
5840 static const bitmask_transtbl lflag_tbl[] = {
5841 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5842 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5843 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5844 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5845 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5846 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5847 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5848 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5849 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5850 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5851 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5852 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5853 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5854 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5855 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/*
 * Convert a target struct termios (byte-swapped, target bit encodings) to
 * the host's struct host_termios: translates each flag word through the
 * tables above and remaps the control-character array index by index.
 */
5859 static void target_to_host_termios (void *dst, const void *src)
5861 struct host_termios *host = dst;
5862 const struct target_termios *target = src;
5865 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5867 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5869 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5871 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5872 host->c_line = target->c_line;
/* Clear first: host c_cc slots with no target counterpart stay zero. */
5874 memset(host->c_cc, 0, sizeof(host->c_cc));
5875 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5876 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5877 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5878 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5879 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5880 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5881 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5882 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5883 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5884 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5885 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5886 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5887 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5888 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5889 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5890 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5891 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/*
 * Inverse of target_to_host_termios: convert a host struct host_termios
 * to the target's layout, bit encodings, and byte order.
 */
5894 static void host_to_target_termios (void *dst, const void *src)
5896 struct target_termios *target = dst;
5897 const struct host_termios *host = src;
5900 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5902 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5904 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5906 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5907 target->c_line = host->c_line;
/* Clear first: target c_cc slots with no host counterpart stay zero. */
5909 memset(target->c_cc, 0, sizeof(target->c_cc));
5910 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5911 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5912 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5913 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5914 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5915 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5916 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5917 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5918 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5919 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5920 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5921 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5922 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5923 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5924 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5925 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5926 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/*
 * Thunk StructEntry for termios: registers the two custom converters
 * above (index 0 = host-to-target, index 1 = target-to-host) together
 * with the per-side sizes and alignments.
 */
5929 static const StructEntry struct_termios_def = {
5930 .convert = { host_to_target_termios, target_to_host_termios },
5931 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5932 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/*
 * mmap(2) flags translation table (target encoding -> host encoding).
 * MAP_STACK is deliberately mapped to 0 so it is accepted from the guest
 * but never forwarded to the host kernel.
 */
5935 static bitmask_transtbl mmap_flags_tbl[] = {
5936 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5937 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5938 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5939 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5940 MAP_ANONYMOUS, MAP_ANONYMOUS },
5941 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5942 MAP_GROWSDOWN, MAP_GROWSDOWN },
5943 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5944 MAP_DENYWRITE, MAP_DENYWRITE },
5945 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5946 MAP_EXECUTABLE, MAP_EXECUTABLE },
5947 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5948 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5949 MAP_NORESERVE, MAP_NORESERVE },
5950 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5951 /* MAP_STACK had been ignored by the kernel for quite some time.
5952 Recognize it for the target insofar as we do not want to pass
5953 it through to the host. */
5954 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5958 #if defined(TARGET_I386)
5960 /* NOTE: there is really one LDT for all the threads */
5961 static uint8_t *ldt_table;
/*
 * modify_ldt(READ): copy the emulated LDT table out to guest memory,
 * capped at the caller-supplied bytecount.
 * NOTE(review): some original lines are elided in this excerpt.
 */
5963 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5970 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5971 if (size > bytecount)
5973 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5975 return -TARGET_EFAULT;
5976 /* ??? Should this by byteswapped? */
5977 memcpy(p, ldt_table, size);
5978 unlock_user(p, ptr, size);
5982 /* XXX: add locking support */
/*
 * modify_ldt(WRITE): validate a guest user_desc-style record, build the
 * two 32-bit descriptor words, and install them into the emulated LDT
 * (lazily allocated via target_mmap on first use).  'oldmode' selects the
 * legacy modify_ldt entry-format semantics.
 * NOTE(review): this excerpt elides some original source lines (gaps in
 * the embedded line numbers); comments describe only the visible code.
 */
5983 static abi_long write_ldt(CPUX86State *env,
5984 abi_ulong ptr, unsigned long bytecount, int oldmode)
5986 struct target_modify_ldt_ldt_s ldt_info;
5987 struct target_modify_ldt_ldt_s *target_ldt_info;
5988 int seg_32bit, contents, read_exec_only, limit_in_pages;
5989 int seg_not_present, useable, lm;
5990 uint32_t *lp, entry_1, entry_2;
/* Fetch and byte-swap the guest's descriptor request. */
5992 if (bytecount != sizeof(ldt_info))
5993 return -TARGET_EINVAL;
5994 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5995 return -TARGET_EFAULT;
5996 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5997 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5998 ldt_info.limit = tswap32(target_ldt_info->limit);
5999 ldt_info.flags = tswap32(target_ldt_info->flags);
6000 unlock_user_struct(target_ldt_info, ptr, 0);
6002 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6003 return -TARGET_EINVAL;
/* Unpack the packed flags word (same bit layout as Linux user_desc). */
6004 seg_32bit = ldt_info.flags & 1;
6005 contents = (ldt_info.flags >> 1) & 3;
6006 read_exec_only = (ldt_info.flags >> 3) & 1;
6007 limit_in_pages = (ldt_info.flags >> 4) & 1;
6008 seg_not_present = (ldt_info.flags >> 5) & 1;
6009 useable = (ldt_info.flags >> 6) & 1;
6013 lm = (ldt_info.flags >> 7) & 1;
/* contents == 3 (conforming code) is rejected unless not-present. */
6015 if (contents == 3) {
6017 return -TARGET_EINVAL;
6018 if (seg_not_present == 0)
6019 return -TARGET_EINVAL;
6021 /* allocate the LDT */
6023 env->ldt.base = target_mmap(0,
6024 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6025 PROT_READ|PROT_WRITE,
6026 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6027 if (env->ldt.base == -1)
6028 return -TARGET_ENOMEM;
6029 memset(g2h(env->ldt.base), 0,
6030 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6031 env->ldt.limit = 0xffff;
6032 ldt_table = g2h(env->ldt.base);
6035 /* NOTE: same code as Linux kernel */
6036 /* Allow LDTs to be cleared by the user. */
6037 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6040 read_exec_only == 1 &&
6042 limit_in_pages == 0 &&
6043 seg_not_present == 1 &&
/* Assemble the descriptor: entry_1 = base[15:0] | limit[15:0];
 * entry_2 = base[31:24] | flags | limit[19:16] | base[23:16]. */
6051 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6052 (ldt_info.limit & 0x0ffff);
6053 entry_2 = (ldt_info.base_addr & 0xff000000) |
6054 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6055 (ldt_info.limit & 0xf0000) |
6056 ((read_exec_only ^ 1) << 9) |
6058 ((seg_not_present ^ 1) << 15) |
6060 (limit_in_pages << 23) |
/* The AVL bit is presumably only set outside oldmode — the guarding
 * condition is elided in this excerpt. */
6064 entry_2 |= (useable << 20);
6066 /* Install the new entry ... */
6068 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6069 lp[0] = tswap32(entry_1);
6070 lp[1] = tswap32(entry_2);
6074 /* specific and weird i386 syscalls */
/*
 * modify_ldt(2) emulation: func 0 reads the LDT; func 1 writes an entry
 * in old (legacy) mode; the remaining visible case writes in new mode.
 * Unknown func values return -TARGET_ENOSYS.
 * NOTE(review): the case labels are elided in this excerpt.
 */
6075 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6076 unsigned long bytecount)
6082 ret = read_ldt(ptr, bytecount);
6085 ret = write_ldt(env, ptr, bytecount, 1);
6088 ret = write_ldt(env, ptr, bytecount, 0);
6091 ret = -TARGET_ENOSYS;
6097 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * set_thread_area(2) emulation (i386 32-bit ABI): install a TLS descriptor
 * into the emulated GDT.  entry_number == -1 means "pick a free TLS slot"
 * and the chosen index is written back to the guest struct.
 * Mirrors write_ldt() above; the descriptor-encoding logic is identical.
 * NOTE(review): this excerpt elides some original source lines (gaps in
 * the embedded line numbers); comments describe only the visible code.
 */
6098 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6100 uint64_t *gdt_table = g2h(env->gdt.base);
6101 struct target_modify_ldt_ldt_s ldt_info;
6102 struct target_modify_ldt_ldt_s *target_ldt_info;
6103 int seg_32bit, contents, read_exec_only, limit_in_pages;
6104 int seg_not_present, useable, lm;
6105 uint32_t *lp, entry_1, entry_2;
6108 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6109 if (!target_ldt_info)
6110 return -TARGET_EFAULT;
6111 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6112 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6113 ldt_info.limit = tswap32(target_ldt_info->limit);
6114 ldt_info.flags = tswap32(target_ldt_info->flags);
/* -1 requests allocation: scan the TLS range for an empty GDT slot and
 * report the chosen index back to the guest. */
6115 if (ldt_info.entry_number == -1) {
6116 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6117 if (gdt_table[i] == 0) {
6118 ldt_info.entry_number = i;
6119 target_ldt_info->entry_number = tswap32(i);
6124 unlock_user_struct(target_ldt_info, ptr, 1);
6126 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6127 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6128 return -TARGET_EINVAL;
/* Unpack the packed flags word (same bit layout as Linux user_desc). */
6129 seg_32bit = ldt_info.flags & 1;
6130 contents = (ldt_info.flags >> 1) & 3;
6131 read_exec_only = (ldt_info.flags >> 3) & 1;
6132 limit_in_pages = (ldt_info.flags >> 4) & 1;
6133 seg_not_present = (ldt_info.flags >> 5) & 1;
6134 useable = (ldt_info.flags >> 6) & 1;
6138 lm = (ldt_info.flags >> 7) & 1;
6141 if (contents == 3) {
6142 if (seg_not_present == 0)
6143 return -TARGET_EINVAL;
6146 /* NOTE: same code as Linux kernel */
6147 /* Allow LDTs to be cleared by the user. */
6148 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6149 if ((contents == 0 &&
6150 read_exec_only == 1 &&
6152 limit_in_pages == 0 &&
6153 seg_not_present == 1 &&
/* Assemble the two 32-bit descriptor words (see write_ldt layout). */
6161 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6162 (ldt_info.limit & 0x0ffff);
6163 entry_2 = (ldt_info.base_addr & 0xff000000) |
6164 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6165 (ldt_info.limit & 0xf0000) |
6166 ((read_exec_only ^ 1) << 9) |
6168 ((seg_not_present ^ 1) << 15) |
6170 (limit_in_pages << 23) |
6175 /* Install the new entry ... */
6177 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6178 lp[0] = tswap32(entry_1);
6179 lp[1] = tswap32(entry_2);
/*
 * get_thread_area(2) emulation: read a TLS descriptor from the emulated
 * GDT, decode the two 32-bit descriptor words back into user_desc fields
 * (the inverse of do_set_thread_area), and write them to the guest.
 */
6183 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6185 struct target_modify_ldt_ldt_s *target_ldt_info;
6186 uint64_t *gdt_table = g2h(env->gdt.base);
6187 uint32_t base_addr, limit, flags;
6188 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6189 int seg_not_present, useable, lm;
6190 uint32_t *lp, entry_1, entry_2;
6192 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6193 if (!target_ldt_info)
6194 return -TARGET_EFAULT;
/* Only indices within the TLS range of the GDT are readable. */
6195 idx = tswap32(target_ldt_info->entry_number);
6196 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6197 idx > TARGET_GDT_ENTRY_TLS_MAX) {
6198 unlock_user_struct(target_ldt_info, ptr, 1);
6199 return -TARGET_EINVAL;
6201 lp = (uint32_t *)(gdt_table + idx);
6202 entry_1 = tswap32(lp[0]);
6203 entry_2 = tswap32(lp[1]);
/* Decode descriptor bits back into user_desc flag fields. */
6205 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6206 contents = (entry_2 >> 10) & 3;
6207 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6208 seg_32bit = (entry_2 >> 22) & 1;
6209 limit_in_pages = (entry_2 >> 23) & 1;
6210 useable = (entry_2 >> 20) & 1;
6214 lm = (entry_2 >> 21) & 1;
6216 flags = (seg_32bit << 0) | (contents << 1) |
6217 (read_exec_only << 3) | (limit_in_pages << 4) |
6218 (seg_not_present << 5) | (useable << 6) | (lm << 7);
/* Reassemble limit and base from their scattered descriptor fields. */
6219 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6220 base_addr = (entry_1 >> 16) |
6221 (entry_2 & 0xff000000) |
6222 ((entry_2 & 0xff) << 16);
6223 target_ldt_info->base_addr = tswapal(base_addr);
6224 target_ldt_info->limit = tswap32(limit);
6225 target_ldt_info->flags = tswap32(flags);
6226 unlock_user_struct(target_ldt_info, ptr, 1);
6229 #endif /* TARGET_I386 && TARGET_ABI32 */
6231 #ifndef TARGET_ABI32
/*
 * arch_prctl(2) emulation (x86-64): SET_GS/SET_FS load the segment and
 * set its base; GET_GS/GET_FS read the base back to guest memory.
 * NOTE(review): the idx selection else-branches are elided in this
 * excerpt; only the GS-selecting halves of each pair are visible.
 */
6232 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6239 case TARGET_ARCH_SET_GS:
6240 case TARGET_ARCH_SET_FS:
6241 if (code == TARGET_ARCH_SET_GS)
6245 cpu_x86_load_seg(env, idx, 0);
6246 env->segs[idx].base = addr;
6248 case TARGET_ARCH_GET_GS:
6249 case TARGET_ARCH_GET_FS:
6250 if (code == TARGET_ARCH_GET_GS)
6254 val = env->segs[idx].base;
6255 if (put_user(val, addr, abi_ulong))
6256 ret = -TARGET_EFAULT;
6259 ret = -TARGET_EINVAL;
6266 #endif /* defined(TARGET_I386) */
6268 #define NEW_STACK_SIZE 0x40000
6271 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6274 pthread_mutex_t mutex;
6275 pthread_cond_t cond;
6278 abi_ulong child_tidptr;
6279 abi_ulong parent_tidptr;
/*
 * Entry point for a pthread created by do_fork(CLONE_VM): registers the
 * thread with RCU and TCG, publishes its tid to the requested guest
 * addresses, unblocks signals, signals the parent it is ready, then waits
 * on clone_lock until the parent finishes TLS setup before running.
 * NOTE(review): some lines (info->env read, cpu_loop entry) are elided
 * in this excerpt.
 */
6283 static void *clone_func(void *arg)
6285 new_thread_info *info = arg;
6290 rcu_register_thread();
6291 tcg_register_thread();
6293 cpu = ENV_GET_CPU(env);
6295 ts = (TaskState *)cpu->opaque;
6296 info->tid = gettid();
/* Publish the new tid wherever the clone caller asked for it. */
6298 if (info->child_tidptr)
6299 put_user_u32(info->tid, info->child_tidptr);
6300 if (info->parent_tidptr)
6301 put_user_u32(info->tid, info->parent_tidptr);
6302 /* Enable signals. */
6303 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6304 /* Signal to the parent that we're ready. */
6305 pthread_mutex_lock(&info->mutex);
6306 pthread_cond_broadcast(&info->cond);
6307 pthread_mutex_unlock(&info->mutex);
6308 /* Wait until the parent has finished initializing the tls state. */
6309 pthread_mutex_lock(&clone_lock);
6310 pthread_mutex_unlock(&clone_lock);
6316 /* do_fork() Must return host values and target errnos (unlike most
6317 do_*() functions). */
/*
 * Emulate the guest clone()/fork()/vfork() family.
 *
 * env           - parent vcpu state
 * flags         - guest CLONE_* flag word (low bits = exit signal, CSIGNAL)
 * newsp         - new child stack pointer (passed to cpu_clone_regs)
 * parent_tidptr / child_tidptr - guest addresses receiving the TID
 * newtls        - new TLS base when CLONE_SETTLS is set
 *
 * CLONE_VM requests are implemented as a new host pthread sharing the
 * address space; everything else falls through to a host fork().
 */
6318 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6319 abi_ulong parent_tidptr, target_ulong newtls,
6320 abi_ulong child_tidptr)
6322 CPUState *cpu = ENV_GET_CPU(env);
6326 CPUArchState *new_env;
/* Drop flags we can safely ignore when emulating. */
6329 flags &= ~CLONE_IGNORED_FLAGS;
6331 /* Emulate vfork() with fork() */
6332 if (flags & CLONE_VFORK)
6333 flags &= ~(CLONE_VFORK | CLONE_VM);
/* Thread-style clone: shared address space -> spawn a host thread. */
6335 if (flags & CLONE_VM) {
6336 TaskState *parent_ts = (TaskState *)cpu->opaque;
6337 new_thread_info info;
6338 pthread_attr_t attr;
/* Only the exact pthread-compatible flag combination is supported. */
6340 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6341 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6342 return -TARGET_EINVAL;
6345 ts = g_new0(TaskState, 1);
6346 init_task_state(ts);
6347 /* we create a new CPU instance. */
6348 new_env = cpu_copy(env);
6349 /* Init regs that differ from the parent. */
6350 cpu_clone_regs(new_env, newsp);
6351 new_cpu = ENV_GET_CPU(new_env);
6352 new_cpu->opaque = ts;
/* The child thread shares the parent's binary image and signal mask. */
6353 ts->bprm = parent_ts->bprm;
6354 ts->info = parent_ts->info;
6355 ts->signal_mask = parent_ts->signal_mask;
6357 if (flags & CLONE_CHILD_CLEARTID) {
6358 ts->child_tidptr = child_tidptr;
6361 if (flags & CLONE_SETTLS) {
6362 cpu_set_tls (new_env, newtls);
6365 /* Grab a mutex so that thread setup appears atomic. */
6366 pthread_mutex_lock(&clone_lock);
/* info is shared with clone_func() until the child signals info.cond. */
6368 memset(&info, 0, sizeof(info));
6369 pthread_mutex_init(&info.mutex, NULL);
6370 pthread_mutex_lock(&info.mutex);
6371 pthread_cond_init(&info.cond, NULL);
6373 if (flags & CLONE_CHILD_SETTID) {
6374 info.child_tidptr = child_tidptr;
6376 if (flags & CLONE_PARENT_SETTID) {
6377 info.parent_tidptr = parent_tidptr;
6380 ret = pthread_attr_init(&attr);
6381 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6382 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6383 /* It is not safe to deliver signals until the child has finished
6384 initializing, so temporarily block all signals. */
6385 sigfillset(&sigmask);
6386 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6388 /* If this is our first additional thread, we need to ensure we
6389 * generate code for parallel execution and flush old translations.
6391 if (!parallel_cpus) {
6392 parallel_cpus = true;
6396 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6397 /* TODO: Free new CPU state if thread creation failed. */
/* Restore the signal mask saved above and tear down temporaries. */
6399 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6400 pthread_attr_destroy(&attr);
6402 /* Wait for the child to initialize. */
6403 pthread_cond_wait(&info.cond, &info.mutex);
6408 pthread_mutex_unlock(&info.mutex);
6409 pthread_cond_destroy(&info.cond);
6410 pthread_mutex_destroy(&info.mutex);
6411 pthread_mutex_unlock(&clone_lock);
6413 /* if no CLONE_VM, we consider it is a fork */
6414 if (flags & CLONE_INVALID_FORK_FLAGS) {
6415 return -TARGET_EINVAL;
6418 /* We can't support custom termination signals */
6419 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6420 return -TARGET_EINVAL;
6423 if (block_signals()) {
6424 return -TARGET_ERESTARTSYS;
6430 /* Child Process. */
6431 cpu_clone_regs(env, newsp);
6433 /* There is a race condition here. The parent process could
6434 theoretically read the TID in the child process before the child
6435 tid is set. This would require using either ptrace
6436 (not implemented) or having *_tidptr to point at a shared memory
6437 mapping. We can't repeat the spinlock hack used above because
6438 the child process gets its own copy of the lock. */
6439 if (flags & CLONE_CHILD_SETTID)
6440 put_user_u32(gettid(), child_tidptr);
6441 if (flags & CLONE_PARENT_SETTID)
6442 put_user_u32(gettid(), parent_tidptr);
6443 ts = (TaskState *)cpu->opaque;
6444 if (flags & CLONE_SETTLS)
6445 cpu_set_tls (env, newtls);
6446 if (flags & CLONE_CHILD_CLEARTID)
6447 ts->child_tidptr = child_tidptr;
6455 /* warning : doesn't handle linux specific flags... */
/*
 * Translate a guest fcntl() command number to the host's.
 * Returns -TARGET_EINVAL for commands we do not support.
 * (Many case bodies simply return the identically-named host F_* value.)
 */
6456 static int target_to_host_fcntl_cmd(int cmd)
6459 case TARGET_F_DUPFD:
6460 case TARGET_F_GETFD:
6461 case TARGET_F_SETFD:
6462 case TARGET_F_GETFL:
6463 case TARGET_F_SETFL:
6465 case TARGET_F_GETLK:
6467 case TARGET_F_SETLK:
6469 case TARGET_F_SETLKW:
6471 case TARGET_F_GETOWN:
6473 case TARGET_F_SETOWN:
6475 case TARGET_F_GETSIG:
6477 case TARGET_F_SETSIG:
/* On 32-bit ABIs the *64 lock commands are distinct target numbers. */
6479 #if TARGET_ABI_BITS == 32
6480 case TARGET_F_GETLK64:
6482 case TARGET_F_SETLK64:
6484 case TARGET_F_SETLKW64:
6487 case TARGET_F_SETLEASE:
6489 case TARGET_F_GETLEASE:
6491 #ifdef F_DUPFD_CLOEXEC
6492 case TARGET_F_DUPFD_CLOEXEC:
6493 return F_DUPFD_CLOEXEC;
6495 case TARGET_F_NOTIFY:
6498 case TARGET_F_GETOWN_EX:
6502 case TARGET_F_SETOWN_EX:
6506 case TARGET_F_SETPIPE_SZ:
6507 return F_SETPIPE_SZ;
6508 case TARGET_F_GETPIPE_SZ:
6509 return F_GETPIPE_SZ;
/* Unknown/unsupported command. */
6512 return -TARGET_EINVAL;
6514 return -TARGET_EINVAL;
/*
 * Mapping table between target and host file-lock type values
 * (F_RDLCK etc), used via target_to_host_bitmask()/host_to_target_bitmask().
 */
6517 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6518 static const bitmask_transtbl flock_tbl[] = {
6519 TRANSTBL_CONVERT(F_RDLCK),
6520 TRANSTBL_CONVERT(F_WRLCK),
6521 TRANSTBL_CONVERT(F_UNLCK),
6522 TRANSTBL_CONVERT(F_EXLCK),
6523 TRANSTBL_CONVERT(F_SHLCK),
/*
 * Copy a guest 'struct flock' into a host struct flock64.
 * Returns 0 on success, -TARGET_EFAULT if the guest address is bad.
 */
6527 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6528 abi_ulong target_flock_addr)
6530 struct target_flock *target_fl;
6533 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6534 return -TARGET_EFAULT;
/* l_type values differ between ABIs; translate via flock_tbl. */
6537 __get_user(l_type, &target_fl->l_type);
6538 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6539 __get_user(fl->l_whence, &target_fl->l_whence);
6540 __get_user(fl->l_start, &target_fl->l_start);
6541 __get_user(fl->l_len, &target_fl->l_len);
6542 __get_user(fl->l_pid, &target_fl->l_pid);
6543 unlock_user_struct(target_fl, target_flock_addr, 0);
/*
 * Copy a host struct flock64 back out to a guest 'struct flock'.
 * Returns 0 on success, -TARGET_EFAULT on bad guest address.
 */
6547 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6548 const struct flock64 *fl)
6550 struct target_flock *target_fl;
6553 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6554 return -TARGET_EFAULT;
6557 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6558 __put_user(l_type, &target_fl->l_type);
6559 __put_user(fl->l_whence, &target_fl->l_whence);
6560 __put_user(fl->l_start, &target_fl->l_start);
6561 __put_user(fl->l_len, &target_fl->l_len);
6562 __put_user(fl->l_pid, &target_fl->l_pid);
6563 unlock_user_struct(target_fl, target_flock_addr, 1);
/* Function-pointer shapes for the flock64 copy helpers below. */
6567 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6568 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
/* ARM EABI uses a differently padded flock64 layout; handle it specially. */
6570 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6571 static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
6572 abi_ulong target_flock_addr)
6574 struct target_eabi_flock64 *target_fl;
6577 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6578 return -TARGET_EFAULT;
6581 __get_user(l_type, &target_fl->l_type);
6582 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6583 __get_user(fl->l_whence, &target_fl->l_whence);
6584 __get_user(fl->l_start, &target_fl->l_start);
6585 __get_user(fl->l_len, &target_fl->l_len);
6586 __get_user(fl->l_pid, &target_fl->l_pid);
6587 unlock_user_struct(target_fl, target_flock_addr, 0);
6591 static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
6592 const struct flock64 *fl)
6594 struct target_eabi_flock64 *target_fl;
6597 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6598 return -TARGET_EFAULT;
6601 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6602 __put_user(l_type, &target_fl->l_type);
6603 __put_user(fl->l_whence, &target_fl->l_whence);
6604 __put_user(fl->l_start, &target_fl->l_start);
6605 __put_user(fl->l_len, &target_fl->l_len);
6606 __put_user(fl->l_pid, &target_fl->l_pid);
6607 unlock_user_struct(target_fl, target_flock_addr, 1);
/* Generic (non-EABI) guest flock64 <-> host flock64 converters. */
6612 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6613 abi_ulong target_flock_addr)
6615 struct target_flock64 *target_fl;
6618 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6619 return -TARGET_EFAULT;
6622 __get_user(l_type, &target_fl->l_type);
6623 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6624 __get_user(fl->l_whence, &target_fl->l_whence);
6625 __get_user(fl->l_start, &target_fl->l_start);
6626 __get_user(fl->l_len, &target_fl->l_len);
6627 __get_user(fl->l_pid, &target_fl->l_pid);
6628 unlock_user_struct(target_fl, target_flock_addr, 0);
6632 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6633 const struct flock64 *fl)
6635 struct target_flock64 *target_fl;
6638 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6639 return -TARGET_EFAULT;
6642 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6643 __put_user(l_type, &target_fl->l_type);
6644 __put_user(fl->l_whence, &target_fl->l_whence);
6645 __put_user(fl->l_start, &target_fl->l_start);
6646 __put_user(fl->l_len, &target_fl->l_len);
6647 __put_user(fl->l_pid, &target_fl->l_pid);
6648 unlock_user_struct(target_fl, target_flock_addr, 1);
/*
 * Emulate fcntl(2): translate the command and any pointed-to structures
 * (flock, flock64, f_owner_ex), call the host fcntl, and translate the
 * result back.  Returns a host result or negative target errno.
 */
6652 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6654 struct flock64 fl64;
6656 struct f_owner_ex fox;
6657 struct target_f_owner_ex *target_fox;
6660 int host_cmd = target_to_host_fcntl_cmd(cmd);
6662 if (host_cmd == -TARGET_EINVAL)
6666 case TARGET_F_GETLK:
6667 ret = copy_from_user_flock(&fl64, arg);
6671 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
/* GETLK writes back the conflicting-lock description. */
6673 ret = copy_to_user_flock(arg, &fl64);
6677 case TARGET_F_SETLK:
6678 case TARGET_F_SETLKW:
6679 ret = copy_from_user_flock(&fl64, arg);
6683 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6686 case TARGET_F_GETLK64:
6687 ret = copy_from_user_flock64(&fl64, arg);
6691 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6693 ret = copy_to_user_flock64(arg, &fl64);
6696 case TARGET_F_SETLK64:
6697 case TARGET_F_SETLKW64:
6698 ret = copy_from_user_flock64(&fl64, arg);
6702 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6705 case TARGET_F_GETFL:
6706 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
/* O_* status flag values differ per ABI; map host -> target. */
6708 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6712 case TARGET_F_SETFL:
6713 ret = get_errno(safe_fcntl(fd, host_cmd,
6714 target_to_host_bitmask(arg,
6719 case TARGET_F_GETOWN_EX:
6720 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6722 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6723 return -TARGET_EFAULT;
6724 target_fox->type = tswap32(fox.type);
6725 target_fox->pid = tswap32(fox.pid);
6726 unlock_user_struct(target_fox, arg, 1);
6732 case TARGET_F_SETOWN_EX:
6733 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6734 return -TARGET_EFAULT;
6735 fox.type = tswap32(target_fox->type);
6736 fox.pid = tswap32(target_fox->pid);
6737 unlock_user_struct(target_fox, arg, 0);
6738 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
/* Plain-integer-argument commands pass 'arg' straight through. */
6742 case TARGET_F_SETOWN:
6743 case TARGET_F_GETOWN:
6744 case TARGET_F_SETSIG:
6745 case TARGET_F_GETSIG:
6746 case TARGET_F_SETLEASE:
6747 case TARGET_F_GETLEASE:
6748 case TARGET_F_SETPIPE_SZ:
6749 case TARGET_F_GETPIPE_SZ:
6750 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
/* Default: pass the original cmd through untranslated. */
6754 ret = get_errno(safe_fcntl(fd, cmd, arg));
/*
 * UID/GID width conversion helpers.  With USE_UID16 the guest ABI uses
 * 16-bit IDs: high2low* clamp 32-bit host IDs for the guest, low2high*
 * widen guest IDs (preserving the (uint16_t)-1 "no change" sentinel).
 * Without USE_UID16 they are identity functions.
 */
6762 static inline int high2lowuid(int uid)
6770 static inline int high2lowgid(int gid)
6778 static inline int low2highuid(int uid)
/* -1 as a 16-bit value means "leave unchanged"; keep it -1 when widened. */
6780 if ((int16_t)uid == -1)
6786 static inline int low2highgid(int gid)
6788 if ((int16_t)gid == -1)
6793 static inline int tswapid(int id)
6798 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6800 #else /* !USE_UID16 */
6801 static inline int high2lowuid(int uid)
6805 static inline int high2lowgid(int gid)
6809 static inline int low2highuid(int uid)
6813 static inline int low2highgid(int gid)
6817 static inline int tswapid(int id)
6822 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6824 #endif /* USE_UID16 */
6826 /* We must do direct syscalls for setting UID/GID, because we want to
6827 * implement the Linux system call semantics of "change only for this thread",
6828 * not the libc/POSIX semantics of "change for all threads in process".
6829 * (See http://ewontfix.com/17/ for more details.)
6830 * We use the 32-bit version of the syscalls if present; if it is not
6831 * then either the host architecture supports 32-bit UIDs natively with
6832 * the standard syscall, or the 16-bit UID is the best we can do.
6834 #ifdef __NR_setuid32
6835 #define __NR_sys_setuid __NR_setuid32
6837 #define __NR_sys_setuid __NR_setuid
6839 #ifdef __NR_setgid32
6840 #define __NR_sys_setgid __NR_setgid32
6842 #define __NR_sys_setgid __NR_setgid
6844 #ifdef __NR_setresuid32
6845 #define __NR_sys_setresuid __NR_setresuid32
6847 #define __NR_sys_setresuid __NR_setresuid
6849 #ifdef __NR_setresgid32
6850 #define __NR_sys_setresgid __NR_setresgid32
6852 #define __NR_sys_setresgid __NR_setresgid
/* Raw per-thread syscall wrappers (bypass glibc's all-threads setxid). */
6855 _syscall1(int, sys_setuid, uid_t, uid)
6856 _syscall1(int, sys_setgid, gid_t, gid)
6857 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6858 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/*
 * One-time initialization for the syscall layer: registers the thunk
 * struct descriptions, builds the target->host errno table, and patches
 * ioctl numbers whose size field must be computed from the argument type.
 */
6860 void syscall_init(void)
6863 const argtype *arg_type;
6867 thunk_init(STRUCT_MAX);
6869 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6870 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6871 #include "syscall_types.h"
6873 #undef STRUCT_SPECIAL
6875 /* Build target_to_host_errno_table[] table from
6876 * host_to_target_errno_table[]. */
6877 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6878 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6881 /* we patch the ioctl size if necessary. We rely on the fact that
6882 no ioctl has all the bits at '1' in the size field */
6884 while (ie->target_cmd != 0) {
/* An all-ones size field marks "compute size from the thunk type". */
6885 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6886 TARGET_IOC_SIZEMASK) {
6887 arg_type = ie->arg_type;
6888 if (arg_type[0] != TYPE_PTR) {
6889 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6894 size = thunk_type_size(arg_type, 0);
6895 ie->target_cmd = (ie->target_cmd &
6896 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6897 (size << TARGET_IOC_SIZESHIFT);
6900 /* automatic consistency check if same arch */
6901 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6902 (defined(__x86_64__) && defined(TARGET_X86_64))
6903 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6904 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6905 ie->name, ie->target_cmd, ie->host_cmd);
6912 #if TARGET_ABI_BITS == 32
/*
 * Combine the two 32-bit register halves of a 64-bit file offset.
 * Word order follows the guest's endianness; on 64-bit ABIs the
 * offset arrives whole in word0 and word1 is ignored.
 */
6913 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6915 #ifdef TARGET_WORDS_BIGENDIAN
6916 return ((uint64_t)word0 << 32) | word1;
6918 return ((uint64_t)word1 << 32) | word0;
6921 #else /* TARGET_ABI_BITS == 32 */
6922 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6926 #endif /* TARGET_ABI_BITS != 32 */
6928 #ifdef TARGET_NR_truncate64
/*
 * truncate64: the 64-bit length is split across arg2/arg3; some ABIs
 * require the register pair to be aligned (regpairs_aligned), which
 * shifts which registers carry it.
 */
6929 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6934 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6938 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6942 #ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair handling as truncate64, but on an fd. */
6943 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6948 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6952 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/*
 * Guest <-> host converters for struct timespec, struct itimerspec and
 * struct timex.  All return 0 on success or -TARGET_EFAULT when the
 * guest address cannot be locked.
 */
6956 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6957 abi_ulong target_addr)
6959 struct target_timespec *target_ts;
6961 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6962 return -TARGET_EFAULT;
6963 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6964 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6965 unlock_user_struct(target_ts, target_addr, 0);
6969 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6970 struct timespec *host_ts)
6972 struct target_timespec *target_ts;
6974 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6975 return -TARGET_EFAULT;
6976 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6977 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6978 unlock_user_struct(target_ts, target_addr, 1);
/* itimerspec: interval + value pairs, byte-swapped with tswapal(). */
6982 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6983 abi_ulong target_addr)
6985 struct target_itimerspec *target_itspec;
6987 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6988 return -TARGET_EFAULT;
6991 host_itspec->it_interval.tv_sec =
6992 tswapal(target_itspec->it_interval.tv_sec);
6993 host_itspec->it_interval.tv_nsec =
6994 tswapal(target_itspec->it_interval.tv_nsec);
6995 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6996 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6998 unlock_user_struct(target_itspec, target_addr, 1);
7002 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7003 struct itimerspec *host_its)
7005 struct target_itimerspec *target_itspec;
7007 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
7008 return -TARGET_EFAULT;
7011 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
7012 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
7014 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
7015 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
7017 unlock_user_struct(target_itspec, target_addr, 0);
/* timex (adjtimex): field-by-field copy of the kernel timekeeping struct. */
7021 static inline abi_long target_to_host_timex(struct timex *host_tx,
7022 abi_long target_addr)
7024 struct target_timex *target_tx;
7026 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7027 return -TARGET_EFAULT;
7030 __get_user(host_tx->modes, &target_tx->modes);
7031 __get_user(host_tx->offset, &target_tx->offset);
7032 __get_user(host_tx->freq, &target_tx->freq);
7033 __get_user(host_tx->maxerror, &target_tx->maxerror);
7034 __get_user(host_tx->esterror, &target_tx->esterror);
7035 __get_user(host_tx->status, &target_tx->status);
7036 __get_user(host_tx->constant, &target_tx->constant);
7037 __get_user(host_tx->precision, &target_tx->precision);
7038 __get_user(host_tx->tolerance, &target_tx->tolerance);
7039 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7040 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7041 __get_user(host_tx->tick, &target_tx->tick);
7042 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7043 __get_user(host_tx->jitter, &target_tx->jitter);
7044 __get_user(host_tx->shift, &target_tx->shift);
7045 __get_user(host_tx->stabil, &target_tx->stabil);
7046 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7047 __get_user(host_tx->calcnt, &target_tx->calcnt);
7048 __get_user(host_tx->errcnt, &target_tx->errcnt);
7049 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7050 __get_user(host_tx->tai, &target_tx->tai);
7052 unlock_user_struct(target_tx, target_addr, 0);
7056 static inline abi_long host_to_target_timex(abi_long target_addr,
7057 struct timex *host_tx)
7059 struct target_timex *target_tx;
7061 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7062 return -TARGET_EFAULT;
7065 __put_user(host_tx->modes, &target_tx->modes);
7066 __put_user(host_tx->offset, &target_tx->offset);
7067 __put_user(host_tx->freq, &target_tx->freq);
7068 __put_user(host_tx->maxerror, &target_tx->maxerror);
7069 __put_user(host_tx->esterror, &target_tx->esterror);
7070 __put_user(host_tx->status, &target_tx->status);
7071 __put_user(host_tx->constant, &target_tx->constant);
7072 __put_user(host_tx->precision, &target_tx->precision);
7073 __put_user(host_tx->tolerance, &target_tx->tolerance);
7074 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7075 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7076 __put_user(host_tx->tick, &target_tx->tick);
7077 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7078 __put_user(host_tx->jitter, &target_tx->jitter);
7079 __put_user(host_tx->shift, &target_tx->shift);
7080 __put_user(host_tx->stabil, &target_tx->stabil);
7081 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7082 __put_user(host_tx->calcnt, &target_tx->calcnt);
7083 __put_user(host_tx->errcnt, &target_tx->errcnt);
7084 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7085 __put_user(host_tx->tai, &target_tx->tai);
7087 unlock_user_struct(target_tx, target_addr, 1);
/*
 * Convert a guest struct sigevent (timer/aio notification spec) to the
 * host layout.  Returns 0 or -TARGET_EFAULT on a bad guest pointer.
 */
7092 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7093 abi_ulong target_addr)
7095 struct target_sigevent *target_sevp;
7097 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7098 return -TARGET_EFAULT;
7101 /* This union is awkward on 64 bit systems because it has a 32 bit
7102 * integer and a pointer in it; we follow the conversion approach
7103 * used for handling sigval types in signal.c so the guest should get
7104 * the correct value back even if we did a 64 bit byteswap and it's
7105 * using the 32 bit integer.
7107 host_sevp->sigev_value.sival_ptr =
7108 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
/* Signal numbers differ per ABI, so translate as well as byteswap. */
7109 host_sevp->sigev_signo =
7110 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7111 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7112 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7114 unlock_user_struct(target_sevp, target_addr, 1);
7118 #if defined(TARGET_NR_mlockall)
/* Map guest MCL_* mlockall flags onto the host's bit values. */
7119 static inline int target_to_host_mlockall_arg(int arg)
7123 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
7124 result |= MCL_CURRENT;
7126 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
7127 result |= MCL_FUTURE;
/*
 * Write a host struct stat out as the guest's stat64 (or plain stat
 * when the target has no separate stat64 layout).  ARM EABI guests get
 * the specially padded target_eabi_stat64.  Returns 0 or -TARGET_EFAULT.
 */
7133 static inline abi_long host_to_target_stat64(void *cpu_env,
7134 abi_ulong target_addr,
7135 struct stat *host_st)
7137 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7138 if (((CPUARMState *)cpu_env)->eabi) {
7139 struct target_eabi_stat64 *target_st;
7141 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7142 return -TARGET_EFAULT;
/* Zero first so padding/unused fields are deterministic. */
7143 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7144 __put_user(host_st->st_dev, &target_st->st_dev);
7145 __put_user(host_st->st_ino, &target_st->st_ino);
7146 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7147 __put_user(host_st->st_ino, &target_st->__st_ino);
7149 __put_user(host_st->st_mode, &target_st->st_mode);
7150 __put_user(host_st->st_nlink, &target_st->st_nlink);
7151 __put_user(host_st->st_uid, &target_st->st_uid);
7152 __put_user(host_st->st_gid, &target_st->st_gid);
7153 __put_user(host_st->st_rdev, &target_st->st_rdev);
7154 __put_user(host_st->st_size, &target_st->st_size);
7155 __put_user(host_st->st_blksize, &target_st->st_blksize);
7156 __put_user(host_st->st_blocks, &target_st->st_blocks);
7157 __put_user(host_st->st_atime, &target_st->target_st_atime);
7158 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7159 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7160 unlock_user_struct(target_st, target_addr, 1);
/* Non-EABI path: choose between target_stat64 and target_stat. */
7164 #if defined(TARGET_HAS_STRUCT_STAT64)
7165 struct target_stat64 *target_st;
7167 struct target_stat *target_st;
7170 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7171 return -TARGET_EFAULT;
7172 memset(target_st, 0, sizeof(*target_st));
7173 __put_user(host_st->st_dev, &target_st->st_dev);
7174 __put_user(host_st->st_ino, &target_st->st_ino);
7175 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7176 __put_user(host_st->st_ino, &target_st->__st_ino);
7178 __put_user(host_st->st_mode, &target_st->st_mode);
7179 __put_user(host_st->st_nlink, &target_st->st_nlink);
7180 __put_user(host_st->st_uid, &target_st->st_uid);
7181 __put_user(host_st->st_gid, &target_st->st_gid);
7182 __put_user(host_st->st_rdev, &target_st->st_rdev);
7183 /* XXX: better use of kernel struct */
7184 __put_user(host_st->st_size, &target_st->st_size);
7185 __put_user(host_st->st_blksize, &target_st->st_blksize);
7186 __put_user(host_st->st_blocks, &target_st->st_blocks);
7187 __put_user(host_st->st_atime, &target_st->target_st_atime);
7188 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7189 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7190 unlock_user_struct(target_st, target_addr, 1);
7196 /* ??? Using host futex calls even when target atomic operations
7197 are not really atomic probably breaks things. However implementing
7198 futexes locally would make futexes shared between multiple processes
7199 tricky. However they're probably useless because guest atomic
7200 operations won't work either. */
/*
 * Emulate futex(2) on the guest address 'uaddr' by issuing the host
 * futex syscall on the corresponding host address (g2h).  Only the
 * sub-operations dispatched below are supported; others get -ENOSYS.
 */
7201 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7202 target_ulong uaddr2, int val3)
7204 struct timespec ts, *pts;
7207 /* ??? We assume FUTEX_* constants are the same on both host
7209 #ifdef FUTEX_CMD_MASK
/* Strip modifier bits (e.g. PRIVATE/CLOCK flags) to get the base op. */
7210 base_op = op & FUTEX_CMD_MASK;
7216 case FUTEX_WAIT_BITSET:
7219 target_to_host_timespec(pts, timeout);
/* WAIT compares against guest-endian memory, hence tswap32(val). */
7223 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7226 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7228 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7230 case FUTEX_CMP_REQUEUE:
7232 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7233 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7234 But the prototype takes a `struct timespec *'; insert casts
7235 to satisfy the compiler. We do not need to tswap TIMEOUT
7236 since it's not compared to guest memory. */
7237 pts = (struct timespec *)(uintptr_t) timeout;
7238 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7240 (base_op == FUTEX_CMP_REQUEUE
7244 return -TARGET_ENOSYS;
7247 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2): read the guest's file_handle header to
 * size the buffer, call the host syscall, then copy the (opaque) handle
 * plus byte-swapped header fields and the mount id back to the guest.
 */
7248 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7249 abi_long handle, abi_long mount_id,
7252 struct file_handle *target_fh;
7253 struct file_handle *fh;
7257 unsigned int size, total_size;
/* handle_bytes is the first field of the guest struct file_handle. */
7259 if (get_user_s32(size, handle)) {
7260 return -TARGET_EFAULT;
7263 name = lock_user_string(pathname);
7265 return -TARGET_EFAULT;
7268 total_size = sizeof(struct file_handle) + size;
7269 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7271 unlock_user(name, pathname, 0);
7272 return -TARGET_EFAULT;
7275 fh = g_malloc0(total_size);
7276 fh->handle_bytes = size;
7278 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7279 unlock_user(name, pathname, 0);
7281 /* man name_to_handle_at(2):
7282 * Other than the use of the handle_bytes field, the caller should treat
7283 * the file_handle structure as an opaque data type
7286 memcpy(target_fh, fh, total_size);
7287 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7288 target_fh->handle_type = tswap32(fh->handle_type);
7290 unlock_user(target_fh, handle, total_size);
7292 if (put_user_s32(mid, mount_id)) {
7293 return -TARGET_EFAULT;
7301 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): duplicate the guest handle into a host
 * struct file_handle (fixing up byte order), translate the open flags,
 * and call the host syscall.
 */
7302 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7305 struct file_handle *target_fh;
7306 struct file_handle *fh;
7307 unsigned int size, total_size;
7310 if (get_user_s32(size, handle)) {
7311 return -TARGET_EFAULT;
7314 total_size = sizeof(struct file_handle) + size;
7315 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7317 return -TARGET_EFAULT;
7320 fh = g_memdup(target_fh, total_size);
7321 fh->handle_bytes = size;
7322 fh->handle_type = tswap32(target_fh->handle_type);
7324 ret = get_errno(open_by_handle_at(mount_fd, fh,
7325 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7329 unlock_user(target_fh, handle, total_size);
7335 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7337 /* signalfd siginfo conversion */
/*
 * Convert one host signalfd_siginfo record to guest byte order and
 * signal numbering.  NOTE(review): callers pass the same buffer as
 * both tinfo and info (in-place conversion), which is why reading
 * tinfo->ssi_errno below still sees the host value.
 */
7340 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
7341 const struct signalfd_siginfo *info)
7343 int sig = host_to_target_signal(info->ssi_signo);
7345 /* linux/signalfd.h defines a ssi_addr_lsb
7346 * not defined in sys/signalfd.h but used by some kernels
7349 #ifdef BUS_MCEERR_AO
/* Memory-error SIGBUS records carry an extra LSB field after ssi_addr. */
7350 if (tinfo->ssi_signo == SIGBUS &&
7351 (tinfo->ssi_code == BUS_MCEERR_AR ||
7352 tinfo->ssi_code == BUS_MCEERR_AO)) {
7353 uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
7354 uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
7355 *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
7359 tinfo->ssi_signo = tswap32(sig);
7360 tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
7361 tinfo->ssi_code = tswap32(info->ssi_code);
7362 tinfo->ssi_pid = tswap32(info->ssi_pid);
7363 tinfo->ssi_uid = tswap32(info->ssi_uid);
7364 tinfo->ssi_fd = tswap32(info->ssi_fd);
7365 tinfo->ssi_tid = tswap32(info->ssi_tid);
7366 tinfo->ssi_band = tswap32(info->ssi_band);
7367 tinfo->ssi_overrun = tswap32(info->ssi_overrun);
7368 tinfo->ssi_trapno = tswap32(info->ssi_trapno);
7369 tinfo->ssi_status = tswap32(info->ssi_status);
7370 tinfo->ssi_int = tswap32(info->ssi_int);
7371 tinfo->ssi_ptr = tswap64(info->ssi_ptr);
7372 tinfo->ssi_utime = tswap64(info->ssi_utime);
7373 tinfo->ssi_stime = tswap64(info->ssi_stime);
7374 tinfo->ssi_addr = tswap64(info->ssi_addr);
/* fd_trans hook: convert every siginfo record in a read() buffer. */
7377 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7381 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7382 host_to_target_signalfd_siginfo(buf + i, buf + i);
7388 static TargetFdTrans target_signalfd_trans = {
7389 .host_to_target_data = host_to_target_data_signalfd,
/*
 * Emulate signalfd4(2): convert the guest sigset and flags, create the
 * host signalfd, and register the fd translator so reads are converted.
 */
7392 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7395 target_sigset_t *target_mask;
7399 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7400 return -TARGET_EINVAL;
7402 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7403 return -TARGET_EFAULT;
7406 target_to_host_sigset(&host_mask, target_mask);
7408 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7410 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7412 fd_trans_register(ret, &target_signalfd_trans);
7415 unlock_user_struct(target_mask, mask, 0);
7421 /* Map host to target signal numbers for the wait family of syscalls.
7422 Assume all other status bits are the same. */
/*
 * Only the signal-number bits need translation: the low 7 bits for a
 * terminating signal, bits 8-15 for a stop signal.
 */
7423 int host_to_target_waitstatus(int status)
7425 if (WIFSIGNALED(status)) {
7426 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7428 if (WIFSTOPPED(status)) {
7429 return (host_to_target_signal(WSTOPSIG(status)) << 8)
/*
 * Emulated /proc/self/* content generators.  Each writes a synthesized
 * file into 'fd' (a temp file created by do_openat) and returns 0 on
 * success, negative on failure.
 */
/* /proc/self/cmdline: NUL-separated argv of the guest binary. */
7435 static int open_self_cmdline(void *cpu_env, int fd)
7437 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7438 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7441 for (i = 0; i < bprm->argc; i++) {
/* +1 keeps the terminating NUL between/after arguments. */
7442 size_t len = strlen(bprm->argv[i]) + 1;
7444 if (write(fd, bprm->argv[i], len) != len) {
/* /proc/self/maps: host maps filtered/translated to guest addresses. */
7452 static int open_self_maps(void *cpu_env, int fd)
7454 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7455 TaskState *ts = cpu->opaque;
7461 fp = fopen("/proc/self/maps", "r");
7466 while ((read = getline(&line, &len, fp)) != -1) {
7467 int fields, dev_maj, dev_min, inode;
7468 uint64_t min, max, offset;
7469 char flag_r, flag_w, flag_x, flag_p;
7470 char path[512] = "";
7471 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7472 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7473 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
/* 10 fields = anonymous mapping, 11 = mapping with a path. */
7475 if ((fields < 10) || (fields > 11)) {
/* Only emit host ranges that correspond to guest memory. */
7478 if (h2g_valid(min)) {
7479 int flags = page_get_flags(h2g(min));
7480 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7481 if (page_check_range(h2g(min), max - min, flags) == -1) {
7484 if (h2g(min) == ts->info->stack_limit) {
7485 pstrcpy(path, sizeof(path), " [stack]");
7487 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7488 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7489 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7490 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7491 path[0] ? " " : "", path);
/* /proc/self/stat: only pid, comm and start_stack are real; the
 * remaining of the 44 fields are written as zeros. */
7501 static int open_self_stat(void *cpu_env, int fd)
7503 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7504 TaskState *ts = cpu->opaque;
7505 abi_ulong start_stack = ts->info->start_stack;
7508 for (i = 0; i < 44; i++) {
7516 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7517 } else if (i == 1) {
/* Field 1 is the command name in parentheses. */
7519 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7520 } else if (i == 27) {
7523 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7525 /* for the rest, there is MasterCard */
7526 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7530 if (write(fd, buf, len) != len) {
/* /proc/self/auxv: dump the ELF auxiliary vector saved at exec time. */
7538 static int open_self_auxv(void *cpu_env, int fd)
7540 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7541 TaskState *ts = cpu->opaque;
7542 abi_ulong auxv = ts->info->saved_auxv;
7543 abi_ulong len = ts->info->auxv_len;
7547 * Auxiliary vector is stored in target process stack.
7548 * read in whole auxv vector and copy it to file
7550 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7554 r = write(fd, ptr, len);
/* Rewind so the caller's fd reads from the start of the fake file. */
7561 lseek(fd, 0, SEEK_SET);
7562 unlock_user(ptr, auxv, len);
/*
 * Return non-zero when 'filename' is "/proc/<self>/<entry>" where <self>
 * is "self" or this process's numeric pid, i.e. the guest is inspecting
 * its own proc entry and we must substitute emulated content.
 */
7568 static int is_proc_myself(const char *filename, const char *entry)
7570 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7571 filename += strlen("/proc/");
7572 if (!strncmp(filename, "self/", strlen("self/"))) {
7573 filename += strlen("self/");
7574 } else if (*filename >= '1' && *filename <= '9') {
7576 snprintf(myself, sizeof(myself), "%d/", getpid());
7577 if (!strncmp(filename, myself, strlen(myself))) {
7578 filename += strlen(myself);
7585 if (!strcmp(filename, entry)) {
/* Byte-order-sensitive /proc files need rewriting when host and guest
 * endianness differ. */
7592 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7593 static int is_proc(const char *filename, const char *entry)
7595 return strcmp(filename, entry) == 0;
/*
 * /proc/net/route: copy the header line, then re-emit each route row
 * with the 32-bit address fields (dest/gateway/mask) byte-swapped for
 * the guest.
 */
7598 static int open_net_route(void *cpu_env, int fd)
7605 fp = fopen("/proc/net/route", "r");
7612 read = getline(&line, &len, fp);
7613 dprintf(fd, "%s", line);
7617 while ((read = getline(&line, &len, fp)) != -1) {
7619 uint32_t dest, gw, mask;
7620 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7621 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7622 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7623 &mask, &mtu, &window, &irtt);
7624 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7625 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7626 metric, tswap32(mask), mtu, window, irtt);
/*
 * do_openat(): openat(2) emulation front-end.  Three paths:
 *   1. "/proc/self/exe" (or "/proc/<ourpid>/exe") -> the AT_EXECFD fd
 *      handed to us by the loader, or a fresh open of exec_path;
 *   2. entries listed in the 'fakes' table (maps/stat/auxv/cmdline and,
 *      for cross-endian builds, /proc/net/route) -> a temp file filled
 *      by the matching generator callback;
 *   3. everything else -> a plain safe_openat() on the host.
 * NOTE(review): sparse extract — the fake-open temp-file path is missing
 * several lines (TMPDIR fallback, mkstemp/unlink error handling, the
 * fill-failure cleanup) in this view.
 */
7636 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7639 const char *filename;
/* 'fill' writes the faked content into an fd; 'cmp' decides whether a
 * guest path refers to this entry. */
7640 int (*fill)(void *cpu_env, int fd);
7641 int (*cmp)(const char *s1, const char *s2);
7643 const struct fake_open *fake_open;
7644 static const struct fake_open fakes[] = {
7645 { "maps", open_self_maps, is_proc_myself },
7646 { "stat", open_self_stat, is_proc_myself },
7647 { "auxv", open_self_auxv, is_proc_myself },
7648 { "cmdline", open_self_cmdline, is_proc_myself },
7649 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7650 { "/proc/net/route", open_net_route, is_proc },
7652 { NULL, NULL, NULL }
/* Path 1: our own executable.  AT_EXECFD is 0 when the loader did not
 * pass an open fd, in which case fall back to reopening exec_path. */
7655 if (is_proc_myself(pathname, "exe")) {
7656 int execfd = qemu_getauxval(AT_EXECFD);
7657 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
/* Path 2: scan the fake-open table (sentinel-terminated). */
7660 for (fake_open = fakes; fake_open->filename; fake_open++) {
7661 if (fake_open->cmp(pathname, fake_open->filename)) {
7666 if (fake_open->filename) {
7668 char filename[PATH_MAX];
7671 /* create temporary file to map stat to */
7672 tmpdir = getenv("TMPDIR");
7675 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7676 fd = mkstemp(filename);
/* Let the generator write the faked content, then rewind for reading. */
7682 if ((r = fake_open->fill(cpu_env, fd))) {
7688 lseek(fd, 0, SEEK_SET);
/* Path 3: ordinary file — path() applies the -L sysroot remapping. */
7693 return safe_openat(dirfd, path(pathname), flags, mode);
/* POSIX timer IDs handed to the guest are the internal table index
 * tagged with a magic value in the upper 16 bits, so stale/garbage IDs
 * can be rejected cheaply. */
7696 #define TIMER_MAGIC 0x0caf0000
7697 #define TIMER_MAGIC_MASK 0xffff0000
7699 /* Convert QEMU provided timer ID back to internal 16bit index format */
7700 static target_timer_t get_timer_id(abi_long arg)
7702 target_timer_t timerid = arg;
/* Reject any ID whose upper half does not carry our magic tag. */
7704 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7705 return -TARGET_EINVAL;
/* NOTE(review): sparse extract — the line stripping the magic from
 * 'timerid' and the final 'return timerid;' are not visible here. */
7710 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7711 return -TARGET_EINVAL;
/*
 * swap_data_eventfd(): byte-swap helper used in both directions for
 * eventfd reads/writes — an eventfd transfers 64-bit counters, so each
 * uint64_t in 'buf' is converted between host and target byte order
 * in place.  Swapping is an involution, hence one function serves as
 * both host_to_target_data and target_to_host_data (see the
 * target_eventfd_trans table below).
 * NOTE(review): sparse extract — the short-buffer return value for the
 * len < 8 case, the 'i' declaration, the counter++ advance and the
 * final return are not visible here.
 */
7717 static abi_long swap_data_eventfd(void *buf, size_t len)
7719 uint64_t *counter = buf;
/* An eventfd transfer smaller than one counter is invalid. */
7722 if (len < sizeof(uint64_t)) {
7726 for (i = 0; i < len; i += sizeof(uint64_t)) {
7727 *counter = tswap64(*counter);
/* fd-translation hooks installed on eventfds: the same involutive
 * 64-bit swap serves both data directions. */
7734 static TargetFdTrans target_eventfd_trans = {
7735 .host_to_target_data = swap_data_eventfd,
7736 .target_to_host_data = swap_data_eventfd,
7739 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7740 (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7741 defined(__NR_inotify_init1))
/*
 * host_to_target_data_inotify(): convert a buffer of struct
 * inotify_event records (as read from an inotify fd) from host to
 * target byte order in place.  Each record is a fixed header followed
 * by a 'len'-byte name, so the loop strides by header size plus the
 * per-record name length.
 * NOTE(review): sparse extract — the 'i' declaration and the line that
 * loads 'name_len' from ev->len *before* it is swapped are not visible
 * here; the stride depends on reading it pre-swap.
 */
7742 static abi_long host_to_target_data_inotify(void *buf, size_t len)
7744 struct inotify_event *ev;
7748 for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
7749 ev = (struct inotify_event *)((char *)buf + i);
/* Swap each 32-bit header field to target order. */
7752 ev->wd = tswap32(ev->wd);
7753 ev->mask = tswap32(ev->mask);
7754 ev->cookie = tswap32(ev->cookie);
7755 ev->len = tswap32(name_len);
/* fd-translation hook installed on inotify fds: only reads need
 * conversion (events flow host -> target). */
7761 static TargetFdTrans target_inotify_trans = {
7762 .host_to_target_data = host_to_target_data_inotify,
/*
 * target_to_host_cpu_mask(): copy a guest CPU-affinity bitmask (an
 * array of abi_ulong at 'target_addr') into the host 'host_mask'
 * buffer, translating bit-by-bit so that differing word sizes and
 * endiannesses between guest and host ABIs are handled correctly.
 * Returns 0 on success, -TARGET_EFAULT if the guest buffer cannot be
 * locked.
 * NOTE(review): sparse extract — the declarations of 'i'/'j'/'val' and
 * the 'target_size'/'host_size' parameter lines are not visible here.
 */
7766 static int target_to_host_cpu_mask(unsigned long *host_mask,
7768 abi_ulong target_addr,
7771 unsigned target_bits = sizeof(abi_ulong) * 8;
7772 unsigned host_bits = sizeof(*host_mask) * 8;
7773 abi_ulong *target_mask;
/* Caller must size the host buffer to cover the whole guest mask. */
7776 assert(host_size >= target_size);
7778 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7780 return -TARGET_EFAULT;
/* Start from all-clear; only set bits present in the guest mask. */
7782 memset(host_mask, 0, host_size);
7784 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
/* 'bit' is the global bit index of bit 0 of guest word i. */
7785 unsigned bit = i * target_bits;
/* __get_user also converts the guest word to host byte order. */
7788 __get_user(val, &target_mask[i]);
7789 for (j = 0; j < target_bits; j++, bit++) {
7790 if (val & (1UL << j)) {
7791 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
/* Read-only lock: nothing to copy back. */
7796 unlock_user(target_mask, target_addr, 0);
/*
 * host_to_target_cpu_mask(): inverse of target_to_host_cpu_mask() —
 * copy a host CPU-affinity bitmask into a guest abi_ulong array at
 * 'target_addr', translating bit-by-bit across word-size/endianness
 * differences.  Returns 0 on success, -TARGET_EFAULT if the guest
 * buffer cannot be locked for writing.
 * NOTE(review): sparse extract — the declarations of 'i'/'j'/'val',
 * the 'val = 0' reset and 'val |= ...' accumulation inside the loops,
 * the size parameter lines, and the final 'return 0;' are not visible
 * here.
 */
7800 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7802 abi_ulong target_addr,
7805 unsigned target_bits = sizeof(abi_ulong) * 8;
7806 unsigned host_bits = sizeof(*host_mask) * 8;
7807 abi_ulong *target_mask;
7810 assert(host_size >= target_size);
7812 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7814 return -TARGET_EFAULT;
/* Build each guest word from the corresponding span of host bits. */
7817 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7818 unsigned bit = i * target_bits;
7821 for (j = 0; j < target_bits; j++, bit++) {
7822 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
/* __put_user converts to guest byte order on store. */
7826 __put_user(val, &target_mask[i]);
/* Write-back lock: flush the full guest-sized mask. */
7829 unlock_user(target_mask, target_addr, target_size);
7833 /* do_syscall() should always have a single exit point at the end so
7834 that actions, such as logging of syscall results, can be performed.
7835 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7836 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7837 abi_long arg2, abi_long arg3, abi_long arg4,
7838 abi_long arg5, abi_long arg6, abi_long arg7,
7841 CPUState *cpu = ENV_GET_CPU(cpu_env);
7847 #if defined(DEBUG_ERESTARTSYS)
7848 /* Debug-only code for exercising the syscall-restart code paths
7849 * in the per-architecture cpu main loops: restart every syscall
7850 * the guest makes once before letting it through.
7857 return -TARGET_ERESTARTSYS;
7863 gemu_log("syscall %d", num);
7865 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7867 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7870 case TARGET_NR_exit:
7871 /* In old applications this may be used to implement _exit(2).
7872 However in threaded applictions it is used for thread termination,
7873 and _exit_group is used for application termination.
7874 Do thread termination if we have more then one thread. */
7876 if (block_signals()) {
7877 ret = -TARGET_ERESTARTSYS;
7883 if (CPU_NEXT(first_cpu)) {
7886 /* Remove the CPU from the list. */
7887 QTAILQ_REMOVE(&cpus, cpu, node);
7892 if (ts->child_tidptr) {
7893 put_user_u32(0, ts->child_tidptr);
7894 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7898 object_unref(OBJECT(cpu));
7900 rcu_unregister_thread();
7908 gdb_exit(cpu_env, arg1);
7910 ret = 0; /* avoid warning */
7912 case TARGET_NR_read:
7916 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7918 ret = get_errno(safe_read(arg1, p, arg3));
7920 fd_trans_host_to_target_data(arg1)) {
7921 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7923 unlock_user(p, arg2, ret);
7926 case TARGET_NR_write:
7927 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7929 if (fd_trans_target_to_host_data(arg1)) {
7930 void *copy = g_malloc(arg3);
7931 memcpy(copy, p, arg3);
7932 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7934 ret = get_errno(safe_write(arg1, copy, ret));
7938 ret = get_errno(safe_write(arg1, p, arg3));
7940 unlock_user(p, arg2, 0);
7942 #ifdef TARGET_NR_open
7943 case TARGET_NR_open:
7944 if (!(p = lock_user_string(arg1)))
7946 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7947 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7949 fd_trans_unregister(ret);
7950 unlock_user(p, arg1, 0);
7953 case TARGET_NR_openat:
7954 if (!(p = lock_user_string(arg2)))
7956 ret = get_errno(do_openat(cpu_env, arg1, p,
7957 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7959 fd_trans_unregister(ret);
7960 unlock_user(p, arg2, 0);
7962 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7963 case TARGET_NR_name_to_handle_at:
7964 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7967 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7968 case TARGET_NR_open_by_handle_at:
7969 ret = do_open_by_handle_at(arg1, arg2, arg3);
7970 fd_trans_unregister(ret);
7973 case TARGET_NR_close:
7974 fd_trans_unregister(arg1);
7975 ret = get_errno(close(arg1));
7980 #ifdef TARGET_NR_fork
7981 case TARGET_NR_fork:
7982 ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7985 #ifdef TARGET_NR_waitpid
7986 case TARGET_NR_waitpid:
7989 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7990 if (!is_error(ret) && arg2 && ret
7991 && put_user_s32(host_to_target_waitstatus(status), arg2))
7996 #ifdef TARGET_NR_waitid
7997 case TARGET_NR_waitid:
8001 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8002 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8003 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8005 host_to_target_siginfo(p, &info);
8006 unlock_user(p, arg3, sizeof(target_siginfo_t));
8011 #ifdef TARGET_NR_creat /* not on alpha */
8012 case TARGET_NR_creat:
8013 if (!(p = lock_user_string(arg1)))
8015 ret = get_errno(creat(p, arg2));
8016 fd_trans_unregister(ret);
8017 unlock_user(p, arg1, 0);
8020 #ifdef TARGET_NR_link
8021 case TARGET_NR_link:
8024 p = lock_user_string(arg1);
8025 p2 = lock_user_string(arg2);
8027 ret = -TARGET_EFAULT;
8029 ret = get_errno(link(p, p2));
8030 unlock_user(p2, arg2, 0);
8031 unlock_user(p, arg1, 0);
8035 #if defined(TARGET_NR_linkat)
8036 case TARGET_NR_linkat:
8041 p = lock_user_string(arg2);
8042 p2 = lock_user_string(arg4);
8044 ret = -TARGET_EFAULT;
8046 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8047 unlock_user(p, arg2, 0);
8048 unlock_user(p2, arg4, 0);
8052 #ifdef TARGET_NR_unlink
8053 case TARGET_NR_unlink:
8054 if (!(p = lock_user_string(arg1)))
8056 ret = get_errno(unlink(p));
8057 unlock_user(p, arg1, 0);
8060 #if defined(TARGET_NR_unlinkat)
8061 case TARGET_NR_unlinkat:
8062 if (!(p = lock_user_string(arg2)))
8064 ret = get_errno(unlinkat(arg1, p, arg3));
8065 unlock_user(p, arg2, 0);
8068 case TARGET_NR_execve:
8070 char **argp, **envp;
8073 abi_ulong guest_argp;
8074 abi_ulong guest_envp;
8081 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8082 if (get_user_ual(addr, gp))
8090 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8091 if (get_user_ual(addr, gp))
8098 argp = g_new0(char *, argc + 1);
8099 envp = g_new0(char *, envc + 1);
8101 for (gp = guest_argp, q = argp; gp;
8102 gp += sizeof(abi_ulong), q++) {
8103 if (get_user_ual(addr, gp))
8107 if (!(*q = lock_user_string(addr)))
8109 total_size += strlen(*q) + 1;
8113 for (gp = guest_envp, q = envp; gp;
8114 gp += sizeof(abi_ulong), q++) {
8115 if (get_user_ual(addr, gp))
8119 if (!(*q = lock_user_string(addr)))
8121 total_size += strlen(*q) + 1;
8125 if (!(p = lock_user_string(arg1)))
8127 /* Although execve() is not an interruptible syscall it is
8128 * a special case where we must use the safe_syscall wrapper:
8129 * if we allow a signal to happen before we make the host
8130 * syscall then we will 'lose' it, because at the point of
8131 * execve the process leaves QEMU's control. So we use the
8132 * safe syscall wrapper to ensure that we either take the
8133 * signal as a guest signal, or else it does not happen
8134 * before the execve completes and makes it the other
8135 * program's problem.
8137 ret = get_errno(safe_execve(p, argp, envp));
8138 unlock_user(p, arg1, 0);
8143 ret = -TARGET_EFAULT;
8146 for (gp = guest_argp, q = argp; *q;
8147 gp += sizeof(abi_ulong), q++) {
8148 if (get_user_ual(addr, gp)
8151 unlock_user(*q, addr, 0);
8153 for (gp = guest_envp, q = envp; *q;
8154 gp += sizeof(abi_ulong), q++) {
8155 if (get_user_ual(addr, gp)
8158 unlock_user(*q, addr, 0);
8165 case TARGET_NR_chdir:
8166 if (!(p = lock_user_string(arg1)))
8168 ret = get_errno(chdir(p));
8169 unlock_user(p, arg1, 0);
8171 #ifdef TARGET_NR_time
8172 case TARGET_NR_time:
8175 ret = get_errno(time(&host_time));
8178 && put_user_sal(host_time, arg1))
8183 #ifdef TARGET_NR_mknod
8184 case TARGET_NR_mknod:
8185 if (!(p = lock_user_string(arg1)))
8187 ret = get_errno(mknod(p, arg2, arg3));
8188 unlock_user(p, arg1, 0);
8191 #if defined(TARGET_NR_mknodat)
8192 case TARGET_NR_mknodat:
8193 if (!(p = lock_user_string(arg2)))
8195 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8196 unlock_user(p, arg2, 0);
8199 #ifdef TARGET_NR_chmod
8200 case TARGET_NR_chmod:
8201 if (!(p = lock_user_string(arg1)))
8203 ret = get_errno(chmod(p, arg2));
8204 unlock_user(p, arg1, 0);
8207 #ifdef TARGET_NR_break
8208 case TARGET_NR_break:
8211 #ifdef TARGET_NR_oldstat
8212 case TARGET_NR_oldstat:
8215 case TARGET_NR_lseek:
8216 ret = get_errno(lseek(arg1, arg2, arg3));
8218 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8219 /* Alpha specific */
8220 case TARGET_NR_getxpid:
8221 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8222 ret = get_errno(getpid());
8225 #ifdef TARGET_NR_getpid
8226 case TARGET_NR_getpid:
8227 ret = get_errno(getpid());
8230 case TARGET_NR_mount:
8232 /* need to look at the data field */
8236 p = lock_user_string(arg1);
8244 p2 = lock_user_string(arg2);
8247 unlock_user(p, arg1, 0);
8253 p3 = lock_user_string(arg3);
8256 unlock_user(p, arg1, 0);
8258 unlock_user(p2, arg2, 0);
8265 /* FIXME - arg5 should be locked, but it isn't clear how to
8266 * do that since it's not guaranteed to be a NULL-terminated
8270 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8272 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8274 ret = get_errno(ret);
8277 unlock_user(p, arg1, 0);
8279 unlock_user(p2, arg2, 0);
8281 unlock_user(p3, arg3, 0);
8285 #ifdef TARGET_NR_umount
8286 case TARGET_NR_umount:
8287 if (!(p = lock_user_string(arg1)))
8289 ret = get_errno(umount(p));
8290 unlock_user(p, arg1, 0);
8293 #ifdef TARGET_NR_stime /* not on alpha */
8294 case TARGET_NR_stime:
8297 if (get_user_sal(host_time, arg1))
8299 ret = get_errno(stime(&host_time));
8303 case TARGET_NR_ptrace:
8305 #ifdef TARGET_NR_alarm /* not on alpha */
8306 case TARGET_NR_alarm:
8310 #ifdef TARGET_NR_oldfstat
8311 case TARGET_NR_oldfstat:
8314 #ifdef TARGET_NR_pause /* not on alpha */
8315 case TARGET_NR_pause:
8316 if (!block_signals()) {
8317 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8319 ret = -TARGET_EINTR;
8322 #ifdef TARGET_NR_utime
8323 case TARGET_NR_utime:
8325 struct utimbuf tbuf, *host_tbuf;
8326 struct target_utimbuf *target_tbuf;
8328 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8330 tbuf.actime = tswapal(target_tbuf->actime);
8331 tbuf.modtime = tswapal(target_tbuf->modtime);
8332 unlock_user_struct(target_tbuf, arg2, 0);
8337 if (!(p = lock_user_string(arg1)))
8339 ret = get_errno(utime(p, host_tbuf));
8340 unlock_user(p, arg1, 0);
8344 #ifdef TARGET_NR_utimes
8345 case TARGET_NR_utimes:
8347 struct timeval *tvp, tv[2];
8349 if (copy_from_user_timeval(&tv[0], arg2)
8350 || copy_from_user_timeval(&tv[1],
8351 arg2 + sizeof(struct target_timeval)))
8357 if (!(p = lock_user_string(arg1)))
8359 ret = get_errno(utimes(p, tvp));
8360 unlock_user(p, arg1, 0);
8364 #if defined(TARGET_NR_futimesat)
8365 case TARGET_NR_futimesat:
8367 struct timeval *tvp, tv[2];
8369 if (copy_from_user_timeval(&tv[0], arg3)
8370 || copy_from_user_timeval(&tv[1],
8371 arg3 + sizeof(struct target_timeval)))
8377 if (!(p = lock_user_string(arg2)))
8379 ret = get_errno(futimesat(arg1, path(p), tvp));
8380 unlock_user(p, arg2, 0);
8384 #ifdef TARGET_NR_stty
8385 case TARGET_NR_stty:
8388 #ifdef TARGET_NR_gtty
8389 case TARGET_NR_gtty:
8392 #ifdef TARGET_NR_access
8393 case TARGET_NR_access:
8394 if (!(p = lock_user_string(arg1)))
8396 ret = get_errno(access(path(p), arg2));
8397 unlock_user(p, arg1, 0);
8400 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8401 case TARGET_NR_faccessat:
8402 if (!(p = lock_user_string(arg2)))
8404 ret = get_errno(faccessat(arg1, p, arg3, 0));
8405 unlock_user(p, arg2, 0);
8408 #ifdef TARGET_NR_nice /* not on alpha */
8409 case TARGET_NR_nice:
8410 ret = get_errno(nice(arg1));
8413 #ifdef TARGET_NR_ftime
8414 case TARGET_NR_ftime:
8417 case TARGET_NR_sync:
8421 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8422 case TARGET_NR_syncfs:
8423 ret = get_errno(syncfs(arg1));
8426 case TARGET_NR_kill:
8427 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8429 #ifdef TARGET_NR_rename
8430 case TARGET_NR_rename:
8433 p = lock_user_string(arg1);
8434 p2 = lock_user_string(arg2);
8436 ret = -TARGET_EFAULT;
8438 ret = get_errno(rename(p, p2));
8439 unlock_user(p2, arg2, 0);
8440 unlock_user(p, arg1, 0);
8444 #if defined(TARGET_NR_renameat)
8445 case TARGET_NR_renameat:
8448 p = lock_user_string(arg2);
8449 p2 = lock_user_string(arg4);
8451 ret = -TARGET_EFAULT;
8453 ret = get_errno(renameat(arg1, p, arg3, p2));
8454 unlock_user(p2, arg4, 0);
8455 unlock_user(p, arg2, 0);
8459 #if defined(TARGET_NR_renameat2)
8460 case TARGET_NR_renameat2:
8463 p = lock_user_string(arg2);
8464 p2 = lock_user_string(arg4);
8466 ret = -TARGET_EFAULT;
8468 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8470 unlock_user(p2, arg4, 0);
8471 unlock_user(p, arg2, 0);
8475 #ifdef TARGET_NR_mkdir
8476 case TARGET_NR_mkdir:
8477 if (!(p = lock_user_string(arg1)))
8479 ret = get_errno(mkdir(p, arg2));
8480 unlock_user(p, arg1, 0);
8483 #if defined(TARGET_NR_mkdirat)
8484 case TARGET_NR_mkdirat:
8485 if (!(p = lock_user_string(arg2)))
8487 ret = get_errno(mkdirat(arg1, p, arg3));
8488 unlock_user(p, arg2, 0);
8491 #ifdef TARGET_NR_rmdir
8492 case TARGET_NR_rmdir:
8493 if (!(p = lock_user_string(arg1)))
8495 ret = get_errno(rmdir(p));
8496 unlock_user(p, arg1, 0);
8500 ret = get_errno(dup(arg1));
8502 fd_trans_dup(arg1, ret);
8505 #ifdef TARGET_NR_pipe
8506 case TARGET_NR_pipe:
8507 ret = do_pipe(cpu_env, arg1, 0, 0);
8510 #ifdef TARGET_NR_pipe2
8511 case TARGET_NR_pipe2:
8512 ret = do_pipe(cpu_env, arg1,
8513 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8516 case TARGET_NR_times:
8518 struct target_tms *tmsp;
8520 ret = get_errno(times(&tms));
8522 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8525 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8526 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8527 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8528 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8531 ret = host_to_target_clock_t(ret);
8534 #ifdef TARGET_NR_prof
8535 case TARGET_NR_prof:
8538 #ifdef TARGET_NR_signal
8539 case TARGET_NR_signal:
8542 case TARGET_NR_acct:
8544 ret = get_errno(acct(NULL));
8546 if (!(p = lock_user_string(arg1)))
8548 ret = get_errno(acct(path(p)));
8549 unlock_user(p, arg1, 0);
8552 #ifdef TARGET_NR_umount2
8553 case TARGET_NR_umount2:
8554 if (!(p = lock_user_string(arg1)))
8556 ret = get_errno(umount2(p, arg2));
8557 unlock_user(p, arg1, 0);
8560 #ifdef TARGET_NR_lock
8561 case TARGET_NR_lock:
8564 case TARGET_NR_ioctl:
8565 ret = do_ioctl(arg1, arg2, arg3);
8567 #ifdef TARGET_NR_fcntl
8568 case TARGET_NR_fcntl:
8569 ret = do_fcntl(arg1, arg2, arg3);
8572 #ifdef TARGET_NR_mpx
8576 case TARGET_NR_setpgid:
8577 ret = get_errno(setpgid(arg1, arg2));
8579 #ifdef TARGET_NR_ulimit
8580 case TARGET_NR_ulimit:
8583 #ifdef TARGET_NR_oldolduname
8584 case TARGET_NR_oldolduname:
8587 case TARGET_NR_umask:
8588 ret = get_errno(umask(arg1));
8590 case TARGET_NR_chroot:
8591 if (!(p = lock_user_string(arg1)))
8593 ret = get_errno(chroot(p));
8594 unlock_user(p, arg1, 0);
8596 #ifdef TARGET_NR_ustat
8597 case TARGET_NR_ustat:
8600 #ifdef TARGET_NR_dup2
8601 case TARGET_NR_dup2:
8602 ret = get_errno(dup2(arg1, arg2));
8604 fd_trans_dup(arg1, arg2);
8608 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8609 case TARGET_NR_dup3:
8613 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8616 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8617 ret = get_errno(dup3(arg1, arg2, host_flags));
8619 fd_trans_dup(arg1, arg2);
8624 #ifdef TARGET_NR_getppid /* not on alpha */
8625 case TARGET_NR_getppid:
8626 ret = get_errno(getppid());
8629 #ifdef TARGET_NR_getpgrp
8630 case TARGET_NR_getpgrp:
8631 ret = get_errno(getpgrp());
8634 case TARGET_NR_setsid:
8635 ret = get_errno(setsid());
8637 #ifdef TARGET_NR_sigaction
8638 case TARGET_NR_sigaction:
8640 #if defined(TARGET_ALPHA)
8641 struct target_sigaction act, oact, *pact = 0;
8642 struct target_old_sigaction *old_act;
8644 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8646 act._sa_handler = old_act->_sa_handler;
8647 target_siginitset(&act.sa_mask, old_act->sa_mask);
8648 act.sa_flags = old_act->sa_flags;
8649 act.sa_restorer = 0;
8650 unlock_user_struct(old_act, arg2, 0);
8653 ret = get_errno(do_sigaction(arg1, pact, &oact));
8654 if (!is_error(ret) && arg3) {
8655 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8657 old_act->_sa_handler = oact._sa_handler;
8658 old_act->sa_mask = oact.sa_mask.sig[0];
8659 old_act->sa_flags = oact.sa_flags;
8660 unlock_user_struct(old_act, arg3, 1);
8662 #elif defined(TARGET_MIPS)
8663 struct target_sigaction act, oact, *pact, *old_act;
8666 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8668 act._sa_handler = old_act->_sa_handler;
8669 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8670 act.sa_flags = old_act->sa_flags;
8671 unlock_user_struct(old_act, arg2, 0);
8677 ret = get_errno(do_sigaction(arg1, pact, &oact));
8679 if (!is_error(ret) && arg3) {
8680 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8682 old_act->_sa_handler = oact._sa_handler;
8683 old_act->sa_flags = oact.sa_flags;
8684 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8685 old_act->sa_mask.sig[1] = 0;
8686 old_act->sa_mask.sig[2] = 0;
8687 old_act->sa_mask.sig[3] = 0;
8688 unlock_user_struct(old_act, arg3, 1);
8691 struct target_old_sigaction *old_act;
8692 struct target_sigaction act, oact, *pact;
8694 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8696 act._sa_handler = old_act->_sa_handler;
8697 target_siginitset(&act.sa_mask, old_act->sa_mask);
8698 act.sa_flags = old_act->sa_flags;
8699 act.sa_restorer = old_act->sa_restorer;
8700 unlock_user_struct(old_act, arg2, 0);
8705 ret = get_errno(do_sigaction(arg1, pact, &oact));
8706 if (!is_error(ret) && arg3) {
8707 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8709 old_act->_sa_handler = oact._sa_handler;
8710 old_act->sa_mask = oact.sa_mask.sig[0];
8711 old_act->sa_flags = oact.sa_flags;
8712 old_act->sa_restorer = oact.sa_restorer;
8713 unlock_user_struct(old_act, arg3, 1);
8719 case TARGET_NR_rt_sigaction:
8721 #if defined(TARGET_ALPHA)
8722 /* For Alpha and SPARC this is a 5 argument syscall, with
8723 * a 'restorer' parameter which must be copied into the
8724 * sa_restorer field of the sigaction struct.
8725 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8726 * and arg5 is the sigsetsize.
8727 * Alpha also has a separate rt_sigaction struct that it uses
8728 * here; SPARC uses the usual sigaction struct.
8730 struct target_rt_sigaction *rt_act;
8731 struct target_sigaction act, oact, *pact = 0;
8733 if (arg4 != sizeof(target_sigset_t)) {
8734 ret = -TARGET_EINVAL;
8738 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8740 act._sa_handler = rt_act->_sa_handler;
8741 act.sa_mask = rt_act->sa_mask;
8742 act.sa_flags = rt_act->sa_flags;
8743 act.sa_restorer = arg5;
8744 unlock_user_struct(rt_act, arg2, 0);
8747 ret = get_errno(do_sigaction(arg1, pact, &oact));
8748 if (!is_error(ret) && arg3) {
8749 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8751 rt_act->_sa_handler = oact._sa_handler;
8752 rt_act->sa_mask = oact.sa_mask;
8753 rt_act->sa_flags = oact.sa_flags;
8754 unlock_user_struct(rt_act, arg3, 1);
8758 target_ulong restorer = arg4;
8759 target_ulong sigsetsize = arg5;
8761 target_ulong sigsetsize = arg4;
8763 struct target_sigaction *act;
8764 struct target_sigaction *oact;
8766 if (sigsetsize != sizeof(target_sigset_t)) {
8767 ret = -TARGET_EINVAL;
8771 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8775 act->sa_restorer = restorer;
8781 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8782 ret = -TARGET_EFAULT;
8783 goto rt_sigaction_fail;
8787 ret = get_errno(do_sigaction(arg1, act, oact));
8790 unlock_user_struct(act, arg2, 0);
8792 unlock_user_struct(oact, arg3, 1);
8796 #ifdef TARGET_NR_sgetmask /* not on alpha */
8797 case TARGET_NR_sgetmask:
8800 abi_ulong target_set;
8801 ret = do_sigprocmask(0, NULL, &cur_set);
8803 host_to_target_old_sigset(&target_set, &cur_set);
8809 #ifdef TARGET_NR_ssetmask /* not on alpha */
8810 case TARGET_NR_ssetmask:
8813 abi_ulong target_set = arg1;
8814 target_to_host_old_sigset(&set, &target_set);
8815 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8817 host_to_target_old_sigset(&target_set, &oset);
8823 #ifdef TARGET_NR_sigprocmask
8824 case TARGET_NR_sigprocmask:
8826 #if defined(TARGET_ALPHA)
8827 sigset_t set, oldset;
8832 case TARGET_SIG_BLOCK:
8835 case TARGET_SIG_UNBLOCK:
8838 case TARGET_SIG_SETMASK:
8842 ret = -TARGET_EINVAL;
8846 target_to_host_old_sigset(&set, &mask);
8848 ret = do_sigprocmask(how, &set, &oldset);
8849 if (!is_error(ret)) {
8850 host_to_target_old_sigset(&mask, &oldset);
8852 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8855 sigset_t set, oldset, *set_ptr;
8860 case TARGET_SIG_BLOCK:
8863 case TARGET_SIG_UNBLOCK:
8866 case TARGET_SIG_SETMASK:
8870 ret = -TARGET_EINVAL;
8873 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8875 target_to_host_old_sigset(&set, p);
8876 unlock_user(p, arg2, 0);
8882 ret = do_sigprocmask(how, set_ptr, &oldset);
8883 if (!is_error(ret) && arg3) {
8884 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8886 host_to_target_old_sigset(p, &oldset);
8887 unlock_user(p, arg3, sizeof(target_sigset_t));
8893 case TARGET_NR_rt_sigprocmask:
8896 sigset_t set, oldset, *set_ptr;
8898 if (arg4 != sizeof(target_sigset_t)) {
8899 ret = -TARGET_EINVAL;
8905 case TARGET_SIG_BLOCK:
8908 case TARGET_SIG_UNBLOCK:
8911 case TARGET_SIG_SETMASK:
8915 ret = -TARGET_EINVAL;
8918 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8920 target_to_host_sigset(&set, p);
8921 unlock_user(p, arg2, 0);
8927 ret = do_sigprocmask(how, set_ptr, &oldset);
8928 if (!is_error(ret) && arg3) {
8929 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8931 host_to_target_sigset(p, &oldset);
8932 unlock_user(p, arg3, sizeof(target_sigset_t));
8936 #ifdef TARGET_NR_sigpending
8937 case TARGET_NR_sigpending:
8940 ret = get_errno(sigpending(&set));
8941 if (!is_error(ret)) {
8942 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8944 host_to_target_old_sigset(p, &set);
8945 unlock_user(p, arg1, sizeof(target_sigset_t));
8950 case TARGET_NR_rt_sigpending:
8954 /* Yes, this check is >, not != like most. We follow the kernel's
8955 * logic and it does it like this because it implements
8956 * NR_sigpending through the same code path, and in that case
8957 * the old_sigset_t is smaller in size.
8959 if (arg2 > sizeof(target_sigset_t)) {
8960 ret = -TARGET_EINVAL;
8964 ret = get_errno(sigpending(&set));
8965 if (!is_error(ret)) {
8966 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8968 host_to_target_sigset(p, &set);
8969 unlock_user(p, arg1, sizeof(target_sigset_t));
8973 #ifdef TARGET_NR_sigsuspend
8974 case TARGET_NR_sigsuspend:
8976 TaskState *ts = cpu->opaque;
8977 #if defined(TARGET_ALPHA)
8978 abi_ulong mask = arg1;
8979 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8981 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8983 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8984 unlock_user(p, arg1, 0);
8986 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8988 if (ret != -TARGET_ERESTARTSYS) {
8989 ts->in_sigsuspend = 1;
8994 case TARGET_NR_rt_sigsuspend:
8996 TaskState *ts = cpu->opaque;
8998 if (arg2 != sizeof(target_sigset_t)) {
8999 ret = -TARGET_EINVAL;
9002 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9004 target_to_host_sigset(&ts->sigsuspend_mask, p);
9005 unlock_user(p, arg1, 0);
9006 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9008 if (ret != -TARGET_ERESTARTSYS) {
9009 ts->in_sigsuspend = 1;
9013 case TARGET_NR_rt_sigtimedwait:
9016 struct timespec uts, *puts;
9019 if (arg4 != sizeof(target_sigset_t)) {
9020 ret = -TARGET_EINVAL;
9024 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9026 target_to_host_sigset(&set, p);
9027 unlock_user(p, arg1, 0);
9030 target_to_host_timespec(puts, arg3);
9034 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9036 if (!is_error(ret)) {
9038 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9043 host_to_target_siginfo(p, &uinfo);
9044 unlock_user(p, arg2, sizeof(target_siginfo_t));
9046 ret = host_to_target_signal(ret);
9050 case TARGET_NR_rt_sigqueueinfo:
9054 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9058 target_to_host_siginfo(&uinfo, p);
9059 unlock_user(p, arg3, 0);
9060 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9063 case TARGET_NR_rt_tgsigqueueinfo:
9067 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9071 target_to_host_siginfo(&uinfo, p);
9072 unlock_user(p, arg4, 0);
9073 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9076 #ifdef TARGET_NR_sigreturn
9077 case TARGET_NR_sigreturn:
9078 if (block_signals()) {
9079 ret = -TARGET_ERESTARTSYS;
9081 ret = do_sigreturn(cpu_env);
9085 case TARGET_NR_rt_sigreturn:
9086 if (block_signals()) {
9087 ret = -TARGET_ERESTARTSYS;
9089 ret = do_rt_sigreturn(cpu_env);
9092 case TARGET_NR_sethostname:
9093 if (!(p = lock_user_string(arg1)))
9095 ret = get_errno(sethostname(p, arg2));
9096 unlock_user(p, arg1, 0);
9098 case TARGET_NR_setrlimit:
9100 int resource = target_to_host_resource(arg1);
9101 struct target_rlimit *target_rlim;
9103 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9105 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9106 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9107 unlock_user_struct(target_rlim, arg2, 0);
9108 ret = get_errno(setrlimit(resource, &rlim));
9111 case TARGET_NR_getrlimit:
9113 int resource = target_to_host_resource(arg1);
9114 struct target_rlimit *target_rlim;
9117 ret = get_errno(getrlimit(resource, &rlim));
9118 if (!is_error(ret)) {
9119 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9121 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9122 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9123 unlock_user_struct(target_rlim, arg2, 1);
9127 case TARGET_NR_getrusage:
9129 struct rusage rusage;
9130 ret = get_errno(getrusage(arg1, &rusage));
9131 if (!is_error(ret)) {
9132 ret = host_to_target_rusage(arg2, &rusage);
/* NOTE(review): this listing is elided — the embedded numbers are original file
 * line numbers and the gaps between them are lines not shown here (breaks,
 * closing braces, #endifs). Comments below describe only the visible code. */
/* gettimeofday(2): query host time, convert to guest layout on success. */
9136 case TARGET_NR_gettimeofday:
9139 ret = get_errno(gettimeofday(&tv, NULL));
9140 if (!is_error(ret)) {
9141 if (copy_to_user_timeval(arg1, &tv))
/* settimeofday(2): tv/tz are each optional; ptv/ptz stay NULL unless the
 * corresponding guest pointer is set (the pointer-assignment lines are
 * elided here — presumably ptv = &tv / ptz = &tz on successful copy). */
9146 case TARGET_NR_settimeofday:
9148 struct timeval tv, *ptv = NULL;
9149 struct timezone tz, *ptz = NULL;
9152 if (copy_from_user_timeval(&tv, arg1)) {
9159 if (copy_from_user_timezone(&tz, arg2)) {
9165 ret = get_errno(settimeofday(ptv, ptz));
/* select(2): dispatch depends on the target architecture's historical ABI. */
9168 #if defined(TARGET_NR_select)
9169 case TARGET_NR_select:
9170 #if defined(TARGET_WANT_NI_OLD_SELECT)
9171 /* some architectures used to have old_select here
9172 * but now ENOSYS it.
9174 ret = -TARGET_ENOSYS;
9175 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9176 ret = do_old_select(arg1);
9178 ret = do_select(arg1, arg2, arg3, arg4, arg5);
/* pselect6(2): implemented by hand because arg6 packs (sigset ptr, sigset
 * size) into one guest structure, which the C library interface can't take. */
9182 #ifdef TARGET_NR_pselect6
9183 case TARGET_NR_pselect6:
9185 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9186 fd_set rfds, wfds, efds;
9187 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9188 struct timespec ts, *ts_ptr;
9191 * The 6th arg is actually two args smashed together,
9192 * so we cannot use the C library.
9200 abi_ulong arg_sigset, arg_sigsize, *arg7;
9201 target_sigset_t *target_sigset;
/* Copy in each fd set; a NULL guest pointer yields a NULL host pointer. */
9209 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9213 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9217 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9223 * This takes a timespec, and not a timeval, so we cannot
9224 * use the do_select() helper ...
9227 if (target_to_host_timespec(&ts, ts_addr)) {
9235 /* Extract the two packed args for the sigset */
9238 sig.size = SIGSET_T_SIZE;
9240 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9244 arg_sigset = tswapal(arg7[0]);
9245 arg_sigsize = tswapal(arg7[1]);
9246 unlock_user(arg7, arg6, 0);
9250 if (arg_sigsize != sizeof(*target_sigset)) {
9251 /* Like the kernel, we enforce correct size sigsets */
9252 ret = -TARGET_EINVAL;
9255 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9256 sizeof(*target_sigset), 1);
9257 if (!target_sigset) {
9260 target_to_host_sigset(&set, target_sigset);
9261 unlock_user(target_sigset, arg_sigset, 0);
/* safe_pselect6 is the signal-race-safe syscall wrapper; on success the
 * surviving fd sets and the remaining timeout are copied back to the guest. */
9269 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9272 if (!is_error(ret)) {
9273 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9275 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9277 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9280 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
/* symlink(2): both paths are guest strings; EFAULT if either fails to lock
 * (the failure branch structure is partially elided in this listing). */
9286 #ifdef TARGET_NR_symlink
9287 case TARGET_NR_symlink:
9290 p = lock_user_string(arg1)
9291 p2 = lock_user_string(arg2);
9293 ret = -TARGET_EFAULT;
9295 ret = get_errno(symlink(p, p2));
9296 unlock_user(p2, arg2, 0);
9297 unlock_user(p, arg1, 0);
/* symlinkat(2): same as above but the link is created relative to dirfd arg2. */
9301 #if defined(TARGET_NR_symlinkat)
9302 case TARGET_NR_symlinkat:
9305 p = lock_user_string(arg1);
9306 p2 = lock_user_string(arg3);
9308 ret = -TARGET_EFAULT;
9310 ret = get_errno(symlinkat(p, arg2, p2));
9311 unlock_user(p2, arg3, 0);
9312 unlock_user(p, arg1, 0);
9316 #ifdef TARGET_NR_oldlstat
9317 case TARGET_NR_oldlstat:
/* readlink(2): /proc/self/exe is special-cased so the guest sees the path of
 * the emulated binary (exec_path), not the qemu binary itself. */
9320 #ifdef TARGET_NR_readlink
9321 case TARGET_NR_readlink:
9324 p = lock_user_string(arg1);
9325 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9327 ret = -TARGET_EFAULT;
9329 /* Short circuit this for the magic exe check. */
9330 ret = -TARGET_EINVAL;
9331 } else if (is_proc_myself((const char *)p, "exe")) {
9332 char real[PATH_MAX], *temp;
9333 temp = realpath(exec_path, real);
9334 /* Return value is # of bytes that we wrote to the buffer. */
9336 ret = get_errno(-1);
9338 /* Don't worry about sign mismatch as earlier mapping
9339 * logic would have thrown a bad address error. */
9340 ret = MIN(strlen(real), arg3);
9341 /* We cannot NUL terminate the string. */
9342 memcpy(p2, real, ret);
9345 ret = get_errno(readlink(path(p), p2, arg3));
9347 unlock_user(p2, arg2, ret);
9348 unlock_user(p, arg1, 0);
/* readlinkat(2): same /proc/self/exe special case; note this variant uses
 * snprintf (NUL-terminating/truncating) unlike readlink's memcpy above. */
9352 #if defined(TARGET_NR_readlinkat)
9353 case TARGET_NR_readlinkat:
9356 p = lock_user_string(arg2);
9357 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9359 ret = -TARGET_EFAULT;
9360 } else if (is_proc_myself((const char *)p, "exe")) {
9361 char real[PATH_MAX], *temp;
9362 temp = realpath(exec_path, real);
9363 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9364 snprintf((char *)p2, arg4, "%s", real);
9366 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9368 unlock_user(p2, arg3, ret);
9369 unlock_user(p, arg2, 0);
9373 #ifdef TARGET_NR_uselib
9374 case TARGET_NR_uselib:
/* swapon(2): straight passthrough of the guest path string. */
9377 #ifdef TARGET_NR_swapon
9378 case TARGET_NR_swapon:
9379 if (!(p = lock_user_string(arg1)))
9381 ret = get_errno(swapon(p, arg2));
9382 unlock_user(p, arg1, 0);
/* reboot(2): arg4 is a command string only for LINUX_REBOOT_CMD_RESTART2;
 * otherwise it must not be dereferenced, so NULL is passed. */
9385 case TARGET_NR_reboot:
9386 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9387 /* arg4 must be ignored in all other cases */
9388 p = lock_user_string(arg4);
9392 ret = get_errno(reboot(arg1, arg2, arg3, p));
9393 unlock_user(p, arg4, 0);
9395 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9398 #ifdef TARGET_NR_readdir
9399 case TARGET_NR_readdir:
/* mmap(2): on the listed 32-bit/odd-ABI targets the old mmap passes its six
 * arguments via a guest-memory array at arg1 rather than in registers. */
9402 #ifdef TARGET_NR_mmap
9403 case TARGET_NR_mmap:
9404 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9405 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9406 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9407 || defined(TARGET_S390X)
9410 abi_ulong v1, v2, v3, v4, v5, v6;
9411 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9419 unlock_user(v, arg1, 0);
9420 ret = get_errno(target_mmap(v1, v2, v3,
9421 target_to_host_bitmask(v4, mmap_flags_tbl),
/* Register-argument variant: flags are translated guest->host via table. */
9425 ret = get_errno(target_mmap(arg1, arg2, arg3,
9426 target_to_host_bitmask(arg4, mmap_flags_tbl),
/* mmap2(2): offset argument is in 4 KiB units, hence the shift by 12. */
9432 #ifdef TARGET_NR_mmap2
9433 case TARGET_NR_mmap2:
9435 #define MMAP_SHIFT 12
9437 ret = get_errno(target_mmap(arg1, arg2, arg3,
9438 target_to_host_bitmask(arg4, mmap_flags_tbl),
9440 arg6 << MMAP_SHIFT));
9443 case TARGET_NR_munmap:
9444 ret = get_errno(target_munmap(arg1, arg2));
/* mprotect(2): if libc asks to grow protection down into the guest stack,
 * widen the range to start at the stack limit and drop PROT_GROWSDOWN,
 * since the host can't apply GROWSDOWN to guest mappings. */
9446 case TARGET_NR_mprotect:
9448 TaskState *ts = cpu->opaque;
9449 /* Special hack to detect libc making the stack executable. */
9450 if ((arg3 & PROT_GROWSDOWN)
9451 && arg1 >= ts->info->stack_limit
9452 && arg1 <= ts->info->start_stack) {
9453 arg3 &= ~PROT_GROWSDOWN;
9454 arg2 = arg2 + arg1 - ts->info->stack_limit;
9455 arg1 = ts->info->stack_limit;
9458 ret = get_errno(target_mprotect(arg1, arg2, arg3));
9460 #ifdef TARGET_NR_mremap
9461 case TARGET_NR_mremap:
9462 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
/* g2h() converts a guest address to the host pointer backing it; these calls
 * operate directly on the host mapping. */
9465 /* ??? msync/mlock/munlock are broken for softmmu. */
9466 #ifdef TARGET_NR_msync
9467 case TARGET_NR_msync:
9468 ret = get_errno(msync(g2h(arg1), arg2, arg3));
9471 #ifdef TARGET_NR_mlock
9472 case TARGET_NR_mlock:
9473 ret = get_errno(mlock(g2h(arg1), arg2));
9476 #ifdef TARGET_NR_munlock
9477 case TARGET_NR_munlock:
9478 ret = get_errno(munlock(g2h(arg1), arg2));
9481 #ifdef TARGET_NR_mlockall
9482 case TARGET_NR_mlockall:
9483 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9486 #ifdef TARGET_NR_munlockall
9487 case TARGET_NR_munlockall:
9488 ret = get_errno(munlockall());
/* truncate/ftruncate/fchmod(at): thin passthroughs with guest-string locking
 * where a path is involved. */
9491 case TARGET_NR_truncate:
9492 if (!(p = lock_user_string(arg1)))
9494 ret = get_errno(truncate(p, arg2));
9495 unlock_user(p, arg1, 0);
9497 case TARGET_NR_ftruncate:
9498 ret = get_errno(ftruncate(arg1, arg2));
9500 case TARGET_NR_fchmod:
9501 ret = get_errno(fchmod(arg1, arg2));
9503 #if defined(TARGET_NR_fchmodat)
9504 case TARGET_NR_fchmodat:
9505 if (!(p = lock_user_string(arg2)))
9507 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9508 unlock_user(p, arg2, 0);
/* getpriority(2): -1 is a legal return, so errno (not the return value)
 * distinguishes failure; Alpha additionally reports "no error" via IR_V0. */
9511 case TARGET_NR_getpriority:
9512 /* Note that negative values are valid for getpriority, so we must
9513 differentiate based on errno settings. */
9515 ret = getpriority(arg1, arg2);
9516 if (ret == -1 && errno != 0) {
9517 ret = -host_to_target_errno(errno);
9521 /* Return value is the unbiased priority. Signal no error. */
9522 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9524 /* Return value is a biased priority to avoid negative numbers. */
9528 case TARGET_NR_setpriority:
9529 ret = get_errno(setpriority(arg1, arg2, arg3));
9531 #ifdef TARGET_NR_profil
9532 case TARGET_NR_profil:
/* statfs(2): call host statfs, then marshal each field into the guest's
 * struct target_statfs with byte-swapping via __put_user. */
9535 case TARGET_NR_statfs:
9536 if (!(p = lock_user_string(arg1)))
9538 ret = get_errno(statfs(path(p), &stfs));
9539 unlock_user(p, arg1, 0);
9541 if (!is_error(ret)) {
9542 struct target_statfs *target_stfs;
9544 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9546 __put_user(stfs.f_type, &target_stfs->f_type);
9547 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9548 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9549 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9550 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9551 __put_user(stfs.f_files, &target_stfs->f_files);
9552 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9553 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9554 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9555 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9556 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9557 #ifdef _STATFS_F_FLAGS
9558 __put_user(stfs.f_flags, &target_stfs->f_flags);
9560 __put_user(0, &target_stfs->f_flags);
9562 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9563 unlock_user_struct(target_stfs, arg2, 1);
/* fstatfs shares the statfs conversion code via the (elided) convert_statfs
 * label jumped to here. */
9566 case TARGET_NR_fstatfs:
9567 ret = get_errno(fstatfs(arg1, &stfs));
9568 goto convert_statfs;
/* statfs64: same marshalling but into struct target_statfs64 at arg3
 * (arg2 is the guest's size argument for this variant). */
9569 #ifdef TARGET_NR_statfs64
9570 case TARGET_NR_statfs64:
9571 if (!(p = lock_user_string(arg1)))
9573 ret = get_errno(statfs(path(p), &stfs));
9574 unlock_user(p, arg1, 0);
9576 if (!is_error(ret)) {
9577 struct target_statfs64 *target_stfs;
9579 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9581 __put_user(stfs.f_type, &target_stfs->f_type);
9582 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9583 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9584 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9585 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9586 __put_user(stfs.f_files, &target_stfs->f_files);
9587 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9588 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9589 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9590 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9591 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9592 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9593 unlock_user_struct(target_stfs, arg3, 1);
9596 case TARGET_NR_fstatfs64:
9597 ret = get_errno(fstatfs(arg1, &stfs));
9598 goto convert_statfs64;
9600 #ifdef TARGET_NR_ioperm
9601 case TARGET_NR_ioperm:
/* socketcall(2): multiplexed socket interface used by some 32-bit ABIs;
 * do_socketcall demultiplexes into the individual do_* helpers below. */
9604 #ifdef TARGET_NR_socketcall
9605 case TARGET_NR_socketcall:
9606 ret = do_socketcall(arg1, arg2);
/* Individual socket syscalls: each is a one-line dispatch to a do_* helper
 * that handles guest<->host sockaddr/msghdr conversion, or a direct host
 * call where no conversion is needed (listen, shutdown). */
9609 #ifdef TARGET_NR_accept
9610 case TARGET_NR_accept:
9611 ret = do_accept4(arg1, arg2, arg3, 0);
9614 #ifdef TARGET_NR_accept4
9615 case TARGET_NR_accept4:
9616 ret = do_accept4(arg1, arg2, arg3, arg4);
9619 #ifdef TARGET_NR_bind
9620 case TARGET_NR_bind:
9621 ret = do_bind(arg1, arg2, arg3);
9624 #ifdef TARGET_NR_connect
9625 case TARGET_NR_connect:
9626 ret = do_connect(arg1, arg2, arg3);
9629 #ifdef TARGET_NR_getpeername
9630 case TARGET_NR_getpeername:
9631 ret = do_getpeername(arg1, arg2, arg3);
9634 #ifdef TARGET_NR_getsockname
9635 case TARGET_NR_getsockname:
9636 ret = do_getsockname(arg1, arg2, arg3);
9639 #ifdef TARGET_NR_getsockopt
9640 case TARGET_NR_getsockopt:
9641 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9644 #ifdef TARGET_NR_listen
9645 case TARGET_NR_listen:
9646 ret = get_errno(listen(arg1, arg2));
9649 #ifdef TARGET_NR_recv
9650 case TARGET_NR_recv:
9651 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9654 #ifdef TARGET_NR_recvfrom
9655 case TARGET_NR_recvfrom:
9656 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9659 #ifdef TARGET_NR_recvmsg
9660 case TARGET_NR_recvmsg:
9661 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9664 #ifdef TARGET_NR_send
9665 case TARGET_NR_send:
9666 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9669 #ifdef TARGET_NR_sendmsg
9670 case TARGET_NR_sendmsg:
9671 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9674 #ifdef TARGET_NR_sendmmsg
9675 case TARGET_NR_sendmmsg:
9676 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1)
9678 case TARGET_NR_recvmmsg:
9679 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9682 #ifdef TARGET_NR_sendto
9683 case TARGET_NR_sendto:
9684 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9687 #ifdef TARGET_NR_shutdown
9688 case TARGET_NR_shutdown:
9689 ret = get_errno(shutdown(arg1, arg2));
/* getrandom(2): only compiled in when the host kernel headers define it. */
9692 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9693 case TARGET_NR_getrandom:
9694 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9698 ret = get_errno(getrandom(p, arg2, arg3));
9699 unlock_user(p, arg1, ret);
9702 #ifdef TARGET_NR_socket
9703 case TARGET_NR_socket:
9704 ret = do_socket(arg1, arg2, arg3);
9707 #ifdef TARGET_NR_socketpair
9708 case TARGET_NR_socketpair:
9709 ret = do_socketpair(arg1, arg2, arg3, arg4);
9712 #ifdef TARGET_NR_setsockopt
9713 case TARGET_NR_setsockopt:
9714 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
/* syslog(2): buffer-less actions go straight through with a NULL buffer;
 * the read actions lock a guest buffer of arg3 bytes first. */
9717 #if defined(TARGET_NR_syslog)
9718 case TARGET_NR_syslog:
9723 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9724 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9725 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9726 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9727 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9728 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9729 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9730 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9732 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9735 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9736 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9737 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9739 ret = -TARGET_EINVAL;
9747 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9749 ret = -TARGET_EFAULT;
9752 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9753 unlock_user(p, arg2, arg3);
/* setitimer(2): guest itimerval is two consecutive target_timevals
 * (it_interval then it_value); old value is copied back if arg3 is set. */
9763 case TARGET_NR_setitimer:
9765 struct itimerval value, ovalue, *pvalue;
9769 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9770 || copy_from_user_timeval(&pvalue->it_value,
9771 arg2 + sizeof(struct target_timeval)))
9776 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9777 if (!is_error(ret) && arg3) {
9778 if (copy_to_user_timeval(arg3,
9779 &ovalue.it_interval)
9780 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
/* getitimer(2): mirror of the above, write-only direction. */
9786 case TARGET_NR_getitimer:
9788 struct itimerval value;
9790 ret = get_errno(getitimer(arg1, &value));
9791 if (!is_error(ret) && arg2) {
9792 if (copy_to_user_timeval(arg2,
9794 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
/* stat/lstat/fstat: host call into `st`, then (shared tail below) convert
 * into struct target_stat field-by-field. */
9800 #ifdef TARGET_NR_stat
9801 case TARGET_NR_stat:
9802 if (!(p = lock_user_string(arg1)))
9804 ret = get_errno(stat(path(p), &st));
9805 unlock_user(p, arg1, 0);
9808 #ifdef TARGET_NR_lstat
9809 case TARGET_NR_lstat:
9810 if (!(p = lock_user_string(arg1)))
9812 ret = get_errno(lstat(path(p), &st));
9813 unlock_user(p, arg1, 0);
9816 case TARGET_NR_fstat:
9818 ret = get_errno(fstat(arg1, &st));
9820 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9822 if (!is_error(ret)) {
9823 struct target_stat *target_st;
9825 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9827 memset(target_st, 0, sizeof(*target_st));
9828 __put_user(st.st_dev, &target_st->st_dev);
9829 __put_user(st.st_ino, &target_st->st_ino);
9830 __put_user(st.st_mode, &target_st->st_mode);
9831 __put_user(st.st_uid, &target_st->st_uid);
9832 __put_user(st.st_gid, &target_st->st_gid);
9833 __put_user(st.st_nlink, &target_st->st_nlink);
9834 __put_user(st.st_rdev, &target_st->st_rdev);
9835 __put_user(st.st_size, &target_st->st_size);
9836 __put_user(st.st_blksize, &target_st->st_blksize);
9837 __put_user(st.st_blocks, &target_st->st_blocks);
9838 __put_user(st.st_atime, &target_st->target_st_atime);
9839 __put_user(st.st_mtime, &target_st->target_st_mtime);
9840 __put_user(st.st_ctime, &target_st->target_st_ctime);
9841 unlock_user_struct(target_st, arg2, 1);
9845 #ifdef TARGET_NR_olduname
9846 case TARGET_NR_olduname:
9849 #ifdef TARGET_NR_iopl
9850 case TARGET_NR_iopl:
9853 case TARGET_NR_vhangup:
9854 ret = get_errno(vhangup());
9856 #ifdef TARGET_NR_idle
9857 case TARGET_NR_idle:
/* syscall(2)-style indirection: re-enter do_syscall with the guest's
 * requested syscall number in arg1. */
9860 #ifdef TARGET_NR_syscall
9861 case TARGET_NR_syscall:
9862 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9863 arg6, arg7, arg8, 0);
/* wait4(2): status and rusage are written back only on success; status is
 * translated host->guest via host_to_target_waitstatus. */
9866 case TARGET_NR_wait4:
9869 abi_long status_ptr = arg2;
9870 struct rusage rusage, *rusage_ptr;
9871 abi_ulong target_rusage = arg4;
9872 abi_long rusage_err;
9874 rusage_ptr = &rusage;
9877 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9878 if (!is_error(ret)) {
9879 if (status_ptr && ret) {
9880 status = host_to_target_waitstatus(status);
9881 if (put_user_s32(status, status_ptr))
9884 if (target_rusage) {
9885 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9893 #ifdef TARGET_NR_swapoff
9894 case TARGET_NR_swapoff:
9895 if (!(p = lock_user_string(arg1)))
9897 ret = get_errno(swapoff(p));
9898 unlock_user(p, arg1, 0);
/* sysinfo(2): host sysinfo then field-by-field marshal to the guest. */
9901 case TARGET_NR_sysinfo:
9903 struct target_sysinfo *target_value;
9904 struct sysinfo value;
9905 ret = get_errno(sysinfo(&value));
9906 if (!is_error(ret) && arg1)
9908 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9910 __put_user(value.uptime, &target_value->uptime);
9911 __put_user(value.loads[0], &target_value->loads[0]);
9912 __put_user(value.loads[1], &target_value->loads[1]);
9913 __put_user(value.loads[2], &target_value->loads[2]);
9914 __put_user(value.totalram, &target_value->totalram);
9915 __put_user(value.freeram, &target_value->freeram);
9916 __put_user(value.sharedram, &target_value->sharedram);
9917 __put_user(value.bufferram, &target_value->bufferram);
9918 __put_user(value.totalswap, &target_value->totalswap);
9919 __put_user(value.freeswap, &target_value->freeswap);
9920 __put_user(value.procs, &target_value->procs);
9921 __put_user(value.totalhigh, &target_value->totalhigh);
9922 __put_user(value.freehigh, &target_value->freehigh);
9923 __put_user(value.mem_unit, &target_value->mem_unit);
9924 unlock_user_struct(target_value, arg1, 1);
/* SysV IPC: ipc(2) is the multiplexed form; the individual sem/msg/shm
 * syscalls dispatch to do_* helpers or call the host directly where no
 * argument conversion is needed (semget, msgget, shmget). */
9928 #ifdef TARGET_NR_ipc
9930 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9933 #ifdef TARGET_NR_semget
9934 case TARGET_NR_semget:
9935 ret = get_errno(semget(arg1, arg2, arg3));
9938 #ifdef TARGET_NR_semop
9939 case TARGET_NR_semop:
9940 ret = do_semop(arg1, arg2, arg3);
9943 #ifdef TARGET_NR_semctl
9944 case TARGET_NR_semctl:
9945 ret = do_semctl(arg1, arg2, arg3, arg4);
9948 #ifdef TARGET_NR_msgctl
9949 case TARGET_NR_msgctl:
9950 ret = do_msgctl(arg1, arg2, arg3);
9953 #ifdef TARGET_NR_msgget
9954 case TARGET_NR_msgget:
9955 ret = get_errno(msgget(arg1, arg2));
9958 #ifdef TARGET_NR_msgrcv
9959 case TARGET_NR_msgrcv:
9960 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9963 #ifdef TARGET_NR_msgsnd
9964 case TARGET_NR_msgsnd:
9965 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9968 #ifdef TARGET_NR_shmget
9969 case TARGET_NR_shmget:
9970 ret = get_errno(shmget(arg1, arg2, arg3));
9973 #ifdef TARGET_NR_shmctl
9974 case TARGET_NR_shmctl:
9975 ret = do_shmctl(arg1, arg2, arg3);
9978 #ifdef TARGET_NR_shmat
9979 case TARGET_NR_shmat:
9980 ret = do_shmat(cpu_env, arg1, arg2, arg3);
9983 #ifdef TARGET_NR_shmdt
9984 case TARGET_NR_shmdt:
9985 ret = do_shmdt(arg1);
9988 case TARGET_NR_fsync:
9989 ret = get_errno(fsync(arg1));
/* clone(2): argument order differs per architecture (CONFIG_CLONE_BACKWARDS*
 * in the kernel); Microblaze passes TLS as an implicit sixth argument. */
9991 case TARGET_NR_clone:
9992 /* Linux manages to have three different orderings for its
9993 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9994 * match the kernel's CONFIG_CLONE_* settings.
9995 * Microblaze is further special in that it uses a sixth
9996 * implicit argument to clone for the TLS pointer.
9998 #if defined(TARGET_MICROBLAZE)
9999 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10000 #elif defined(TARGET_CLONE_BACKWARDS)
10001 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10002 #elif defined(TARGET_CLONE_BACKWARDS2)
10003 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10005 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
/* exit_group(2): notify gdbstub before terminating the whole process. */
10008 #ifdef __NR_exit_group
10009 /* new thread calls */
10010 case TARGET_NR_exit_group:
10011 #ifdef TARGET_GPROF
10014 gdb_exit(cpu_env, arg1);
10015 ret = get_errno(exit_group(arg1));
10018 case TARGET_NR_setdomainname:
10019 if (!(p = lock_user_string(arg1)))
10021 ret = get_errno(setdomainname(p, arg2));
10022 unlock_user(p, arg1, 0);
/* uname(2): run the host syscall, then override `machine` with the emulated
 * CPU's name and optionally `release` with the user-requested version. */
10024 case TARGET_NR_uname:
10025 /* no need to transcode because we use the linux syscall */
10027 struct new_utsname * buf;
10029 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10031 ret = get_errno(sys_uname(buf));
10032 if (!is_error(ret)) {
10033 /* Overwrite the native machine name with whatever is being
10035 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
10036 /* Allow the user to override the reported release. */
10037 if (qemu_uname_release && *qemu_uname_release) {
10038 g_strlcpy(buf->release, qemu_uname_release,
10039 sizeof(buf->release));
10042 unlock_user_struct(buf, arg1, 1);
10046 case TARGET_NR_modify_ldt:
10047 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
/* vm86 is only meaningful for 32-bit x86 guests. */
10049 #if !defined(TARGET_X86_64)
10050 case TARGET_NR_vm86old:
10051 goto unimplemented;
10052 case TARGET_NR_vm86:
10053 ret = do_vm86(cpu_env, arg1, arg2);
/* adjtimex(2): struct timex is converted guest->host, passed to the host
 * call, and converted back (it is an in/out structure). */
10057 case TARGET_NR_adjtimex:
10059 struct timex host_buf;
10061 if (target_to_host_timex(&host_buf, arg1) != 0) {
10064 ret = get_errno(adjtimex(&host_buf));
10065 if (!is_error(ret)) {
10066 if (host_to_target_timex(arg1, &host_buf) != 0) {
10072 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10073 case TARGET_NR_clock_adjtime:
10075 struct timex htx, *phtx = &htx;
10077 if (target_to_host_timex(phtx, arg2) != 0) {
10080 ret = get_errno(clock_adjtime(arg1, phtx));
10081 if (!is_error(ret) && phtx) {
10082 if (host_to_target_timex(arg2, phtx) != 0) {
/* Kernel-module and quota syscalls are deliberately unimplemented —
 * loading guest modules makes no sense under user-mode emulation. */
10089 #ifdef TARGET_NR_create_module
10090 case TARGET_NR_create_module:
10092 case TARGET_NR_init_module:
10093 case TARGET_NR_delete_module:
10094 #ifdef TARGET_NR_get_kernel_syms
10095 case TARGET_NR_get_kernel_syms:
10097 goto unimplemented;
10098 case TARGET_NR_quotactl:
10099 goto unimplemented;
10100 case TARGET_NR_getpgid:
10101 ret = get_errno(getpgid(arg1));
10103 case TARGET_NR_fchdir:
10104 ret = get_errno(fchdir(arg1));
10106 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10107 case TARGET_NR_bdflush:
10108 goto unimplemented;
10110 #ifdef TARGET_NR_sysfs
10111 case TARGET_NR_sysfs:
10112 goto unimplemented;
10114 case TARGET_NR_personality:
10115 ret = get_errno(personality(arg1));
10117 #ifdef TARGET_NR_afs_syscall
10118 case TARGET_NR_afs_syscall:
10119 goto unimplemented;
/* _llseek(2): on hosts without __NR_llseek fall back to plain lseek with the
 * 64-bit offset rebuilt from the guest's hi/lo pair; the 64-bit result is
 * stored to guest memory at arg4. */
10121 #ifdef TARGET_NR__llseek /* Not on alpha */
10122 case TARGET_NR__llseek:
10125 #if !defined(__NR_llseek)
10126 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10128 ret = get_errno(res);
10133 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10135 if ((ret == 0) && put_user_s64(res, arg4)) {
/* getdents(2): three strategies depending on host/target combination:
 * 1) 32-bit target on 64-bit host: read into a host linux_dirent buffer and
 *    repack each record into the smaller target_dirent layout;
 * 2) matching layouts: byte-swap the records in place;
 * 3) no host getdents: synthesize from getdents64, shrinking records
 *    in place (target_dirent is never larger than linux_dirent64). */
10141 #ifdef TARGET_NR_getdents
10142 case TARGET_NR_getdents:
10143 #ifdef __NR_getdents
10144 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10146 struct target_dirent *target_dirp;
10147 struct linux_dirent *dirp;
10148 abi_long count = arg3;
10150 dirp = g_try_malloc(count);
10152 ret = -TARGET_ENOMEM;
10156 ret = get_errno(sys_getdents(arg1, dirp, count));
10157 if (!is_error(ret)) {
10158 struct linux_dirent *de;
10159 struct target_dirent *tde;
10161 int reclen, treclen;
10162 int count1, tnamelen;
10166 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10170 reclen = de->d_reclen;
/* Name length is derived from the host record length; the repacked record
 * shrinks by the difference between the two headers. */
10171 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10172 assert(tnamelen >= 0);
10173 treclen = tnamelen + offsetof(struct target_dirent, d_name);
10174 assert(count1 + treclen <= count);
10175 tde->d_reclen = tswap16(treclen);
10176 tde->d_ino = tswapal(de->d_ino);
10177 tde->d_off = tswapal(de->d_off);
10178 memcpy(tde->d_name, de->d_name, tnamelen);
10179 de = (struct linux_dirent *)((char *)de + reclen);
10181 tde = (struct target_dirent *)((char *)tde + treclen);
10185 unlock_user(target_dirp, arg2, ret);
/* Same-width variant: records only need endian conversion. */
10191 struct linux_dirent *dirp;
10192 abi_long count = arg3;
10194 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10196 ret = get_errno(sys_getdents(arg1, dirp, count));
10197 if (!is_error(ret)) {
10198 struct linux_dirent *de;
10203 reclen = de->d_reclen;
10206 de->d_reclen = tswap16(reclen);
10207 tswapls(&de->d_ino);
10208 tswapls(&de->d_off);
10209 de = (struct linux_dirent *)((char *)de + reclen);
10213 unlock_user(dirp, arg2, ret);
10217 /* Implement getdents in terms of getdents64 */
10219 struct linux_dirent64 *dirp;
10220 abi_long count = arg3;
10222 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10226 ret = get_errno(sys_getdents64(arg1, dirp, count));
10227 if (!is_error(ret)) {
10228 /* Convert the dirent64 structs to target dirent. We do this
10229 * in-place, since we can guarantee that a target_dirent is no
10230 * larger than a dirent64; however this means we have to be
10231 * careful to read everything before writing in the new format.
10233 struct linux_dirent64 *de;
10234 struct target_dirent *tde;
10239 tde = (struct target_dirent *)dirp;
10241 int namelen, treclen;
/* Snapshot all source fields before the overlapping memmove below. */
10242 int reclen = de->d_reclen;
10243 uint64_t ino = de->d_ino;
10244 int64_t off = de->d_off;
10245 uint8_t type = de->d_type;
10247 namelen = strlen(de->d_name);
10248 treclen = offsetof(struct target_dirent, d_name)
10250 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10252 memmove(tde->d_name, de->d_name, namelen + 1);
10253 tde->d_ino = tswapal(ino);
10254 tde->d_off = tswapal(off);
10255 tde->d_reclen = tswap16(treclen);
10256 /* The target_dirent type is in what was formerly a padding
10257 * byte at the end of the structure:
10259 *(((char *)tde) + treclen - 1) = type;
10261 de = (struct linux_dirent64 *)((char *)de + reclen);
10262 tde = (struct target_dirent *)((char *)tde + treclen);
10268 unlock_user(dirp, arg2, ret);
10272 #endif /* TARGET_NR_getdents */
/* getdents64(2): identical layout on both sides, so only in-place
 * byte-swapping of each record is needed. */
10273 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10274 case TARGET_NR_getdents64:
10276 struct linux_dirent64 *dirp;
10277 abi_long count = arg3;
10278 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10280 ret = get_errno(sys_getdents64(arg1, dirp, count));
10281 if (!is_error(ret)) {
10282 struct linux_dirent64 *de;
10287 reclen = de->d_reclen;
10290 de->d_reclen = tswap16(reclen);
10291 tswap64s((uint64_t *)&de->d_ino);
10292 tswap64s((uint64_t *)&de->d_off);
10293 de = (struct linux_dirent64 *)((char *)de + reclen);
10297 unlock_user(dirp, arg2, ret);
10300 #endif /* TARGET_NR_getdents64 */
10301 #if defined(TARGET_NR__newselect)
10302 case TARGET_NR__newselect:
10303 ret = do_select(arg1, arg2, arg3, arg4, arg5);
/* poll/ppoll share setup: validate nfds, lock the guest pollfd array and
 * build a host pollfd array with byte-swapped fd/events fields. */
10306 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10307 # ifdef TARGET_NR_poll
10308 case TARGET_NR_poll:
10310 # ifdef TARGET_NR_ppoll
10311 case TARGET_NR_ppoll:
10314 struct target_pollfd *target_pfd;
10315 unsigned int nfds = arg2;
10316 struct pollfd *pfd;
/* Guard against overflow in the size computation below. */
10322 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10323 ret = -TARGET_EINVAL;
10327 target_pfd = lock_user(VERIFY_WRITE, arg1,
10328 sizeof(struct target_pollfd) * nfds, 1);
10333 pfd = alloca(sizeof(struct pollfd) * nfds);
10334 for (i = 0; i < nfds; i++) {
10335 pfd[i].fd = tswap32(target_pfd[i].fd);
10336 pfd[i].events = tswap16(target_pfd[i].events);
/* ppoll(2): optional timespec and sigset are converted from the guest;
 * like the kernel, the sigset size must match exactly. */
10341 # ifdef TARGET_NR_ppoll
10342 case TARGET_NR_ppoll:
10344 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10345 target_sigset_t *target_set;
10346 sigset_t _set, *set = &_set;
10349 if (target_to_host_timespec(timeout_ts, arg3)) {
10350 unlock_user(target_pfd, arg1, 0);
10358 if (arg5 != sizeof(target_sigset_t)) {
10359 unlock_user(target_pfd, arg1, 0);
10360 ret = -TARGET_EINVAL;
10364 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10366 unlock_user(target_pfd, arg1, 0);
10369 target_to_host_sigset(set, target_set);
10374 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10375 set, SIGSET_T_SIZE));
/* Remaining timeout is written back to the guest on success. */
10377 if (!is_error(ret) && arg3) {
10378 host_to_target_timespec(arg3, timeout_ts);
10381 unlock_user(target_set, arg4, 0);
/* poll(2): emulated via ppoll; the millisecond timeout is split into
 * seconds + nanoseconds, and a negative timeout means wait forever. */
10386 # ifdef TARGET_NR_poll
10387 case TARGET_NR_poll:
10389 struct timespec ts, *pts;
10392 /* Convert ms to secs, ns */
10393 ts.tv_sec = arg3 / 1000;
10394 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10397 /* -ve poll() timeout means "infinite" */
10400 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10405 g_assert_not_reached();
/* Shared tail: copy revents back into the guest pollfd array. */
10408 if (!is_error(ret)) {
10409 for(i = 0; i < nfds; i++) {
10410 target_pfd[i].revents = tswap16(pfd[i].revents);
10413 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10417 case TARGET_NR_flock:
10418 /* NOTE: the flock constant seems to be the same for every
10420 ret = get_errno(safe_flock(arg1, arg2));
/* readv/writev and the positional preadv/pwritev variants: lock_iovec
 * builds a host iovec from the guest array; NULL signals EFAULT via errno. */
10422 case TARGET_NR_readv:
10424 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10426 ret = get_errno(safe_readv(arg1, vec, arg3));
10427 unlock_iovec(vec, arg2, arg3, 1);
10429 ret = -host_to_target_errno(errno);
10433 case TARGET_NR_writev:
10435 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10437 ret = get_errno(safe_writev(arg1, vec, arg3));
10438 unlock_iovec(vec, arg2, arg3, 0);
10440 ret = -host_to_target_errno(errno);
10444 #if defined(TARGET_NR_preadv)
10445 case TARGET_NR_preadv:
10447 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10449 ret = get_errno(safe_preadv(arg1, vec, arg3, arg4, arg5));
10450 unlock_iovec(vec, arg2, arg3, 1);
10452 ret = -host_to_target_errno(errno);
10457 #if defined(TARGET_NR_pwritev)
10458 case TARGET_NR_pwritev:
10460 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10462 ret = get_errno(safe_pwritev(arg1, vec, arg3, arg4, arg5));
10463 unlock_iovec(vec, arg2, arg3, 0);
10465 ret = -host_to_target_errno(errno);
10470 case TARGET_NR_getsid:
10471 ret = get_errno(getsid(arg1));
10473 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10474 case TARGET_NR_fdatasync:
10475 ret = get_errno(fdatasync(arg1));
10478 #ifdef TARGET_NR__sysctl
10479 case TARGET_NR__sysctl:
10480 /* We don't implement this, but ENOTDIR is always a safe
10482 ret = -TARGET_ENOTDIR;
/* sched_getaffinity(2): the kernel works in host-ulong units; the mask
 * buffer size is rounded up accordingly, and a too-wide result is either
 * rejected (EINVAL) or trimmed depending on the online CPU count. */
10485 case TARGET_NR_sched_getaffinity:
10487 unsigned int mask_size;
10488 unsigned long *mask;
10491 * sched_getaffinity needs multiples of ulong, so need to take
10492 * care of mismatches between target ulong and host ulong sizes.
10494 if (arg2 & (sizeof(abi_ulong) - 1)) {
10495 ret = -TARGET_EINVAL;
10498 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10500 mask = alloca(mask_size);
10501 memset(mask, 0, mask_size);
10502 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10504 if (!is_error(ret)) {
10506 /* More data returned than the caller's buffer will fit.
10507 * This only happens if sizeof(abi_long) < sizeof(long)
10508 * and the caller passed us a buffer holding an odd number
10509 * of abi_longs. If the host kernel is actually using the
10510 * extra 4 bytes then fail EINVAL; otherwise we can just
10511 * ignore them and only copy the interesting part.
10513 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10514 if (numcpus > arg2 * 8) {
10515 ret = -TARGET_EINVAL;
10521 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
/* sched_setaffinity(2): mirror image — mask converted guest->host. */
10527 case TARGET_NR_sched_setaffinity:
10529 unsigned int mask_size;
10530 unsigned long *mask;
10533 * sched_setaffinity needs multiples of ulong, so need to take
10534 * care of mismatches between target ulong and host ulong sizes.
10536 if (arg2 & (sizeof(abi_ulong) - 1)) {
10537 ret = -TARGET_EINVAL;
10540 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10541 mask = alloca(mask_size);
10543 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10548 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
/* getcpu(2): NULL guest pointers are honored by passing NULL through. */
10551 case TARGET_NR_getcpu:
10553 unsigned cpu, node;
10554 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10555 arg2 ? &node : NULL,
10557 if (is_error(ret)) {
10560 if (arg1 && put_user_u32(cpu, arg1)) {
10563 if (arg2 && put_user_u32(node, arg2)) {
/* sched_setparam/getparam/setscheduler: struct sched_param holds a single
 * sched_priority int, swapped between guest and host byte order. */
10568 case TARGET_NR_sched_setparam:
10570 struct sched_param *target_schp;
10571 struct sched_param schp;
10574 return -TARGET_EINVAL;
10576 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10578 schp.sched_priority = tswap32(target_schp->sched_priority);
10579 unlock_user_struct(target_schp, arg2, 0);
10580 ret = get_errno(sched_setparam(arg1, &schp));
10583 case TARGET_NR_sched_getparam:
10585 struct sched_param *target_schp;
10586 struct sched_param schp;
10589 return -TARGET_EINVAL;
10591 ret = get_errno(sched_getparam(arg1, &schp));
10592 if (!is_error(ret)) {
10593 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10595 target_schp->sched_priority = tswap32(schp.sched_priority);
10596 unlock_user_struct(target_schp, arg2, 1);
10600 case TARGET_NR_sched_setscheduler:
10602 struct sched_param *target_schp;
10603 struct sched_param schp;
10605 return -TARGET_EINVAL;
10607 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10609 schp.sched_priority = tswap32(target_schp->sched_priority);
10610 unlock_user_struct(target_schp, arg3, 0);
10611 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10614 case TARGET_NR_sched_getscheduler:
10615 ret = get_errno(sched_getscheduler(arg1));
10617 case TARGET_NR_sched_yield:
10618 ret = get_errno(sched_yield());
10620 case TARGET_NR_sched_get_priority_max:
10621 ret = get_errno(sched_get_priority_max(arg1));
10623 case TARGET_NR_sched_get_priority_min:
10624 ret = get_errno(sched_get_priority_min(arg1));
10626 case TARGET_NR_sched_rr_get_interval:
10628 struct timespec ts;
10629 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10630 if (!is_error(ret)) {
10631 ret = host_to_target_timespec(arg2, &ts);
/* nanosleep(2): remaining time is written back only when the call was
 * interrupted (error return) and the guest supplied a rem pointer. */
10635 case TARGET_NR_nanosleep:
10637 struct timespec req, rem;
10638 target_to_host_timespec(&req, arg1);
10639 ret = get_errno(safe_nanosleep(&req, &rem));
10640 if (is_error(ret) && arg2) {
10641 host_to_target_timespec(arg2, &rem);
10645 #ifdef TARGET_NR_query_module
10646 case TARGET_NR_query_module:
10647 goto unimplemented;
10649 #ifdef TARGET_NR_nfsservctl
10650 case TARGET_NR_nfsservctl:
10651 goto unimplemented;
/* prctl(2): pointer-taking options need explicit marshalling; SECCOMP is
 * refused so the guest cannot restrict syscalls qemu itself depends on. */
10653 case TARGET_NR_prctl:
10655 case PR_GET_PDEATHSIG:
10658 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10659 if (!is_error(ret) && arg2
10660 && put_user_ual(deathsig, arg2)) {
/* PR_GET_NAME (presumably — option label elided): 16-byte name buffer. */
10668 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10672 ret = get_errno(prctl(arg1, (unsigned long)name,
10673 arg3, arg4, arg5));
10674 unlock_user(name, arg2, 16);
/* PR_SET_NAME (presumably — option label elided): read-only 16-byte name. */
10679 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10683 ret = get_errno(prctl(arg1, (unsigned long)name,
10684 arg3, arg4, arg5));
10685 unlock_user(name, arg2, 0);
10689 case PR_GET_SECCOMP:
10690 case PR_SET_SECCOMP:
10691 /* Disable seccomp to prevent the target disabling syscalls we
10693 ret = -TARGET_EINVAL;
10696 /* Most prctl options have no pointer arguments */
10697 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10701 #ifdef TARGET_NR_arch_prctl
10702 case TARGET_NR_arch_prctl:
10703 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10704 ret = do_arch_prctl(cpu_env, arg1, arg2);
10707 goto unimplemented;
/* pread64/pwrite64: on ABIs that align 64-bit register pairs the offset
 * halves shift position (regpairs_aligned adjusts — body elided); the
 * 64-bit offset is rebuilt from two 32-bit args via target_offset64. */
10710 #ifdef TARGET_NR_pread64
10711 case TARGET_NR_pread64:
10712 if (regpairs_aligned(cpu_env, num)) {
10716 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10718 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10719 unlock_user(p, arg2, ret);
10721 case TARGET_NR_pwrite64:
10722 if (regpairs_aligned(cpu_env, num)) {
10726 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10728 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10729 unlock_user(p, arg2, 0);
10732 case TARGET_NR_getcwd:
10733 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10735 ret = get_errno(sys_getcwd1(p, arg2));
10736 unlock_user(p, arg1, ret);
10738 case TARGET_NR_capget:
10739 case TARGET_NR_capset:
10741 struct target_user_cap_header *target_header;
10742 struct target_user_cap_data *target_data = NULL;
10743 struct __user_cap_header_struct header;
10744 struct __user_cap_data_struct data[2];
10745 struct __user_cap_data_struct *dataptr = NULL;
10746 int i, target_datalen;
10747 int data_items = 1;
10749 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10752 header.version = tswap32(target_header->version);
10753 header.pid = tswap32(target_header->pid);
10755 if (header.version != _LINUX_CAPABILITY_VERSION) {
10756 /* Version 2 and up takes pointer to two user_data structs */
10760 target_datalen = sizeof(*target_data) * data_items;
10763 if (num == TARGET_NR_capget) {
10764 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10766 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10768 if (!target_data) {
10769 unlock_user_struct(target_header, arg1, 0);
10773 if (num == TARGET_NR_capset) {
10774 for (i = 0; i < data_items; i++) {
10775 data[i].effective = tswap32(target_data[i].effective);
10776 data[i].permitted = tswap32(target_data[i].permitted);
10777 data[i].inheritable = tswap32(target_data[i].inheritable);
10784 if (num == TARGET_NR_capget) {
10785 ret = get_errno(capget(&header, dataptr));
10787 ret = get_errno(capset(&header, dataptr));
10790 /* The kernel always updates version for both capget and capset */
10791 target_header->version = tswap32(header.version);
10792 unlock_user_struct(target_header, arg1, 1);
10795 if (num == TARGET_NR_capget) {
10796 for (i = 0; i < data_items; i++) {
10797 target_data[i].effective = tswap32(data[i].effective);
10798 target_data[i].permitted = tswap32(data[i].permitted);
10799 target_data[i].inheritable = tswap32(data[i].inheritable);
10801 unlock_user(target_data, arg2, target_datalen);
10803 unlock_user(target_data, arg2, 0);
10808 case TARGET_NR_sigaltstack:
10809 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10812 #ifdef CONFIG_SENDFILE
10813 case TARGET_NR_sendfile:
10815 off_t *offp = NULL;
10818 ret = get_user_sal(off, arg3);
10819 if (is_error(ret)) {
10824 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10825 if (!is_error(ret) && arg3) {
10826 abi_long ret2 = put_user_sal(off, arg3);
10827 if (is_error(ret2)) {
10833 #ifdef TARGET_NR_sendfile64
10834 case TARGET_NR_sendfile64:
10836 off_t *offp = NULL;
10839 ret = get_user_s64(off, arg3);
10840 if (is_error(ret)) {
10845 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10846 if (!is_error(ret) && arg3) {
10847 abi_long ret2 = put_user_s64(off, arg3);
10848 if (is_error(ret2)) {
10856 case TARGET_NR_sendfile:
10857 #ifdef TARGET_NR_sendfile64
10858 case TARGET_NR_sendfile64:
10860 goto unimplemented;
10863 #ifdef TARGET_NR_getpmsg
10864 case TARGET_NR_getpmsg:
10865 goto unimplemented;
10867 #ifdef TARGET_NR_putpmsg
10868 case TARGET_NR_putpmsg:
10869 goto unimplemented;
10871 #ifdef TARGET_NR_vfork
10872 case TARGET_NR_vfork:
10873 ret = get_errno(do_fork(cpu_env,
10874 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10878 #ifdef TARGET_NR_ugetrlimit
10879 case TARGET_NR_ugetrlimit:
10881 struct rlimit rlim;
10882 int resource = target_to_host_resource(arg1);
10883 ret = get_errno(getrlimit(resource, &rlim));
10884 if (!is_error(ret)) {
10885 struct target_rlimit *target_rlim;
10886 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10888 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10889 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10890 unlock_user_struct(target_rlim, arg2, 1);
10895 #ifdef TARGET_NR_truncate64
10896 case TARGET_NR_truncate64:
10897 if (!(p = lock_user_string(arg1)))
10899 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10900 unlock_user(p, arg1, 0);
10903 #ifdef TARGET_NR_ftruncate64
10904 case TARGET_NR_ftruncate64:
10905 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10908 #ifdef TARGET_NR_stat64
10909 case TARGET_NR_stat64:
10910 if (!(p = lock_user_string(arg1)))
10912 ret = get_errno(stat(path(p), &st));
10913 unlock_user(p, arg1, 0);
10914 if (!is_error(ret))
10915 ret = host_to_target_stat64(cpu_env, arg2, &st);
10918 #ifdef TARGET_NR_lstat64
10919 case TARGET_NR_lstat64:
10920 if (!(p = lock_user_string(arg1)))
10922 ret = get_errno(lstat(path(p), &st));
10923 unlock_user(p, arg1, 0);
10924 if (!is_error(ret))
10925 ret = host_to_target_stat64(cpu_env, arg2, &st);
10928 #ifdef TARGET_NR_fstat64
10929 case TARGET_NR_fstat64:
10930 ret = get_errno(fstat(arg1, &st));
10931 if (!is_error(ret))
10932 ret = host_to_target_stat64(cpu_env, arg2, &st);
10935 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10936 #ifdef TARGET_NR_fstatat64
10937 case TARGET_NR_fstatat64:
10939 #ifdef TARGET_NR_newfstatat
10940 case TARGET_NR_newfstatat:
10942 if (!(p = lock_user_string(arg2)))
10944 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10945 if (!is_error(ret))
10946 ret = host_to_target_stat64(cpu_env, arg3, &st);
10949 #ifdef TARGET_NR_lchown
10950 case TARGET_NR_lchown:
10951 if (!(p = lock_user_string(arg1)))
10953 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10954 unlock_user(p, arg1, 0);
10957 #ifdef TARGET_NR_getuid
10958 case TARGET_NR_getuid:
10959 ret = get_errno(high2lowuid(getuid()));
10962 #ifdef TARGET_NR_getgid
10963 case TARGET_NR_getgid:
10964 ret = get_errno(high2lowgid(getgid()));
10967 #ifdef TARGET_NR_geteuid
10968 case TARGET_NR_geteuid:
10969 ret = get_errno(high2lowuid(geteuid()));
10972 #ifdef TARGET_NR_getegid
10973 case TARGET_NR_getegid:
10974 ret = get_errno(high2lowgid(getegid()));
10977 case TARGET_NR_setreuid:
10978 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10980 case TARGET_NR_setregid:
10981 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10983 case TARGET_NR_getgroups:
10985 int gidsetsize = arg1;
10986 target_id *target_grouplist;
10990 grouplist = alloca(gidsetsize * sizeof(gid_t));
10991 ret = get_errno(getgroups(gidsetsize, grouplist));
10992 if (gidsetsize == 0)
10994 if (!is_error(ret)) {
10995 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10996 if (!target_grouplist)
10998 for(i = 0;i < ret; i++)
10999 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11000 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11004 case TARGET_NR_setgroups:
11006 int gidsetsize = arg1;
11007 target_id *target_grouplist;
11008 gid_t *grouplist = NULL;
11011 grouplist = alloca(gidsetsize * sizeof(gid_t));
11012 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11013 if (!target_grouplist) {
11014 ret = -TARGET_EFAULT;
11017 for (i = 0; i < gidsetsize; i++) {
11018 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11020 unlock_user(target_grouplist, arg2, 0);
11022 ret = get_errno(setgroups(gidsetsize, grouplist));
11025 case TARGET_NR_fchown:
11026 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11028 #if defined(TARGET_NR_fchownat)
11029 case TARGET_NR_fchownat:
11030 if (!(p = lock_user_string(arg2)))
11032 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11033 low2highgid(arg4), arg5));
11034 unlock_user(p, arg2, 0);
11037 #ifdef TARGET_NR_setresuid
11038 case TARGET_NR_setresuid:
11039 ret = get_errno(sys_setresuid(low2highuid(arg1),
11041 low2highuid(arg3)));
11044 #ifdef TARGET_NR_getresuid
11045 case TARGET_NR_getresuid:
11047 uid_t ruid, euid, suid;
11048 ret = get_errno(getresuid(&ruid, &euid, &suid));
11049 if (!is_error(ret)) {
11050 if (put_user_id(high2lowuid(ruid), arg1)
11051 || put_user_id(high2lowuid(euid), arg2)
11052 || put_user_id(high2lowuid(suid), arg3))
11058 #ifdef TARGET_NR_getresgid
11059 case TARGET_NR_setresgid:
11060 ret = get_errno(sys_setresgid(low2highgid(arg1),
11062 low2highgid(arg3)));
11065 #ifdef TARGET_NR_getresgid
11066 case TARGET_NR_getresgid:
11068 gid_t rgid, egid, sgid;
11069 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11070 if (!is_error(ret)) {
11071 if (put_user_id(high2lowgid(rgid), arg1)
11072 || put_user_id(high2lowgid(egid), arg2)
11073 || put_user_id(high2lowgid(sgid), arg3))
11079 #ifdef TARGET_NR_chown
11080 case TARGET_NR_chown:
11081 if (!(p = lock_user_string(arg1)))
11083 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11084 unlock_user(p, arg1, 0);
11087 case TARGET_NR_setuid:
11088 ret = get_errno(sys_setuid(low2highuid(arg1)));
11090 case TARGET_NR_setgid:
11091 ret = get_errno(sys_setgid(low2highgid(arg1)));
11093 case TARGET_NR_setfsuid:
11094 ret = get_errno(setfsuid(arg1));
11096 case TARGET_NR_setfsgid:
11097 ret = get_errno(setfsgid(arg1));
11100 #ifdef TARGET_NR_lchown32
11101 case TARGET_NR_lchown32:
11102 if (!(p = lock_user_string(arg1)))
11104 ret = get_errno(lchown(p, arg2, arg3));
11105 unlock_user(p, arg1, 0);
11108 #ifdef TARGET_NR_getuid32
11109 case TARGET_NR_getuid32:
11110 ret = get_errno(getuid());
11114 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11115 /* Alpha specific */
11116 case TARGET_NR_getxuid:
11120 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11122 ret = get_errno(getuid());
11125 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11126 /* Alpha specific */
11127 case TARGET_NR_getxgid:
11131 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11133 ret = get_errno(getgid());
11136 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11137 /* Alpha specific */
11138 case TARGET_NR_osf_getsysinfo:
11139 ret = -TARGET_EOPNOTSUPP;
11141 case TARGET_GSI_IEEE_FP_CONTROL:
11143 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
11145 /* Copied from linux ieee_fpcr_to_swcr. */
11146 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
11147 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
11148 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
11149 | SWCR_TRAP_ENABLE_DZE
11150 | SWCR_TRAP_ENABLE_OVF);
11151 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
11152 | SWCR_TRAP_ENABLE_INE);
11153 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
11154 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
11156 if (put_user_u64 (swcr, arg2))
11162 /* case GSI_IEEE_STATE_AT_SIGNAL:
11163 -- Not implemented in linux kernel.
11165 -- Retrieves current unaligned access state; not much used.
11166 case GSI_PROC_TYPE:
11167 -- Retrieves implver information; surely not used.
11168 case GSI_GET_HWRPB:
11169 -- Grabs a copy of the HWRPB; surely not used.
11174 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11175 /* Alpha specific */
11176 case TARGET_NR_osf_setsysinfo:
11177 ret = -TARGET_EOPNOTSUPP;
11179 case TARGET_SSI_IEEE_FP_CONTROL:
11181 uint64_t swcr, fpcr, orig_fpcr;
11183 if (get_user_u64 (swcr, arg2)) {
11186 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11187 fpcr = orig_fpcr & FPCR_DYN_MASK;
11189 /* Copied from linux ieee_swcr_to_fpcr. */
11190 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
11191 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
11192 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
11193 | SWCR_TRAP_ENABLE_DZE
11194 | SWCR_TRAP_ENABLE_OVF)) << 48;
11195 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
11196 | SWCR_TRAP_ENABLE_INE)) << 57;
11197 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
11198 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
11200 cpu_alpha_store_fpcr(cpu_env, fpcr);
11205 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11207 uint64_t exc, fpcr, orig_fpcr;
11210 if (get_user_u64(exc, arg2)) {
11214 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11216 /* We only add to the exception status here. */
11217 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
11219 cpu_alpha_store_fpcr(cpu_env, fpcr);
11222 /* Old exceptions are not signaled. */
11223 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
11225 /* If any exceptions set by this call,
11226 and are unmasked, send a signal. */
11228 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
11229 si_code = TARGET_FPE_FLTRES;
11231 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
11232 si_code = TARGET_FPE_FLTUND;
11234 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
11235 si_code = TARGET_FPE_FLTOVF;
11237 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
11238 si_code = TARGET_FPE_FLTDIV;
11240 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
11241 si_code = TARGET_FPE_FLTINV;
11243 if (si_code != 0) {
11244 target_siginfo_t info;
11245 info.si_signo = SIGFPE;
11247 info.si_code = si_code;
11248 info._sifields._sigfault._addr
11249 = ((CPUArchState *)cpu_env)->pc;
11250 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11251 QEMU_SI_FAULT, &info);
11256 /* case SSI_NVPAIRS:
11257 -- Used with SSIN_UACPROC to enable unaligned accesses.
11258 case SSI_IEEE_STATE_AT_SIGNAL:
11259 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11260 -- Not implemented in linux kernel
11265 #ifdef TARGET_NR_osf_sigprocmask
11266 /* Alpha specific. */
11267 case TARGET_NR_osf_sigprocmask:
11271 sigset_t set, oldset;
11274 case TARGET_SIG_BLOCK:
11277 case TARGET_SIG_UNBLOCK:
11280 case TARGET_SIG_SETMASK:
11284 ret = -TARGET_EINVAL;
11288 target_to_host_old_sigset(&set, &mask);
11289 ret = do_sigprocmask(how, &set, &oldset);
11291 host_to_target_old_sigset(&mask, &oldset);
11298 #ifdef TARGET_NR_getgid32
11299 case TARGET_NR_getgid32:
11300 ret = get_errno(getgid());
11303 #ifdef TARGET_NR_geteuid32
11304 case TARGET_NR_geteuid32:
11305 ret = get_errno(geteuid());
11308 #ifdef TARGET_NR_getegid32
11309 case TARGET_NR_getegid32:
11310 ret = get_errno(getegid());
11313 #ifdef TARGET_NR_setreuid32
11314 case TARGET_NR_setreuid32:
11315 ret = get_errno(setreuid(arg1, arg2));
11318 #ifdef TARGET_NR_setregid32
11319 case TARGET_NR_setregid32:
11320 ret = get_errno(setregid(arg1, arg2));
11323 #ifdef TARGET_NR_getgroups32
11324 case TARGET_NR_getgroups32:
11326 int gidsetsize = arg1;
11327 uint32_t *target_grouplist;
11331 grouplist = alloca(gidsetsize * sizeof(gid_t));
11332 ret = get_errno(getgroups(gidsetsize, grouplist));
11333 if (gidsetsize == 0)
11335 if (!is_error(ret)) {
11336 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11337 if (!target_grouplist) {
11338 ret = -TARGET_EFAULT;
11341 for(i = 0;i < ret; i++)
11342 target_grouplist[i] = tswap32(grouplist[i]);
11343 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11348 #ifdef TARGET_NR_setgroups32
11349 case TARGET_NR_setgroups32:
11351 int gidsetsize = arg1;
11352 uint32_t *target_grouplist;
11356 grouplist = alloca(gidsetsize * sizeof(gid_t));
11357 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11358 if (!target_grouplist) {
11359 ret = -TARGET_EFAULT;
11362 for(i = 0;i < gidsetsize; i++)
11363 grouplist[i] = tswap32(target_grouplist[i]);
11364 unlock_user(target_grouplist, arg2, 0);
11365 ret = get_errno(setgroups(gidsetsize, grouplist));
11369 #ifdef TARGET_NR_fchown32
11370 case TARGET_NR_fchown32:
11371 ret = get_errno(fchown(arg1, arg2, arg3));
11374 #ifdef TARGET_NR_setresuid32
11375 case TARGET_NR_setresuid32:
11376 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
11379 #ifdef TARGET_NR_getresuid32
11380 case TARGET_NR_getresuid32:
11382 uid_t ruid, euid, suid;
11383 ret = get_errno(getresuid(&ruid, &euid, &suid));
11384 if (!is_error(ret)) {
11385 if (put_user_u32(ruid, arg1)
11386 || put_user_u32(euid, arg2)
11387 || put_user_u32(suid, arg3))
11393 #ifdef TARGET_NR_setresgid32
11394 case TARGET_NR_setresgid32:
11395 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
11398 #ifdef TARGET_NR_getresgid32
11399 case TARGET_NR_getresgid32:
11401 gid_t rgid, egid, sgid;
11402 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11403 if (!is_error(ret)) {
11404 if (put_user_u32(rgid, arg1)
11405 || put_user_u32(egid, arg2)
11406 || put_user_u32(sgid, arg3))
11412 #ifdef TARGET_NR_chown32
11413 case TARGET_NR_chown32:
11414 if (!(p = lock_user_string(arg1)))
11416 ret = get_errno(chown(p, arg2, arg3));
11417 unlock_user(p, arg1, 0);
11420 #ifdef TARGET_NR_setuid32
11421 case TARGET_NR_setuid32:
11422 ret = get_errno(sys_setuid(arg1));
11425 #ifdef TARGET_NR_setgid32
11426 case TARGET_NR_setgid32:
11427 ret = get_errno(sys_setgid(arg1));
11430 #ifdef TARGET_NR_setfsuid32
11431 case TARGET_NR_setfsuid32:
11432 ret = get_errno(setfsuid(arg1));
11435 #ifdef TARGET_NR_setfsgid32
11436 case TARGET_NR_setfsgid32:
11437 ret = get_errno(setfsgid(arg1));
11441 case TARGET_NR_pivot_root:
11442 goto unimplemented;
11443 #ifdef TARGET_NR_mincore
11444 case TARGET_NR_mincore:
11447 ret = -TARGET_ENOMEM;
11448 a = lock_user(VERIFY_READ, arg1, arg2, 0);
11452 ret = -TARGET_EFAULT;
11453 p = lock_user_string(arg3);
11457 ret = get_errno(mincore(a, arg2, p));
11458 unlock_user(p, arg3, ret);
11460 unlock_user(a, arg1, 0);
11464 #ifdef TARGET_NR_arm_fadvise64_64
11465 case TARGET_NR_arm_fadvise64_64:
11466 /* arm_fadvise64_64 looks like fadvise64_64 but
11467 * with different argument order: fd, advice, offset, len
11468 * rather than the usual fd, offset, len, advice.
11469 * Note that offset and len are both 64-bit so appear as
11470 * pairs of 32-bit registers.
11472 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11473 target_offset64(arg5, arg6), arg2);
11474 ret = -host_to_target_errno(ret);
11478 #if TARGET_ABI_BITS == 32
11480 #ifdef TARGET_NR_fadvise64_64
11481 case TARGET_NR_fadvise64_64:
11482 #if defined(TARGET_PPC)
11483 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11491 /* 6 args: fd, offset (high, low), len (high, low), advice */
11492 if (regpairs_aligned(cpu_env, num)) {
11493 /* offset is in (3,4), len in (5,6) and advice in 7 */
11501 ret = -host_to_target_errno(posix_fadvise(arg1,
11502 target_offset64(arg2, arg3),
11503 target_offset64(arg4, arg5),
11508 #ifdef TARGET_NR_fadvise64
11509 case TARGET_NR_fadvise64:
11510 /* 5 args: fd, offset (high, low), len, advice */
11511 if (regpairs_aligned(cpu_env, num)) {
11512 /* offset is in (3,4), len in 5 and advice in 6 */
11518 ret = -host_to_target_errno(posix_fadvise(arg1,
11519 target_offset64(arg2, arg3),
11524 #else /* not a 32-bit ABI */
11525 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11526 #ifdef TARGET_NR_fadvise64_64
11527 case TARGET_NR_fadvise64_64:
11529 #ifdef TARGET_NR_fadvise64
11530 case TARGET_NR_fadvise64:
11532 #ifdef TARGET_S390X
11534 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11535 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11536 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11537 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11541 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11544 #endif /* end of 64-bit ABI fadvise handling */
11546 #ifdef TARGET_NR_madvise
11547 case TARGET_NR_madvise:
11548 /* A straight passthrough may not be safe because qemu sometimes
11549 turns private file-backed mappings into anonymous mappings.
11550 This will break MADV_DONTNEED.
11551 This is a hint, so ignoring and returning success is ok. */
11552 ret = get_errno(0);
11555 #if TARGET_ABI_BITS == 32
11556 case TARGET_NR_fcntl64:
11560 from_flock64_fn *copyfrom = copy_from_user_flock64;
11561 to_flock64_fn *copyto = copy_to_user_flock64;
11564 if (((CPUARMState *)cpu_env)->eabi) {
11565 copyfrom = copy_from_user_eabi_flock64;
11566 copyto = copy_to_user_eabi_flock64;
11570 cmd = target_to_host_fcntl_cmd(arg2);
11571 if (cmd == -TARGET_EINVAL) {
11577 case TARGET_F_GETLK64:
11578 ret = copyfrom(&fl, arg3);
11582 ret = get_errno(fcntl(arg1, cmd, &fl));
11584 ret = copyto(arg3, &fl);
11588 case TARGET_F_SETLK64:
11589 case TARGET_F_SETLKW64:
11590 ret = copyfrom(&fl, arg3);
11594 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11597 ret = do_fcntl(arg1, arg2, arg3);
11603 #ifdef TARGET_NR_cacheflush
11604 case TARGET_NR_cacheflush:
11605 /* self-modifying code is handled automatically, so nothing needed */
11609 #ifdef TARGET_NR_security
11610 case TARGET_NR_security:
11611 goto unimplemented;
11613 #ifdef TARGET_NR_getpagesize
11614 case TARGET_NR_getpagesize:
11615 ret = TARGET_PAGE_SIZE;
11618 case TARGET_NR_gettid:
11619 ret = get_errno(gettid());
11621 #ifdef TARGET_NR_readahead
11622 case TARGET_NR_readahead:
11623 #if TARGET_ABI_BITS == 32
11624 if (regpairs_aligned(cpu_env, num)) {
11629 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11631 ret = get_errno(readahead(arg1, arg2, arg3));
11636 #ifdef TARGET_NR_setxattr
11637 case TARGET_NR_listxattr:
11638 case TARGET_NR_llistxattr:
11642 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11644 ret = -TARGET_EFAULT;
11648 p = lock_user_string(arg1);
11650 if (num == TARGET_NR_listxattr) {
11651 ret = get_errno(listxattr(p, b, arg3));
11653 ret = get_errno(llistxattr(p, b, arg3));
11656 ret = -TARGET_EFAULT;
11658 unlock_user(p, arg1, 0);
11659 unlock_user(b, arg2, arg3);
11662 case TARGET_NR_flistxattr:
11666 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11668 ret = -TARGET_EFAULT;
11672 ret = get_errno(flistxattr(arg1, b, arg3));
11673 unlock_user(b, arg2, arg3);
11676 case TARGET_NR_setxattr:
11677 case TARGET_NR_lsetxattr:
11679 void *p, *n, *v = 0;
11681 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11683 ret = -TARGET_EFAULT;
11687 p = lock_user_string(arg1);
11688 n = lock_user_string(arg2);
11690 if (num == TARGET_NR_setxattr) {
11691 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11693 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11696 ret = -TARGET_EFAULT;
11698 unlock_user(p, arg1, 0);
11699 unlock_user(n, arg2, 0);
11700 unlock_user(v, arg3, 0);
11703 case TARGET_NR_fsetxattr:
11707 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11709 ret = -TARGET_EFAULT;
11713 n = lock_user_string(arg2);
11715 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11717 ret = -TARGET_EFAULT;
11719 unlock_user(n, arg2, 0);
11720 unlock_user(v, arg3, 0);
11723 case TARGET_NR_getxattr:
11724 case TARGET_NR_lgetxattr:
11726 void *p, *n, *v = 0;
11728 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11730 ret = -TARGET_EFAULT;
11734 p = lock_user_string(arg1);
11735 n = lock_user_string(arg2);
11737 if (num == TARGET_NR_getxattr) {
11738 ret = get_errno(getxattr(p, n, v, arg4));
11740 ret = get_errno(lgetxattr(p, n, v, arg4));
11743 ret = -TARGET_EFAULT;
11745 unlock_user(p, arg1, 0);
11746 unlock_user(n, arg2, 0);
11747 unlock_user(v, arg3, arg4);
11750 case TARGET_NR_fgetxattr:
11754 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11756 ret = -TARGET_EFAULT;
11760 n = lock_user_string(arg2);
11762 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11764 ret = -TARGET_EFAULT;
11766 unlock_user(n, arg2, 0);
11767 unlock_user(v, arg3, arg4);
11770 case TARGET_NR_removexattr:
11771 case TARGET_NR_lremovexattr:
11774 p = lock_user_string(arg1);
11775 n = lock_user_string(arg2);
11777 if (num == TARGET_NR_removexattr) {
11778 ret = get_errno(removexattr(p, n));
11780 ret = get_errno(lremovexattr(p, n));
11783 ret = -TARGET_EFAULT;
11785 unlock_user(p, arg1, 0);
11786 unlock_user(n, arg2, 0);
11789 case TARGET_NR_fremovexattr:
11792 n = lock_user_string(arg2);
11794 ret = get_errno(fremovexattr(arg1, n));
11796 ret = -TARGET_EFAULT;
11798 unlock_user(n, arg2, 0);
11802 #endif /* CONFIG_ATTR */
11803 #ifdef TARGET_NR_set_thread_area
11804 case TARGET_NR_set_thread_area:
11805 #if defined(TARGET_MIPS)
11806 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11809 #elif defined(TARGET_CRIS)
11811 ret = -TARGET_EINVAL;
11813 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11817 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11818 ret = do_set_thread_area(cpu_env, arg1);
11820 #elif defined(TARGET_M68K)
11822 TaskState *ts = cpu->opaque;
11823 ts->tp_value = arg1;
11828 goto unimplemented_nowarn;
11831 #ifdef TARGET_NR_get_thread_area
11832 case TARGET_NR_get_thread_area:
11833 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11834 ret = do_get_thread_area(cpu_env, arg1);
11836 #elif defined(TARGET_M68K)
11838 TaskState *ts = cpu->opaque;
11839 ret = ts->tp_value;
11843 goto unimplemented_nowarn;
11846 #ifdef TARGET_NR_getdomainname
11847 case TARGET_NR_getdomainname:
11848 goto unimplemented_nowarn;
11851 #ifdef TARGET_NR_clock_gettime
11852 case TARGET_NR_clock_gettime:
11854 struct timespec ts;
11855 ret = get_errno(clock_gettime(arg1, &ts));
11856 if (!is_error(ret)) {
11857 host_to_target_timespec(arg2, &ts);
11862 #ifdef TARGET_NR_clock_getres
11863 case TARGET_NR_clock_getres:
11865 struct timespec ts;
11866 ret = get_errno(clock_getres(arg1, &ts));
11867 if (!is_error(ret)) {
11868 host_to_target_timespec(arg2, &ts);
11873 #ifdef TARGET_NR_clock_nanosleep
11874 case TARGET_NR_clock_nanosleep:
11876 struct timespec ts;
11877 target_to_host_timespec(&ts, arg3);
11878 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11879 &ts, arg4 ? &ts : NULL));
11881 host_to_target_timespec(arg4, &ts);
11883 #if defined(TARGET_PPC)
11884 /* clock_nanosleep is odd in that it returns positive errno values.
11885 * On PPC, CR0 bit 3 should be set in such a situation. */
11886 if (ret && ret != -TARGET_ERESTARTSYS) {
11887 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11894 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11895 case TARGET_NR_set_tid_address:
11896 ret = get_errno(set_tid_address((int *)g2h(arg1)));
11900 case TARGET_NR_tkill:
11901 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11904 case TARGET_NR_tgkill:
11905 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11906 target_to_host_signal(arg3)));
11909 #ifdef TARGET_NR_set_robust_list
11910 case TARGET_NR_set_robust_list:
11911 case TARGET_NR_get_robust_list:
11912 /* The ABI for supporting robust futexes has userspace pass
11913 * the kernel a pointer to a linked list which is updated by
11914 * userspace after the syscall; the list is walked by the kernel
11915 * when the thread exits. Since the linked list in QEMU guest
11916 * memory isn't a valid linked list for the host and we have
11917 * no way to reliably intercept the thread-death event, we can't
11918 * support these. Silently return ENOSYS so that guest userspace
11919 * falls back to a non-robust futex implementation (which should
11920 * be OK except in the corner case of the guest crashing while
11921 * holding a mutex that is shared with another process via
11924 goto unimplemented_nowarn;
11927 #if defined(TARGET_NR_utimensat)
11928 case TARGET_NR_utimensat:
11930 struct timespec *tsp, ts[2];
11934 target_to_host_timespec(ts, arg3);
11935 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11939 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11941 if (!(p = lock_user_string(arg2))) {
11942 ret = -TARGET_EFAULT;
11945 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11946 unlock_user(p, arg2, 0);
11951 case TARGET_NR_futex:
11952 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11954 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11955 case TARGET_NR_inotify_init:
11956 ret = get_errno(sys_inotify_init());
11958 fd_trans_register(ret, &target_inotify_trans);
11962 #ifdef CONFIG_INOTIFY1
11963 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11964 case TARGET_NR_inotify_init1:
11965 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11966 fcntl_flags_tbl)));
11968 fd_trans_register(ret, &target_inotify_trans);
11973 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11974 case TARGET_NR_inotify_add_watch:
11975 p = lock_user_string(arg2);
11976 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11977 unlock_user(p, arg2, 0);
11980 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11981 case TARGET_NR_inotify_rm_watch:
11982 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
11986 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11987 case TARGET_NR_mq_open:
11989 struct mq_attr posix_mq_attr;
11990 struct mq_attr *pposix_mq_attr;
11993 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11994 pposix_mq_attr = NULL;
11996 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11999 pposix_mq_attr = &posix_mq_attr;
12001 p = lock_user_string(arg1 - 1);
12005 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12006 unlock_user (p, arg1, 0);
12010 case TARGET_NR_mq_unlink:
12011 p = lock_user_string(arg1 - 1);
12013 ret = -TARGET_EFAULT;
12016 ret = get_errno(mq_unlink(p));
12017 unlock_user (p, arg1, 0);
12020 case TARGET_NR_mq_timedsend:
12022 struct timespec ts;
12024 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12026 target_to_host_timespec(&ts, arg5);
12027 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12028 host_to_target_timespec(arg5, &ts);
12030 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12032 unlock_user (p, arg2, arg3);
12036 case TARGET_NR_mq_timedreceive:
12038 struct timespec ts;
12041 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12043 target_to_host_timespec(&ts, arg5);
12044 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12046 host_to_target_timespec(arg5, &ts);
12048 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12051 unlock_user (p, arg2, arg3);
12053 put_user_u32(prio, arg4);
12057 /* Not implemented for now... */
12058 /* case TARGET_NR_mq_notify: */
12061 case TARGET_NR_mq_getsetattr:
12063 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12066 ret = mq_getattr(arg1, &posix_mq_attr_out);
12067 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12070 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12071 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
12078 #ifdef CONFIG_SPLICE
12079 #ifdef TARGET_NR_tee
12080 case TARGET_NR_tee:
12082 ret = get_errno(tee(arg1,arg2,arg3,arg4));
12086 #ifdef TARGET_NR_splice
12087 case TARGET_NR_splice:
12089 loff_t loff_in, loff_out;
12090 loff_t *ploff_in = NULL, *ploff_out = NULL;
12092 if (get_user_u64(loff_in, arg2)) {
12095 ploff_in = &loff_in;
12098 if (get_user_u64(loff_out, arg4)) {
12101 ploff_out = &loff_out;
12103 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12105 if (put_user_u64(loff_in, arg2)) {
12110 if (put_user_u64(loff_out, arg4)) {
12117 #ifdef TARGET_NR_vmsplice
12118 case TARGET_NR_vmsplice:
12120 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12122 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12123 unlock_iovec(vec, arg2, arg3, 0);
12125 ret = -host_to_target_errno(errno);
12130 #endif /* CONFIG_SPLICE */
12131 #ifdef CONFIG_EVENTFD
12132 #if defined(TARGET_NR_eventfd)
12133 case TARGET_NR_eventfd:
12134 ret = get_errno(eventfd(arg1, 0));
12136 fd_trans_register(ret, &target_eventfd_trans);
12140 #if defined(TARGET_NR_eventfd2)
12141 case TARGET_NR_eventfd2:
12143 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12144 if (arg2 & TARGET_O_NONBLOCK) {
12145 host_flags |= O_NONBLOCK;
12147 if (arg2 & TARGET_O_CLOEXEC) {
12148 host_flags |= O_CLOEXEC;
12150 ret = get_errno(eventfd(arg1, host_flags));
12152 fd_trans_register(ret, &target_eventfd_trans);
12157 #endif /* CONFIG_EVENTFD */
12158 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12159 case TARGET_NR_fallocate:
12160 #if TARGET_ABI_BITS == 32
12161 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12162 target_offset64(arg5, arg6)));
12164 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12168 #if defined(CONFIG_SYNC_FILE_RANGE)
12169 #if defined(TARGET_NR_sync_file_range)
12170 case TARGET_NR_sync_file_range:
12171 #if TARGET_ABI_BITS == 32
12172 #if defined(TARGET_MIPS)
12173 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12174 target_offset64(arg5, arg6), arg7));
12176 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12177 target_offset64(arg4, arg5), arg6));
12178 #endif /* !TARGET_MIPS */
12180 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12184 #if defined(TARGET_NR_sync_file_range2)
12185 case TARGET_NR_sync_file_range2:
12186 /* This is like sync_file_range but the arguments are reordered */
12187 #if TARGET_ABI_BITS == 32
12188 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12189 target_offset64(arg5, arg6), arg2));
12191 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12196 #if defined(TARGET_NR_signalfd4)
12197 case TARGET_NR_signalfd4:
12198 ret = do_signalfd4(arg1, arg2, arg4);
12201 #if defined(TARGET_NR_signalfd)
12202 case TARGET_NR_signalfd:
12203 ret = do_signalfd4(arg1, arg2, 0);
12206 #if defined(CONFIG_EPOLL)
12207 #if defined(TARGET_NR_epoll_create)
12208 case TARGET_NR_epoll_create:
12209 ret = get_errno(epoll_create(arg1));
12212 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12213 case TARGET_NR_epoll_create1:
12214 ret = get_errno(epoll_create1(arg1));
12217 #if defined(TARGET_NR_epoll_ctl)
12218 case TARGET_NR_epoll_ctl:
12220 struct epoll_event ep;
12221 struct epoll_event *epp = 0;
12223 struct target_epoll_event *target_ep;
12224 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12227 ep.events = tswap32(target_ep->events);
12228 /* The epoll_data_t union is just opaque data to the kernel,
12229 * so we transfer all 64 bits across and need not worry what
12230 * actual data type it is.
12232 ep.data.u64 = tswap64(target_ep->data.u64);
12233 unlock_user_struct(target_ep, arg4, 0);
12236 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12241 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12242 #if defined(TARGET_NR_epoll_wait)
12243 case TARGET_NR_epoll_wait:
12245 #if defined(TARGET_NR_epoll_pwait)
12246 case TARGET_NR_epoll_pwait:
12249 struct target_epoll_event *target_ep;
12250 struct epoll_event *ep;
12252 int maxevents = arg3;
12253 int timeout = arg4;
12255 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12256 ret = -TARGET_EINVAL;
12260 target_ep = lock_user(VERIFY_WRITE, arg2,
12261 maxevents * sizeof(struct target_epoll_event), 1);
12266 ep = g_try_new(struct epoll_event, maxevents);
12268 unlock_user(target_ep, arg2, 0);
12269 ret = -TARGET_ENOMEM;
12274 #if defined(TARGET_NR_epoll_pwait)
12275 case TARGET_NR_epoll_pwait:
12277 target_sigset_t *target_set;
12278 sigset_t _set, *set = &_set;
12281 if (arg6 != sizeof(target_sigset_t)) {
12282 ret = -TARGET_EINVAL;
12286 target_set = lock_user(VERIFY_READ, arg5,
12287 sizeof(target_sigset_t), 1);
12289 ret = -TARGET_EFAULT;
12292 target_to_host_sigset(set, target_set);
12293 unlock_user(target_set, arg5, 0);
12298 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12299 set, SIGSET_T_SIZE));
12303 #if defined(TARGET_NR_epoll_wait)
12304 case TARGET_NR_epoll_wait:
12305 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12310 ret = -TARGET_ENOSYS;
12312 if (!is_error(ret)) {
12314 for (i = 0; i < ret; i++) {
12315 target_ep[i].events = tswap32(ep[i].events);
12316 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12318 unlock_user(target_ep, arg2,
12319 ret * sizeof(struct target_epoll_event));
12321 unlock_user(target_ep, arg2, 0);
12328 #ifdef TARGET_NR_prlimit64
12329 case TARGET_NR_prlimit64:
12331 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12332 struct target_rlimit64 *target_rnew, *target_rold;
12333 struct host_rlimit64 rnew, rold, *rnewp = 0;
12334 int resource = target_to_host_resource(arg2);
12336 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12339 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12340 rnew.rlim_max = tswap64(target_rnew->rlim_max);
12341 unlock_user_struct(target_rnew, arg3, 0);
12345 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12346 if (!is_error(ret) && arg4) {
12347 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12350 target_rold->rlim_cur = tswap64(rold.rlim_cur);
12351 target_rold->rlim_max = tswap64(rold.rlim_max);
12352 unlock_user_struct(target_rold, arg4, 1);
12357 #ifdef TARGET_NR_gethostname
12358 case TARGET_NR_gethostname:
12360 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12362 ret = get_errno(gethostname(name, arg2));
12363 unlock_user(name, arg1, arg2);
12365 ret = -TARGET_EFAULT;
12370 #ifdef TARGET_NR_atomic_cmpxchg_32
12371 case TARGET_NR_atomic_cmpxchg_32:
12373 /* should use start_exclusive from main.c */
12374 abi_ulong mem_value;
12375 if (get_user_u32(mem_value, arg6)) {
12376 target_siginfo_t info;
12377 info.si_signo = SIGSEGV;
12379 info.si_code = TARGET_SEGV_MAPERR;
12380 info._sifields._sigfault._addr = arg6;
12381 queue_signal((CPUArchState *)cpu_env, info.si_signo,
12382 QEMU_SI_FAULT, &info);
12386 if (mem_value == arg2)
12387 put_user_u32(arg1, arg6);
12392 #ifdef TARGET_NR_atomic_barrier
12393 case TARGET_NR_atomic_barrier:
12395 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12401 #ifdef TARGET_NR_timer_create
12402 case TARGET_NR_timer_create:
12404 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12406 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12409 int timer_index = next_free_host_timer();
12411 if (timer_index < 0) {
12412 ret = -TARGET_EAGAIN;
12414 timer_t *phtimer = g_posix_timers + timer_index;
12417 phost_sevp = &host_sevp;
12418 ret = target_to_host_sigevent(phost_sevp, arg2);
12424 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12428 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12437 #ifdef TARGET_NR_timer_settime
12438 case TARGET_NR_timer_settime:
12440 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12441 * struct itimerspec * old_value */
12442 target_timer_t timerid = get_timer_id(arg1);
12446 } else if (arg3 == 0) {
12447 ret = -TARGET_EINVAL;
12449 timer_t htimer = g_posix_timers[timerid];
12450 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12452 if (target_to_host_itimerspec(&hspec_new, arg3)) {
12456 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12457 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12465 #ifdef TARGET_NR_timer_gettime
12466 case TARGET_NR_timer_gettime:
12468 /* args: timer_t timerid, struct itimerspec *curr_value */
12469 target_timer_t timerid = get_timer_id(arg1);
12473 } else if (!arg2) {
12474 ret = -TARGET_EFAULT;
12476 timer_t htimer = g_posix_timers[timerid];
12477 struct itimerspec hspec;
12478 ret = get_errno(timer_gettime(htimer, &hspec));
12480 if (host_to_target_itimerspec(arg2, &hspec)) {
12481 ret = -TARGET_EFAULT;
12488 #ifdef TARGET_NR_timer_getoverrun
12489 case TARGET_NR_timer_getoverrun:
12491 /* args: timer_t timerid */
12492 target_timer_t timerid = get_timer_id(arg1);
12497 timer_t htimer = g_posix_timers[timerid];
12498 ret = get_errno(timer_getoverrun(htimer));
12500 fd_trans_unregister(ret);
12505 #ifdef TARGET_NR_timer_delete
12506 case TARGET_NR_timer_delete:
12508 /* args: timer_t timerid */
12509 target_timer_t timerid = get_timer_id(arg1);
12514 timer_t htimer = g_posix_timers[timerid];
12515 ret = get_errno(timer_delete(htimer));
12516 g_posix_timers[timerid] = 0;
12522 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12523 case TARGET_NR_timerfd_create:
12524 ret = get_errno(timerfd_create(arg1,
12525 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12529 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12530 case TARGET_NR_timerfd_gettime:
12532 struct itimerspec its_curr;
12534 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12536 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12543 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12544 case TARGET_NR_timerfd_settime:
12546 struct itimerspec its_new, its_old, *p_new;
12549 if (target_to_host_itimerspec(&its_new, arg3)) {
12557 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12559 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12566 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12567 case TARGET_NR_ioprio_get:
12568 ret = get_errno(ioprio_get(arg1, arg2));
12572 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12573 case TARGET_NR_ioprio_set:
12574 ret = get_errno(ioprio_set(arg1, arg2, arg3));
12578 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12579 case TARGET_NR_setns:
12580 ret = get_errno(setns(arg1, arg2));
12583 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12584 case TARGET_NR_unshare:
12585 ret = get_errno(unshare(arg1));
12588 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12589 case TARGET_NR_kcmp:
12590 ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12596 gemu_log("qemu: Unsupported syscall: %d\n", num);
12597 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12598 unimplemented_nowarn:
12600 ret = -TARGET_ENOSYS;
12605 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12608 print_syscall_ret(num, ret);
12609 trace_guest_user_syscall_ret(cpu, num, ret);
12612 ret = -TARGET_EFAULT;