/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include <sys/mount.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <linux/capability.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#include "qemu-common.h"
#include <sys/timerfd.h>
#include <sys/eventfd.h>
#include <sys/epoll.h>
#include "qemu/xattr.h"
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#ifdef CONFIG_RTNETLINK
#include <linux/rtnetlink.h>
#include <linux/if_bridge.h>
#endif
#include <linux/audit.h>
#include "linux_loop.h"
#ifndef CLONE_IO
#define CLONE_IO 0x80000000 /* Clone io context */
#endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */

/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
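/* Illustrative sketch (not part of the original file): how the masks
 * above classify a clone flags value. do_fork() performs equivalent
 * checks; the helper below is hypothetical and compiled out.
 */
#if 0
static int clone_flags_look_like_pthread_create(unsigned int flags)
{
    /* All CLONE_THREAD_FLAGS bits must be present, and nothing outside
     * the supported-thread mask may be set. */
    return (flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS &&
           (flags & CLONE_INVALID_THREAD_FLAGS) == 0;
}
#endif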
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)            \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                  \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5)                                             \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);            \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5,type6,arg6)                                  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6)                                             \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);      \
}
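/* For example, the "_syscall2(int, capget, ...)" use further down expands
 * to a static wrapper roughly equivalent to:
 *
 *     static int capget(struct __user_cap_header_struct *header,
 *                       struct __user_cap_data_struct *data)
 *     {
 *         return syscall(__NR_capget, header, data);
 *     }
 */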
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
#if defined(TARGET_NR_getdents) && defined(__NR_getdents)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if !defined(__NR_getdents) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg)
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
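/* Sketch (not part of the original file): how a bitmask_transtbl is
 * consumed. QEMU's target_to_host_bitmask() walks the rows in essentially
 * this way; the simplified loop below is compiled out and uses the
 * historically-named x86_/alpha_ fields for target/host respectively.
 */
#if 0
static unsigned int example_target_to_host_bitmask(unsigned int target_mask,
                                                   const bitmask_transtbl *tbl)
{
    unsigned int host_mask = 0;

    for (; tbl->x86_mask != 0; tbl++) {
        if ((target_mask & tbl->x86_mask) == tbl->x86_bits) {
            /* Row matched: OR in the host encoding of this flag. */
            host_mask |= tbl->alpha_bits;
        }
    }
    return host_mask;
}
#endif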
enum {
    QEMU_IFLA_BR_FORWARD_DELAY,
    QEMU_IFLA_BR_HELLO_TIME,
    QEMU_IFLA_BR_MAX_AGE,
    QEMU_IFLA_BR_AGEING_TIME,
    QEMU_IFLA_BR_STP_STATE,
    QEMU_IFLA_BR_PRIORITY,
    QEMU_IFLA_BR_VLAN_FILTERING,
    QEMU_IFLA_BR_VLAN_PROTOCOL,
    QEMU_IFLA_BR_GROUP_FWD_MASK,
    QEMU_IFLA_BR_ROOT_ID,
    QEMU_IFLA_BR_BRIDGE_ID,
    QEMU_IFLA_BR_ROOT_PORT,
    QEMU_IFLA_BR_ROOT_PATH_COST,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
    QEMU_IFLA_BR_HELLO_TIMER,
    QEMU_IFLA_BR_TCN_TIMER,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
    QEMU_IFLA_BR_GC_TIMER,
    QEMU_IFLA_BR_GROUP_ADDR,
    QEMU_IFLA_BR_FDB_FLUSH,
    QEMU_IFLA_BR_MCAST_ROUTER,
    QEMU_IFLA_BR_MCAST_SNOOPING,
    QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
    QEMU_IFLA_BR_MCAST_QUERIER,
    QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
    QEMU_IFLA_BR_MCAST_HASH_MAX,
    QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
    QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
    QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
    QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
    QEMU_IFLA_BR_MCAST_QUERY_INTVL,
    QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
    QEMU_IFLA_BR_NF_CALL_IPTABLES,
    QEMU_IFLA_BR_NF_CALL_IP6TABLES,
    QEMU_IFLA_BR_NF_CALL_ARPTABLES,
    QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
    QEMU_IFLA_BR_VLAN_STATS_ENABLED,
    QEMU_IFLA_BR_MCAST_STATS_ENABLED,
};

enum {
    QEMU_IFLA_NET_NS_PID,
    QEMU_IFLA_VFINFO_LIST,
    QEMU_IFLA_PROMISCUITY,
    QEMU_IFLA_NUM_TX_QUEUES,
    QEMU_IFLA_NUM_RX_QUEUES,
    QEMU_IFLA_PHYS_PORT_ID,
    QEMU_IFLA_CARRIER_CHANGES,
    QEMU_IFLA_PHYS_SWITCH_ID,
    QEMU_IFLA_LINK_NETNSID,
    QEMU_IFLA_PHYS_PORT_NAME,
    QEMU_IFLA_PROTO_DOWN,
    QEMU_IFLA_GSO_MAX_SEGS,
    QEMU_IFLA_GSO_MAX_SIZE,
};

enum {
    QEMU_IFLA_BRPORT_UNSPEC,
    QEMU_IFLA_BRPORT_STATE,
    QEMU_IFLA_BRPORT_PRIORITY,
    QEMU_IFLA_BRPORT_COST,
    QEMU_IFLA_BRPORT_MODE,
    QEMU_IFLA_BRPORT_GUARD,
    QEMU_IFLA_BRPORT_PROTECT,
    QEMU_IFLA_BRPORT_FAST_LEAVE,
    QEMU_IFLA_BRPORT_LEARNING,
    QEMU_IFLA_BRPORT_UNICAST_FLOOD,
    QEMU_IFLA_BRPORT_PROXYARP,
    QEMU_IFLA_BRPORT_LEARNING_SYNC,
    QEMU_IFLA_BRPORT_PROXYARP_WIFI,
    QEMU_IFLA_BRPORT_ROOT_ID,
    QEMU_IFLA_BRPORT_BRIDGE_ID,
    QEMU_IFLA_BRPORT_DESIGNATED_PORT,
    QEMU_IFLA_BRPORT_DESIGNATED_COST,
    QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
    QEMU_IFLA_BRPORT_CONFIG_PENDING,
    QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
    QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
    QEMU_IFLA_BRPORT_HOLD_TIMER,
    QEMU_IFLA_BRPORT_FLUSH,
    QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
    QEMU_IFLA_BRPORT_PAD,
    QEMU___IFLA_BRPORT_MAX
};

enum {
    QEMU_IFLA_INFO_UNSPEC,
    QEMU_IFLA_INFO_XSTATS,
    QEMU_IFLA_INFO_SLAVE_KIND,
    QEMU_IFLA_INFO_SLAVE_DATA,
    QEMU___IFLA_INFO_MAX,
};

enum {
    QEMU_IFLA_INET_UNSPEC,
    QEMU___IFLA_INET_MAX,
};

enum {
    QEMU_IFLA_INET6_UNSPEC,
    QEMU_IFLA_INET6_FLAGS,
    QEMU_IFLA_INET6_CONF,
    QEMU_IFLA_INET6_STATS,
    QEMU_IFLA_INET6_MCAST,
    QEMU_IFLA_INET6_CACHEINFO,
    QEMU_IFLA_INET6_ICMP6STATS,
    QEMU_IFLA_INET6_TOKEN,
    QEMU_IFLA_INET6_ADDR_GEN_MODE,
    QEMU___IFLA_INET6_MAX
};
typedef abi_long (*TargetFdDataFunc)(void *, size_t);
typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
typedef struct TargetFdTrans {
    TargetFdDataFunc host_to_target_data;
    TargetFdDataFunc target_to_host_data;
    TargetFdAddrFunc target_to_host_addr;
} TargetFdTrans;

static TargetFdTrans **target_fd_trans;

static unsigned int target_fd_max;
static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
{
    if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
        return target_fd_trans[fd]->target_to_host_data;
    }
    return NULL;
}

static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
{
    if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
        return target_fd_trans[fd]->host_to_target_data;
    }
    return NULL;
}

static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
{
    if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
        return target_fd_trans[fd]->target_to_host_addr;
    }
    return NULL;
}

static void fd_trans_register(int fd, TargetFdTrans *trans)
{
    unsigned int oldmax;

    if (fd >= target_fd_max) {
        oldmax = target_fd_max;
        target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
        target_fd_trans = g_renew(TargetFdTrans *,
                                  target_fd_trans, target_fd_max);
        memset((void *)(target_fd_trans + oldmax), 0,
               (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
    }
    target_fd_trans[fd] = trans;
}

static void fd_trans_unregister(int fd)
{
    if (fd >= 0 && fd < target_fd_max) {
        target_fd_trans[fd] = NULL;
    }
}

static void fd_trans_dup(int oldfd, int newfd)
{
    fd_trans_unregister(newfd);
    if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
        fd_trans_register(newfd, target_fd_trans[oldfd]);
    }
}
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
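/* Illustrative sketch (not part of the original file): when
 * regpairs_aligned() returns 1, a 64-bit argument starts on an
 * even-numbered slot, so the dispatcher skips one register before
 * joining the pair. The helper below is hypothetical and compiled out;
 * it assumes a 32-bit ABI with the low half in the first slot.
 */
#if 0
static uint64_t example_join_regpair(void *cpu_env, int num,
                                     abi_ulong *args, int *index)
{
    if (regpairs_aligned(cpu_env, num)) {
        *index += *index & 1;   /* round an odd slot up to the next even one */
    }
    uint64_t lo = args[(*index)++];
    uint64_t hi = args[(*index)++];
    return lo | (hi << 32);
}
#endif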
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN]            = TARGET_EAGAIN,
    [EIDRM]             = TARGET_EIDRM,
    [ECHRNG]            = TARGET_ECHRNG,
    [EL2NSYNC]          = TARGET_EL2NSYNC,
    [EL3HLT]            = TARGET_EL3HLT,
    [EL3RST]            = TARGET_EL3RST,
    [ELNRNG]            = TARGET_ELNRNG,
    [EUNATCH]           = TARGET_EUNATCH,
    [ENOCSI]            = TARGET_ENOCSI,
    [EL2HLT]            = TARGET_EL2HLT,
    [EDEADLK]           = TARGET_EDEADLK,
    [ENOLCK]            = TARGET_ENOLCK,
    [EBADE]             = TARGET_EBADE,
    [EBADR]             = TARGET_EBADR,
    [EXFULL]            = TARGET_EXFULL,
    [ENOANO]            = TARGET_ENOANO,
    [EBADRQC]           = TARGET_EBADRQC,
    [EBADSLT]           = TARGET_EBADSLT,
    [EBFONT]            = TARGET_EBFONT,
    [ENOSTR]            = TARGET_ENOSTR,
    [ENODATA]           = TARGET_ENODATA,
    [ETIME]             = TARGET_ETIME,
    [ENOSR]             = TARGET_ENOSR,
    [ENONET]            = TARGET_ENONET,
    [ENOPKG]            = TARGET_ENOPKG,
    [EREMOTE]           = TARGET_EREMOTE,
    [ENOLINK]           = TARGET_ENOLINK,
    [EADV]              = TARGET_EADV,
    [ESRMNT]            = TARGET_ESRMNT,
    [ECOMM]             = TARGET_ECOMM,
    [EPROTO]            = TARGET_EPROTO,
    [EDOTDOT]           = TARGET_EDOTDOT,
    [EMULTIHOP]         = TARGET_EMULTIHOP,
    [EBADMSG]           = TARGET_EBADMSG,
    [ENAMETOOLONG]      = TARGET_ENAMETOOLONG,
    [EOVERFLOW]         = TARGET_EOVERFLOW,
    [ENOTUNIQ]          = TARGET_ENOTUNIQ,
    [EBADFD]            = TARGET_EBADFD,
    [EREMCHG]           = TARGET_EREMCHG,
    [ELIBACC]           = TARGET_ELIBACC,
    [ELIBBAD]           = TARGET_ELIBBAD,
    [ELIBSCN]           = TARGET_ELIBSCN,
    [ELIBMAX]           = TARGET_ELIBMAX,
    [ELIBEXEC]          = TARGET_ELIBEXEC,
    [EILSEQ]            = TARGET_EILSEQ,
    [ENOSYS]            = TARGET_ENOSYS,
    [ELOOP]             = TARGET_ELOOP,
    [ERESTART]          = TARGET_ERESTART,
    [ESTRPIPE]          = TARGET_ESTRPIPE,
    [ENOTEMPTY]         = TARGET_ENOTEMPTY,
    [EUSERS]            = TARGET_EUSERS,
    [ENOTSOCK]          = TARGET_ENOTSOCK,
    [EDESTADDRREQ]      = TARGET_EDESTADDRREQ,
    [EMSGSIZE]          = TARGET_EMSGSIZE,
    [EPROTOTYPE]        = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]       = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]   = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]   = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]        = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]      = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]      = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]        = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]     = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]          = TARGET_ENETDOWN,
    [ENETUNREACH]       = TARGET_ENETUNREACH,
    [ENETRESET]         = TARGET_ENETRESET,
    [ECONNABORTED]      = TARGET_ECONNABORTED,
    [ECONNRESET]        = TARGET_ECONNRESET,
    [ENOBUFS]           = TARGET_ENOBUFS,
    [EISCONN]           = TARGET_EISCONN,
    [ENOTCONN]          = TARGET_ENOTCONN,
    [EUCLEAN]           = TARGET_EUCLEAN,
    [ENOTNAM]           = TARGET_ENOTNAM,
    [ENAVAIL]           = TARGET_ENAVAIL,
    [EISNAM]            = TARGET_EISNAM,
    [EREMOTEIO]         = TARGET_EREMOTEIO,
    [EDQUOT]            = TARGET_EDQUOT,
    [ESHUTDOWN]         = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]      = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]         = TARGET_ETIMEDOUT,
    [ECONNREFUSED]      = TARGET_ECONNREFUSED,
    [EHOSTDOWN]         = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]      = TARGET_EHOSTUNREACH,
    [EALREADY]          = TARGET_EALREADY,
    [EINPROGRESS]       = TARGET_EINPROGRESS,
    [ESTALE]            = TARGET_ESTALE,
    [ECANCELED]         = TARGET_ECANCELED,
    [ENOMEDIUM]         = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]       = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]            = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]       = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]       = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]      = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]        = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]   = TARGET_ENOTRECOVERABLE,
#endif
#ifdef ENOMSG
    [ENOMSG]            = TARGET_ENOMSG,
#endif
#ifdef ERFKILL
    [ERFKILL]           = TARGET_ERFKILL,
#endif
#ifdef EHWPOISON
    [EHWPOISON]         = TARGET_EHWPOISON,
#endif
};
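/* Illustrative note (not part of the original file): errno numbering is
 * per-architecture, which is why an identity mapping cannot be assumed.
 * Alpha, for instance, uses OSF/1-style values, where EAGAIN is 35
 * rather than the common Linux value of 11.
 */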
static inline int host_to_target_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        host_to_target_errno_table[err]) {
        return host_to_target_errno_table[err];
    }
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1) {
        return -host_to_target_errno(errno);
    } else {
        return ret;
    }
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
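/* Background note (summary, not part of the original file): safe_syscall()
 * is QEMU's host-syscall trampoline that can be interrupted by a guest
 * signal and restarted, avoiding races around blocking syscalls. As an
 * example of the macros above, "safe_syscall3(ssize_t, read, ...)" below
 * defines:
 *
 *     static ssize_t safe_read(int fd, void *buff, size_t count)
 *     {
 *         return safe_syscall(__NR_read, fd, buff, count);
 *     }
 */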
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#else
/* This host kernel architecture uses a single ipc syscall; fake up
 * wrappers for the sub-operations to hide this implementation detail.
 * Annoyingly we can't include linux/ipc.h to get the constant definitions
 * for the call parameter because some structs in there conflict with the
 * sys/ipc.h ones. So we just define them here, and rely on them being
 * the same for all host architectures.
 */
#define Q_SEMTIMEDOP 4
#define Q_MSGSND 11
#define Q_MSGRCV 12
#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))

safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}
static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
                           const struct timespec *timeout)
{
    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
                    (long)timeout);
}
#endif
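/* Worked example (not part of the original file): Q_IPCCALL packs the IPC
 * interface version into the high 16 bits, so Q_IPCCALL(1, Q_MSGRCV) is
 * (1 << 16) | 12 == 0x1000c. Version 1 of MSGRCV passes msgtype directly
 * as the syscall's fifth argument instead of through an indirection
 * struct, which is why safe_msgrcv() above uses version 1.
 */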
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
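/* Example (illustrative, not part of the original file): on a host where
 * SOCK_DGRAM is 2 and SOCK_CLOEXEC is defined,
 * host_to_target_sock_type(SOCK_DGRAM | SOCK_CLOEXEC) returns
 * TARGET_SOCK_DGRAM | TARGET_SOCK_CLOEXEC in the guest's own encoding.
 */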
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
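/* Worked example (not part of the original file): with TARGET_ABI_BITS
 * of 32, guest fd 33 lives in target_fds[1] bit 1 (33 == 1 * 32 + 1), and
 * a select() on fds up to 64 copies nw == DIV_ROUND_UP(64, 32) == 2
 * abi_ulong words in each direction.
 */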
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
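/* Worked example (not part of the original file): with a 32-bit guest
 * ABI, a host limit of 8 GiB does not fit in abi_ulong, so the
 * "rlim != (abi_long)rlim" test fires and the guest sees
 * TARGET_RLIM_INFINITY instead of a silently truncated value.
 */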
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * The Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
              (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                tgt_len = len;
                break;
            }
            break;
        default:
            tgt_len = len;
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                                       (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                                       (struct nlmsghdr *))
{
    int ret;

    while (len > sizeof(struct nlmsghdr)) {
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
            break;
        }
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}
2090 #ifdef CONFIG_RTNETLINK
2091 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
2092 size_t len, void *context,
2093 abi_long (*host_to_target_nlattr)
2097 unsigned short nla_len;
2100 while (len > sizeof(struct nlattr)) {
2101 nla_len = nlattr->nla_len;
2102 if (nla_len < sizeof(struct nlattr) ||
2106 ret = host_to_target_nlattr(nlattr, context);
2107 nlattr->nla_len = tswap16(nlattr->nla_len);
2108 nlattr->nla_type = tswap16(nlattr->nla_type);
2112 len -= NLA_ALIGN(nla_len);
2113 nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
2118 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
2120 abi_long (*host_to_target_rtattr)
2123 unsigned short rta_len;
2126 while (len > sizeof(struct rtattr)) {
2127 rta_len = rtattr->rta_len;
2128 if (rta_len < sizeof(struct rtattr) ||
2132 ret = host_to_target_rtattr(rtattr);
2133 rtattr->rta_len = tswap16(rtattr->rta_len);
2134 rtattr->rta_type = tswap16(rtattr->rta_type);
2138 len -= RTA_ALIGN(rta_len);
2139 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
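/* NLA_DATA() returns a pointer to the payload that immediately follows
 * a struct nlattr header, mirroring the kernel's nla_data().
 */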
2144 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2146 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
2153 switch (nlattr->nla_type) {
2155 case QEMU_IFLA_BR_FDB_FLUSH:
2158 case QEMU_IFLA_BR_GROUP_ADDR:
2161 case QEMU_IFLA_BR_VLAN_FILTERING:
2162 case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
2163 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
2164 case QEMU_IFLA_BR_MCAST_ROUTER:
2165 case QEMU_IFLA_BR_MCAST_SNOOPING:
2166 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
2167 case QEMU_IFLA_BR_MCAST_QUERIER:
2168 case QEMU_IFLA_BR_NF_CALL_IPTABLES:
2169 case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
2170 case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
2173 case QEMU_IFLA_BR_PRIORITY:
2174 case QEMU_IFLA_BR_VLAN_PROTOCOL:
2175 case QEMU_IFLA_BR_GROUP_FWD_MASK:
2176 case QEMU_IFLA_BR_ROOT_PORT:
2177 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
2178 u16 = NLA_DATA(nlattr);
2179 *u16 = tswap16(*u16);
2182 case QEMU_IFLA_BR_FORWARD_DELAY:
2183 case QEMU_IFLA_BR_HELLO_TIME:
2184 case QEMU_IFLA_BR_MAX_AGE:
2185 case QEMU_IFLA_BR_AGEING_TIME:
2186 case QEMU_IFLA_BR_STP_STATE:
2187 case QEMU_IFLA_BR_ROOT_PATH_COST:
2188 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
2189 case QEMU_IFLA_BR_MCAST_HASH_MAX:
2190 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
2191 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
2192 u32 = NLA_DATA(nlattr);
2193 *u32 = tswap32(*u32);
2196 case QEMU_IFLA_BR_HELLO_TIMER:
2197 case QEMU_IFLA_BR_TCN_TIMER:
2198 case QEMU_IFLA_BR_GC_TIMER:
2199 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
2200 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
2201 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
2202 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
2203 case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
2204 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
2205 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
2206 u64 = NLA_DATA(nlattr);
2207 *u64 = tswap64(*u64);
2209 /* ifla_bridge_id: uint8_t[] */
2210 case QEMU_IFLA_BR_ROOT_ID:
2211 case QEMU_IFLA_BR_BRIDGE_ID:
2214 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
2220 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2227 switch (nlattr->nla_type) {
2229 case QEMU_IFLA_BRPORT_STATE:
2230 case QEMU_IFLA_BRPORT_MODE:
2231 case QEMU_IFLA_BRPORT_GUARD:
2232 case QEMU_IFLA_BRPORT_PROTECT:
2233 case QEMU_IFLA_BRPORT_FAST_LEAVE:
2234 case QEMU_IFLA_BRPORT_LEARNING:
2235 case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2236 case QEMU_IFLA_BRPORT_PROXYARP:
2237 case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2238 case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2239 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2240 case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2241 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
2244 case QEMU_IFLA_BRPORT_PRIORITY:
2245 case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2246 case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2247 case QEMU_IFLA_BRPORT_ID:
2248 case QEMU_IFLA_BRPORT_NO:
2249 u16 = NLA_DATA(nlattr);
2250 *u16 = tswap16(*u16);
2253 case QEMU_IFLA_BRPORT_COST:
2254 u32 = NLA_DATA(nlattr);
2255 *u32 = tswap32(*u32);
2258 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2259 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2260 case QEMU_IFLA_BRPORT_HOLD_TIMER:
2261 u64 = NLA_DATA(nlattr);
2262 *u64 = tswap64(*u64);
2264 /* ifla_bridge_id: uint8_t[] */
2265 case QEMU_IFLA_BRPORT_ROOT_ID:
2266 case QEMU_IFLA_BRPORT_BRIDGE_ID:
2269 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
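/* IFLA_INFO_DATA and IFLA_INFO_SLAVE_DATA can only be interpreted once
 * IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND are known, so the kind strings
 * seen so far are carried from one callback invocation to the next in
 * this context structure.
 */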
2275 struct linkinfo_context {
2282 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2285 struct linkinfo_context *li_context = context;
2287 switch (nlattr->nla_type) {
2289 case QEMU_IFLA_INFO_KIND:
2290 li_context->name = NLA_DATA(nlattr);
2291 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2293 case QEMU_IFLA_INFO_SLAVE_KIND:
2294 li_context->slave_name = NLA_DATA(nlattr);
2295 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2298 case QEMU_IFLA_INFO_XSTATS:
2299 /* FIXME: only used by CAN */
2302 case QEMU_IFLA_INFO_DATA:
2303 if (strncmp(li_context->name, "bridge",
2304 li_context->len) == 0) {
2305 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2308 host_to_target_data_bridge_nlattr);
2310 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2313 case QEMU_IFLA_INFO_SLAVE_DATA:
2314 if (strncmp(li_context->slave_name, "bridge",
2315 li_context->slave_len) == 0) {
2316 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2319 host_to_target_slave_data_bridge_nlattr);
2321 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2322 li_context->slave_name);
2326 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
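/* QEMU_IFLA_INET_CONF carries an array of u32 values (the per-device
 * IPv4 configuration), so each element is swapped individually.
 */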
2333 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2339 switch (nlattr->nla_type) {
2340 case QEMU_IFLA_INET_CONF:
2341 u32 = NLA_DATA(nlattr);
2342 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2344 u32[i] = tswap32(u32[i]);
2348 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2353 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2358 struct ifla_cacheinfo *ci;
2361 switch (nlattr->nla_type) {
2363 case QEMU_IFLA_INET6_TOKEN:
2366 case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2369 case QEMU_IFLA_INET6_FLAGS:
2370 u32 = NLA_DATA(nlattr);
2371 *u32 = tswap32(*u32);
2374 case QEMU_IFLA_INET6_CONF:
2375 u32 = NLA_DATA(nlattr);
2376 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2378 u32[i] = tswap32(u32[i]);
2381 /* ifla_cacheinfo */
2382 case QEMU_IFLA_INET6_CACHEINFO:
2383 ci = NLA_DATA(nlattr);
2384 ci->max_reasm_len = tswap32(ci->max_reasm_len);
2385 ci->tstamp = tswap32(ci->tstamp);
2386 ci->reachable_time = tswap32(ci->reachable_time);
2387 ci->retrans_time = tswap32(ci->retrans_time);
2390 case QEMU_IFLA_INET6_STATS:
2391 case QEMU_IFLA_INET6_ICMP6STATS:
2392 u64 = NLA_DATA(nlattr);
2393 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2395 u64[i] = tswap64(u64[i]);
2399 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2404 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2407 switch (nlattr->nla_type) {
2409 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2411 host_to_target_data_inet_nlattr);
2413 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2415 host_to_target_data_inet6_nlattr);
2417 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
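/* Convert one IFLA_* attribute of a link message.  Strings and
 * link-layer addresses need no conversion, scalar attributes are
 * byte-swapped by width, and the nested IFLA_LINKINFO / IFLA_AF_SPEC
 * attributes recurse into the nlattr walkers above.
 */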
2423 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2426 struct rtnl_link_stats *st;
2427 struct rtnl_link_stats64 *st64;
2428 struct rtnl_link_ifmap *map;
2429 struct linkinfo_context li_context;
2431 switch (rtattr->rta_type) {
2433 case QEMU_IFLA_ADDRESS:
2434 case QEMU_IFLA_BROADCAST:
2436 case QEMU_IFLA_IFNAME:
2437 case QEMU_IFLA_QDISC:
2440 case QEMU_IFLA_OPERSTATE:
2441 case QEMU_IFLA_LINKMODE:
2442 case QEMU_IFLA_CARRIER:
2443 case QEMU_IFLA_PROTO_DOWN:
2447 case QEMU_IFLA_LINK:
2448 case QEMU_IFLA_WEIGHT:
2449 case QEMU_IFLA_TXQLEN:
2450 case QEMU_IFLA_CARRIER_CHANGES:
2451 case QEMU_IFLA_NUM_RX_QUEUES:
2452 case QEMU_IFLA_NUM_TX_QUEUES:
2453 case QEMU_IFLA_PROMISCUITY:
2454 case QEMU_IFLA_EXT_MASK:
2455 case QEMU_IFLA_LINK_NETNSID:
2456 case QEMU_IFLA_GROUP:
2457 case QEMU_IFLA_MASTER:
2458 case QEMU_IFLA_NUM_VF:
2459 case QEMU_IFLA_GSO_MAX_SEGS:
2460 case QEMU_IFLA_GSO_MAX_SIZE:
2461 u32 = RTA_DATA(rtattr);
2462 *u32 = tswap32(*u32);
2464 /* struct rtnl_link_stats */
2465 case QEMU_IFLA_STATS:
2466 st = RTA_DATA(rtattr);
2467 st->rx_packets = tswap32(st->rx_packets);
2468 st->tx_packets = tswap32(st->tx_packets);
2469 st->rx_bytes = tswap32(st->rx_bytes);
2470 st->tx_bytes = tswap32(st->tx_bytes);
2471 st->rx_errors = tswap32(st->rx_errors);
2472 st->tx_errors = tswap32(st->tx_errors);
2473 st->rx_dropped = tswap32(st->rx_dropped);
2474 st->tx_dropped = tswap32(st->tx_dropped);
2475 st->multicast = tswap32(st->multicast);
2476 st->collisions = tswap32(st->collisions);
2478 /* detailed rx_errors: */
2479 st->rx_length_errors = tswap32(st->rx_length_errors);
2480 st->rx_over_errors = tswap32(st->rx_over_errors);
2481 st->rx_crc_errors = tswap32(st->rx_crc_errors);
2482 st->rx_frame_errors = tswap32(st->rx_frame_errors);
2483 st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2484 st->rx_missed_errors = tswap32(st->rx_missed_errors);
2486 /* detailed tx_errors */
2487 st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2488 st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2489 st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2490 st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2491 st->tx_window_errors = tswap32(st->tx_window_errors);
2494 st->rx_compressed = tswap32(st->rx_compressed);
2495 st->tx_compressed = tswap32(st->tx_compressed);
2497 /* struct rtnl_link_stats64 */
2498 case QEMU_IFLA_STATS64:
2499 st64 = RTA_DATA(rtattr);
2500 st64->rx_packets = tswap64(st64->rx_packets);
2501 st64->tx_packets = tswap64(st64->tx_packets);
2502 st64->rx_bytes = tswap64(st64->rx_bytes);
2503 st64->tx_bytes = tswap64(st64->tx_bytes);
2504 st64->rx_errors = tswap64(st64->rx_errors);
2505 st64->tx_errors = tswap64(st64->tx_errors);
2506 st64->rx_dropped = tswap64(st64->rx_dropped);
2507 st64->tx_dropped = tswap64(st64->tx_dropped);
2508 st64->multicast = tswap64(st64->multicast);
2509 st64->collisions = tswap64(st64->collisions);
2511 /* detailed rx_errors: */
2512 st64->rx_length_errors = tswap64(st64->rx_length_errors);
2513 st64->rx_over_errors = tswap64(st64->rx_over_errors);
2514 st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2515 st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2516 st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2517 st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2519 /* detailed tx_errors */
2520 st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2521 st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2522 st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2523 st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2524 st64->tx_window_errors = tswap64(st64->tx_window_errors);
2527 st64->rx_compressed = tswap64(st64->rx_compressed);
2528 st64->tx_compressed = tswap64(st64->tx_compressed);
2530 /* struct rtnl_link_ifmap */
2532 map = RTA_DATA(rtattr);
2533 map->mem_start = tswap64(map->mem_start);
2534 map->mem_end = tswap64(map->mem_end);
2535 map->base_addr = tswap64(map->base_addr);
2536 map->irq = tswap16(map->irq);
2539 case QEMU_IFLA_LINKINFO:
2540 memset(&li_context, 0, sizeof(li_context));
2541 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2543 host_to_target_data_linkinfo_nlattr);
2544 case QEMU_IFLA_AF_SPEC:
2545 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2547 host_to_target_data_spec_nlattr);
2549 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
2555 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2558 struct ifa_cacheinfo *ci;
2560 switch (rtattr->rta_type) {
2561 /* binary: depends on family type */
2571 u32 = RTA_DATA(rtattr);
2572 *u32 = tswap32(*u32);
2574 /* struct ifa_cacheinfo */
2576 ci = RTA_DATA(rtattr);
2577 ci->ifa_prefered = tswap32(ci->ifa_prefered);
2578 ci->ifa_valid = tswap32(ci->ifa_valid);
2579 ci->cstamp = tswap32(ci->cstamp);
2580 ci->tstamp = tswap32(ci->tstamp);
2583 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2589 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2592 switch (rtattr->rta_type) {
2593 /* binary: depends on family type */
2602 u32 = RTA_DATA(rtattr);
2603 *u32 = tswap32(*u32);
2606 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2612 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2613 uint32_t rtattr_len)
2615 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2616 host_to_target_data_link_rtattr);
2619 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2620 uint32_t rtattr_len)
2622 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2623 host_to_target_data_addr_rtattr);
2626 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2627 uint32_t rtattr_len)
2629 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2630 host_to_target_data_route_rtattr);
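/* Top-level converter for one routing-netlink message: the fixed
 * header (struct ifinfomsg, ifaddrmsg or rtmsg, depending on
 * nlmsg_type) is swapped here, then the trailing attributes are
 * handed to the matching rtattr walker.
 */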
2633 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2636 struct ifinfomsg *ifi;
2637 struct ifaddrmsg *ifa;
2640 nlmsg_len = nlh->nlmsg_len;
2641 switch (nlh->nlmsg_type) {
2645 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2646 ifi = NLMSG_DATA(nlh);
2647 ifi->ifi_type = tswap16(ifi->ifi_type);
2648 ifi->ifi_index = tswap32(ifi->ifi_index);
2649 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2650 ifi->ifi_change = tswap32(ifi->ifi_change);
2651 host_to_target_link_rtattr(IFLA_RTA(ifi),
2652 nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
2658 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2659 ifa = NLMSG_DATA(nlh);
2660 ifa->ifa_index = tswap32(ifa->ifa_index);
2661 host_to_target_addr_rtattr(IFA_RTA(ifa),
2662 nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
2668 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2669 rtm = NLMSG_DATA(nlh);
2670 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2671 host_to_target_route_rtattr(RTM_RTA(rtm),
2672 nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2676 return -TARGET_EINVAL;
2681 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2684 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
2687 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2689 abi_long (*target_to_host_rtattr)
2694 while (len >= sizeof(struct rtattr)) {
2695 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2696 tswap16(rtattr->rta_len) > len) {
2699 rtattr->rta_len = tswap16(rtattr->rta_len);
2700 rtattr->rta_type = tswap16(rtattr->rta_type);
2701 ret = target_to_host_rtattr(rtattr);
2705 len -= RTA_ALIGN(rtattr->rta_len);
2706 rtattr = (struct rtattr *)(((char *)rtattr) +
2707 RTA_ALIGN(rtattr->rta_len));
2712 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2714 switch (rtattr->rta_type) {
2716 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2722 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2724 switch (rtattr->rta_type) {
2725 /* binary: depends on family type */
2730 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2736 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2739 switch (rtattr->rta_type) {
2740 /* binary: depends on family type */
2748 u32 = RTA_DATA(rtattr);
2749 *u32 = tswap32(*u32);
2752 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2758 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2759 uint32_t rtattr_len)
2761 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2762 target_to_host_data_link_rtattr);
2765 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2766 uint32_t rtattr_len)
2768 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2769 target_to_host_data_addr_rtattr);
2772 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2773 uint32_t rtattr_len)
2775 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2776 target_to_host_data_route_rtattr);
2779 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2781 struct ifinfomsg *ifi;
2782 struct ifaddrmsg *ifa;
2785 switch (nlh->nlmsg_type) {
2790 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2791 ifi = NLMSG_DATA(nlh);
2792 ifi->ifi_type = tswap16(ifi->ifi_type);
2793 ifi->ifi_index = tswap32(ifi->ifi_index);
2794 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2795 ifi->ifi_change = tswap32(ifi->ifi_change);
2796 target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2797 NLMSG_LENGTH(sizeof(*ifi)));
2803 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2804 ifa = NLMSG_DATA(nlh);
2805 ifa->ifa_index = tswap32(ifa->ifa_index);
2806 target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2807 NLMSG_LENGTH(sizeof(*ifa)));
2814 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2815 rtm = NLMSG_DATA(nlh);
2816 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2817 target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2818 NLMSG_LENGTH(sizeof(*rtm)));
2822 return -TARGET_EOPNOTSUPP;
2827 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2829 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2831 #endif /* CONFIG_RTNETLINK */
2833 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2835 switch (nlh->nlmsg_type) {
2837 gemu_log("Unknown host audit message type %d\n",
2839 return -TARGET_EINVAL;
2844 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2847 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2850 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2852 switch (nlh->nlmsg_type) {
2854 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2855 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2858 gemu_log("Unknown target audit message type %d\n",
2860 return -TARGET_EINVAL;
2866 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2868 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
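/* For NETLINK_AUDIT only the netlink header itself needs conversion:
 * payloads in the user-message ranges are opaque text and pass through
 * unchanged.
 */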
2871 /* do_setsockopt() must return target values and target errnos. */
2872 static abi_long do_setsockopt(int sockfd, int level, int optname,
2873 abi_ulong optval_addr, socklen_t optlen)
2877 struct ip_mreqn *ip_mreq;
2878 struct ip_mreq_source *ip_mreq_source;
2882 /* TCP options all take an 'int' value. */
2883 if (optlen < sizeof(uint32_t))
2884 return -TARGET_EINVAL;
2886 if (get_user_u32(val, optval_addr))
2887 return -TARGET_EFAULT;
2888 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2895 case IP_ROUTER_ALERT:
2899 case IP_MTU_DISCOVER:
2906 case IP_MULTICAST_TTL:
2907 case IP_MULTICAST_LOOP:
2909 if (optlen >= sizeof(uint32_t)) {
2910 if (get_user_u32(val, optval_addr))
2911 return -TARGET_EFAULT;
2912 } else if (optlen >= 1) {
2913 if (get_user_u8(val, optval_addr))
2914 return -TARGET_EFAULT;
2916 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2918 case IP_ADD_MEMBERSHIP:
2919 case IP_DROP_MEMBERSHIP:
2920 if (optlen < sizeof (struct target_ip_mreq) ||
2921 optlen > sizeof (struct target_ip_mreqn))
2922 return -TARGET_EINVAL;
2924 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2925 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2926 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2929 case IP_BLOCK_SOURCE:
2930 case IP_UNBLOCK_SOURCE:
2931 case IP_ADD_SOURCE_MEMBERSHIP:
2932 case IP_DROP_SOURCE_MEMBERSHIP:
2933 if (optlen != sizeof (struct target_ip_mreq_source))
2934 return -TARGET_EINVAL;
2936 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2937 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2938 unlock_user (ip_mreq_source, optval_addr, 0);
2947 case IPV6_MTU_DISCOVER:
2950 case IPV6_RECVPKTINFO:
2951 case IPV6_UNICAST_HOPS:
2953 case IPV6_RECVHOPLIMIT:
2954 case IPV6_2292HOPLIMIT:
2957 if (optlen < sizeof(uint32_t)) {
2958 return -TARGET_EINVAL;
2960 if (get_user_u32(val, optval_addr)) {
2961 return -TARGET_EFAULT;
2963 ret = get_errno(setsockopt(sockfd, level, optname,
2964 &val, sizeof(val)));
2968 struct in6_pktinfo pki;
2970 if (optlen < sizeof(pki)) {
2971 return -TARGET_EINVAL;
2974 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2975 return -TARGET_EFAULT;
2978 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2980 ret = get_errno(setsockopt(sockfd, level, optname,
2981 &pki, sizeof(pki)));
2992 struct icmp6_filter icmp6f;
2994 if (optlen > sizeof(icmp6f)) {
2995 optlen = sizeof(icmp6f);
2998 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2999 return -TARGET_EFAULT;
3002 for (val = 0; val < 8; val++) {
3003 icmp6f.data[val] = tswap32(icmp6f.data[val]);
3006 ret = get_errno(setsockopt(sockfd, level, optname,
3018 /* these take a u32 value */
3019 if (optlen < sizeof(uint32_t)) {
3020 return -TARGET_EINVAL;
3023 if (get_user_u32(val, optval_addr)) {
3024 return -TARGET_EFAULT;
3026 ret = get_errno(setsockopt(sockfd, level, optname,
3027 &val, sizeof(val)));
3034 case TARGET_SOL_SOCKET:
3036 case TARGET_SO_RCVTIMEO:
3040 optname = SO_RCVTIMEO;
3043 if (optlen != sizeof(struct target_timeval)) {
3044 return -TARGET_EINVAL;
3047 if (copy_from_user_timeval(&tv, optval_addr)) {
3048 return -TARGET_EFAULT;
3051 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3055 case TARGET_SO_SNDTIMEO:
3056 optname = SO_SNDTIMEO;
3058 case TARGET_SO_ATTACH_FILTER:
3060 struct target_sock_fprog *tfprog;
3061 struct target_sock_filter *tfilter;
3062 struct sock_fprog fprog;
3063 struct sock_filter *filter;
3066 if (optlen != sizeof(*tfprog)) {
3067 return -TARGET_EINVAL;
3069 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
3070 return -TARGET_EFAULT;
3072 if (!lock_user_struct(VERIFY_READ, tfilter,
3073 tswapal(tfprog->filter), 0)) {
3074 unlock_user_struct(tfprog, optval_addr, 1);
3075 return -TARGET_EFAULT;
3078 fprog.len = tswap16(tfprog->len);
3079 filter = g_try_new(struct sock_filter, fprog.len);
3080 if (filter == NULL) {
3081 unlock_user_struct(tfilter, tfprog->filter, 1);
3082 unlock_user_struct(tfprog, optval_addr, 1);
3083 return -TARGET_ENOMEM;
3085 for (i = 0; i < fprog.len; i++) {
3086 filter[i].code = tswap16(tfilter[i].code);
3087 filter[i].jt = tfilter[i].jt;
3088 filter[i].jf = tfilter[i].jf;
3089 filter[i].k = tswap32(tfilter[i].k);
3091 fprog.filter = filter;
3093 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
3094 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
3097 unlock_user_struct(tfilter, tfprog->filter, 1);
3098 unlock_user_struct(tfprog, optval_addr, 1);
3101 case TARGET_SO_BINDTODEVICE:
3103 char *dev_ifname, *addr_ifname;
3105 if (optlen > IFNAMSIZ - 1) {
3106 optlen = IFNAMSIZ - 1;
3108 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3110 return -TARGET_EFAULT;
3112 optname = SO_BINDTODEVICE;
3113 addr_ifname = alloca(IFNAMSIZ);
3114 memcpy(addr_ifname, dev_ifname, optlen);
3115 addr_ifname[optlen] = 0;
3116 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3117 addr_ifname, optlen));
3118 unlock_user (dev_ifname, optval_addr, 0);
3121 /* Options with 'int' argument. */
3122 case TARGET_SO_DEBUG:
3125 case TARGET_SO_REUSEADDR:
3126 optname = SO_REUSEADDR;
3128 case TARGET_SO_TYPE:
3131 case TARGET_SO_ERROR:
3134 case TARGET_SO_DONTROUTE:
3135 optname = SO_DONTROUTE;
3137 case TARGET_SO_BROADCAST:
3138 optname = SO_BROADCAST;
3140 case TARGET_SO_SNDBUF:
3141 optname = SO_SNDBUF;
3143 case TARGET_SO_SNDBUFFORCE:
3144 optname = SO_SNDBUFFORCE;
3146 case TARGET_SO_RCVBUF:
3147 optname = SO_RCVBUF;
3149 case TARGET_SO_RCVBUFFORCE:
3150 optname = SO_RCVBUFFORCE;
3152 case TARGET_SO_KEEPALIVE:
3153 optname = SO_KEEPALIVE;
3155 case TARGET_SO_OOBINLINE:
3156 optname = SO_OOBINLINE;
3158 case TARGET_SO_NO_CHECK:
3159 optname = SO_NO_CHECK;
3161 case TARGET_SO_PRIORITY:
3162 optname = SO_PRIORITY;
3165 case TARGET_SO_BSDCOMPAT:
3166 optname = SO_BSDCOMPAT;
3169 case TARGET_SO_PASSCRED:
3170 optname = SO_PASSCRED;
3172 case TARGET_SO_PASSSEC:
3173 optname = SO_PASSSEC;
3175 case TARGET_SO_TIMESTAMP:
3176 optname = SO_TIMESTAMP;
3178 case TARGET_SO_RCVLOWAT:
3179 optname = SO_RCVLOWAT;
3184 if (optlen < sizeof(uint32_t))
3185 return -TARGET_EINVAL;
3187 if (get_user_u32(val, optval_addr))
3188 return -TARGET_EFAULT;
3189 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3193 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3194 ret = -TARGET_ENOPROTOOPT;
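/* Worked example of the path above, assuming a target that calls
 * setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)): the
 * TARGET_SO_RCVTIMEO case converts the target's struct timeval with
 * copy_from_user_timeval() and then issues the host setsockopt() with
 * the host-sized structure, so differing timeval layouts between
 * target and host are handled transparently.
 */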
3199 /* do_getsockopt() must return target values and target errnos. */
3200 static abi_long do_getsockopt(int sockfd, int level, int optname,
3201 abi_ulong optval_addr, abi_ulong optlen)
3208 case TARGET_SOL_SOCKET:
3211 /* These don't just return a single integer */
3212 case TARGET_SO_LINGER:
3213 case TARGET_SO_RCVTIMEO:
3214 case TARGET_SO_SNDTIMEO:
3215 case TARGET_SO_PEERNAME:
3217 case TARGET_SO_PEERCRED: {
3220 struct target_ucred *tcr;
3222 if (get_user_u32(len, optlen)) {
3223 return -TARGET_EFAULT;
3226 return -TARGET_EINVAL;
3230 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
3238 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
3239 return -TARGET_EFAULT;
3241 __put_user(cr.pid, &tcr->pid);
3242 __put_user(cr.uid, &tcr->uid);
3243 __put_user(cr.gid, &tcr->gid);
3244 unlock_user_struct(tcr, optval_addr, 1);
3245 if (put_user_u32(len, optlen)) {
3246 return -TARGET_EFAULT;
3250 /* Options with 'int' argument. */
3251 case TARGET_SO_DEBUG:
3254 case TARGET_SO_REUSEADDR:
3255 optname = SO_REUSEADDR;
3257 case TARGET_SO_TYPE:
3260 case TARGET_SO_ERROR:
3263 case TARGET_SO_DONTROUTE:
3264 optname = SO_DONTROUTE;
3266 case TARGET_SO_BROADCAST:
3267 optname = SO_BROADCAST;
3269 case TARGET_SO_SNDBUF:
3270 optname = SO_SNDBUF;
3272 case TARGET_SO_RCVBUF:
3273 optname = SO_RCVBUF;
3275 case TARGET_SO_KEEPALIVE:
3276 optname = SO_KEEPALIVE;
3278 case TARGET_SO_OOBINLINE:
3279 optname = SO_OOBINLINE;
3281 case TARGET_SO_NO_CHECK:
3282 optname = SO_NO_CHECK;
3284 case TARGET_SO_PRIORITY:
3285 optname = SO_PRIORITY;
3288 case TARGET_SO_BSDCOMPAT:
3289 optname = SO_BSDCOMPAT;
3292 case TARGET_SO_PASSCRED:
3293 optname = SO_PASSCRED;
3295 case TARGET_SO_TIMESTAMP:
3296 optname = SO_TIMESTAMP;
3298 case TARGET_SO_RCVLOWAT:
3299 optname = SO_RCVLOWAT;
3301 case TARGET_SO_ACCEPTCONN:
3302 optname = SO_ACCEPTCONN;
3309 /* TCP options all take an 'int' value. */
3311 if (get_user_u32(len, optlen))
3312 return -TARGET_EFAULT;
3314 return -TARGET_EINVAL;
3316 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3319 if (optname == SO_TYPE) {
3320 val = host_to_target_sock_type(val);
3325 if (put_user_u32(val, optval_addr))
3326 return -TARGET_EFAULT;
3328 if (put_user_u8(val, optval_addr))
3329 return -TARGET_EFAULT;
3331 if (put_user_u32(len, optlen))
3332 return -TARGET_EFAULT;
3339 case IP_ROUTER_ALERT:
3343 case IP_MTU_DISCOVER:
3349 case IP_MULTICAST_TTL:
3350 case IP_MULTICAST_LOOP:
3351 if (get_user_u32(len, optlen))
3352 return -TARGET_EFAULT;
3354 return -TARGET_EINVAL;
3356 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3359 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3361 if (put_user_u32(len, optlen)
3362 || put_user_u8(val, optval_addr))
3363 return -TARGET_EFAULT;
3365 if (len > sizeof(int))
3367 if (put_user_u32(len, optlen)
3368 || put_user_u32(val, optval_addr))
3369 return -TARGET_EFAULT;
3373 ret = -TARGET_ENOPROTOOPT;
3379 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3381 ret = -TARGET_EOPNOTSUPP;
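/* lock_iovec() builds a host struct iovec array from a target iovec
 * array: every buffer is locked into host memory and the total length
 * is clamped so it cannot exceed what a single host readv/writev can
 * express.  On failure it returns NULL with errno set.  A sketch of
 * the intended pairing, with hypothetical arguments:
 *
 *     struct iovec *vec = lock_iovec(VERIFY_READ, target_addr, count, 1);
 *     if (vec != NULL) {
 *         ret = get_errno(writev(fd, vec, count));
 *         unlock_iovec(vec, target_addr, count, 0);
 *     }
 */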
3387 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3388 abi_ulong count, int copy)
3390 struct target_iovec *target_vec;
3392 abi_ulong total_len, max_len;
3395 bool bad_address = false;
3401 if (count > IOV_MAX) {
3406 vec = g_try_new0(struct iovec, count);
3412 target_vec = lock_user(VERIFY_READ, target_addr,
3413 count * sizeof(struct target_iovec), 1);
3414 if (target_vec == NULL) {
3419 /* ??? If host page size > target page size, this will result in a
3420 value larger than what we can actually support. */
3421 max_len = 0x7fffffff & TARGET_PAGE_MASK;
3424 for (i = 0; i < count; i++) {
3425 abi_ulong base = tswapal(target_vec[i].iov_base);
3426 abi_long len = tswapal(target_vec[i].iov_len);
3431 } else if (len == 0) {
3432 /* A pointer with zero length is ignored. */
3433 vec[i].iov_base = 0;
3435 vec[i].iov_base = lock_user(type, base, len, copy);
3436 /* If the first buffer pointer is bad, this is a fault. But
3437 * subsequent bad buffers will result in a partial write; this
3438 * is realized by filling the vector with null pointers and
3439 * zero lengths. */
3440 if (!vec[i].iov_base) {
3451 if (len > max_len - total_len) {
3452 len = max_len - total_len;
3455 vec[i].iov_len = len;
3459 unlock_user(target_vec, target_addr, 0);
3464 if (tswapal(target_vec[i].iov_len) > 0) {
3465 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3468 unlock_user(target_vec, target_addr, 0);
3475 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3476 abi_ulong count, int copy)
3478 struct target_iovec *target_vec;
3481 target_vec = lock_user(VERIFY_READ, target_addr,
3482 count * sizeof(struct target_iovec), 1);
3484 for (i = 0; i < count; i++) {
3485 abi_ulong base = tswapal(target_vec[i].iov_base);
3486 abi_long len = tswapal(target_vec[i].iov_len);
3490 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3492 unlock_user(target_vec, target_addr, 0);
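/* Translate the target's socket type constant and flags into host
 * values.  The low bits select SOCK_DGRAM/SOCK_STREAM/..., while
 * TARGET_SOCK_CLOEXEC and TARGET_SOCK_NONBLOCK are mapped onto the
 * host flags when the host defines them.
 */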
3498 static inline int target_to_host_sock_type(int *type)
3501 int target_type = *type;
3503 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3504 case TARGET_SOCK_DGRAM:
3505 host_type = SOCK_DGRAM;
3507 case TARGET_SOCK_STREAM:
3508 host_type = SOCK_STREAM;
3511 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3514 if (target_type & TARGET_SOCK_CLOEXEC) {
3515 #if defined(SOCK_CLOEXEC)
3516 host_type |= SOCK_CLOEXEC;
3518 return -TARGET_EINVAL;
3521 if (target_type & TARGET_SOCK_NONBLOCK) {
3522 #if defined(SOCK_NONBLOCK)
3523 host_type |= SOCK_NONBLOCK;
3524 #elif !defined(O_NONBLOCK)
3525 return -TARGET_EINVAL;
3532 /* Try to emulate socket type flags after socket creation. */
3533 static int sock_flags_fixup(int fd, int target_type)
3535 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3536 if (target_type & TARGET_SOCK_NONBLOCK) {
3537 int flags = fcntl(fd, F_GETFL);
3538 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3540 return -TARGET_EINVAL;
3547 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3548 abi_ulong target_addr,
3551 struct sockaddr *addr = host_addr;
3552 struct target_sockaddr *target_saddr;
3554 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3555 if (!target_saddr) {
3556 return -TARGET_EFAULT;
3559 memcpy(addr, target_saddr, len);
3560 addr->sa_family = tswap16(target_saddr->sa_family);
3561 /* spkt_protocol is big-endian */
3563 unlock_user(target_saddr, target_addr, 0);
3567 static TargetFdTrans target_packet_trans = {
3568 .target_to_host_addr = packet_target_to_host_sockaddr,
3571 #ifdef CONFIG_RTNETLINK
3572 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3576 ret = target_to_host_nlmsg_route(buf, len);
3584 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3588 ret = host_to_target_nlmsg_route(buf, len);
3596 static TargetFdTrans target_netlink_route_trans = {
3597 .target_to_host_data = netlink_route_target_to_host,
3598 .host_to_target_data = netlink_route_host_to_target,
3600 #endif /* CONFIG_RTNETLINK */
3602 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3606 ret = target_to_host_nlmsg_audit(buf, len);
3614 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3618 ret = host_to_target_nlmsg_audit(buf, len);
3626 static TargetFdTrans target_netlink_audit_trans = {
3627 .target_to_host_data = netlink_audit_target_to_host,
3628 .host_to_target_data = netlink_audit_host_to_target,
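/* A TargetFdTrans registered against a file descriptor supplies hooks
 * that convert the data (and, for SOCK_PACKET sockets, the sockaddr)
 * flowing through that descriptor; do_socket() below registers the
 * right set of hooks for packet and netlink sockets so that later
 * sendmsg()/recvmsg() calls can translate payloads transparently.
 */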
3631 /* do_socket() must return target values and target errnos. */
3632 static abi_long do_socket(int domain, int type, int protocol)
3634 int target_type = type;
3637 ret = target_to_host_sock_type(&type);
3642 if (domain == PF_NETLINK && !(
3643 #ifdef CONFIG_RTNETLINK
3644 protocol == NETLINK_ROUTE ||
3646 protocol == NETLINK_KOBJECT_UEVENT ||
3647 protocol == NETLINK_AUDIT)) {
3648 return -EPFNOSUPPORT;
3651 if (domain == AF_PACKET ||
3652 (domain == AF_INET && type == SOCK_PACKET)) {
3653 protocol = tswap16(protocol);
3656 ret = get_errno(socket(domain, type, protocol));
3658 ret = sock_flags_fixup(ret, target_type);
3659 if (type == SOCK_PACKET) {
3660 /* Manage an obsolete case:
3661 * if the socket type is SOCK_PACKET, bind by name
3663 fd_trans_register(ret, &target_packet_trans);
3664 } else if (domain == PF_NETLINK) {
3666 #ifdef CONFIG_RTNETLINK
3668 fd_trans_register(ret, &target_netlink_route_trans);
3671 case NETLINK_KOBJECT_UEVENT:
3672 /* nothing to do: messages are strings */
3675 fd_trans_register(ret, &target_netlink_audit_trans);
3678 g_assert_not_reached();
3685 /* do_bind() must return target values and target errnos. */
3686 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3692 if ((int)addrlen < 0) {
3693 return -TARGET_EINVAL;
3696 addr = alloca(addrlen+1);
3698 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3702 return get_errno(bind(sockfd, addr, addrlen));
3705 /* do_connect() must return target values and target errnos. */
3706 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3712 if ((int)addrlen < 0) {
3713 return -TARGET_EINVAL;
3716 addr = alloca(addrlen+1);
3718 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3722 return get_errno(safe_connect(sockfd, addr, addrlen));
3725 /* do_sendrecvmsg_locked() must return target values and target errnos. */
3726 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3727 int flags, int send)
3733 abi_ulong target_vec;
3735 if (msgp->msg_name) {
3736 msg.msg_namelen = tswap32(msgp->msg_namelen);
3737 msg.msg_name = alloca(msg.msg_namelen+1);
3738 ret = target_to_host_sockaddr(fd, msg.msg_name,
3739 tswapal(msgp->msg_name),
3741 if (ret == -TARGET_EFAULT) {
3742 /* For connected sockets msg_name and msg_namelen must
3743 * be ignored, so returning EFAULT immediately is wrong.
3744 * Instead, pass a bad msg_name to the host kernel, and
3745 * let it decide whether to return EFAULT or not.
3747 msg.msg_name = (void *)-1;
3752 msg.msg_name = NULL;
3753 msg.msg_namelen = 0;
3755 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3756 msg.msg_control = alloca(msg.msg_controllen);
3757 msg.msg_flags = tswap32(msgp->msg_flags);
3759 count = tswapal(msgp->msg_iovlen);
3760 target_vec = tswapal(msgp->msg_iov);
3762 if (count > IOV_MAX) {
3763 /* sendmsg/recvmsg return a different errno for this condition than
3764 * readv/writev, so we must catch it here before lock_iovec() does.
3766 ret = -TARGET_EMSGSIZE;
3770 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3771 target_vec, count, send);
3773 ret = -host_to_target_errno(errno);
3776 msg.msg_iovlen = count;
3780 if (fd_trans_target_to_host_data(fd)) {
3783 host_msg = g_malloc(msg.msg_iov->iov_len);
3784 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3785 ret = fd_trans_target_to_host_data(fd)(host_msg,
3786 msg.msg_iov->iov_len);
3788 msg.msg_iov->iov_base = host_msg;
3789 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3793 ret = target_to_host_cmsg(&msg, msgp);
3795 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3799 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3800 if (!is_error(ret)) {
3802 if (fd_trans_host_to_target_data(fd)) {
3803 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3806 ret = host_to_target_cmsg(msgp, &msg);
3808 if (!is_error(ret)) {
3809 msgp->msg_namelen = tswap32(msg.msg_namelen);
3810 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3811 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3812 msg.msg_name, msg.msg_namelen);
3824 unlock_iovec(vec, target_vec, count, !send);
3829 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3830 int flags, int send)
3833 struct target_msghdr *msgp;
3835 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3839 return -TARGET_EFAULT;
3841 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3842 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3846 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3847 * so it might not have this *mmsg-specific flag either.
3849 #ifndef MSG_WAITFORONE
3850 #define MSG_WAITFORONE 0x10000
3853 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3854 unsigned int vlen, unsigned int flags,
3857 struct target_mmsghdr *mmsgp;
3861 if (vlen > UIO_MAXIOV) {
3865 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3867 return -TARGET_EFAULT;
3870 for (i = 0; i < vlen; i++) {
3871 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3872 if (is_error(ret)) {
3875 mmsgp[i].msg_len = tswap32(ret);
3876 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3877 if (flags & MSG_WAITFORONE) {
3878 flags |= MSG_DONTWAIT;
3882 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3884 /* Return the number of datagrams sent if we sent any at all;
3885 * otherwise return the error.
3893 /* do_accept4() must return target values and target errnos. */
3894 static abi_long do_accept4(int fd, abi_ulong target_addr,
3895 abi_ulong target_addrlen_addr, int flags)
3902 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3904 if (target_addr == 0) {
3905 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3908 /* Linux returns EINVAL if the addrlen pointer is invalid */
3909 if (get_user_u32(addrlen, target_addrlen_addr))
3910 return -TARGET_EINVAL;
3912 if ((int)addrlen < 0) {
3913 return -TARGET_EINVAL;
3916 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3917 return -TARGET_EINVAL;
3919 addr = alloca(addrlen);
3921 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3922 if (!is_error(ret)) {
3923 host_to_target_sockaddr(target_addr, addr, addrlen);
3924 if (put_user_u32(addrlen, target_addrlen_addr))
3925 ret = -TARGET_EFAULT;
3930 /* do_getpeername() must return target values and target errnos. */
3931 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3932 abi_ulong target_addrlen_addr)
3938 if (get_user_u32(addrlen, target_addrlen_addr))
3939 return -TARGET_EFAULT;
3941 if ((int)addrlen < 0) {
3942 return -TARGET_EINVAL;
3945 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3946 return -TARGET_EFAULT;
3948 addr = alloca(addrlen);
3950 ret = get_errno(getpeername(fd, addr, &addrlen));
3951 if (!is_error(ret)) {
3952 host_to_target_sockaddr(target_addr, addr, addrlen);
3953 if (put_user_u32(addrlen, target_addrlen_addr))
3954 ret = -TARGET_EFAULT;
3959 /* do_getsockname() must return target values and target errnos. */
3960 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3961 abi_ulong target_addrlen_addr)
3967 if (get_user_u32(addrlen, target_addrlen_addr))
3968 return -TARGET_EFAULT;
3970 if ((int)addrlen < 0) {
3971 return -TARGET_EINVAL;
3974 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3975 return -TARGET_EFAULT;
3977 addr = alloca(addrlen);
3979 ret = get_errno(getsockname(fd, addr, &addrlen));
3980 if (!is_error(ret)) {
3981 host_to_target_sockaddr(target_addr, addr, addrlen);
3982 if (put_user_u32(addrlen, target_addrlen_addr))
3983 ret = -TARGET_EFAULT;
3988 /* do_socketpair() must return target values and target errnos. */
3989 static abi_long do_socketpair(int domain, int type, int protocol,
3990 abi_ulong target_tab_addr)
3995 target_to_host_sock_type(&type);
3997 ret = get_errno(socketpair(domain, type, protocol, tab));
3998 if (!is_error(ret)) {
3999 if (put_user_s32(tab[0], target_tab_addr)
4000 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
4001 ret = -TARGET_EFAULT;
4006 /* do_sendto() must return target values and target errnos. */
4007 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
4008 abi_ulong target_addr, socklen_t addrlen)
4012 void *copy_msg = NULL;
4015 if ((int)addrlen < 0) {
4016 return -TARGET_EINVAL;
4019 host_msg = lock_user(VERIFY_READ, msg, len, 1);
4021 return -TARGET_EFAULT;
4022 if (fd_trans_target_to_host_data(fd)) {
4023 copy_msg = host_msg;
4024 host_msg = g_malloc(len);
4025 memcpy(host_msg, copy_msg, len);
4026 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
4032 addr = alloca(addrlen+1);
4033 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
4037 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
4039 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
4044 host_msg = copy_msg;
4046 unlock_user(host_msg, msg, 0);
4050 /* do_recvfrom() must return target values and target errnos. */
4051 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
4052 abi_ulong target_addr,
4053 abi_ulong target_addrlen)
4060 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
4062 return -TARGET_EFAULT;
4064 if (get_user_u32(addrlen, target_addrlen)) {
4065 ret = -TARGET_EFAULT;
4068 if ((int)addrlen < 0) {
4069 ret = -TARGET_EINVAL;
4072 addr = alloca(addrlen);
4073 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
4076 addr = NULL; /* To keep compiler quiet. */
4077 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
4079 if (!is_error(ret)) {
4080 if (fd_trans_host_to_target_data(fd)) {
4081 ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
4084 host_to_target_sockaddr(target_addr, addr, addrlen);
4085 if (put_user_u32(addrlen, target_addrlen)) {
4086 ret = -TARGET_EFAULT;
4090 unlock_user(host_msg, msg, len);
4093 unlock_user(host_msg, msg, 0);
4098 #ifdef TARGET_NR_socketcall
4099 /* do_socketcall() must return target values and target errnos. */
4100 static abi_long do_socketcall(int num, abi_ulong vptr)
4102 static const unsigned nargs[] = { /* number of arguments per operation */
4103 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
4104 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
4105 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
4106 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
4107 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
4108 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
4109 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
4110 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
4111 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
4112 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
4113 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
4114 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
4115 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
4116 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
4117 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
4118 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
4119 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
4120 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
4121 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
4122 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
4124 abi_long a[6]; /* max 6 args */
4127 /* check the range of the first argument num */
4128 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
4129 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
4130 return -TARGET_EINVAL;
4132 /* ensure we have space for args */
4133 if (nargs[num] > ARRAY_SIZE(a)) {
4134 return -TARGET_EINVAL;
4136 /* collect the arguments in a[] according to nargs[] */
4137 for (i = 0; i < nargs[num]; ++i) {
4138 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
4139 return -TARGET_EFAULT;
4142 /* now when we have the args, invoke the appropriate underlying function */
4144 case TARGET_SYS_SOCKET: /* domain, type, protocol */
4145 return do_socket(a[0], a[1], a[2]);
4146 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
4147 return do_bind(a[0], a[1], a[2]);
4148 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
4149 return do_connect(a[0], a[1], a[2]);
4150 case TARGET_SYS_LISTEN: /* sockfd, backlog */
4151 return get_errno(listen(a[0], a[1]));
4152 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
4153 return do_accept4(a[0], a[1], a[2], 0);
4154 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
4155 return do_getsockname(a[0], a[1], a[2]);
4156 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
4157 return do_getpeername(a[0], a[1], a[2]);
4158 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
4159 return do_socketpair(a[0], a[1], a[2], a[3]);
4160 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
4161 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
4162 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
4163 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
4164 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
4165 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
4166 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
4167 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
4168 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
4169 return get_errno(shutdown(a[0], a[1]));
4170 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4171 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
4172 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4173 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
4174 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
4175 return do_sendrecvmsg(a[0], a[1], a[2], 1);
4176 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
4177 return do_sendrecvmsg(a[0], a[1], a[2], 0);
4178 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
4179 return do_accept4(a[0], a[1], a[2], a[3]);
4180 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
4181 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
4182 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
4183 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
4185 gemu_log("Unsupported socketcall: %d\n", num);
4186 return -TARGET_EINVAL;
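/* Bookkeeping for SysV shared memory: each target shmat() is recorded
 * here (presumably the attach address and segment size) so that a
 * later shmdt() can locate and unmap the corresponding host mapping.
 */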
4191 #define N_SHM_REGIONS 32
4193 static struct shm_region {
4197 } shm_regions[N_SHM_REGIONS];
4199 #ifndef TARGET_SEMID64_DS
4200 /* asm-generic version of this struct */
4201 struct target_semid64_ds
4203 struct target_ipc_perm sem_perm;
4204 abi_ulong sem_otime;
4205 #if TARGET_ABI_BITS == 32
4206 abi_ulong __unused1;
4208 abi_ulong sem_ctime;
4209 #if TARGET_ABI_BITS == 32
4210 abi_ulong __unused2;
4212 abi_ulong sem_nsems;
4213 abi_ulong __unused3;
4214 abi_ulong __unused4;
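/* The __unusedN padding mirrors the kernel's asm-generic semid64_ds:
 * on 32-bit ABIs each time field is followed by a reserved word so the
 * layout stays compatible with the 64-bit variant.
 */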
4218 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4219 abi_ulong target_addr)
4221 struct target_ipc_perm *target_ip;
4222 struct target_semid64_ds *target_sd;
4224 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4225 return -TARGET_EFAULT;
4226 target_ip = &(target_sd->sem_perm);
4227 host_ip->__key = tswap32(target_ip->__key);
4228 host_ip->uid = tswap32(target_ip->uid);
4229 host_ip->gid = tswap32(target_ip->gid);
4230 host_ip->cuid = tswap32(target_ip->cuid);
4231 host_ip->cgid = tswap32(target_ip->cgid);
4232 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4233 host_ip->mode = tswap32(target_ip->mode);
4235 host_ip->mode = tswap16(target_ip->mode);
4237 #if defined(TARGET_PPC)
4238 host_ip->__seq = tswap32(target_ip->__seq);
4240 host_ip->__seq = tswap16(target_ip->__seq);
4242 unlock_user_struct(target_sd, target_addr, 0);
4246 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4247 struct ipc_perm *host_ip)
4249 struct target_ipc_perm *target_ip;
4250 struct target_semid64_ds *target_sd;
4252 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4253 return -TARGET_EFAULT;
4254 target_ip = &(target_sd->sem_perm);
4255 target_ip->__key = tswap32(host_ip->__key);
4256 target_ip->uid = tswap32(host_ip->uid);
4257 target_ip->gid = tswap32(host_ip->gid);
4258 target_ip->cuid = tswap32(host_ip->cuid);
4259 target_ip->cgid = tswap32(host_ip->cgid);
4260 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4261 target_ip->mode = tswap32(host_ip->mode);
4263 target_ip->mode = tswap16(host_ip->mode);
4265 #if defined(TARGET_PPC)
4266 target_ip->__seq = tswap32(host_ip->__seq);
4268 target_ip->__seq = tswap16(host_ip->__seq);
4270 unlock_user_struct(target_sd, target_addr, 1);
4274 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4275 abi_ulong target_addr)
4277 struct target_semid64_ds *target_sd;
4279 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4280 return -TARGET_EFAULT;
4281 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4282 return -TARGET_EFAULT;
4283 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4284 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4285 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4286 unlock_user_struct(target_sd, target_addr, 0);
4290 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4291 struct semid_ds *host_sd)
4293 struct target_semid64_ds *target_sd;
4295 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4296 return -TARGET_EFAULT;
4297 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4298 return -TARGET_EFAULT;
4299 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4300 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4301 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4302 unlock_user_struct(target_sd, target_addr, 1);
4306 struct target_seminfo {
4319 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4320 struct seminfo *host_seminfo)
4322 struct target_seminfo *target_seminfo;
4323 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4324 return -TARGET_EFAULT;
4325 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4326 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4327 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4328 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4329 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4330 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4331 __put_user(host_seminfo->semume, &target_seminfo->semume);
4332 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4333 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4334 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4335 unlock_user_struct(target_seminfo, target_addr, 1);
4341 struct semid_ds *buf;
4342 unsigned short *array;
4343 struct seminfo *__buf;
4346 union target_semun {
4353 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4354 abi_ulong target_addr)
4357 unsigned short *array;
4359 struct semid_ds semid_ds;
4362 semun.buf = &semid_ds;
4364 ret = semctl(semid, 0, IPC_STAT, semun);
4366 return get_errno(ret);
4368 nsems = semid_ds.sem_nsems;
4370 *host_array = g_try_new(unsigned short, nsems);
4372 return -TARGET_ENOMEM;
4374 array = lock_user(VERIFY_READ, target_addr,
4375 nsems*sizeof(unsigned short), 1);
4377 g_free(*host_array);
4378 return -TARGET_EFAULT;
4381 for (i = 0; i < nsems; i++) {
4382 __get_user((*host_array)[i], &array[i]);
4384 unlock_user(array, target_addr, 0);
4389 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4390 unsigned short **host_array)
4393 unsigned short *array;
4395 struct semid_ds semid_ds;
4398 semun.buf = &semid_ds;
4400 ret = semctl(semid, 0, IPC_STAT, semun);
4402 return get_errno(ret);
4404 nsems = semid_ds.sem_nsems;
4406 array = lock_user(VERIFY_WRITE, target_addr,
4407 nsems*sizeof(unsigned short), 0);
4409 return -TARGET_EFAULT;
4411 for (i = 0; i < nsems; i++) {
4412 __put_user((*host_array)[i], &array[i]);
4414 g_free(*host_array);
4415 unlock_user(array, target_addr, 1);
4420 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4421 abi_ulong target_arg)
4423 union target_semun target_su = { .buf = target_arg };
4425 struct semid_ds dsarg;
4426 unsigned short *array = NULL;
4427 struct seminfo seminfo;
4428 abi_long ret = -TARGET_EINVAL;
4435 /* In 64-bit cross-endian situations, we would erroneously pick up
4436 * the wrong half of the union for the "val" element. To rectify
4437 * this, the entire 8-byte structure is byteswapped, followed by
4438 * a swap of the 4-byte val field. In other cases, the data is
4439 * already in proper host byte order. */
4440 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4441 target_su.buf = tswapal(target_su.buf);
4442 arg.val = tswap32(target_su.val);
4444 arg.val = target_su.val;
4446 ret = get_errno(semctl(semid, semnum, cmd, arg));
4450 err = target_to_host_semarray(semid, &array, target_su.array);
4454 ret = get_errno(semctl(semid, semnum, cmd, arg));
4455 err = host_to_target_semarray(semid, target_su.array, &array);
4462 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4466 ret = get_errno(semctl(semid, semnum, cmd, arg));
4467 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4473 arg.__buf = &seminfo;
4474 ret = get_errno(semctl(semid, semnum, cmd, arg));
4475 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4483 ret = get_errno(semctl(semid, semnum, cmd, NULL));
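/* Worked example of the union fixup above: on a 64-bit big-endian
 * target running on a little-endian host, the 4-byte "val" member
 * occupies the opposite half of the 8-byte union from where the host
 * expects it.  Swapping the whole abi_ulong first and then the 32-bit
 * value lands the bytes in the right place:
 *
 *     target_su.buf = tswapal(target_su.buf);   realign the two halves
 *     arg.val = tswap32(target_su.val);         fix the value itself
 */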
4490 struct target_sembuf {
4491 unsigned short sem_num;
4496 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4497 abi_ulong target_addr,
4500 struct target_sembuf *target_sembuf;
4503 target_sembuf = lock_user(VERIFY_READ, target_addr,
4504 nsops*sizeof(struct target_sembuf), 1);
4506 return -TARGET_EFAULT;
4508 for (i = 0; i < nsops; i++) {
4509 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4510 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4511 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4514 unlock_user(target_sembuf, target_addr, 0);
4519 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4521 struct sembuf sops[nsops];
4523 if (target_to_host_sembuf(sops, ptr, nsops))
4524 return -TARGET_EFAULT;
4526 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
4529 struct target_msqid_ds
4531 struct target_ipc_perm msg_perm;
4532 abi_ulong msg_stime;
4533 #if TARGET_ABI_BITS == 32
4534 abi_ulong __unused1;
4536 abi_ulong msg_rtime;
4537 #if TARGET_ABI_BITS == 32
4538 abi_ulong __unused2;
4540 abi_ulong msg_ctime;
4541 #if TARGET_ABI_BITS == 32
4542 abi_ulong __unused3;
4544 abi_ulong __msg_cbytes;
4546 abi_ulong msg_qbytes;
4547 abi_ulong msg_lspid;
4548 abi_ulong msg_lrpid;
4549 abi_ulong __unused4;
4550 abi_ulong __unused5;
4553 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4554 abi_ulong target_addr)
4556 struct target_msqid_ds *target_md;
4558 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4559 return -TARGET_EFAULT;
4560 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4561 return -TARGET_EFAULT;
4562 host_md->msg_stime = tswapal(target_md->msg_stime);
4563 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4564 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4565 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4566 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4567 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4568 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4569 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4570 unlock_user_struct(target_md, target_addr, 0);
4574 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4575 struct msqid_ds *host_md)
4577 struct target_msqid_ds *target_md;
4579 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4580 return -TARGET_EFAULT;
4581 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4582 return -TARGET_EFAULT;
4583 target_md->msg_stime = tswapal(host_md->msg_stime);
4584 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4585 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4586 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4587 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4588 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4589 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4590 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4591 unlock_user_struct(target_md, target_addr, 1);
4595 struct target_msginfo {
4603 unsigned short int msgseg;
4606 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4607 struct msginfo *host_msginfo)
4609 struct target_msginfo *target_msginfo;
4610 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4611 return -TARGET_EFAULT;
4612 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4613 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4614 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4615 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4616 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4617 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4618 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4619 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4620 unlock_user_struct(target_msginfo, target_addr, 1);
4624 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4626 struct msqid_ds dsarg;
4627 struct msginfo msginfo;
4628 abi_long ret = -TARGET_EINVAL;
4636 if (target_to_host_msqid_ds(&dsarg,ptr))
4637 return -TARGET_EFAULT;
4638 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4639 if (host_to_target_msqid_ds(ptr,&dsarg))
4640 return -TARGET_EFAULT;
4643 ret = get_errno(msgctl(msgid, cmd, NULL));
4647 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4648 if (host_to_target_msginfo(ptr, &msginfo))
4649 return -TARGET_EFAULT;
4656 struct target_msgbuf {
4661 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4662 ssize_t msgsz, int msgflg)
4664 struct target_msgbuf *target_mb;
4665 struct msgbuf *host_mb;
4669 return -TARGET_EINVAL;
4672 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4673 return -TARGET_EFAULT;
4674 host_mb = g_try_malloc(msgsz + sizeof(long));
4676 unlock_user_struct(target_mb, msgp, 0);
4677 return -TARGET_ENOMEM;
4679 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4680 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4681 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4683 unlock_user_struct(target_mb, msgp, 0);
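/* Illustrative guest-side call (hedged): target_msgbuf mirrors the classic
 * SysV msgbuf layout, a long mtype followed by opaque payload bytes:
 *
 *     struct { long mtype; char mtext[64]; } m = { 1, "hello" };
 *     msgsnd(msqid, &m, strlen(m.mtext) + 1, 0);
 *
 * Only mtype needs byte-swapping (tswapal() above); mtext is copied
 * verbatim because the kernel treats it as uninterpreted bytes.
 */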
4688 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4689 ssize_t msgsz, abi_long msgtyp,
4692 struct target_msgbuf *target_mb;
4694 struct msgbuf *host_mb;
4698 return -TARGET_EINVAL;
4701 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4702 return -TARGET_EFAULT;
4704 host_mb = g_try_malloc(msgsz + sizeof(long));
4706 ret = -TARGET_ENOMEM;
4709 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4712 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4713 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4714 if (!target_mtext) {
4715 ret = -TARGET_EFAULT;
4718 memcpy(target_mb->mtext, host_mb->mtext, ret);
4719 unlock_user(target_mtext, target_mtext_addr, ret);
4722 target_mb->mtype = tswapal(host_mb->mtype);
4726 unlock_user_struct(target_mb, msgp, 1);
4731 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4732 abi_ulong target_addr)
4734 struct target_shmid_ds *target_sd;
4736 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4737 return -TARGET_EFAULT;
4738 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4739 return -TARGET_EFAULT;
4740 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4741 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4742 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4743 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4744 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4745 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4746 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4747 unlock_user_struct(target_sd, target_addr, 0);
4751 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4752 struct shmid_ds *host_sd)
4754 struct target_shmid_ds *target_sd;
4756 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4757 return -TARGET_EFAULT;
4758 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4759 return -TARGET_EFAULT;
4760 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4761 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4762 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4763 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4764 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4765 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4766 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4767 unlock_user_struct(target_sd, target_addr, 1);
4771 struct target_shminfo {
4779 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4780 struct shminfo *host_shminfo)
4782 struct target_shminfo *target_shminfo;
4783 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4784 return -TARGET_EFAULT;
4785 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4786 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4787 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4788 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4789 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4790 unlock_user_struct(target_shminfo, target_addr, 1);
4794 struct target_shm_info {
4799 abi_ulong swap_attempts;
4800 abi_ulong swap_successes;
4803 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4804 struct shm_info *host_shm_info)
4806 struct target_shm_info *target_shm_info;
4807 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4808 return -TARGET_EFAULT;
4809 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4810 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4811 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4812 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4813 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4814 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4815 unlock_user_struct(target_shm_info, target_addr, 1);
4819 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4821 struct shmid_ds dsarg;
4822 struct shminfo shminfo;
4823 struct shm_info shm_info;
4824 abi_long ret = -TARGET_EINVAL;
4832 if (target_to_host_shmid_ds(&dsarg, buf))
4833 return -TARGET_EFAULT;
4834 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4835 if (host_to_target_shmid_ds(buf, &dsarg))
4836 return -TARGET_EFAULT;
4839 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4840 if (host_to_target_shminfo(buf, &shminfo))
4841 return -TARGET_EFAULT;
4844 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4845 if (host_to_target_shm_info(buf, &shm_info))
4846 return -TARGET_EFAULT;
4851 ret = get_errno(shmctl(shmid, cmd, NULL));
4858 #ifndef TARGET_FORCE_SHMLBA
4859 /* For most architectures, SHMLBA is the same as the page size;
4860 * some architectures have larger values, in which case they should
4861 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4862 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4863 * and defining its own value for SHMLBA.
4865 * The kernel also permits SHMLBA to be set by the architecture to a
4866 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4867 * this means that addresses are rounded to the large size if
4868 * SHM_RND is set but addresses not aligned to that size are not rejected
4869 * as long as they are at least page-aligned. Since the only architecture
4870  * which uses this is ia64, this code doesn't provide for that oddity.
4872 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4874 return TARGET_PAGE_SIZE;
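/* Hedged sketch of the override path described above (an assumption drawn
 * from the comment, not code from this file): an architecture with a
 * cache-aliasing constraint would provide, in its target headers,
 *
 *     #define TARGET_FORCE_SHMLBA 1
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 0x4000;    // hypothetical 16K attach alignment
 *     }
 *
 * and do_shmat() below would then round or reject guest addresses against
 * that value instead of TARGET_PAGE_SIZE.
 */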
4878 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4879 int shmid, abi_ulong shmaddr, int shmflg)
4883 struct shmid_ds shm_info;
4887 /* find out the length of the shared memory segment */
4888 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4889 if (is_error(ret)) {
4890 /* can't get length, bail out */
4894 shmlba = target_shmlba(cpu_env);
4896 if (shmaddr & (shmlba - 1)) {
4897 if (shmflg & SHM_RND) {
4898 shmaddr &= ~(shmlba - 1);
4900 return -TARGET_EINVAL;
4903 if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4904 return -TARGET_EINVAL;
4910 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4912 abi_ulong mmap_start;
4914 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
4916 if (mmap_start == -1) {
4918 host_raddr = (void *)-1;
4920 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4923 if (host_raddr == (void *)-1) {
4925 return get_errno((long)host_raddr);
4927     raddr = h2g((unsigned long)host_raddr);
4929 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4930 PAGE_VALID | PAGE_READ |
4931                    ((shmflg & SHM_RDONLY) ? 0 : PAGE_WRITE));
4933 for (i = 0; i < N_SHM_REGIONS; i++) {
4934 if (!shm_regions[i].in_use) {
4935 shm_regions[i].in_use = true;
4936 shm_regions[i].start = raddr;
4937 shm_regions[i].size = shm_info.shm_segsz;
4947 static inline abi_long do_shmdt(abi_ulong shmaddr)
4951 for (i = 0; i < N_SHM_REGIONS; ++i) {
4952 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4953 shm_regions[i].in_use = false;
4954 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4959 return get_errno(shmdt(g2h(shmaddr)));
4962 #ifdef TARGET_NR_ipc
4963 /* ??? This only works with linear mappings. */
4964 /* do_ipc() must return target values and target errnos. */
4965 static abi_long do_ipc(CPUArchState *cpu_env,
4966 unsigned int call, abi_long first,
4967 abi_long second, abi_long third,
4968 abi_long ptr, abi_long fifth)
4973 version = call >> 16;
4978 ret = do_semop(first, ptr, second);
4982 ret = get_errno(semget(first, second, third));
4985 case IPCOP_semctl: {
4986         /* The semun argument to semctl is passed by value, so dereference the ptr argument. */
4989 get_user_ual(atptr, ptr);
4990 ret = do_semctl(first, second, third, atptr);
4995 ret = get_errno(msgget(first, second));
4999 ret = do_msgsnd(first, ptr, second, third);
5003 ret = do_msgctl(first, second, ptr);
5010 struct target_ipc_kludge {
5015 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
5016 ret = -TARGET_EFAULT;
5020 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
5022 unlock_user_struct(tmp, ptr, 0);
5026 ret = do_msgrcv(first, ptr, second, fifth, third);
5035 raddr = do_shmat(cpu_env, first, ptr, second);
5036 if (is_error(raddr))
5037 return get_errno(raddr);
5038 if (put_user_ual(raddr, third))
5039 return -TARGET_EFAULT;
5043 ret = -TARGET_EINVAL;
5048 ret = do_shmdt(ptr);
5052         /* IPC_* flag values are the same on all Linux platforms */
5053 ret = get_errno(shmget(first, second, third));
5056         /* IPC_* and SHM_* command values are the same on all Linux platforms */
5058 ret = do_shmctl(first, second, ptr);
5061 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
5062 ret = -TARGET_ENOSYS;
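/* Illustrative note (hedged): on targets that funnel SysV IPC through the
 * multiplexed ipc(2) entry point, the guest libc packs an operation and an
 * ABI version into the single call number, roughly
 *
 *     call = IPCOP_msgrcv | (version << 16);
 *
 * which is why do_ipc() above splits the value with "version = call >> 16"
 * and dispatches on the low 16 bits.
 */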
5069 /* kernel structure types definitions */
5071 #define STRUCT(name, ...) STRUCT_ ## name,
5072 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5074 #include "syscall_types.h"
5078 #undef STRUCT_SPECIAL
5080 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
5081 #define STRUCT_SPECIAL(name)
5082 #include "syscall_types.h"
5084 #undef STRUCT_SPECIAL
5086 typedef struct IOCTLEntry IOCTLEntry;
5088 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
5089 int fd, int cmd, abi_long arg);
5093 unsigned int host_cmd;
5096 do_ioctl_fn *do_ioctl;
5097 const argtype arg_type[5];
5100 #define IOC_R 0x0001
5101 #define IOC_W 0x0002
5102 #define IOC_RW (IOC_R | IOC_W)
5104 #define MAX_STRUCT_SIZE 4096
5106 #ifdef CONFIG_FIEMAP
5107 /* So fiemap access checks don't overflow on 32 bit systems.
5108 * This is very slightly smaller than the limit imposed by
5109 * the underlying kernel.
5111 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
5112 / sizeof(struct fiemap_extent))
5114 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
5115 int fd, int cmd, abi_long arg)
5117 /* The parameter for this ioctl is a struct fiemap followed
5118 * by an array of struct fiemap_extent whose size is set
5119      * in fiemap->fm_extent_count. The array is filled in by the ioctl. */
5122 int target_size_in, target_size_out;
5124 const argtype *arg_type = ie->arg_type;
5125 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
5128 int i, extent_size = thunk_type_size(extent_arg_type, 0);
5132 assert(arg_type[0] == TYPE_PTR);
5133 assert(ie->access == IOC_RW);
5135 target_size_in = thunk_type_size(arg_type, 0);
5136 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
5138 return -TARGET_EFAULT;
5140 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5141 unlock_user(argptr, arg, 0);
5142 fm = (struct fiemap *)buf_temp;
5143 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
5144 return -TARGET_EINVAL;
5147 outbufsz = sizeof (*fm) +
5148 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
5150 if (outbufsz > MAX_STRUCT_SIZE) {
5151 /* We can't fit all the extents into the fixed size buffer.
5152 * Allocate one that is large enough and use it instead.
5154 fm = g_try_malloc(outbufsz);
5156 return -TARGET_ENOMEM;
5158 memcpy(fm, buf_temp, sizeof(struct fiemap));
5161 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
5162 if (!is_error(ret)) {
5163 target_size_out = target_size_in;
5164 /* An extent_count of 0 means we were only counting the extents
5165 * so there are no structs to copy
5167 if (fm->fm_extent_count != 0) {
5168 target_size_out += fm->fm_mapped_extents * extent_size;
5170 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
5172 ret = -TARGET_EFAULT;
5174 /* Convert the struct fiemap */
5175 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
5176 if (fm->fm_extent_count != 0) {
5177 p = argptr + target_size_in;
5178 /* ...and then all the struct fiemap_extents */
5179 for (i = 0; i < fm->fm_mapped_extents; i++) {
5180 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
5185 unlock_user(argptr, arg, target_size_out);
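/* Illustrative guest-side usage (hedged, not from this file): a FIEMAP
 * caller typically probes for the extent count first, then fetches them:
 *
 *     struct fiemap *fm = calloc(1, sizeof(*fm)
 *                                   + n * sizeof(struct fiemap_extent));
 *     fm->fm_length = ~0ULL;        // map the whole file
 *     fm->fm_extent_count = n;      // 0 means "just count the extents"
 *     ioctl(fd, FS_IOC_FIEMAP, fm);
 *
 * The handler above mirrors both phases: when fm_extent_count is non-zero
 * it converts fm_mapped_extents fiemap_extent structs back to the guest.
 */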
5195 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5196 int fd, int cmd, abi_long arg)
5198 const argtype *arg_type = ie->arg_type;
5202 struct ifconf *host_ifconf;
5204 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5205 int target_ifreq_size;
5210 abi_long target_ifc_buf;
5214 assert(arg_type[0] == TYPE_PTR);
5215 assert(ie->access == IOC_RW);
5218 target_size = thunk_type_size(arg_type, 0);
5220 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5222 return -TARGET_EFAULT;
5223 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5224 unlock_user(argptr, arg, 0);
5226 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5227 target_ifc_len = host_ifconf->ifc_len;
5228 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5230 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5231 nb_ifreq = target_ifc_len / target_ifreq_size;
5232 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5234 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5235 if (outbufsz > MAX_STRUCT_SIZE) {
5236         /* We can't fit all the ifreq entries into the fixed size buffer.
5237 * Allocate one that is large enough and use it instead.
5239 host_ifconf = malloc(outbufsz);
5241 return -TARGET_ENOMEM;
5243 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5246 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5248 host_ifconf->ifc_len = host_ifc_len;
5249 host_ifconf->ifc_buf = host_ifc_buf;
5251 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5252 if (!is_error(ret)) {
5253 /* convert host ifc_len to target ifc_len */
5255 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5256 target_ifc_len = nb_ifreq * target_ifreq_size;
5257 host_ifconf->ifc_len = target_ifc_len;
5259 /* restore target ifc_buf */
5261 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5263 /* copy struct ifconf to target user */
5265 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5267 return -TARGET_EFAULT;
5268 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5269 unlock_user(argptr, arg, target_size);
5271 /* copy ifreq[] to target user */
5273 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5274     for (i = 0; i < nb_ifreq; i++) {
5275 thunk_convert(argptr + i * target_ifreq_size,
5276 host_ifc_buf + i * sizeof(struct ifreq),
5277 ifreq_arg_type, THUNK_TARGET);
5279 unlock_user(argptr, target_ifc_buf, target_ifc_len);
5289 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5290 int cmd, abi_long arg)
5293 struct dm_ioctl *host_dm;
5294 abi_long guest_data;
5295 uint32_t guest_data_size;
5297 const argtype *arg_type = ie->arg_type;
5299 void *big_buf = NULL;
5303 target_size = thunk_type_size(arg_type, 0);
5304 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5306 ret = -TARGET_EFAULT;
5309 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5310 unlock_user(argptr, arg, 0);
5312 /* buf_temp is too small, so fetch things into a bigger buffer */
5313 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5314 memcpy(big_buf, buf_temp, target_size);
5318 guest_data = arg + host_dm->data_start;
5319 if ((guest_data - arg) < 0) {
5320 ret = -TARGET_EINVAL;
5323 guest_data_size = host_dm->data_size - host_dm->data_start;
5324 host_data = (char*)host_dm + host_dm->data_start;
5326 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5328 ret = -TARGET_EFAULT;
5332 switch (ie->host_cmd) {
5334 case DM_LIST_DEVICES:
5337 case DM_DEV_SUSPEND:
5340 case DM_TABLE_STATUS:
5341 case DM_TABLE_CLEAR:
5343 case DM_LIST_VERSIONS:
5347 case DM_DEV_SET_GEOMETRY:
5348 /* data contains only strings */
5349 memcpy(host_data, argptr, guest_data_size);
5352 memcpy(host_data, argptr, guest_data_size);
5353 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5357 void *gspec = argptr;
5358 void *cur_data = host_data;
5359 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5360 int spec_size = thunk_type_size(arg_type, 0);
5363 for (i = 0; i < host_dm->target_count; i++) {
5364 struct dm_target_spec *spec = cur_data;
5368 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5369 slen = strlen((char*)gspec + spec_size) + 1;
5371 spec->next = sizeof(*spec) + slen;
5372 strcpy((char*)&spec[1], gspec + spec_size);
5374 cur_data += spec->next;
5379 ret = -TARGET_EINVAL;
5380 unlock_user(argptr, guest_data, 0);
5383 unlock_user(argptr, guest_data, 0);
5385 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5386 if (!is_error(ret)) {
5387 guest_data = arg + host_dm->data_start;
5388 guest_data_size = host_dm->data_size - host_dm->data_start;
5389 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5390 switch (ie->host_cmd) {
5395 case DM_DEV_SUSPEND:
5398 case DM_TABLE_CLEAR:
5400 case DM_DEV_SET_GEOMETRY:
5401 /* no return data */
5403 case DM_LIST_DEVICES:
5405 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5406 uint32_t remaining_data = guest_data_size;
5407 void *cur_data = argptr;
5408 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5409             int nl_size = 12; /* can't use thunk_type_size() due to alignment */
5412 uint32_t next = nl->next;
5414 nl->next = nl_size + (strlen(nl->name) + 1);
5416 if (remaining_data < nl->next) {
5417 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5420 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5421 strcpy(cur_data + nl_size, nl->name);
5422 cur_data += nl->next;
5423 remaining_data -= nl->next;
5427 nl = (void*)nl + next;
5432 case DM_TABLE_STATUS:
5434 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5435 void *cur_data = argptr;
5436 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5437 int spec_size = thunk_type_size(arg_type, 0);
5440 for (i = 0; i < host_dm->target_count; i++) {
5441 uint32_t next = spec->next;
5442 int slen = strlen((char*)&spec[1]) + 1;
5443 spec->next = (cur_data - argptr) + spec_size + slen;
5444 if (guest_data_size < spec->next) {
5445 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5448 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5449 strcpy(cur_data + spec_size, (char*)&spec[1]);
5450 cur_data = argptr + spec->next;
5451 spec = (void*)host_dm + host_dm->data_start + next;
5457 void *hdata = (void*)host_dm + host_dm->data_start;
5458 int count = *(uint32_t*)hdata;
5459 uint64_t *hdev = hdata + 8;
5460 uint64_t *gdev = argptr + 8;
5463 *(uint32_t*)argptr = tswap32(count);
5464 for (i = 0; i < count; i++) {
5465 *gdev = tswap64(*hdev);
5471 case DM_LIST_VERSIONS:
5473 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5474 uint32_t remaining_data = guest_data_size;
5475 void *cur_data = argptr;
5476 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5477 int vers_size = thunk_type_size(arg_type, 0);
5480 uint32_t next = vers->next;
5482 vers->next = vers_size + (strlen(vers->name) + 1);
5484 if (remaining_data < vers->next) {
5485 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5488 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5489 strcpy(cur_data + vers_size, vers->name);
5490 cur_data += vers->next;
5491 remaining_data -= vers->next;
5495 vers = (void*)vers + next;
5500 unlock_user(argptr, guest_data, 0);
5501 ret = -TARGET_EINVAL;
5504 unlock_user(argptr, guest_data, guest_data_size);
5506 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5508 ret = -TARGET_EFAULT;
5511 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5512 unlock_user(argptr, arg, target_size);
5519 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5520 int cmd, abi_long arg)
5524 const argtype *arg_type = ie->arg_type;
5525 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5528 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5529 struct blkpg_partition host_part;
5531 /* Read and convert blkpg */
5533 target_size = thunk_type_size(arg_type, 0);
5534 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5536 ret = -TARGET_EFAULT;
5539 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5540 unlock_user(argptr, arg, 0);
5542 switch (host_blkpg->op) {
5543 case BLKPG_ADD_PARTITION:
5544 case BLKPG_DEL_PARTITION:
5545 /* payload is struct blkpg_partition */
5548 /* Unknown opcode */
5549 ret = -TARGET_EINVAL;
5553 /* Read and convert blkpg->data */
5554 arg = (abi_long)(uintptr_t)host_blkpg->data;
5555 target_size = thunk_type_size(part_arg_type, 0);
5556 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5558 ret = -TARGET_EFAULT;
5561 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5562 unlock_user(argptr, arg, 0);
5564 /* Swizzle the data pointer to our local copy and call! */
5565 host_blkpg->data = &host_part;
5566 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
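/* Illustrative guest-side usage (hedged sketch): BLKPG takes a fixed-size
 * wrapper whose data member points at an op-specific payload, e.g.:
 *
 *     struct blkpg_partition part = {
 *         .start = 1 * 1024 * 1024,      // byte offset on the disk
 *         .length = 64 * 1024 * 1024,
 *         .pno = 2,                      // partition number
 *     };
 *     struct blkpg_ioctl_arg a = {
 *         .op = BLKPG_ADD_PARTITION,
 *         .datalen = sizeof(part),
 *         .data = &part,
 *     };
 *     ioctl(fd, BLKPG, &a);
 *
 * which matches the two-level conversion performed above: the wrapper
 * first, then the pointed-to blkpg_partition.
 */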
5572 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5573 int fd, int cmd, abi_long arg)
5575 const argtype *arg_type = ie->arg_type;
5576 const StructEntry *se;
5577 const argtype *field_types;
5578 const int *dst_offsets, *src_offsets;
5581 abi_ulong *target_rt_dev_ptr;
5582 unsigned long *host_rt_dev_ptr;
5586 assert(ie->access == IOC_W);
5587 assert(*arg_type == TYPE_PTR);
5589 assert(*arg_type == TYPE_STRUCT);
5590 target_size = thunk_type_size(arg_type, 0);
5591 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5593 return -TARGET_EFAULT;
5596 assert(*arg_type == (int)STRUCT_rtentry);
5597 se = struct_entries + *arg_type++;
5598 assert(se->convert[0] == NULL);
5599     /* Convert the struct here so that we can catch the rt_dev string. */
5600 field_types = se->field_types;
5601 dst_offsets = se->field_offsets[THUNK_HOST];
5602 src_offsets = se->field_offsets[THUNK_TARGET];
5603 for (i = 0; i < se->nb_fields; i++) {
5604 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5605 assert(*field_types == TYPE_PTRVOID);
5606 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5607 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5608 if (*target_rt_dev_ptr != 0) {
5609 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5610 tswapal(*target_rt_dev_ptr));
5611 if (!*host_rt_dev_ptr) {
5612 unlock_user(argptr, arg, 0);
5613 return -TARGET_EFAULT;
5616 *host_rt_dev_ptr = 0;
5621 field_types = thunk_convert(buf_temp + dst_offsets[i],
5622 argptr + src_offsets[i],
5623 field_types, THUNK_HOST);
5625 unlock_user(argptr, arg, 0);
5627 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5628 if (*host_rt_dev_ptr != 0) {
5629 unlock_user((void *)*host_rt_dev_ptr,
5630 *target_rt_dev_ptr, 0);
5635 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5636 int fd, int cmd, abi_long arg)
5638 int sig = target_to_host_signal(arg);
5639 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5643 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5644 int fd, int cmd, abi_long arg)
5646 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5647 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5651 static IOCTLEntry ioctl_entries[] = {
5652 #define IOCTL(cmd, access, ...) \
5653 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5654 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5655 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5656 #define IOCTL_IGNORE(cmd) \
5657 { TARGET_ ## cmd, 0, #cmd },
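/* Hedged illustration: the entries generated by these macros (the real
 * table is pulled in from ioctls.h) take forms such as
 *
 *     IOCTL(BLKROSET, IOC_W, MK_PTR(TYPE_INT))
 *     IOCTL_SPECIAL(FS_IOC_FIEMAP, IOC_RW, do_ioctl_fs_ioc_fiemap,
 *                   MK_PTR(MK_STRUCT(STRUCT_fiemap)))
 *     IOCTL_IGNORE(TIOCSTART)
 *
 * A plain entry describes its argument via thunk types, a _SPECIAL entry
 * routes to a custom do_ioctl_fn, and an _IGNORE entry is recognized but
 * reported as unsupported rather than passed through to the host.
 */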
5662 /* ??? Implement proper locking for ioctls. */
5663 /* do_ioctl() must return target values and target errnos. */
5664 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5666 const IOCTLEntry *ie;
5667 const argtype *arg_type;
5669 uint8_t buf_temp[MAX_STRUCT_SIZE];
5675 if (ie->target_cmd == 0) {
5676 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5677 return -TARGET_ENOSYS;
5679 if (ie->target_cmd == cmd)
5683 arg_type = ie->arg_type;
5685 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
5688 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5689 } else if (!ie->host_cmd) {
5690 /* Some architectures define BSD ioctls in their headers
5691 that are not implemented in Linux. */
5692 return -TARGET_ENOSYS;
5695     switch (arg_type[0]) {
5698 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5702 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5706 target_size = thunk_type_size(arg_type, 0);
5707         switch (ie->access) {
5709 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5710 if (!is_error(ret)) {
5711 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5713 return -TARGET_EFAULT;
5714 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5715 unlock_user(argptr, arg, target_size);
5719 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5721 return -TARGET_EFAULT;
5722 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5723 unlock_user(argptr, arg, 0);
5724 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5728 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5730 return -TARGET_EFAULT;
5731 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5732 unlock_user(argptr, arg, 0);
5733 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5734 if (!is_error(ret)) {
5735 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5737 return -TARGET_EFAULT;
5738 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5739 unlock_user(argptr, arg, target_size);
5745 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5746 (long)cmd, arg_type[0]);
5747 ret = -TARGET_ENOSYS;
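/* Worked path (illustrative): a guest TCGETS is declared in the table as
 * IOCTL(TCGETS, IOC_R, MK_PTR(MK_STRUCT(STRUCT_termios))), so it takes the
 * IOC_R branch above: the host ioctl fills buf_temp with a host termios,
 * and thunk_convert(..., THUNK_TARGET) rewrites it into the guest's
 * target_termios layout using the struct_termios_def converters defined
 * further down.
 */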
5753 static const bitmask_transtbl iflag_tbl[] = {
5754 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5755 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5756 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5757 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5758 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5759 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5760 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5761 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5762 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5763 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5764 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5765 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5766 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5767 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5771 static const bitmask_transtbl oflag_tbl[] = {
5772 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5773 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5774 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5775 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5776 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5777 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5778 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5779 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5780 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5781 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5782 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5783 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5784 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5785 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5786 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5787 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5788 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5789 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5790 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5791 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5792 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5793 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5794 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5795 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5799 static const bitmask_transtbl cflag_tbl[] = {
5800 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5801 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5802 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5803 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5804 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5805 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5806 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5807 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5808 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5809 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5810 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5811 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5812 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5813 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5814 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5815 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5816 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5817 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5818 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5819 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5820 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5821 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5822 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5823 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5824 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5825 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5826 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5827 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5828 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5829 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5830 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5834 static const bitmask_transtbl lflag_tbl[] = {
5835 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5836 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5837 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5838 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5839 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5840 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5841 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5842 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5843 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5844 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5845 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5846 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5847 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5848 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5849 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5853 static void target_to_host_termios (void *dst, const void *src)
5855 struct host_termios *host = dst;
5856 const struct target_termios *target = src;
5859 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5861 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5863 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5865 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5866 host->c_line = target->c_line;
5868 memset(host->c_cc, 0, sizeof(host->c_cc));
5869 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5870 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5871 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5872 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5873 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5874 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5875 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5876 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5877 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5878 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5879 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5880 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5881 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5882 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5883 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5884 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5885 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5888 static void host_to_target_termios (void *dst, const void *src)
5890 struct target_termios *target = dst;
5891 const struct host_termios *host = src;
5894 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5896 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5898 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5900 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5901 target->c_line = host->c_line;
5903 memset(target->c_cc, 0, sizeof(target->c_cc));
5904 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5905 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5906 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5907 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5908 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5909 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5910 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5911 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5912 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5913 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5914 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5915 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5916 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5917 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5918 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5919 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5920 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5923 static const StructEntry struct_termios_def = {
5924 .convert = { host_to_target_termios, target_to_host_termios },
5925 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5926 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5929 static bitmask_transtbl mmap_flags_tbl[] = {
5930 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5931 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5932 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5933 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5934 MAP_ANONYMOUS, MAP_ANONYMOUS },
5935 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5936 MAP_GROWSDOWN, MAP_GROWSDOWN },
5937 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5938 MAP_DENYWRITE, MAP_DENYWRITE },
5939 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5940 MAP_EXECUTABLE, MAP_EXECUTABLE },
5941 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5942 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5943 MAP_NORESERVE, MAP_NORESERVE },
5944 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5945     /* MAP_STACK has been ignored by the kernel for quite some time.
5946 Recognize it for the target insofar as we do not want to pass
5947 it through to the host. */
5948 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5952 #if defined(TARGET_I386)
5954 /* NOTE: there is really only one LDT, shared by all threads. */
5955 static uint8_t *ldt_table;
5957 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5964 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5965 if (size > bytecount)
5967 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5969 return -TARGET_EFAULT;
5970     /* ??? Should this be byteswapped? */
5971 memcpy(p, ldt_table, size);
5972 unlock_user(p, ptr, size);
5976 /* XXX: add locking support */
5977 static abi_long write_ldt(CPUX86State *env,
5978 abi_ulong ptr, unsigned long bytecount, int oldmode)
5980 struct target_modify_ldt_ldt_s ldt_info;
5981 struct target_modify_ldt_ldt_s *target_ldt_info;
5982 int seg_32bit, contents, read_exec_only, limit_in_pages;
5983 int seg_not_present, useable, lm;
5984 uint32_t *lp, entry_1, entry_2;
5986 if (bytecount != sizeof(ldt_info))
5987 return -TARGET_EINVAL;
5988 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5989 return -TARGET_EFAULT;
5990 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5991 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5992 ldt_info.limit = tswap32(target_ldt_info->limit);
5993 ldt_info.flags = tswap32(target_ldt_info->flags);
5994 unlock_user_struct(target_ldt_info, ptr, 0);
5996 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5997 return -TARGET_EINVAL;
5998 seg_32bit = ldt_info.flags & 1;
5999 contents = (ldt_info.flags >> 1) & 3;
6000 read_exec_only = (ldt_info.flags >> 3) & 1;
6001 limit_in_pages = (ldt_info.flags >> 4) & 1;
6002 seg_not_present = (ldt_info.flags >> 5) & 1;
6003 useable = (ldt_info.flags >> 6) & 1;
6007 lm = (ldt_info.flags >> 7) & 1;
6009 if (contents == 3) {
6011 return -TARGET_EINVAL;
6012 if (seg_not_present == 0)
6013 return -TARGET_EINVAL;
6015 /* allocate the LDT */
6017 env->ldt.base = target_mmap(0,
6018 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6019 PROT_READ|PROT_WRITE,
6020 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6021 if (env->ldt.base == -1)
6022 return -TARGET_ENOMEM;
6023 memset(g2h(env->ldt.base), 0,
6024 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6025 env->ldt.limit = 0xffff;
6026 ldt_table = g2h(env->ldt.base);
6029 /* NOTE: same code as Linux kernel */
6030 /* Allow LDTs to be cleared by the user. */
6031 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6034 read_exec_only == 1 &&
6036 limit_in_pages == 0 &&
6037 seg_not_present == 1 &&
6045 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6046 (ldt_info.limit & 0x0ffff);
6047 entry_2 = (ldt_info.base_addr & 0xff000000) |
6048 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6049 (ldt_info.limit & 0xf0000) |
6050 ((read_exec_only ^ 1) << 9) |
6052 ((seg_not_present ^ 1) << 15) |
6054 (limit_in_pages << 23) |
6058 entry_2 |= (useable << 20);
6060 /* Install the new entry ... */
6062 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6063 lp[0] = tswap32(entry_1);
6064 lp[1] = tswap32(entry_2);
6068 /* specific and weird i386 syscalls */
6069 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6070 unsigned long bytecount)
6076 ret = read_ldt(ptr, bytecount);
6079 ret = write_ldt(env, ptr, bytecount, 1);
6082 ret = write_ldt(env, ptr, bytecount, 0);
6085 ret = -TARGET_ENOSYS;
6091 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6092 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6094 uint64_t *gdt_table = g2h(env->gdt.base);
6095 struct target_modify_ldt_ldt_s ldt_info;
6096 struct target_modify_ldt_ldt_s *target_ldt_info;
6097 int seg_32bit, contents, read_exec_only, limit_in_pages;
6098 int seg_not_present, useable, lm;
6099 uint32_t *lp, entry_1, entry_2;
6102 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6103 if (!target_ldt_info)
6104 return -TARGET_EFAULT;
6105 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6106 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6107 ldt_info.limit = tswap32(target_ldt_info->limit);
6108 ldt_info.flags = tswap32(target_ldt_info->flags);
6109 if (ldt_info.entry_number == -1) {
6110         for (i = TARGET_GDT_ENTRY_TLS_MIN; i <= TARGET_GDT_ENTRY_TLS_MAX; i++) {
6111 if (gdt_table[i] == 0) {
6112 ldt_info.entry_number = i;
6113 target_ldt_info->entry_number = tswap32(i);
6118 unlock_user_struct(target_ldt_info, ptr, 1);
6120 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6121 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6122 return -TARGET_EINVAL;
6123 seg_32bit = ldt_info.flags & 1;
6124 contents = (ldt_info.flags >> 1) & 3;
6125 read_exec_only = (ldt_info.flags >> 3) & 1;
6126 limit_in_pages = (ldt_info.flags >> 4) & 1;
6127 seg_not_present = (ldt_info.flags >> 5) & 1;
6128 useable = (ldt_info.flags >> 6) & 1;
6132 lm = (ldt_info.flags >> 7) & 1;
6135 if (contents == 3) {
6136 if (seg_not_present == 0)
6137 return -TARGET_EINVAL;
6140 /* NOTE: same code as Linux kernel */
6141 /* Allow LDTs to be cleared by the user. */
6142 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6143 if ((contents == 0 &&
6144 read_exec_only == 1 &&
6146 limit_in_pages == 0 &&
6147 seg_not_present == 1 &&
6155 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6156 (ldt_info.limit & 0x0ffff);
6157 entry_2 = (ldt_info.base_addr & 0xff000000) |
6158 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6159 (ldt_info.limit & 0xf0000) |
6160 ((read_exec_only ^ 1) << 9) |
6162 ((seg_not_present ^ 1) << 15) |
6164 (limit_in_pages << 23) |
6169 /* Install the new entry ... */
6171 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6172 lp[0] = tswap32(entry_1);
6173 lp[1] = tswap32(entry_2);
6177 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6179 struct target_modify_ldt_ldt_s *target_ldt_info;
6180 uint64_t *gdt_table = g2h(env->gdt.base);
6181 uint32_t base_addr, limit, flags;
6182 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6183 int seg_not_present, useable, lm;
6184 uint32_t *lp, entry_1, entry_2;
6186 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6187 if (!target_ldt_info)
6188 return -TARGET_EFAULT;
6189 idx = tswap32(target_ldt_info->entry_number);
6190 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6191 idx > TARGET_GDT_ENTRY_TLS_MAX) {
6192 unlock_user_struct(target_ldt_info, ptr, 1);
6193 return -TARGET_EINVAL;
6195 lp = (uint32_t *)(gdt_table + idx);
6196 entry_1 = tswap32(lp[0]);
6197 entry_2 = tswap32(lp[1]);
6199 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6200 contents = (entry_2 >> 10) & 3;
6201 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6202 seg_32bit = (entry_2 >> 22) & 1;
6203 limit_in_pages = (entry_2 >> 23) & 1;
6204 useable = (entry_2 >> 20) & 1;
6208 lm = (entry_2 >> 21) & 1;
6210 flags = (seg_32bit << 0) | (contents << 1) |
6211 (read_exec_only << 3) | (limit_in_pages << 4) |
6212 (seg_not_present << 5) | (useable << 6) | (lm << 7);
6213 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6214 base_addr = (entry_1 >> 16) |
6215 (entry_2 & 0xff000000) |
6216 ((entry_2 & 0xff) << 16);
6217 target_ldt_info->base_addr = tswapal(base_addr);
6218 target_ldt_info->limit = tswap32(limit);
6219 target_ldt_info->flags = tswap32(flags);
6220 unlock_user_struct(target_ldt_info, ptr, 1);
6223 #endif /* TARGET_I386 && TARGET_ABI32 */
6225 #ifndef TARGET_ABI32
6226 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6233 case TARGET_ARCH_SET_GS:
6234 case TARGET_ARCH_SET_FS:
6235 if (code == TARGET_ARCH_SET_GS)
6239 cpu_x86_load_seg(env, idx, 0);
6240 env->segs[idx].base = addr;
6242 case TARGET_ARCH_GET_GS:
6243 case TARGET_ARCH_GET_FS:
6244 if (code == TARGET_ARCH_GET_GS)
6248 val = env->segs[idx].base;
6249 if (put_user(val, addr, abi_ulong))
6250 ret = -TARGET_EFAULT;
6253 ret = -TARGET_EINVAL;
6260 #endif /* defined(TARGET_I386) */
6262 #define NEW_STACK_SIZE 0x40000
6265 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6268 pthread_mutex_t mutex;
6269 pthread_cond_t cond;
6272 abi_ulong child_tidptr;
6273 abi_ulong parent_tidptr;
6277 static void *clone_func(void *arg)
6279 new_thread_info *info = arg;
6284 rcu_register_thread();
6285 tcg_register_thread();
6287 cpu = ENV_GET_CPU(env);
6289 ts = (TaskState *)cpu->opaque;
6290 info->tid = gettid();
6292 if (info->child_tidptr)
6293 put_user_u32(info->tid, info->child_tidptr);
6294 if (info->parent_tidptr)
6295 put_user_u32(info->tid, info->parent_tidptr);
6296 /* Enable signals. */
6297 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6298 /* Signal to the parent that we're ready. */
6299 pthread_mutex_lock(&info->mutex);
6300 pthread_cond_broadcast(&info->cond);
6301 pthread_mutex_unlock(&info->mutex);
6302 /* Wait until the parent has finished initializing the tls state. */
6303 pthread_mutex_lock(&clone_lock);
6304 pthread_mutex_unlock(&clone_lock);
6310 /* do_fork() must return host values and target errnos (unlike most
6311 do_*() functions). */
6312 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6313 abi_ulong parent_tidptr, target_ulong newtls,
6314 abi_ulong child_tidptr)
6316 CPUState *cpu = ENV_GET_CPU(env);
6320 CPUArchState *new_env;
6323 flags &= ~CLONE_IGNORED_FLAGS;
6325 /* Emulate vfork() with fork() */
6326 if (flags & CLONE_VFORK)
6327 flags &= ~(CLONE_VFORK | CLONE_VM);
6329 if (flags & CLONE_VM) {
6330 TaskState *parent_ts = (TaskState *)cpu->opaque;
6331 new_thread_info info;
6332 pthread_attr_t attr;
6334 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6335 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6336 return -TARGET_EINVAL;
6339 ts = g_new0(TaskState, 1);
6340 init_task_state(ts);
6341         /* We create a new CPU instance. */
6342 new_env = cpu_copy(env);
6343 /* Init regs that differ from the parent. */
6344 cpu_clone_regs(new_env, newsp);
6345 new_cpu = ENV_GET_CPU(new_env);
6346 new_cpu->opaque = ts;
6347 ts->bprm = parent_ts->bprm;
6348 ts->info = parent_ts->info;
6349 ts->signal_mask = parent_ts->signal_mask;
6351 if (flags & CLONE_CHILD_CLEARTID) {
6352 ts->child_tidptr = child_tidptr;
6355 if (flags & CLONE_SETTLS) {
6356 cpu_set_tls (new_env, newtls);
6359 /* Grab a mutex so that thread setup appears atomic. */
6360 pthread_mutex_lock(&clone_lock);
6362 memset(&info, 0, sizeof(info));
6363 pthread_mutex_init(&info.mutex, NULL);
6364 pthread_mutex_lock(&info.mutex);
6365 pthread_cond_init(&info.cond, NULL);
6367 if (flags & CLONE_CHILD_SETTID) {
6368 info.child_tidptr = child_tidptr;
6370 if (flags & CLONE_PARENT_SETTID) {
6371 info.parent_tidptr = parent_tidptr;
6374 ret = pthread_attr_init(&attr);
6375 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6376 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6377 /* It is not safe to deliver signals until the child has finished
6378 initializing, so temporarily block all signals. */
6379 sigfillset(&sigmask);
6380 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6382 /* If this is our first additional thread, we need to ensure we
6383 * generate code for parallel execution and flush old translations.
6385 if (!parallel_cpus) {
6386 parallel_cpus = true;
6390 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6391 /* TODO: Free new CPU state if thread creation failed. */
6393 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6394 pthread_attr_destroy(&attr);
6396 /* Wait for the child to initialize. */
6397 pthread_cond_wait(&info.cond, &info.mutex);
6402 pthread_mutex_unlock(&info.mutex);
6403 pthread_cond_destroy(&info.cond);
6404 pthread_mutex_destroy(&info.mutex);
6405 pthread_mutex_unlock(&clone_lock);
6407         /* Without CLONE_VM, we consider the call to be a fork. */
6408 if (flags & CLONE_INVALID_FORK_FLAGS) {
6409 return -TARGET_EINVAL;
6412 /* We can't support custom termination signals */
6413 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6414 return -TARGET_EINVAL;
6417 if (block_signals()) {
6418 return -TARGET_ERESTARTSYS;
6424 /* Child Process. */
6425 cpu_clone_regs(env, newsp);
6427 /* There is a race condition here. The parent process could
6428 theoretically read the TID in the child process before the child
6429 tid is set. This would require using either ptrace
6430            (not implemented) or having *_tidptr point at a shared memory
6431 mapping. We can't repeat the spinlock hack used above because
6432 the child process gets its own copy of the lock. */
6433 if (flags & CLONE_CHILD_SETTID)
6434 put_user_u32(gettid(), child_tidptr);
6435 if (flags & CLONE_PARENT_SETTID)
6436 put_user_u32(gettid(), parent_tidptr);
6437 ts = (TaskState *)cpu->opaque;
6438 if (flags & CLONE_SETTLS)
6439 cpu_set_tls (env, newtls);
6440 if (flags & CLONE_CHILD_CLEARTID)
6441 ts->child_tidptr = child_tidptr;
6449 /* Warning: does not handle Linux-specific flags... */
6450 static int target_to_host_fcntl_cmd(int cmd)
6453 case TARGET_F_DUPFD:
6454 case TARGET_F_GETFD:
6455 case TARGET_F_SETFD:
6456 case TARGET_F_GETFL:
6457 case TARGET_F_SETFL:
6459 case TARGET_F_GETLK:
6461 case TARGET_F_SETLK:
6463 case TARGET_F_SETLKW:
6465 case TARGET_F_GETOWN:
6467 case TARGET_F_SETOWN:
6469 case TARGET_F_GETSIG:
6471 case TARGET_F_SETSIG:
6473 #if TARGET_ABI_BITS == 32
6474 case TARGET_F_GETLK64:
6476 case TARGET_F_SETLK64:
6478 case TARGET_F_SETLKW64:
6481 case TARGET_F_SETLEASE:
6483 case TARGET_F_GETLEASE:
6485 #ifdef F_DUPFD_CLOEXEC
6486 case TARGET_F_DUPFD_CLOEXEC:
6487 return F_DUPFD_CLOEXEC;
6489 case TARGET_F_NOTIFY:
6492 case TARGET_F_GETOWN_EX:
6496 case TARGET_F_SETOWN_EX:
6500 case TARGET_F_SETPIPE_SZ:
6501 return F_SETPIPE_SZ;
6502 case TARGET_F_GETPIPE_SZ:
6503 return F_GETPIPE_SZ;
6506 return -TARGET_EINVAL;
6508 return -TARGET_EINVAL;
6511 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6512 static const bitmask_transtbl flock_tbl[] = {
6513 TRANSTBL_CONVERT(F_RDLCK),
6514 TRANSTBL_CONVERT(F_WRLCK),
6515 TRANSTBL_CONVERT(F_UNLCK),
6516 TRANSTBL_CONVERT(F_EXLCK),
6517 TRANSTBL_CONVERT(F_SHLCK),
6521 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6522 abi_ulong target_flock_addr)
6524 struct target_flock *target_fl;
6527 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6528 return -TARGET_EFAULT;
6531 __get_user(l_type, &target_fl->l_type);
6532 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6533 __get_user(fl->l_whence, &target_fl->l_whence);
6534 __get_user(fl->l_start, &target_fl->l_start);
6535 __get_user(fl->l_len, &target_fl->l_len);
6536 __get_user(fl->l_pid, &target_fl->l_pid);
6537 unlock_user_struct(target_fl, target_flock_addr, 0);
6541 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6542 const struct flock64 *fl)
6544 struct target_flock *target_fl;
6547 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6548 return -TARGET_EFAULT;
6551 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6552 __put_user(l_type, &target_fl->l_type);
6553 __put_user(fl->l_whence, &target_fl->l_whence);
6554 __put_user(fl->l_start, &target_fl->l_start);
6555 __put_user(fl->l_len, &target_fl->l_len);
6556 __put_user(fl->l_pid, &target_fl->l_pid);
6557 unlock_user_struct(target_fl, target_flock_addr, 1);
6561 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6562 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6564 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6565 static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
6566 abi_ulong target_flock_addr)
6568 struct target_eabi_flock64 *target_fl;
6571 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6572 return -TARGET_EFAULT;
6575 __get_user(l_type, &target_fl->l_type);
6576 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6577 __get_user(fl->l_whence, &target_fl->l_whence);
6578 __get_user(fl->l_start, &target_fl->l_start);
6579 __get_user(fl->l_len, &target_fl->l_len);
6580 __get_user(fl->l_pid, &target_fl->l_pid);
6581 unlock_user_struct(target_fl, target_flock_addr, 0);
6585 static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
6586 const struct flock64 *fl)
6588 struct target_eabi_flock64 *target_fl;
6591 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6592 return -TARGET_EFAULT;
6595 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6596 __put_user(l_type, &target_fl->l_type);
6597 __put_user(fl->l_whence, &target_fl->l_whence);
6598 __put_user(fl->l_start, &target_fl->l_start);
6599 __put_user(fl->l_len, &target_fl->l_len);
6600 __put_user(fl->l_pid, &target_fl->l_pid);
6601 unlock_user_struct(target_fl, target_flock_addr, 1);
6606 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6607 abi_ulong target_flock_addr)
6609 struct target_flock64 *target_fl;
6612 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6613 return -TARGET_EFAULT;
6616 __get_user(l_type, &target_fl->l_type);
6617 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6618 __get_user(fl->l_whence, &target_fl->l_whence);
6619 __get_user(fl->l_start, &target_fl->l_start);
6620 __get_user(fl->l_len, &target_fl->l_len);
6621 __get_user(fl->l_pid, &target_fl->l_pid);
6622 unlock_user_struct(target_fl, target_flock_addr, 0);
6626 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6627 const struct flock64 *fl)
6629 struct target_flock64 *target_fl;
6632 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6633 return -TARGET_EFAULT;
6636 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6637 __put_user(l_type, &target_fl->l_type);
6638 __put_user(fl->l_whence, &target_fl->l_whence);
6639 __put_user(fl->l_start, &target_fl->l_start);
6640 __put_user(fl->l_len, &target_fl->l_len);
6641 __put_user(fl->l_pid, &target_fl->l_pid);
6642 unlock_user_struct(target_fl, target_flock_addr, 1);
6646 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6648 struct flock64 fl64;
6650 struct f_owner_ex fox;
6651 struct target_f_owner_ex *target_fox;
6654 int host_cmd = target_to_host_fcntl_cmd(cmd);
6656 if (host_cmd == -TARGET_EINVAL)
6660 case TARGET_F_GETLK:
6661 ret = copy_from_user_flock(&fl64, arg);
6665 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6667 ret = copy_to_user_flock(arg, &fl64);
6671 case TARGET_F_SETLK:
6672 case TARGET_F_SETLKW:
6673 ret = copy_from_user_flock(&fl64, arg);
6677 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6680 case TARGET_F_GETLK64:
6681 ret = copy_from_user_flock64(&fl64, arg);
6685 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6687 ret = copy_to_user_flock64(arg, &fl64);
6690 case TARGET_F_SETLK64:
6691 case TARGET_F_SETLKW64:
6692 ret = copy_from_user_flock64(&fl64, arg);
6696 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6699 case TARGET_F_GETFL:
6700 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6702 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6706 case TARGET_F_SETFL:
6707 ret = get_errno(safe_fcntl(fd, host_cmd,
6708 target_to_host_bitmask(arg,
6713 case TARGET_F_GETOWN_EX:
6714 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6716 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6717 return -TARGET_EFAULT;
6718 target_fox->type = tswap32(fox.type);
6719 target_fox->pid = tswap32(fox.pid);
6720 unlock_user_struct(target_fox, arg, 1);
6726 case TARGET_F_SETOWN_EX:
6727 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6728 return -TARGET_EFAULT;
6729 fox.type = tswap32(target_fox->type);
6730 fox.pid = tswap32(target_fox->pid);
6731 unlock_user_struct(target_fox, arg, 0);
6732 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6736 case TARGET_F_SETOWN:
6737 case TARGET_F_GETOWN:
6738 case TARGET_F_SETSIG:
6739 case TARGET_F_GETSIG:
6740 case TARGET_F_SETLEASE:
6741 case TARGET_F_GETLEASE:
6742 case TARGET_F_SETPIPE_SZ:
6743 case TARGET_F_GETPIPE_SZ:
6744 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6748 ret = get_errno(safe_fcntl(fd, cmd, arg));
6756 static inline int high2lowuid(int uid)
6764 static inline int high2lowgid(int gid)
6772 static inline int low2highuid(int uid)
6774 if ((int16_t)uid == -1)
6780 static inline int low2highgid(int gid)
6782 if ((int16_t)gid == -1)
6787 static inline int tswapid(int id)
6792 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6794 #else /* !USE_UID16 */
6795 static inline int high2lowuid(int uid)
6799 static inline int high2lowgid(int gid)
6803 static inline int low2highuid(int uid)
6807 static inline int low2highgid(int gid)
6811 static inline int tswapid(int id)
6816 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6818 #endif /* USE_UID16 */
6820 /* We must do direct syscalls for setting UID/GID, because we want to
6821 * implement the Linux system call semantics of "change only for this thread",
6822 * not the libc/POSIX semantics of "change for all threads in process".
6823 * (See http://ewontfix.com/17/ for more details.)
6824 * We use the 32-bit version of the syscalls if present; if it is not
6825 * then either the host architecture supports 32-bit UIDs natively with
6826 * the standard syscall, or the 16-bit UID is the best we can do.
6828 #ifdef __NR_setuid32
6829 #define __NR_sys_setuid __NR_setuid32
6831 #define __NR_sys_setuid __NR_setuid
6833 #ifdef __NR_setgid32
6834 #define __NR_sys_setgid __NR_setgid32
6836 #define __NR_sys_setgid __NR_setgid
6838 #ifdef __NR_setresuid32
6839 #define __NR_sys_setresuid __NR_setresuid32
6841 #define __NR_sys_setresuid __NR_setresuid
6843 #ifdef __NR_setresgid32
6844 #define __NR_sys_setresgid __NR_setresgid32
6846 #define __NR_sys_setresgid __NR_setresgid
6849 _syscall1(int, sys_setuid, uid_t, uid)
6850 _syscall1(int, sys_setgid, gid_t, gid)
6851 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6852 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
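/* Illustrative sketch (an assumption for illustration, not part of the
 * original file): how a guest setuid request might be routed through the
 * raw syscall wrappers above so that only the calling thread is affected.
 */
static inline abi_long sketch_do_setuid(abi_ulong target_uid)
{
    /* Hypothetical helper: widen a possibly 16-bit target uid, then
     * invoke the raw host syscall directly; calling libc's setuid()
     * here would broadcast the change to every thread in the process.
     */
    return get_errno(sys_setuid(low2highuid(target_uid)));
}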
6854 void syscall_init(void)
6857 const argtype *arg_type;
6861 thunk_init(STRUCT_MAX);
6863 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6864 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6865 #include "syscall_types.h"
6867 #undef STRUCT_SPECIAL
6869 /* Build the target_to_host_errno_table[] from
6870  * host_to_target_errno_table[]. */
6871 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6872 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6875 /* We patch the ioctl size if necessary. We rely on the fact that
6876    no ioctl has all the bits at '1' in the size field. */
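/* Illustrative encoding note (an assumption, mirroring the asm-generic
 * ioctl scheme): a command number packs dir/size/type/nr bitfields,
 *   cmd = (dir << TARGET_IOC_DIRSHIFT) | (size << TARGET_IOC_SIZESHIFT) |
 *         (type << TARGET_IOC_TYPESHIFT) | (nr << TARGET_IOC_NRSHIFT);
 * so a size field of all ones can safely act as a "patch me" marker.
 */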
6878 while (ie->target_cmd != 0) {
6879 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6880 TARGET_IOC_SIZEMASK) {
6881 arg_type = ie->arg_type;
6882 if (arg_type[0] != TYPE_PTR) {
6883 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6888 size = thunk_type_size(arg_type, 0);
6889 ie->target_cmd = (ie->target_cmd &
6890 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6891 (size << TARGET_IOC_SIZESHIFT);
6894 /* automatic consistency check if same arch */
6895 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6896 (defined(__x86_64__) && defined(TARGET_X86_64))
6897 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6898 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6899 ie->name, ie->target_cmd, ie->host_cmd);
6906 #if TARGET_ABI_BITS == 32
6907 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6909 #ifdef TARGET_WORDS_BIGENDIAN
6910 return ((uint64_t)word0 << 32) | word1;
6912 return ((uint64_t)word1 << 32) | word0;
6915 #else /* TARGET_ABI_BITS == 32 */
6916 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6920 #endif /* TARGET_ABI_BITS != 32 */
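/* Example: a 32-bit guest passes a 64-bit file offset as two register
 * words, so target_offset64(0x00000001, 0x23456789) reassembles to
 * 0x0000000123456789 on a big-endian target, while a little-endian
 * target takes the high half from the second word instead.
 */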
6922 #ifdef TARGET_NR_truncate64
6923 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6928 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6932 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6936 #ifdef TARGET_NR_ftruncate64
6937 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6942 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6946 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6950 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6951 abi_ulong target_addr)
6953 struct target_timespec *target_ts;
6955 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6956 return -TARGET_EFAULT;
6957 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6958 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6959 unlock_user_struct(target_ts, target_addr, 0);
6963 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6964 struct timespec *host_ts)
6966 struct target_timespec *target_ts;
6968 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6969 return -TARGET_EFAULT;
6970 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6971 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6972 unlock_user_struct(target_ts, target_addr, 1);
6976 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6977 abi_ulong target_addr)
6979 struct target_itimerspec *target_itspec;
6981 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6982 return -TARGET_EFAULT;
6985 host_itspec->it_interval.tv_sec =
6986 tswapal(target_itspec->it_interval.tv_sec);
6987 host_itspec->it_interval.tv_nsec =
6988 tswapal(target_itspec->it_interval.tv_nsec);
6989 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6990 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6992 unlock_user_struct(target_itspec, target_addr, 1);
6996 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6997 struct itimerspec *host_its)
6999 struct target_itimerspec *target_itspec;
7001 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
7002 return -TARGET_EFAULT;
7005 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
7006 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
7008 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
7009 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
7011 unlock_user_struct(target_itspec, target_addr, 0);
7015 static inline abi_long target_to_host_timex(struct timex *host_tx,
7016 abi_long target_addr)
7018 struct target_timex *target_tx;
7020 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7021 return -TARGET_EFAULT;
7024 __get_user(host_tx->modes, &target_tx->modes);
7025 __get_user(host_tx->offset, &target_tx->offset);
7026 __get_user(host_tx->freq, &target_tx->freq);
7027 __get_user(host_tx->maxerror, &target_tx->maxerror);
7028 __get_user(host_tx->esterror, &target_tx->esterror);
7029 __get_user(host_tx->status, &target_tx->status);
7030 __get_user(host_tx->constant, &target_tx->constant);
7031 __get_user(host_tx->precision, &target_tx->precision);
7032 __get_user(host_tx->tolerance, &target_tx->tolerance);
7033 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7034 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7035 __get_user(host_tx->tick, &target_tx->tick);
7036 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7037 __get_user(host_tx->jitter, &target_tx->jitter);
7038 __get_user(host_tx->shift, &target_tx->shift);
7039 __get_user(host_tx->stabil, &target_tx->stabil);
7040 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7041 __get_user(host_tx->calcnt, &target_tx->calcnt);
7042 __get_user(host_tx->errcnt, &target_tx->errcnt);
7043 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7044 __get_user(host_tx->tai, &target_tx->tai);
7046 unlock_user_struct(target_tx, target_addr, 0);
7050 static inline abi_long host_to_target_timex(abi_long target_addr,
7051 struct timex *host_tx)
7053 struct target_timex *target_tx;
7055 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7056 return -TARGET_EFAULT;
7059 __put_user(host_tx->modes, &target_tx->modes);
7060 __put_user(host_tx->offset, &target_tx->offset);
7061 __put_user(host_tx->freq, &target_tx->freq);
7062 __put_user(host_tx->maxerror, &target_tx->maxerror);
7063 __put_user(host_tx->esterror, &target_tx->esterror);
7064 __put_user(host_tx->status, &target_tx->status);
7065 __put_user(host_tx->constant, &target_tx->constant);
7066 __put_user(host_tx->precision, &target_tx->precision);
7067 __put_user(host_tx->tolerance, &target_tx->tolerance);
7068 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7069 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7070 __put_user(host_tx->tick, &target_tx->tick);
7071 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7072 __put_user(host_tx->jitter, &target_tx->jitter);
7073 __put_user(host_tx->shift, &target_tx->shift);
7074 __put_user(host_tx->stabil, &target_tx->stabil);
7075 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7076 __put_user(host_tx->calcnt, &target_tx->calcnt);
7077 __put_user(host_tx->errcnt, &target_tx->errcnt);
7078 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7079 __put_user(host_tx->tai, &target_tx->tai);
7081 unlock_user_struct(target_tx, target_addr, 1);
7086 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7087 abi_ulong target_addr)
7089 struct target_sigevent *target_sevp;
7091 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7092 return -TARGET_EFAULT;
7095 /* This union is awkward on 64 bit systems because it has a 32 bit
7096 * integer and a pointer in it; we follow the conversion approach
7097 * used for handling sigval types in signal.c so the guest should get
7098 * the correct value back even if we did a 64 bit byteswap and it's
7099 * using the 32 bit integer.
7101 host_sevp->sigev_value.sival_ptr =
7102 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7103 host_sevp->sigev_signo =
7104 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7105 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7106 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7108 unlock_user_struct(target_sevp, target_addr, 1);
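/* Illustrative note: for SIGEV_THREAD_ID notifications the kernel reads
 * the _tid member of the union, which is why it is byteswapped here as
 * a plain 32-bit value alongside the sigval conversion described above.
 */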
7112 #if defined(TARGET_NR_mlockall)
7113 static inline int target_to_host_mlockall_arg(int arg)
7117 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
7118 result |= MCL_CURRENT;
7120 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
7121 result |= MCL_FUTURE;
7127 static inline abi_long host_to_target_stat64(void *cpu_env,
7128 abi_ulong target_addr,
7129 struct stat *host_st)
7131 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7132 if (((CPUARMState *)cpu_env)->eabi) {
7133 struct target_eabi_stat64 *target_st;
7135 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7136 return -TARGET_EFAULT;
7137 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7138 __put_user(host_st->st_dev, &target_st->st_dev);
7139 __put_user(host_st->st_ino, &target_st->st_ino);
7140 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7141 __put_user(host_st->st_ino, &target_st->__st_ino);
7143 __put_user(host_st->st_mode, &target_st->st_mode);
7144 __put_user(host_st->st_nlink, &target_st->st_nlink);
7145 __put_user(host_st->st_uid, &target_st->st_uid);
7146 __put_user(host_st->st_gid, &target_st->st_gid);
7147 __put_user(host_st->st_rdev, &target_st->st_rdev);
7148 __put_user(host_st->st_size, &target_st->st_size);
7149 __put_user(host_st->st_blksize, &target_st->st_blksize);
7150 __put_user(host_st->st_blocks, &target_st->st_blocks);
7151 __put_user(host_st->st_atime, &target_st->target_st_atime);
7152 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7153 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7154 unlock_user_struct(target_st, target_addr, 1);
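/* Illustrative note: the ARM EABI variant of stat64 is laid out (and
 * padded) differently from the old-ABI struct, so the eabi flag selects
 * this dedicated conversion instead of the generic path below.
 */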
7158 #if defined(TARGET_HAS_STRUCT_STAT64)
7159 struct target_stat64 *target_st;
7161 struct target_stat *target_st;
7164 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7165 return -TARGET_EFAULT;
7166 memset(target_st, 0, sizeof(*target_st));
7167 __put_user(host_st->st_dev, &target_st->st_dev);
7168 __put_user(host_st->st_ino, &target_st->st_ino);
7169 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7170 __put_user(host_st->st_ino, &target_st->__st_ino);
7172 __put_user(host_st->st_mode, &target_st->st_mode);
7173 __put_user(host_st->st_nlink, &target_st->st_nlink);
7174 __put_user(host_st->st_uid, &target_st->st_uid);
7175 __put_user(host_st->st_gid, &target_st->st_gid);
7176 __put_user(host_st->st_rdev, &target_st->st_rdev);
7177 /* XXX: better use of kernel struct */
7178 __put_user(host_st->st_size, &target_st->st_size);
7179 __put_user(host_st->st_blksize, &target_st->st_blksize);
7180 __put_user(host_st->st_blocks, &target_st->st_blocks);
7181 __put_user(host_st->st_atime, &target_st->target_st_atime);
7182 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7183 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7184 unlock_user_struct(target_st, target_addr, 1);
7190 /* ??? Using host futex calls even when target atomic operations
7191    are not really atomic probably breaks things. However, implementing
7192    futexes locally would make futexes shared between multiple processes
7193    tricky; then again, such futexes are probably useless anyway, because
7194    guest atomic operations won't work either. */
7195 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7196 target_ulong uaddr2, int val3)
7198 struct timespec ts, *pts;
7201 /* ??? We assume FUTEX_* constants are the same on both host
7203 #ifdef FUTEX_CMD_MASK
7204 base_op = op & FUTEX_CMD_MASK;
7210 case FUTEX_WAIT_BITSET:
7213 target_to_host_timespec(pts, timeout);
7217 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7220 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7222 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7224 case FUTEX_CMP_REQUEUE:
7226 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7227 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7228 But the prototype takes a `struct timespec *'; insert casts
7229 to satisfy the compiler. We do not need to tswap TIMEOUT
7230 since it's not compared to guest memory. */
7231 pts = (struct timespec *)(uintptr_t) timeout;
7232 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7234 (base_op == FUTEX_CMP_REQUEUE
7238 return -TARGET_ENOSYS;
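/* Illustrative note: for FUTEX_WAIT the kernel compares *uaddr with
 * val, and the guest stored that word in target byte order, hence the
 * tswap32(val) above; ops like FUTEX_WAKE take val as a plain count of
 * waiters and pass it through unswapped.
 */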
7241 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7242 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7243 abi_long handle, abi_long mount_id,
7246 struct file_handle *target_fh;
7247 struct file_handle *fh;
7251 unsigned int size, total_size;
7253 if (get_user_s32(size, handle)) {
7254 return -TARGET_EFAULT;
7257 name = lock_user_string(pathname);
7259 return -TARGET_EFAULT;
7262 total_size = sizeof(struct file_handle) + size;
7263 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7265 unlock_user(name, pathname, 0);
7266 return -TARGET_EFAULT;
7269 fh = g_malloc0(total_size);
7270 fh->handle_bytes = size;
7272 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7273 unlock_user(name, pathname, 0);
7275 /* man name_to_handle_at(2):
7276 * Other than the use of the handle_bytes field, the caller should treat
7277 * the file_handle structure as an opaque data type
7280 memcpy(target_fh, fh, total_size);
7281 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7282 target_fh->handle_type = tswap32(fh->handle_type);
7284 unlock_user(target_fh, handle, total_size);
7286 if (put_user_s32(mid, mount_id)) {
7287 return -TARGET_EFAULT;
7295 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7296 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7299 struct file_handle *target_fh;
7300 struct file_handle *fh;
7301 unsigned int size, total_size;
7304 if (get_user_s32(size, handle)) {
7305 return -TARGET_EFAULT;
7308 total_size = sizeof(struct file_handle) + size;
7309 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7311 return -TARGET_EFAULT;
7314 fh = g_memdup(target_fh, total_size);
7315 fh->handle_bytes = size;
7316 fh->handle_type = tswap32(target_fh->handle_type);
7318 ret = get_errno(open_by_handle_at(mount_fd, fh,
7319 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7323 unlock_user(target_fh, handle, total_size);
7329 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7331 /* signalfd siginfo conversion */
7334 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
7335 const struct signalfd_siginfo *info)
7337 int sig = host_to_target_signal(info->ssi_signo);
7339 /* linux/signalfd.h defines an ssi_addr_lsb field that is
7340  * not defined in sys/signalfd.h but is used by some kernels
7343 #ifdef BUS_MCEERR_AO
7344 if (tinfo->ssi_signo == SIGBUS &&
7345 (tinfo->ssi_code == BUS_MCEERR_AR ||
7346 tinfo->ssi_code == BUS_MCEERR_AO)) {
7347 uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
7348 uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
7349 *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
7353 tinfo->ssi_signo = tswap32(sig);
7354 tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
7355 tinfo->ssi_code = tswap32(info->ssi_code);
7356 tinfo->ssi_pid = tswap32(info->ssi_pid);
7357 tinfo->ssi_uid = tswap32(info->ssi_uid);
7358 tinfo->ssi_fd = tswap32(info->ssi_fd);
7359 tinfo->ssi_tid = tswap32(info->ssi_tid);
7360 tinfo->ssi_band = tswap32(info->ssi_band);
7361 tinfo->ssi_overrun = tswap32(info->ssi_overrun);
7362 tinfo->ssi_trapno = tswap32(info->ssi_trapno);
7363 tinfo->ssi_status = tswap32(info->ssi_status);
7364 tinfo->ssi_int = tswap32(info->ssi_int);
7365 tinfo->ssi_ptr = tswap64(info->ssi_ptr);
7366 tinfo->ssi_utime = tswap64(info->ssi_utime);
7367 tinfo->ssi_stime = tswap64(info->ssi_stime);
7368 tinfo->ssi_addr = tswap64(info->ssi_addr);
7371 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7375 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7376 host_to_target_signalfd_siginfo(buf + i, buf + i);
7382 static TargetFdTrans target_signalfd_trans = {
7383 .host_to_target_data = host_to_target_data_signalfd,
7386 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7389 target_sigset_t *target_mask;
7393 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7394 return -TARGET_EINVAL;
7396 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7397 return -TARGET_EFAULT;
7400 target_to_host_sigset(&host_mask, target_mask);
7402 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7404 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7406 fd_trans_register(ret, &target_signalfd_trans);
7409 unlock_user_struct(target_mask, mask, 0);
7415 /* Map host to target signal numbers for the wait family of syscalls.
7416 Assume all other status bits are the same. */
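/* Example: a child killed by host SIGUSR1 (10 on x86 hosts) must be
 * reported to e.g. a MIPS guest as the guest's SIGUSR1 (16); only the
 * low 7 bits of the status are rewritten, preserving the core-dump bit.
 */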
7417 int host_to_target_waitstatus(int status)
7419 if (WIFSIGNALED(status)) {
7420 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7422 if (WIFSTOPPED(status)) {
7423 return (host_to_target_signal(WSTOPSIG(status)) << 8)
7429 static int open_self_cmdline(void *cpu_env, int fd)
7431 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7432 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7435 for (i = 0; i < bprm->argc; i++) {
7436 size_t len = strlen(bprm->argv[i]) + 1;
7438 if (write(fd, bprm->argv[i], len) != len) {
7446 static int open_self_maps(void *cpu_env, int fd)
7448 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7449 TaskState *ts = cpu->opaque;
7455 fp = fopen("/proc/self/maps", "r");
7460 while ((read = getline(&line, &len, fp)) != -1) {
7461 int fields, dev_maj, dev_min, inode;
7462 uint64_t min, max, offset;
7463 char flag_r, flag_w, flag_x, flag_p;
7464 char path[512] = "";
7465 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7466 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7467 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7469 if ((fields < 10) || (fields > 11)) {
7472 if (h2g_valid(min)) {
7473 int flags = page_get_flags(h2g(min));
7474 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7475 if (page_check_range(h2g(min), max - min, flags) == -1) {
7478 if (h2g(min) == ts->info->stack_limit) {
7479 pstrcpy(path, sizeof(path), " [stack]");
7481 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7482 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7483 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7484 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7485 path[0] ? " " : "", path);
7495 static int open_self_stat(void *cpu_env, int fd)
7497 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7498 TaskState *ts = cpu->opaque;
7499 abi_ulong start_stack = ts->info->start_stack;
7502 for (i = 0; i < 44; i++) {
7510 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7511 } else if (i == 1) {
7513 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7514 } else if (i == 27) {
7517 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7519 /* for the rest, there is MasterCard */
7520 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7524 if (write(fd, buf, len) != len) {
7532 static int open_self_auxv(void *cpu_env, int fd)
7534 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7535 TaskState *ts = cpu->opaque;
7536 abi_ulong auxv = ts->info->saved_auxv;
7537 abi_ulong len = ts->info->auxv_len;
7541 * The auxiliary vector is stored on the target process stack.
7542 * Read in the whole auxv vector and copy it to the file.
7544 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7548 r = write(fd, ptr, len);
7555 lseek(fd, 0, SEEK_SET);
7556 unlock_user(ptr, auxv, len);
7562 static int is_proc_myself(const char *filename, const char *entry)
7564 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7565 filename += strlen("/proc/");
7566 if (!strncmp(filename, "self/", strlen("self/"))) {
7567 filename += strlen("self/");
7568 } else if (*filename >= '1' && *filename <= '9') {
7570 snprintf(myself, sizeof(myself), "%d/", getpid());
7571 if (!strncmp(filename, myself, strlen(myself))) {
7572 filename += strlen(myself);
7579 if (!strcmp(filename, entry)) {
7586 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7587 static int is_proc(const char *filename, const char *entry)
7589 return strcmp(filename, entry) == 0;
7592 static int open_net_route(void *cpu_env, int fd)
7599 fp = fopen("/proc/net/route", "r");
7606 read = getline(&line, &len, fp);
7607 dprintf(fd, "%s", line);
7611 while ((read = getline(&line, &len, fp)) != -1) {
7613 uint32_t dest, gw, mask;
7614 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7615 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7616 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7617 &mask, &mtu, &window, &irtt);
7618 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7619 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7620 metric, tswap32(mask), mtu, window, irtt);
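/* Illustrative example: /proc/net/route prints addresses as raw hex in
 * host byte order, so 192.168.0.1 reads 0100A8C0 on a little-endian
 * host; when host and guest endianness differ, each address word is
 * tswap32()ed above so the guest parses the value it expects.
 */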
7630 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7633 const char *filename;
7634 int (*fill)(void *cpu_env, int fd);
7635 int (*cmp)(const char *s1, const char *s2);
7637 const struct fake_open *fake_open;
7638 static const struct fake_open fakes[] = {
7639 { "maps", open_self_maps, is_proc_myself },
7640 { "stat", open_self_stat, is_proc_myself },
7641 { "auxv", open_self_auxv, is_proc_myself },
7642 { "cmdline", open_self_cmdline, is_proc_myself },
7643 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7644 { "/proc/net/route", open_net_route, is_proc },
7646 { NULL, NULL, NULL }
7649 if (is_proc_myself(pathname, "exe")) {
7650 int execfd = qemu_getauxval(AT_EXECFD);
7651 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7654 for (fake_open = fakes; fake_open->filename; fake_open++) {
7655 if (fake_open->cmp(pathname, fake_open->filename)) {
7660 if (fake_open->filename) {
7662 char filename[PATH_MAX];
7665 /* create temporary file to map stat to */
7666 tmpdir = getenv("TMPDIR");
7669 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7670 fd = mkstemp(filename);
7676 if ((r = fake_open->fill(cpu_env, fd))) {
7682 lseek(fd, 0, SEEK_SET);
7687 return safe_openat(dirfd, path(pathname), flags, mode);
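/* Illustrative flow note: a guest open of e.g. /proc/self/maps is
 * serviced by creating a temporary file, letting the matching fill()
 * callback write synthetic target-formatted contents into it, and
 * returning that fd; everything else falls through to the real
 * safe_openat() above.
 */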
7690 #define TIMER_MAGIC 0x0caf0000
7691 #define TIMER_MAGIC_MASK 0xffff0000
7693 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7694 static target_timer_t get_timer_id(abi_long arg)
7696 target_timer_t timerid = arg;
7698 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7699 return -TARGET_EINVAL;
7704 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7705 return -TARGET_EINVAL;
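/* Example: POSIX timer slot 0 is handed to the guest as 0x0caf0000 and
 * slot 3 as 0x0caf0003; any ID whose top half is not TIMER_MAGIC is
 * rejected with -TARGET_EINVAL before the table lookup.
 */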
7711 static abi_long swap_data_eventfd(void *buf, size_t len)
7713 uint64_t *counter = buf;
7716 if (len < sizeof(uint64_t)) {
7720 for (i = 0; i < len; i += sizeof(uint64_t)) {
7721 *counter = tswap64(*counter);
counter++;
7728 static TargetFdTrans target_eventfd_trans = {
7729 .host_to_target_data = swap_data_eventfd,
7730 .target_to_host_data = swap_data_eventfd,
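/* Illustrative note: eventfd(2) traffic is always a single uint64_t
 * counter in host byte order, so one 8-byte swap routine can serve both
 * directions of the fd translation registered above.
 */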
7733 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7734 (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7735 defined(__NR_inotify_init1))
7736 static abi_long host_to_target_data_inotify(void *buf, size_t len)
7738 struct inotify_event *ev;
7742 for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
7743 ev = (struct inotify_event *)((char *)buf + i);
name_len = ev->len;
7746 ev->wd = tswap32(ev->wd);
7747 ev->mask = tswap32(ev->mask);
7748 ev->cookie = tswap32(ev->cookie);
7749 ev->len = tswap32(name_len);
7755 static TargetFdTrans target_inotify_trans = {
7756 .host_to_target_data = host_to_target_data_inotify,
7760 static int target_to_host_cpu_mask(unsigned long *host_mask,
7762 abi_ulong target_addr,
7765 unsigned target_bits = sizeof(abi_ulong) * 8;
7766 unsigned host_bits = sizeof(*host_mask) * 8;
7767 abi_ulong *target_mask;
7770 assert(host_size >= target_size);
7772 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7774 return -TARGET_EFAULT;
7776 memset(host_mask, 0, host_size);
7778 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7779 unsigned bit = i * target_bits;
7782 __get_user(val, &target_mask[i]);
7783 for (j = 0; j < target_bits; j++, bit++) {
7784 if (val & (1UL << j)) {
7785 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7790 unlock_user(target_mask, target_addr, 0);
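/* Worked example (assuming a 32-bit guest on a 64-bit host): guest mask
 * words { 0x00000001, 0x80000000 } set target bits 0 and 63, which both
 * land in host_mask[0] as bits 0 and 63 of one 64-bit word.
 */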
7794 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7796 abi_ulong target_addr,
7799 unsigned target_bits = sizeof(abi_ulong) * 8;
7800 unsigned host_bits = sizeof(*host_mask) * 8;
7801 abi_ulong *target_mask;
7804 assert(host_size >= target_size);
7806 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7808 return -TARGET_EFAULT;
7811 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7812 unsigned bit = i * target_bits;
7815 for (j = 0; j < target_bits; j++, bit++) {
7816 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7820 __put_user(val, &target_mask[i]);
7823 unlock_user(target_mask, target_addr, target_size);
7827 /* do_syscall() should always have a single exit point at the end so
7828 that actions, such as logging of syscall results, can be performed.
7829 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7830 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7831 abi_long arg2, abi_long arg3, abi_long arg4,
7832 abi_long arg5, abi_long arg6, abi_long arg7,
7835 CPUState *cpu = ENV_GET_CPU(cpu_env);
7841 #if defined(DEBUG_ERESTARTSYS)
7842 /* Debug-only code for exercising the syscall-restart code paths
7843 * in the per-architecture cpu main loops: restart every syscall
7844 * the guest makes once before letting it through.
7851 return -TARGET_ERESTARTSYS;
7857 gemu_log("syscall %d", num);
7859 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7861 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7864 case TARGET_NR_exit:
7865 /* In old applications this may be used to implement _exit(2).
7866    However in threaded applications it is used for thread termination,
7867    and _exit_group is used for application termination.
7868    Do thread termination if we have more than one thread. */
7870 if (block_signals()) {
7871 ret = -TARGET_ERESTARTSYS;
7877 if (CPU_NEXT(first_cpu)) {
7880 /* Remove the CPU from the list. */
7881 QTAILQ_REMOVE(&cpus, cpu, node);
7886 if (ts->child_tidptr) {
7887 put_user_u32(0, ts->child_tidptr);
7888 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7892 object_unref(OBJECT(cpu));
7894 rcu_unregister_thread();
7902 gdb_exit(cpu_env, arg1);
7904 ret = 0; /* avoid warning */
7906 case TARGET_NR_read:
7910 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7912 ret = get_errno(safe_read(arg1, p, arg3));
7914 fd_trans_host_to_target_data(arg1)) {
7915 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7917 unlock_user(p, arg2, ret);
7920 case TARGET_NR_write:
7921 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7923 if (fd_trans_target_to_host_data(arg1)) {
7924 void *copy = g_malloc(arg3);
7925 memcpy(copy, p, arg3);
7926 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7928 ret = get_errno(safe_write(arg1, copy, ret));
7932 ret = get_errno(safe_write(arg1, p, arg3));
7934 unlock_user(p, arg2, 0);
7936 #ifdef TARGET_NR_open
7937 case TARGET_NR_open:
7938 if (!(p = lock_user_string(arg1)))
7940 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7941 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7943 fd_trans_unregister(ret);
7944 unlock_user(p, arg1, 0);
7947 case TARGET_NR_openat:
7948 if (!(p = lock_user_string(arg2)))
7950 ret = get_errno(do_openat(cpu_env, arg1, p,
7951 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7953 fd_trans_unregister(ret);
7954 unlock_user(p, arg2, 0);
7956 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7957 case TARGET_NR_name_to_handle_at:
7958 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7961 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7962 case TARGET_NR_open_by_handle_at:
7963 ret = do_open_by_handle_at(arg1, arg2, arg3);
7964 fd_trans_unregister(ret);
7967 case TARGET_NR_close:
7968 fd_trans_unregister(arg1);
7969 ret = get_errno(close(arg1));
7974 #ifdef TARGET_NR_fork
7975 case TARGET_NR_fork:
7976 ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7979 #ifdef TARGET_NR_waitpid
7980 case TARGET_NR_waitpid:
7983 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7984 if (!is_error(ret) && arg2 && ret
7985 && put_user_s32(host_to_target_waitstatus(status), arg2))
7990 #ifdef TARGET_NR_waitid
7991 case TARGET_NR_waitid:
7995 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7996 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7997 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7999 host_to_target_siginfo(p, &info);
8000 unlock_user(p, arg3, sizeof(target_siginfo_t));
8005 #ifdef TARGET_NR_creat /* not on alpha */
8006 case TARGET_NR_creat:
8007 if (!(p = lock_user_string(arg1)))
8009 ret = get_errno(creat(p, arg2));
8010 fd_trans_unregister(ret);
8011 unlock_user(p, arg1, 0);
8014 #ifdef TARGET_NR_link
8015 case TARGET_NR_link:
8018 p = lock_user_string(arg1);
8019 p2 = lock_user_string(arg2);
8021 ret = -TARGET_EFAULT;
8023 ret = get_errno(link(p, p2));
8024 unlock_user(p2, arg2, 0);
8025 unlock_user(p, arg1, 0);
8029 #if defined(TARGET_NR_linkat)
8030 case TARGET_NR_linkat:
8035 p = lock_user_string(arg2);
8036 p2 = lock_user_string(arg4);
8038 ret = -TARGET_EFAULT;
8040 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8041 unlock_user(p, arg2, 0);
8042 unlock_user(p2, arg4, 0);
8046 #ifdef TARGET_NR_unlink
8047 case TARGET_NR_unlink:
8048 if (!(p = lock_user_string(arg1)))
8050 ret = get_errno(unlink(p));
8051 unlock_user(p, arg1, 0);
8054 #if defined(TARGET_NR_unlinkat)
8055 case TARGET_NR_unlinkat:
8056 if (!(p = lock_user_string(arg2)))
8058 ret = get_errno(unlinkat(arg1, p, arg3));
8059 unlock_user(p, arg2, 0);
8062 case TARGET_NR_execve:
8064 char **argp, **envp;
8067 abi_ulong guest_argp;
8068 abi_ulong guest_envp;
8075 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8076 if (get_user_ual(addr, gp))
8084 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8085 if (get_user_ual(addr, gp))
8092 argp = g_new0(char *, argc + 1);
8093 envp = g_new0(char *, envc + 1);
8095 for (gp = guest_argp, q = argp; gp;
8096 gp += sizeof(abi_ulong), q++) {
8097 if (get_user_ual(addr, gp))
8101 if (!(*q = lock_user_string(addr)))
8103 total_size += strlen(*q) + 1;
8107 for (gp = guest_envp, q = envp; gp;
8108 gp += sizeof(abi_ulong), q++) {
8109 if (get_user_ual(addr, gp))
8113 if (!(*q = lock_user_string(addr)))
8115 total_size += strlen(*q) + 1;
8119 if (!(p = lock_user_string(arg1)))
8121 /* Although execve() is not an interruptible syscall it is
8122 * a special case where we must use the safe_syscall wrapper:
8123 * if we allow a signal to happen before we make the host
8124 * syscall then we will 'lose' it, because at the point of
8125 * execve the process leaves QEMU's control. So we use the
8126 * safe syscall wrapper to ensure that we either take the
8127 * signal as a guest signal, or else it does not happen
8128 * before the execve completes and makes it the other
8129 * program's problem.
8131 ret = get_errno(safe_execve(p, argp, envp));
8132 unlock_user(p, arg1, 0);
8137 ret = -TARGET_EFAULT;
8140 for (gp = guest_argp, q = argp; *q;
8141 gp += sizeof(abi_ulong), q++) {
8142 if (get_user_ual(addr, gp)
8145 unlock_user(*q, addr, 0);
8147 for (gp = guest_envp, q = envp; *q;
8148 gp += sizeof(abi_ulong), q++) {
8149 if (get_user_ual(addr, gp)
8152 unlock_user(*q, addr, 0);
8159 case TARGET_NR_chdir:
8160 if (!(p = lock_user_string(arg1)))
8162 ret = get_errno(chdir(p));
8163 unlock_user(p, arg1, 0);
8165 #ifdef TARGET_NR_time
8166 case TARGET_NR_time:
8169 ret = get_errno(time(&host_time));
8172 && put_user_sal(host_time, arg1))
8177 #ifdef TARGET_NR_mknod
8178 case TARGET_NR_mknod:
8179 if (!(p = lock_user_string(arg1)))
8181 ret = get_errno(mknod(p, arg2, arg3));
8182 unlock_user(p, arg1, 0);
8185 #if defined(TARGET_NR_mknodat)
8186 case TARGET_NR_mknodat:
8187 if (!(p = lock_user_string(arg2)))
8189 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8190 unlock_user(p, arg2, 0);
8193 #ifdef TARGET_NR_chmod
8194 case TARGET_NR_chmod:
8195 if (!(p = lock_user_string(arg1)))
8197 ret = get_errno(chmod(p, arg2));
8198 unlock_user(p, arg1, 0);
8201 #ifdef TARGET_NR_break
8202 case TARGET_NR_break:
8205 #ifdef TARGET_NR_oldstat
8206 case TARGET_NR_oldstat:
8209 case TARGET_NR_lseek:
8210 ret = get_errno(lseek(arg1, arg2, arg3));
8212 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8213 /* Alpha specific */
8214 case TARGET_NR_getxpid:
8215 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8216 ret = get_errno(getpid());
8219 #ifdef TARGET_NR_getpid
8220 case TARGET_NR_getpid:
8221 ret = get_errno(getpid());
8224 case TARGET_NR_mount:
8226 /* need to look at the data field */
8230 p = lock_user_string(arg1);
8238 p2 = lock_user_string(arg2);
8241 unlock_user(p, arg1, 0);
8247 p3 = lock_user_string(arg3);
8250 unlock_user(p, arg1, 0);
8252 unlock_user(p2, arg2, 0);
8259 /* FIXME - arg5 should be locked, but it isn't clear how to
8260 * do that since it's not guaranteed to be a NULL-terminated
 * string. */
8264 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8266 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8268 ret = get_errno(ret);
8271 unlock_user(p, arg1, 0);
8273 unlock_user(p2, arg2, 0);
8275 unlock_user(p3, arg3, 0);
8279 #ifdef TARGET_NR_umount
8280 case TARGET_NR_umount:
8281 if (!(p = lock_user_string(arg1)))
8283 ret = get_errno(umount(p));
8284 unlock_user(p, arg1, 0);
8287 #ifdef TARGET_NR_stime /* not on alpha */
8288 case TARGET_NR_stime:
8291 if (get_user_sal(host_time, arg1))
8293 ret = get_errno(stime(&host_time));
8297 case TARGET_NR_ptrace:
8299 #ifdef TARGET_NR_alarm /* not on alpha */
8300 case TARGET_NR_alarm:
8304 #ifdef TARGET_NR_oldfstat
8305 case TARGET_NR_oldfstat:
8308 #ifdef TARGET_NR_pause /* not on alpha */
8309 case TARGET_NR_pause:
8310 if (!block_signals()) {
8311 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8313 ret = -TARGET_EINTR;
8316 #ifdef TARGET_NR_utime
8317 case TARGET_NR_utime:
8319 struct utimbuf tbuf, *host_tbuf;
8320 struct target_utimbuf *target_tbuf;
8322 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8324 tbuf.actime = tswapal(target_tbuf->actime);
8325 tbuf.modtime = tswapal(target_tbuf->modtime);
8326 unlock_user_struct(target_tbuf, arg2, 0);
8331 if (!(p = lock_user_string(arg1)))
8333 ret = get_errno(utime(p, host_tbuf));
8334 unlock_user(p, arg1, 0);
8338 #ifdef TARGET_NR_utimes
8339 case TARGET_NR_utimes:
8341 struct timeval *tvp, tv[2];
8343 if (copy_from_user_timeval(&tv[0], arg2)
8344 || copy_from_user_timeval(&tv[1],
8345 arg2 + sizeof(struct target_timeval)))
8351 if (!(p = lock_user_string(arg1)))
8353 ret = get_errno(utimes(p, tvp));
8354 unlock_user(p, arg1, 0);
8358 #if defined(TARGET_NR_futimesat)
8359 case TARGET_NR_futimesat:
8361 struct timeval *tvp, tv[2];
8363 if (copy_from_user_timeval(&tv[0], arg3)
8364 || copy_from_user_timeval(&tv[1],
8365 arg3 + sizeof(struct target_timeval)))
8371 if (!(p = lock_user_string(arg2)))
8373 ret = get_errno(futimesat(arg1, path(p), tvp));
8374 unlock_user(p, arg2, 0);
8378 #ifdef TARGET_NR_stty
8379 case TARGET_NR_stty:
8382 #ifdef TARGET_NR_gtty
8383 case TARGET_NR_gtty:
8386 #ifdef TARGET_NR_access
8387 case TARGET_NR_access:
8388 if (!(p = lock_user_string(arg1)))
8390 ret = get_errno(access(path(p), arg2));
8391 unlock_user(p, arg1, 0);
8394 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8395 case TARGET_NR_faccessat:
8396 if (!(p = lock_user_string(arg2)))
8398 ret = get_errno(faccessat(arg1, p, arg3, 0));
8399 unlock_user(p, arg2, 0);
8402 #ifdef TARGET_NR_nice /* not on alpha */
8403 case TARGET_NR_nice:
8404 ret = get_errno(nice(arg1));
8407 #ifdef TARGET_NR_ftime
8408 case TARGET_NR_ftime:
8411 case TARGET_NR_sync:
8415 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8416 case TARGET_NR_syncfs:
8417 ret = get_errno(syncfs(arg1));
8420 case TARGET_NR_kill:
8421 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8423 #ifdef TARGET_NR_rename
8424 case TARGET_NR_rename:
8427 p = lock_user_string(arg1);
8428 p2 = lock_user_string(arg2);
8430 ret = -TARGET_EFAULT;
8432 ret = get_errno(rename(p, p2));
8433 unlock_user(p2, arg2, 0);
8434 unlock_user(p, arg1, 0);
8438 #if defined(TARGET_NR_renameat)
8439 case TARGET_NR_renameat:
8442 p = lock_user_string(arg2);
8443 p2 = lock_user_string(arg4);
8445 ret = -TARGET_EFAULT;
8447 ret = get_errno(renameat(arg1, p, arg3, p2));
8448 unlock_user(p2, arg4, 0);
8449 unlock_user(p, arg2, 0);
8453 #if defined(TARGET_NR_renameat2)
8454 case TARGET_NR_renameat2:
8457 p = lock_user_string(arg2);
8458 p2 = lock_user_string(arg4);
8460 ret = -TARGET_EFAULT;
8462 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8464 unlock_user(p2, arg4, 0);
8465 unlock_user(p, arg2, 0);
8469 #ifdef TARGET_NR_mkdir
8470 case TARGET_NR_mkdir:
8471 if (!(p = lock_user_string(arg1)))
8473 ret = get_errno(mkdir(p, arg2));
8474 unlock_user(p, arg1, 0);
8477 #if defined(TARGET_NR_mkdirat)
8478 case TARGET_NR_mkdirat:
8479 if (!(p = lock_user_string(arg2)))
8481 ret = get_errno(mkdirat(arg1, p, arg3));
8482 unlock_user(p, arg2, 0);
8485 #ifdef TARGET_NR_rmdir
8486 case TARGET_NR_rmdir:
8487 if (!(p = lock_user_string(arg1)))
8489 ret = get_errno(rmdir(p));
8490 unlock_user(p, arg1, 0);
8494 ret = get_errno(dup(arg1));
8496 fd_trans_dup(arg1, ret);
8499 #ifdef TARGET_NR_pipe
8500 case TARGET_NR_pipe:
8501 ret = do_pipe(cpu_env, arg1, 0, 0);
8504 #ifdef TARGET_NR_pipe2
8505 case TARGET_NR_pipe2:
8506 ret = do_pipe(cpu_env, arg1,
8507 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8510 case TARGET_NR_times:
8512 struct target_tms *tmsp;
8514 ret = get_errno(times(&tms));
8516 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8519 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8520 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8521 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8522 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8525 ret = host_to_target_clock_t(ret);
8528 #ifdef TARGET_NR_prof
8529 case TARGET_NR_prof:
8532 #ifdef TARGET_NR_signal
8533 case TARGET_NR_signal:
8536 case TARGET_NR_acct:
8538 ret = get_errno(acct(NULL));
8540 if (!(p = lock_user_string(arg1)))
8542 ret = get_errno(acct(path(p)));
8543 unlock_user(p, arg1, 0);
8546 #ifdef TARGET_NR_umount2
8547 case TARGET_NR_umount2:
8548 if (!(p = lock_user_string(arg1)))
8550 ret = get_errno(umount2(p, arg2));
8551 unlock_user(p, arg1, 0);
8554 #ifdef TARGET_NR_lock
8555 case TARGET_NR_lock:
8558 case TARGET_NR_ioctl:
8559 ret = do_ioctl(arg1, arg2, arg3);
8561 #ifdef TARGET_NR_fcntl
8562 case TARGET_NR_fcntl:
8563 ret = do_fcntl(arg1, arg2, arg3);
8566 #ifdef TARGET_NR_mpx
8570 case TARGET_NR_setpgid:
8571 ret = get_errno(setpgid(arg1, arg2));
8573 #ifdef TARGET_NR_ulimit
8574 case TARGET_NR_ulimit:
8577 #ifdef TARGET_NR_oldolduname
8578 case TARGET_NR_oldolduname:
8581 case TARGET_NR_umask:
8582 ret = get_errno(umask(arg1));
8584 case TARGET_NR_chroot:
8585 if (!(p = lock_user_string(arg1)))
8587 ret = get_errno(chroot(p));
8588 unlock_user(p, arg1, 0);
8590 #ifdef TARGET_NR_ustat
8591 case TARGET_NR_ustat:
8594 #ifdef TARGET_NR_dup2
8595 case TARGET_NR_dup2:
8596 ret = get_errno(dup2(arg1, arg2));
8598 fd_trans_dup(arg1, arg2);
8602 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8603 case TARGET_NR_dup3:
8607 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8610 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8611 ret = get_errno(dup3(arg1, arg2, host_flags));
8613 fd_trans_dup(arg1, arg2);
8618 #ifdef TARGET_NR_getppid /* not on alpha */
8619 case TARGET_NR_getppid:
8620 ret = get_errno(getppid());
8623 #ifdef TARGET_NR_getpgrp
8624 case TARGET_NR_getpgrp:
8625 ret = get_errno(getpgrp());
8628 case TARGET_NR_setsid:
8629 ret = get_errno(setsid());
8631 #ifdef TARGET_NR_sigaction
8632 case TARGET_NR_sigaction:
8634 #if defined(TARGET_ALPHA)
8635 struct target_sigaction act, oact, *pact = 0;
8636 struct target_old_sigaction *old_act;
8638 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8640 act._sa_handler = old_act->_sa_handler;
8641 target_siginitset(&act.sa_mask, old_act->sa_mask);
8642 act.sa_flags = old_act->sa_flags;
8643 act.sa_restorer = 0;
8644 unlock_user_struct(old_act, arg2, 0);
8647 ret = get_errno(do_sigaction(arg1, pact, &oact));
8648 if (!is_error(ret) && arg3) {
8649 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8651 old_act->_sa_handler = oact._sa_handler;
8652 old_act->sa_mask = oact.sa_mask.sig[0];
8653 old_act->sa_flags = oact.sa_flags;
8654 unlock_user_struct(old_act, arg3, 1);
8656 #elif defined(TARGET_MIPS)
8657 struct target_sigaction act, oact, *pact, *old_act;
8660 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8662 act._sa_handler = old_act->_sa_handler;
8663 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8664 act.sa_flags = old_act->sa_flags;
8665 unlock_user_struct(old_act, arg2, 0);
8671 ret = get_errno(do_sigaction(arg1, pact, &oact));
8673 if (!is_error(ret) && arg3) {
8674 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8676 old_act->_sa_handler = oact._sa_handler;
8677 old_act->sa_flags = oact.sa_flags;
8678 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8679 old_act->sa_mask.sig[1] = 0;
8680 old_act->sa_mask.sig[2] = 0;
8681 old_act->sa_mask.sig[3] = 0;
8682 unlock_user_struct(old_act, arg3, 1);
8685 struct target_old_sigaction *old_act;
8686 struct target_sigaction act, oact, *pact;
8688 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8690 act._sa_handler = old_act->_sa_handler;
8691 target_siginitset(&act.sa_mask, old_act->sa_mask);
8692 act.sa_flags = old_act->sa_flags;
8693 act.sa_restorer = old_act->sa_restorer;
8694 unlock_user_struct(old_act, arg2, 0);
8699 ret = get_errno(do_sigaction(arg1, pact, &oact));
8700 if (!is_error(ret) && arg3) {
8701 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8703 old_act->_sa_handler = oact._sa_handler;
8704 old_act->sa_mask = oact.sa_mask.sig[0];
8705 old_act->sa_flags = oact.sa_flags;
8706 old_act->sa_restorer = oact.sa_restorer;
8707 unlock_user_struct(old_act, arg3, 1);
8713 case TARGET_NR_rt_sigaction:
8715 #if defined(TARGET_ALPHA)
8716 /* For Alpha and SPARC this is a 5 argument syscall, with
8717 * a 'restorer' parameter which must be copied into the
8718 * sa_restorer field of the sigaction struct.
8719 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8720 * and arg5 is the sigsetsize.
8721 * Alpha also has a separate rt_sigaction struct that it uses
8722 * here; SPARC uses the usual sigaction struct.
8724 struct target_rt_sigaction *rt_act;
8725 struct target_sigaction act, oact, *pact = 0;
8727 if (arg4 != sizeof(target_sigset_t)) {
8728 ret = -TARGET_EINVAL;
8732 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8734 act._sa_handler = rt_act->_sa_handler;
8735 act.sa_mask = rt_act->sa_mask;
8736 act.sa_flags = rt_act->sa_flags;
8737 act.sa_restorer = arg5;
8738 unlock_user_struct(rt_act, arg2, 0);
8741 ret = get_errno(do_sigaction(arg1, pact, &oact));
8742 if (!is_error(ret) && arg3) {
8743 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8745 rt_act->_sa_handler = oact._sa_handler;
8746 rt_act->sa_mask = oact.sa_mask;
8747 rt_act->sa_flags = oact.sa_flags;
8748 unlock_user_struct(rt_act, arg3, 1);
8752 target_ulong restorer = arg4;
8753 target_ulong sigsetsize = arg5;
8755 target_ulong sigsetsize = arg4;
8757 struct target_sigaction *act;
8758 struct target_sigaction *oact;
8760 if (sigsetsize != sizeof(target_sigset_t)) {
8761 ret = -TARGET_EINVAL;
8765 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8769 act->sa_restorer = restorer;
8775 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8776 ret = -TARGET_EFAULT;
8777 goto rt_sigaction_fail;
8781 ret = get_errno(do_sigaction(arg1, act, oact));
8784 unlock_user_struct(act, arg2, 0);
8786 unlock_user_struct(oact, arg3, 1);
8790 #ifdef TARGET_NR_sgetmask /* not on alpha */
8791 case TARGET_NR_sgetmask:
8794 abi_ulong target_set;
8795 ret = do_sigprocmask(0, NULL, &cur_set);
8797 host_to_target_old_sigset(&target_set, &cur_set);
8803 #ifdef TARGET_NR_ssetmask /* not on alpha */
8804 case TARGET_NR_ssetmask:
8807 abi_ulong target_set = arg1;
8808 target_to_host_old_sigset(&set, &target_set);
8809 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8811 host_to_target_old_sigset(&target_set, &oset);
8817 #ifdef TARGET_NR_sigprocmask
8818 case TARGET_NR_sigprocmask:
8820 #if defined(TARGET_ALPHA)
8821 sigset_t set, oldset;
8826 case TARGET_SIG_BLOCK:
8829 case TARGET_SIG_UNBLOCK:
8832 case TARGET_SIG_SETMASK:
8836 ret = -TARGET_EINVAL;
8840 target_to_host_old_sigset(&set, &mask);
8842 ret = do_sigprocmask(how, &set, &oldset);
8843 if (!is_error(ret)) {
8844 host_to_target_old_sigset(&mask, &oldset);
8846 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8849 sigset_t set, oldset, *set_ptr;
8854 case TARGET_SIG_BLOCK:
8857 case TARGET_SIG_UNBLOCK:
8860 case TARGET_SIG_SETMASK:
8864 ret = -TARGET_EINVAL;
8867 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8869 target_to_host_old_sigset(&set, p);
8870 unlock_user(p, arg2, 0);
8876 ret = do_sigprocmask(how, set_ptr, &oldset);
8877 if (!is_error(ret) && arg3) {
8878 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8880 host_to_target_old_sigset(p, &oldset);
8881 unlock_user(p, arg3, sizeof(target_sigset_t));
8887 case TARGET_NR_rt_sigprocmask:
8890 sigset_t set, oldset, *set_ptr;
8892 if (arg4 != sizeof(target_sigset_t)) {
8893 ret = -TARGET_EINVAL;
8899 case TARGET_SIG_BLOCK:
8902 case TARGET_SIG_UNBLOCK:
8905 case TARGET_SIG_SETMASK:
8909 ret = -TARGET_EINVAL;
8912 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8914 target_to_host_sigset(&set, p);
8915 unlock_user(p, arg2, 0);
8921 ret = do_sigprocmask(how, set_ptr, &oldset);
8922 if (!is_error(ret) && arg3) {
8923 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8925 host_to_target_sigset(p, &oldset);
8926 unlock_user(p, arg3, sizeof(target_sigset_t));
8930 #ifdef TARGET_NR_sigpending
8931 case TARGET_NR_sigpending:
8934 ret = get_errno(sigpending(&set));
8935 if (!is_error(ret)) {
8936 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8938 host_to_target_old_sigset(p, &set);
8939 unlock_user(p, arg1, sizeof(target_sigset_t));
8944 case TARGET_NR_rt_sigpending:
8948 /* Yes, this check is >, not != like most. We follow the kernel's
8949 * logic and it does it like this because it implements
8950 * NR_sigpending through the same code path, and in that case
8951 * the old_sigset_t is smaller in size.
8953 if (arg2 > sizeof(target_sigset_t)) {
8954 ret = -TARGET_EINVAL;
8958 ret = get_errno(sigpending(&set));
8959 if (!is_error(ret)) {
8960 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8962 host_to_target_sigset(p, &set);
8963 unlock_user(p, arg1, sizeof(target_sigset_t));
8967 #ifdef TARGET_NR_sigsuspend
8968 case TARGET_NR_sigsuspend:
8970 TaskState *ts = cpu->opaque;
8971 #if defined(TARGET_ALPHA)
8972 abi_ulong mask = arg1;
8973 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8975 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8977 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8978 unlock_user(p, arg1, 0);
8980 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8982 if (ret != -TARGET_ERESTARTSYS) {
8983 ts->in_sigsuspend = 1;
8988 case TARGET_NR_rt_sigsuspend:
8990 TaskState *ts = cpu->opaque;
8992 if (arg2 != sizeof(target_sigset_t)) {
8993 ret = -TARGET_EINVAL;
8996 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8998 target_to_host_sigset(&ts->sigsuspend_mask, p);
8999 unlock_user(p, arg1, 0);
9000 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9002 if (ret != -TARGET_ERESTARTSYS) {
9003 ts->in_sigsuspend = 1;
9007 case TARGET_NR_rt_sigtimedwait:
9010 struct timespec uts, *puts;
9013 if (arg4 != sizeof(target_sigset_t)) {
9014 ret = -TARGET_EINVAL;
9018 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9020 target_to_host_sigset(&set, p);
9021 unlock_user(p, arg1, 0);
9024 target_to_host_timespec(puts, arg3);
9028 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9030 if (!is_error(ret)) {
9032 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9037 host_to_target_siginfo(p, &uinfo);
9038 unlock_user(p, arg2, sizeof(target_siginfo_t));
9040 ret = host_to_target_signal(ret);
9044 case TARGET_NR_rt_sigqueueinfo:
9048 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9052 target_to_host_siginfo(&uinfo, p);
9053 unlock_user(p, arg3, 0);
9054 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9057 case TARGET_NR_rt_tgsigqueueinfo:
9061 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9065 target_to_host_siginfo(&uinfo, p);
9066 unlock_user(p, arg4, 0);
9067 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9070 #ifdef TARGET_NR_sigreturn
9071 case TARGET_NR_sigreturn:
9072 if (block_signals()) {
9073 ret = -TARGET_ERESTARTSYS;
9075 ret = do_sigreturn(cpu_env);
9079 case TARGET_NR_rt_sigreturn:
9080 if (block_signals()) {
9081 ret = -TARGET_ERESTARTSYS;
9083 ret = do_rt_sigreturn(cpu_env);
9086 case TARGET_NR_sethostname:
9087 if (!(p = lock_user_string(arg1)))
9089 ret = get_errno(sethostname(p, arg2));
9090 unlock_user(p, arg1, 0);
9092 case TARGET_NR_setrlimit:
9094 int resource = target_to_host_resource(arg1);
9095 struct target_rlimit *target_rlim;
9097 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9099 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9100 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9101 unlock_user_struct(target_rlim, arg2, 0);
9102 ret = get_errno(setrlimit(resource, &rlim));
9105 case TARGET_NR_getrlimit:
9107 int resource = target_to_host_resource(arg1);
9108 struct target_rlimit *target_rlim;
9111 ret = get_errno(getrlimit(resource, &rlim));
9112 if (!is_error(ret)) {
9113 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9115 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9116 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9117 unlock_user_struct(target_rlim, arg2, 1);
9121 case TARGET_NR_getrusage:
9123 struct rusage rusage;
9124 ret = get_errno(getrusage(arg1, &rusage));
9125 if (!is_error(ret)) {
9126 ret = host_to_target_rusage(arg2, &rusage);
9130 case TARGET_NR_gettimeofday:
9133 ret = get_errno(gettimeofday(&tv, NULL));
9134 if (!is_error(ret)) {
9135 if (copy_to_user_timeval(arg1, &tv))
9140 case TARGET_NR_settimeofday:
9142 struct timeval tv, *ptv = NULL;
9143 struct timezone tz, *ptz = NULL;
9146 if (copy_from_user_timeval(&tv, arg1)) {
9153 if (copy_from_user_timezone(&tz, arg2)) {
9159 ret = get_errno(settimeofday(ptv, ptz));
9162 #if defined(TARGET_NR_select)
9163 case TARGET_NR_select:
9164 #if defined(TARGET_WANT_NI_OLD_SELECT)
9165 /* Some architectures used to have old_select here,
9166  * but they now return ENOSYS for it.
9168 ret = -TARGET_ENOSYS;
9169 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9170 ret = do_old_select(arg1);
9172 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9176 #ifdef TARGET_NR_pselect6
9177 case TARGET_NR_pselect6:
9179 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9180 fd_set rfds, wfds, efds;
9181 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9182 struct timespec ts, *ts_ptr;
9185 * The 6th arg is actually two args smashed together,
9186 * so we cannot use the C library.
9194 abi_ulong arg_sigset, arg_sigsize, *arg7;
9195 target_sigset_t *target_sigset;
9203 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9207 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9211 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9217 * This takes a timespec, and not a timeval, so we cannot
9218 * use the do_select() helper ...
9221 if (target_to_host_timespec(&ts, ts_addr)) {
9229 /* Extract the two packed args for the sigset */
9232 sig.size = SIGSET_T_SIZE;
9234 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9238 arg_sigset = tswapal(arg7[0]);
9239 arg_sigsize = tswapal(arg7[1]);
9240 unlock_user(arg7, arg6, 0);
9244 if (arg_sigsize != sizeof(*target_sigset)) {
9245 /* Like the kernel, we enforce correct size sigsets */
9246 ret = -TARGET_EINVAL;
9249 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9250 sizeof(*target_sigset), 1);
9251 if (!target_sigset) {
9254 target_to_host_sigset(&set, target_sigset);
9255 unlock_user(target_sigset, arg_sigset, 0);
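/* Illustrative layout note: arg6 points at two packed abi_ulongs,
 * { sigset pointer, sigset size }, which is why the pair is fetched
 * and tswapal()ed by hand above rather than passed straight through.
 */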
9263 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9266 if (!is_error(ret)) {
9267 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9269 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9271 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9274 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9280 #ifdef TARGET_NR_symlink
9281 case TARGET_NR_symlink:
9284 p = lock_user_string(arg1);
9285 p2 = lock_user_string(arg2);
9287 ret = -TARGET_EFAULT;
9289 ret = get_errno(symlink(p, p2));
9290 unlock_user(p2, arg2, 0);
9291 unlock_user(p, arg1, 0);
9295 #if defined(TARGET_NR_symlinkat)
9296 case TARGET_NR_symlinkat:
9299 p = lock_user_string(arg1);
9300 p2 = lock_user_string(arg3);
9302 ret = -TARGET_EFAULT;
9304 ret = get_errno(symlinkat(p, arg2, p2));
9305 unlock_user(p2, arg3, 0);
9306 unlock_user(p, arg1, 0);
9310 #ifdef TARGET_NR_oldlstat
9311 case TARGET_NR_oldlstat:
9314 #ifdef TARGET_NR_readlink
9315 case TARGET_NR_readlink:
9318 p = lock_user_string(arg1);
9319 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9321 ret = -TARGET_EFAULT;
9323 /* Short circuit this for the magic exe check. */
9324 ret = -TARGET_EINVAL;
9325 } else if (is_proc_myself((const char *)p, "exe")) {
9326 char real[PATH_MAX], *temp;
9327 temp = realpath(exec_path, real);
9328 /* Return value is # of bytes that we wrote to the buffer. */
9330 ret = get_errno(-1);
9332 /* Don't worry about sign mismatch as earlier mapping
9333 * logic would have thrown a bad address error. */
9334 ret = MIN(strlen(real), arg3);
9335 /* We cannot NUL terminate the string. */
9336 memcpy(p2, real, ret);
9339 ret = get_errno(readlink(path(p), p2, arg3));
9341 unlock_user(p2, arg2, ret);
9342 unlock_user(p, arg1, 0);
9346 #if defined(TARGET_NR_readlinkat)
9347 case TARGET_NR_readlinkat:
9350 p = lock_user_string(arg2);
9351 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9353 ret = -TARGET_EFAULT;
9354 } else if (is_proc_myself((const char *)p, "exe")) {
9355 char real[PATH_MAX], *temp;
9356 temp = realpath(exec_path, real);
9357 ret = temp == NULL ? get_errno(-1) : strlen(real);
9358 snprintf((char *)p2, arg4, "%s", real);
9360 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9362 unlock_user(p2, arg3, ret);
9363 unlock_user(p, arg2, 0);
9367 #ifdef TARGET_NR_uselib
9368 case TARGET_NR_uselib:
9371 #ifdef TARGET_NR_swapon
9372 case TARGET_NR_swapon:
9373 if (!(p = lock_user_string(arg1)))
9375 ret = get_errno(swapon(p, arg2));
9376 unlock_user(p, arg1, 0);
9379 case TARGET_NR_reboot:
9380 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9381 /* arg4 is only valid for LINUX_REBOOT_CMD_RESTART2 and must be ignored in all other cases */
9382 p = lock_user_string(arg4);
9386 ret = get_errno(reboot(arg1, arg2, arg3, p));
9387 unlock_user(p, arg4, 0);
9389 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9392 #ifdef TARGET_NR_readdir
9393 case TARGET_NR_readdir:
9396 #ifdef TARGET_NR_mmap
9397 case TARGET_NR_mmap:
9398 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9399 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9400 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9401 || defined(TARGET_S390X)
9404 abi_ulong v1, v2, v3, v4, v5, v6;
9405 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9413 unlock_user(v, arg1, 0);
9414 ret = get_errno(target_mmap(v1, v2, v3,
9415 target_to_host_bitmask(v4, mmap_flags_tbl),
9419 ret = get_errno(target_mmap(arg1, arg2, arg3,
9420 target_to_host_bitmask(arg4, mmap_flags_tbl),
9426 #ifdef TARGET_NR_mmap2
9427 case TARGET_NR_mmap2:
9429 #define MMAP_SHIFT 12
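/* mmap2 takes its file offset in units of 1 << MMAP_SHIFT (4096) bytes
 * so that 32-bit guests can map file ranges beyond 4 GiB; the shift
 * below converts back to the byte offset the host expects, e.g.
 * arg6 == 3 selects byte offset 3 << 12 == 12288.
 */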
9431 ret = get_errno(target_mmap(arg1, arg2, arg3,
9432 target_to_host_bitmask(arg4, mmap_flags_tbl),
9434 arg6 << MMAP_SHIFT));
9437 case TARGET_NR_munmap:
9438 ret = get_errno(target_munmap(arg1, arg2));
9440 case TARGET_NR_mprotect:
9442 TaskState *ts = cpu->opaque;
9443 /* Special hack to detect libc making the stack executable. */
9444 if ((arg3 & PROT_GROWSDOWN)
9445 && arg1 >= ts->info->stack_limit
9446 && arg1 <= ts->info->start_stack) {
9447 arg3 &= ~PROT_GROWSDOWN;
9448 arg2 = arg2 + arg1 - ts->info->stack_limit;
9449 arg1 = ts->info->stack_limit;
9452 ret = get_errno(target_mprotect(arg1, arg2, arg3));
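/* Worked example of the PROT_GROWSDOWN fixup above: given a guest stack
 * mapping [stack_limit, start_stack) and a call
 * mprotect(addr, len, prot | PROT_GROWSDOWN) landing inside it, the
 * request is rewritten to cover [stack_limit, addr + len) with the flag
 * cleared, mirroring the extension the host kernel would have applied.
 */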
9454 #ifdef TARGET_NR_mremap
9455 case TARGET_NR_mremap:
9456 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9459 /* ??? msync/mlock/munlock are broken for softmmu. */
9460 #ifdef TARGET_NR_msync
9461 case TARGET_NR_msync:
9462 ret = get_errno(msync(g2h(arg1), arg2, arg3));
9465 #ifdef TARGET_NR_mlock
9466 case TARGET_NR_mlock:
9467 ret = get_errno(mlock(g2h(arg1), arg2));
9470 #ifdef TARGET_NR_munlock
9471 case TARGET_NR_munlock:
9472 ret = get_errno(munlock(g2h(arg1), arg2));
9475 #ifdef TARGET_NR_mlockall
9476 case TARGET_NR_mlockall:
9477 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9480 #ifdef TARGET_NR_munlockall
9481 case TARGET_NR_munlockall:
9482 ret = get_errno(munlockall());
9485 case TARGET_NR_truncate:
9486 if (!(p = lock_user_string(arg1)))
9488 ret = get_errno(truncate(p, arg2));
9489 unlock_user(p, arg1, 0);
9491 case TARGET_NR_ftruncate:
9492 ret = get_errno(ftruncate(arg1, arg2));
9494 case TARGET_NR_fchmod:
9495 ret = get_errno(fchmod(arg1, arg2));
9497 #if defined(TARGET_NR_fchmodat)
9498 case TARGET_NR_fchmodat:
9499 if (!(p = lock_user_string(arg2)))
9501 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9502 unlock_user(p, arg2, 0);
9505 case TARGET_NR_getpriority:
9506 /* Note that negative values are valid for getpriority, so we must
9507 differentiate based on errno settings. */
9509 ret = getpriority(arg1, arg2);
9510 if (ret == -1 && errno != 0) {
9511 ret = -host_to_target_errno(errno);
9515 /* Return value is the unbiased priority. Signal no error. */
9516 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9518 /* Return value is a biased priority to avoid negative numbers. */
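/* e.g. a nice value of -5 comes back as 25: the raw syscall biases the
 * result as 20 - nice (range 1..40) precisely so that callers can tell
 * a legitimately negative priority apart from a -1/errno failure.
 */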
9522 case TARGET_NR_setpriority:
9523 ret = get_errno(setpriority(arg1, arg2, arg3));
9525 #ifdef TARGET_NR_profil
9526 case TARGET_NR_profil:
9529 case TARGET_NR_statfs:
9530 if (!(p = lock_user_string(arg1)))
9532 ret = get_errno(statfs(path(p), &stfs));
9533 unlock_user(p, arg1, 0);
9535 if (!is_error(ret)) {
9536 struct target_statfs *target_stfs;
9538 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9540 __put_user(stfs.f_type, &target_stfs->f_type);
9541 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9542 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9543 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9544 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9545 __put_user(stfs.f_files, &target_stfs->f_files);
9546 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9547 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9548 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9549 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9550 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9551 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9552 unlock_user_struct(target_stfs, arg2, 1);
9555 case TARGET_NR_fstatfs:
9556 ret = get_errno(fstatfs(arg1, &stfs));
9557 goto convert_statfs;
9558 #ifdef TARGET_NR_statfs64
9559 case TARGET_NR_statfs64:
9560 if (!(p = lock_user_string(arg1)))
9562 ret = get_errno(statfs(path(p), &stfs));
9563 unlock_user(p, arg1, 0);
9565 if (!is_error(ret)) {
9566 struct target_statfs64 *target_stfs;
9568 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9570 __put_user(stfs.f_type, &target_stfs->f_type);
9571 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9572 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9573 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9574 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9575 __put_user(stfs.f_files, &target_stfs->f_files);
9576 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9577 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9578 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9579 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9580 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9581 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9582 unlock_user_struct(target_stfs, arg3, 1);
9585 case TARGET_NR_fstatfs64:
9586 ret = get_errno(fstatfs(arg1, &stfs));
9587 goto convert_statfs64;
9589 #ifdef TARGET_NR_ioperm
9590 case TARGET_NR_ioperm:
9593 #ifdef TARGET_NR_socketcall
9594 case TARGET_NR_socketcall:
9595 ret = do_socketcall(arg1, arg2);
9598 #ifdef TARGET_NR_accept
9599 case TARGET_NR_accept:
9600 ret = do_accept4(arg1, arg2, arg3, 0);
9603 #ifdef TARGET_NR_accept4
9604 case TARGET_NR_accept4:
9605 ret = do_accept4(arg1, arg2, arg3, arg4);
9608 #ifdef TARGET_NR_bind
9609 case TARGET_NR_bind:
9610 ret = do_bind(arg1, arg2, arg3);
9613 #ifdef TARGET_NR_connect
9614 case TARGET_NR_connect:
9615 ret = do_connect(arg1, arg2, arg3);
9618 #ifdef TARGET_NR_getpeername
9619 case TARGET_NR_getpeername:
9620 ret = do_getpeername(arg1, arg2, arg3);
9623 #ifdef TARGET_NR_getsockname
9624 case TARGET_NR_getsockname:
9625 ret = do_getsockname(arg1, arg2, arg3);
9628 #ifdef TARGET_NR_getsockopt
9629 case TARGET_NR_getsockopt:
9630 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9633 #ifdef TARGET_NR_listen
9634 case TARGET_NR_listen:
9635 ret = get_errno(listen(arg1, arg2));
9638 #ifdef TARGET_NR_recv
9639 case TARGET_NR_recv:
9640 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9643 #ifdef TARGET_NR_recvfrom
9644 case TARGET_NR_recvfrom:
9645 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9648 #ifdef TARGET_NR_recvmsg
9649 case TARGET_NR_recvmsg:
9650 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9653 #ifdef TARGET_NR_send
9654 case TARGET_NR_send:
9655 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9658 #ifdef TARGET_NR_sendmsg
9659 case TARGET_NR_sendmsg:
9660 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9663 #ifdef TARGET_NR_sendmmsg
9664 case TARGET_NR_sendmmsg:
9665 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9667 case TARGET_NR_recvmmsg:
9668 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9671 #ifdef TARGET_NR_sendto
9672 case TARGET_NR_sendto:
9673 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9676 #ifdef TARGET_NR_shutdown
9677 case TARGET_NR_shutdown:
9678 ret = get_errno(shutdown(arg1, arg2));
9681 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9682 case TARGET_NR_getrandom:
9683 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9687 ret = get_errno(getrandom(p, arg2, arg3));
9688 unlock_user(p, arg1, ret);
9691 #ifdef TARGET_NR_socket
9692 case TARGET_NR_socket:
9693 ret = do_socket(arg1, arg2, arg3);
9696 #ifdef TARGET_NR_socketpair
9697 case TARGET_NR_socketpair:
9698 ret = do_socketpair(arg1, arg2, arg3, arg4);
9701 #ifdef TARGET_NR_setsockopt
9702 case TARGET_NR_setsockopt:
9703 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9706 #if defined(TARGET_NR_syslog)
9707 case TARGET_NR_syslog:
9712 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9713 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9714 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9715 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9716 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9717 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9718 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9719 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9721 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9724 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9725 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9726 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9728 ret = -TARGET_EINVAL;
9736 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9738 ret = -TARGET_EFAULT;
9741 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9742 unlock_user(p, arg2, arg3);
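/* Note the split above: only the three READ* actions dereference the
 * user buffer, so every other action is forwarded with a NULL pointer
 * and just the type and length arguments intact.
 */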
9752 case TARGET_NR_setitimer:
9754 struct itimerval value, ovalue, *pvalue;
9758 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9759 || copy_from_user_timeval(&pvalue->it_value,
9760 arg2 + sizeof(struct target_timeval)))
9765 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9766 if (!is_error(ret) && arg3) {
9767 if (copy_to_user_timeval(arg3,
9768 &ovalue.it_interval)
9769 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9775 case TARGET_NR_getitimer:
9777 struct itimerval value;
9779 ret = get_errno(getitimer(arg1, &value));
9780 if (!is_error(ret) && arg2) {
9781 if (copy_to_user_timeval(arg2,
9783 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9789 #ifdef TARGET_NR_stat
9790 case TARGET_NR_stat:
9791 if (!(p = lock_user_string(arg1)))
9793 ret = get_errno(stat(path(p), &st));
9794 unlock_user(p, arg1, 0);
9797 #ifdef TARGET_NR_lstat
9798 case TARGET_NR_lstat:
9799 if (!(p = lock_user_string(arg1)))
9801 ret = get_errno(lstat(path(p), &st));
9802 unlock_user(p, arg1, 0);
9805 case TARGET_NR_fstat:
9807 ret = get_errno(fstat(arg1, &st));
9808 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9811 if (!is_error(ret)) {
9812 struct target_stat *target_st;
9814 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9816 memset(target_st, 0, sizeof(*target_st));
9817 __put_user(st.st_dev, &target_st->st_dev);
9818 __put_user(st.st_ino, &target_st->st_ino);
9819 __put_user(st.st_mode, &target_st->st_mode);
9820 __put_user(st.st_uid, &target_st->st_uid);
9821 __put_user(st.st_gid, &target_st->st_gid);
9822 __put_user(st.st_nlink, &target_st->st_nlink);
9823 __put_user(st.st_rdev, &target_st->st_rdev);
9824 __put_user(st.st_size, &target_st->st_size);
9825 __put_user(st.st_blksize, &target_st->st_blksize);
9826 __put_user(st.st_blocks, &target_st->st_blocks);
9827 __put_user(st.st_atime, &target_st->target_st_atime);
9828 __put_user(st.st_mtime, &target_st->target_st_mtime);
9829 __put_user(st.st_ctime, &target_st->target_st_ctime);
9830 unlock_user_struct(target_st, arg2, 1);
9834 #ifdef TARGET_NR_olduname
9835 case TARGET_NR_olduname:
9838 #ifdef TARGET_NR_iopl
9839 case TARGET_NR_iopl:
9842 case TARGET_NR_vhangup:
9843 ret = get_errno(vhangup());
9845 #ifdef TARGET_NR_idle
9846 case TARGET_NR_idle:
9849 #ifdef TARGET_NR_syscall
9850 case TARGET_NR_syscall:
9851 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9852 arg6, arg7, arg8, 0);
9855 case TARGET_NR_wait4:
9858 abi_long status_ptr = arg2;
9859 struct rusage rusage, *rusage_ptr;
9860 abi_ulong target_rusage = arg4;
9861 abi_long rusage_err;
9863 rusage_ptr = &rusage;
9866 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9867 if (!is_error(ret)) {
9868 if (status_ptr && ret) {
9869 status = host_to_target_waitstatus(status);
9870 if (put_user_s32(status, status_ptr))
9873 if (target_rusage) {
9874 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9882 #ifdef TARGET_NR_swapoff
9883 case TARGET_NR_swapoff:
9884 if (!(p = lock_user_string(arg1)))
9886 ret = get_errno(swapoff(p));
9887 unlock_user(p, arg1, 0);
9890 case TARGET_NR_sysinfo:
9892 struct target_sysinfo *target_value;
9893 struct sysinfo value;
9894 ret = get_errno(sysinfo(&value));
9895 if (!is_error(ret) && arg1)
9897 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9899 __put_user(value.uptime, &target_value->uptime);
9900 __put_user(value.loads[0], &target_value->loads[0]);
9901 __put_user(value.loads[1], &target_value->loads[1]);
9902 __put_user(value.loads[2], &target_value->loads[2]);
9903 __put_user(value.totalram, &target_value->totalram);
9904 __put_user(value.freeram, &target_value->freeram);
9905 __put_user(value.sharedram, &target_value->sharedram);
9906 __put_user(value.bufferram, &target_value->bufferram);
9907 __put_user(value.totalswap, &target_value->totalswap);
9908 __put_user(value.freeswap, &target_value->freeswap);
9909 __put_user(value.procs, &target_value->procs);
9910 __put_user(value.totalhigh, &target_value->totalhigh);
9911 __put_user(value.freehigh, &target_value->freehigh);
9912 __put_user(value.mem_unit, &target_value->mem_unit);
9913 unlock_user_struct(target_value, arg1, 1);
9917 #ifdef TARGET_NR_ipc
9919 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9922 #ifdef TARGET_NR_semget
9923 case TARGET_NR_semget:
9924 ret = get_errno(semget(arg1, arg2, arg3));
9927 #ifdef TARGET_NR_semop
9928 case TARGET_NR_semop:
9929 ret = do_semop(arg1, arg2, arg3);
9932 #ifdef TARGET_NR_semctl
9933 case TARGET_NR_semctl:
9934 ret = do_semctl(arg1, arg2, arg3, arg4);
9937 #ifdef TARGET_NR_msgctl
9938 case TARGET_NR_msgctl:
9939 ret = do_msgctl(arg1, arg2, arg3);
9942 #ifdef TARGET_NR_msgget
9943 case TARGET_NR_msgget:
9944 ret = get_errno(msgget(arg1, arg2));
9947 #ifdef TARGET_NR_msgrcv
9948 case TARGET_NR_msgrcv:
9949 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9952 #ifdef TARGET_NR_msgsnd
9953 case TARGET_NR_msgsnd:
9954 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9957 #ifdef TARGET_NR_shmget
9958 case TARGET_NR_shmget:
9959 ret = get_errno(shmget(arg1, arg2, arg3));
9962 #ifdef TARGET_NR_shmctl
9963 case TARGET_NR_shmctl:
9964 ret = do_shmctl(arg1, arg2, arg3);
9967 #ifdef TARGET_NR_shmat
9968 case TARGET_NR_shmat:
9969 ret = do_shmat(cpu_env, arg1, arg2, arg3);
9972 #ifdef TARGET_NR_shmdt
9973 case TARGET_NR_shmdt:
9974 ret = do_shmdt(arg1);
9977 case TARGET_NR_fsync:
9978 ret = get_errno(fsync(arg1));
9980 case TARGET_NR_clone:
9981 /* Linux manages to have three different orderings for its
9982 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9983 * match the kernel's CONFIG_CLONE_* settings.
9984 * Microblaze is further special in that it uses a sixth
9985 * implicit argument to clone for the TLS pointer.
9987 #if defined(TARGET_MICROBLAZE)
9988 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9989 #elif defined(TARGET_CLONE_BACKWARDS)
9990 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9991 #elif defined(TARGET_CLONE_BACKWARDS2)
9992 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9994 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
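/* For reference, the kernel argument orders being matched are roughly:
 *
 *   CLONE_BACKWARDS:   clone(flags, newsp, parent_tidptr, tls, child_tidptr)
 *   CLONE_BACKWARDS2:  clone(newsp, flags, parent_tidptr, child_tidptr, tls)
 *   default:           clone(flags, newsp, parent_tidptr, child_tidptr, tls)
 *
 * hence the different permutations of arg1..arg6 passed to do_fork()
 * above.
 */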
9997 #ifdef __NR_exit_group
9998 /* new thread calls */
9999 case TARGET_NR_exit_group:
10000 #ifdef TARGET_GPROF
10003 gdb_exit(cpu_env, arg1);
10004 ret = get_errno(exit_group(arg1));
10007 case TARGET_NR_setdomainname:
10008 if (!(p = lock_user_string(arg1)))
10010 ret = get_errno(setdomainname(p, arg2));
10011 unlock_user(p, arg1, 0);
10013 case TARGET_NR_uname:
10014 /* No need to transcode because we use the Linux syscall. */
10016 struct new_utsname * buf;
10018 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10020 ret = get_errno(sys_uname(buf));
10021 if (!is_error(ret)) {
10022 /* Overwrite the native machine name with whatever is being emulated. */
10024 strcpy(buf->machine, cpu_to_uname_machine(cpu_env));
10025 /* Allow the user to override the reported release. */
10026 if (qemu_uname_release && *qemu_uname_release) {
10027 g_strlcpy(buf->release, qemu_uname_release,
10028 sizeof(buf->release));
10031 unlock_user_struct(buf, arg1, 1);
10035 case TARGET_NR_modify_ldt:
10036 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
10038 #if !defined(TARGET_X86_64)
10039 case TARGET_NR_vm86old:
10040 goto unimplemented;
10041 case TARGET_NR_vm86:
10042 ret = do_vm86(cpu_env, arg1, arg2);
10046 case TARGET_NR_adjtimex:
10048 struct timex host_buf;
10050 if (target_to_host_timex(&host_buf, arg1) != 0) {
10053 ret = get_errno(adjtimex(&host_buf));
10054 if (!is_error(ret)) {
10055 if (host_to_target_timex(arg1, &host_buf) != 0) {
10061 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10062 case TARGET_NR_clock_adjtime:
10064 struct timex htx, *phtx = &htx;
10066 if (target_to_host_timex(phtx, arg2) != 0) {
10069 ret = get_errno(clock_adjtime(arg1, phtx));
10070 if (!is_error(ret) && phtx) {
10071 if (host_to_target_timex(arg2, phtx) != 0) {
10078 #ifdef TARGET_NR_create_module
10079 case TARGET_NR_create_module:
10081 case TARGET_NR_init_module:
10082 case TARGET_NR_delete_module:
10083 #ifdef TARGET_NR_get_kernel_syms
10084 case TARGET_NR_get_kernel_syms:
10086 goto unimplemented;
10087 case TARGET_NR_quotactl:
10088 goto unimplemented;
10089 case TARGET_NR_getpgid:
10090 ret = get_errno(getpgid(arg1));
10092 case TARGET_NR_fchdir:
10093 ret = get_errno(fchdir(arg1));
10095 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10096 case TARGET_NR_bdflush:
10097 goto unimplemented;
10099 #ifdef TARGET_NR_sysfs
10100 case TARGET_NR_sysfs:
10101 goto unimplemented;
10103 case TARGET_NR_personality:
10104 ret = get_errno(personality(arg1));
10106 #ifdef TARGET_NR_afs_syscall
10107 case TARGET_NR_afs_syscall:
10108 goto unimplemented;
10110 #ifdef TARGET_NR__llseek /* Not on alpha */
10111 case TARGET_NR__llseek:
10114 #if !defined(__NR_llseek)
10115 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10117 ret = get_errno(res);
10122 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10124 if ((ret == 0) && put_user_s64(res, arg4)) {
10130 #ifdef TARGET_NR_getdents
10131 case TARGET_NR_getdents:
10132 #ifdef __NR_getdents
10133 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10135 struct target_dirent *target_dirp;
10136 struct linux_dirent *dirp;
10137 abi_long count = arg3;
10139 dirp = g_try_malloc(count);
10141 ret = -TARGET_ENOMEM;
10145 ret = get_errno(sys_getdents(arg1, dirp, count));
10146 if (!is_error(ret)) {
10147 struct linux_dirent *de;
10148 struct target_dirent *tde;
10150 int reclen, treclen;
10151 int count1, tnamelen;
10155 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10159 reclen = de->d_reclen;
10160 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10161 assert(tnamelen >= 0);
10162 treclen = tnamelen + offsetof(struct target_dirent, d_name);
10163 assert(count1 + treclen <= count);
10164 tde->d_reclen = tswap16(treclen);
10165 tde->d_ino = tswapal(de->d_ino);
10166 tde->d_off = tswapal(de->d_off);
10167 memcpy(tde->d_name, de->d_name, tnamelen);
10168 de = (struct linux_dirent *)((char *)de + reclen);
10170 tde = (struct target_dirent *)((char *)tde + treclen);
10174 unlock_user(target_dirp, arg2, ret);
10180 struct linux_dirent *dirp;
10181 abi_long count = arg3;
10183 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10185 ret = get_errno(sys_getdents(arg1, dirp, count));
10186 if (!is_error(ret)) {
10187 struct linux_dirent *de;
10192 reclen = de->d_reclen;
10195 de->d_reclen = tswap16(reclen);
10196 tswapls(&de->d_ino);
10197 tswapls(&de->d_off);
10198 de = (struct linux_dirent *)((char *)de + reclen);
10202 unlock_user(dirp, arg2, ret);
10206 /* Implement getdents in terms of getdents64 */
10208 struct linux_dirent64 *dirp;
10209 abi_long count = arg3;
10211 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10215 ret = get_errno(sys_getdents64(arg1, dirp, count));
10216 if (!is_error(ret)) {
10217 /* Convert the dirent64 structs to target dirent. We do this
10218 * in-place, since we can guarantee that a target_dirent is no
10219 * larger than a dirent64; however this means we have to be
10220 * careful to read everything before writing in the new format.
10222 struct linux_dirent64 *de;
10223 struct target_dirent *tde;
10228 tde = (struct target_dirent *)dirp;
10230 int namelen, treclen;
10231 int reclen = de->d_reclen;
10232 uint64_t ino = de->d_ino;
10233 int64_t off = de->d_off;
10234 uint8_t type = de->d_type;
10236 namelen = strlen(de->d_name);
10237 treclen = offsetof(struct target_dirent, d_name)
10239 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10241 memmove(tde->d_name, de->d_name, namelen + 1);
10242 tde->d_ino = tswapal(ino);
10243 tde->d_off = tswapal(off);
10244 tde->d_reclen = tswap16(treclen);
10245 /* The target_dirent type is in what was formerly a padding
10246 * byte at the end of the structure:
10248 *(((char *)tde) + treclen - 1) = type;
10250 de = (struct linux_dirent64 *)((char *)de + reclen);
10251 tde = (struct target_dirent *)((char *)tde + treclen);
10257 unlock_user(dirp, arg2, ret);
10261 #endif /* TARGET_NR_getdents */
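/* Layout note for the in-place getdents conversion above (field widths
 * abbreviated):
 *
 *     struct linux_dirent64 { u64 d_ino; s64 d_off; u16 d_reclen;
 *                             u8 d_type; char d_name[]; };
 *     struct target_dirent  { abi_ulong d_ino; abi_ulong d_off;
 *                             u16 d_reclen; char d_name[]; };
 *
 * abi_ulong is never wider than 64 bits, so each record can only shrink
 * in conversion, and reading a whole dirent64 entry before overwriting
 * it in target format is safe.
 */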
10262 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10263 case TARGET_NR_getdents64:
10265 struct linux_dirent64 *dirp;
10266 abi_long count = arg3;
10267 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10269 ret = get_errno(sys_getdents64(arg1, dirp, count));
10270 if (!is_error(ret)) {
10271 struct linux_dirent64 *de;
10276 reclen = de->d_reclen;
10279 de->d_reclen = tswap16(reclen);
10280 tswap64s((uint64_t *)&de->d_ino);
10281 tswap64s((uint64_t *)&de->d_off);
10282 de = (struct linux_dirent64 *)((char *)de + reclen);
10286 unlock_user(dirp, arg2, ret);
10289 #endif /* TARGET_NR_getdents64 */
10290 #if defined(TARGET_NR__newselect)
10291 case TARGET_NR__newselect:
10292 ret = do_select(arg1, arg2, arg3, arg4, arg5);
10295 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10296 # ifdef TARGET_NR_poll
10297 case TARGET_NR_poll:
10299 # ifdef TARGET_NR_ppoll
10300 case TARGET_NR_ppoll:
10303 struct target_pollfd *target_pfd;
10304 unsigned int nfds = arg2;
10305 struct pollfd *pfd;
10311 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10312 ret = -TARGET_EINVAL;
10316 target_pfd = lock_user(VERIFY_WRITE, arg1,
10317 sizeof(struct target_pollfd) * nfds, 1);
10322 pfd = alloca(sizeof(struct pollfd) * nfds);
10323 for (i = 0; i < nfds; i++) {
10324 pfd[i].fd = tswap32(target_pfd[i].fd);
10325 pfd[i].events = tswap16(target_pfd[i].events);
10330 # ifdef TARGET_NR_ppoll
10331 case TARGET_NR_ppoll:
10333 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10334 target_sigset_t *target_set;
10335 sigset_t _set, *set = &_set;
10338 if (target_to_host_timespec(timeout_ts, arg3)) {
10339 unlock_user(target_pfd, arg1, 0);
10347 if (arg5 != sizeof(target_sigset_t)) {
10348 unlock_user(target_pfd, arg1, 0);
10349 ret = -TARGET_EINVAL;
10353 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10355 unlock_user(target_pfd, arg1, 0);
10358 target_to_host_sigset(set, target_set);
10363 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10364 set, SIGSET_T_SIZE));
10366 if (!is_error(ret) && arg3) {
10367 host_to_target_timespec(arg3, timeout_ts);
10370 unlock_user(target_set, arg4, 0);
10375 # ifdef TARGET_NR_poll
10376 case TARGET_NR_poll:
10378 struct timespec ts, *pts;
10381 /* Convert ms to secs, ns */
10382 ts.tv_sec = arg3 / 1000;
10383 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10386 /* -ve poll() timeout means "infinite" */
10389 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10394 g_assert_not_reached();
10397 if (!is_error(ret)) {
10398 for (i = 0; i < nfds; i++) {
10399 target_pfd[i].revents = tswap16(pfd[i].revents);
10402 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10406 case TARGET_NR_flock:
10407 /* NOTE: the flock constant seems to be the same for every Linux platform. */
10409 ret = get_errno(safe_flock(arg1, arg2));
10411 case TARGET_NR_readv:
10413 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10415 ret = get_errno(safe_readv(arg1, vec, arg3));
10416 unlock_iovec(vec, arg2, arg3, 1);
10418 ret = -host_to_target_errno(errno);
10422 case TARGET_NR_writev:
10424 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10426 ret = get_errno(safe_writev(arg1, vec, arg3));
10427 unlock_iovec(vec, arg2, arg3, 0);
10429 ret = -host_to_target_errno(errno);
10433 #if defined(TARGET_NR_preadv)
10434 case TARGET_NR_preadv:
10436 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10438 ret = get_errno(safe_preadv(arg1, vec, arg3, arg4, arg5));
10439 unlock_iovec(vec, arg2, arg3, 1);
10441 ret = -host_to_target_errno(errno);
10446 #if defined(TARGET_NR_pwritev)
10447 case TARGET_NR_pwritev:
10449 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10451 ret = get_errno(safe_pwritev(arg1, vec, arg3, arg4, arg5));
10452 unlock_iovec(vec, arg2, arg3, 0);
10454 ret = -host_to_target_errno(errno);
10459 case TARGET_NR_getsid:
10460 ret = get_errno(getsid(arg1));
10462 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10463 case TARGET_NR_fdatasync:
10464 ret = get_errno(fdatasync(arg1));
10467 #ifdef TARGET_NR__sysctl
10468 case TARGET_NR__sysctl:
10469 /* We don't implement this, but ENOTDIR is always a safe return value. */
10471 ret = -TARGET_ENOTDIR;
10474 case TARGET_NR_sched_getaffinity:
10476 unsigned int mask_size;
10477 unsigned long *mask;
10480 * sched_getaffinity needs multiples of ulong, so need to take
10481 * care of mismatches between target ulong and host ulong sizes.
10483 if (arg2 & (sizeof(abi_ulong) - 1)) {
10484 ret = -TARGET_EINVAL;
10487 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10489 mask = alloca(mask_size);
10490 memset(mask, 0, mask_size);
10491 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10493 if (!is_error(ret)) {
10495 /* More data returned than the caller's buffer will fit.
10496 * This only happens if sizeof(abi_long) < sizeof(long)
10497 * and the caller passed us a buffer holding an odd number
10498 * of abi_longs. If the host kernel is actually using the
10499 * extra 4 bytes then fail EINVAL; otherwise we can just
10500 * ignore them and only copy the interesting part.
10502 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10503 if (numcpus > arg2 * 8) {
10504 ret = -TARGET_EINVAL;
10510 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10516 case TARGET_NR_sched_setaffinity:
10518 unsigned int mask_size;
10519 unsigned long *mask;
10522 * sched_setaffinity needs multiples of ulong, so need to take
10523 * care of mismatches between target ulong and host ulong sizes.
10525 if (arg2 & (sizeof(abi_ulong) - 1)) {
10526 ret = -TARGET_EINVAL;
10529 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10530 mask = alloca(mask_size);
10532 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10537 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
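/* Example of the size fixup both affinity cases apply: a guest with a
 * 32-bit abi_ulong may legally pass arg2 == 4, but the host kernel
 * insists on multiples of sizeof(unsigned long), so on a 64-bit host
 * the rounding above yields
 *
 *     mask_size = (4 + 7) & ~7;   i.e. 8 bytes, one host ulong
 */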
10540 case TARGET_NR_getcpu:
10542 unsigned cpu, node;
10543 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10544 arg2 ? &node : NULL,
10546 if (is_error(ret)) {
10549 if (arg1 && put_user_u32(cpu, arg1)) {
10552 if (arg2 && put_user_u32(node, arg2)) {
10557 case TARGET_NR_sched_setparam:
10559 struct sched_param *target_schp;
10560 struct sched_param schp;
10563 return -TARGET_EINVAL;
10565 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10567 schp.sched_priority = tswap32(target_schp->sched_priority);
10568 unlock_user_struct(target_schp, arg2, 0);
10569 ret = get_errno(sched_setparam(arg1, &schp));
10572 case TARGET_NR_sched_getparam:
10574 struct sched_param *target_schp;
10575 struct sched_param schp;
10578 return -TARGET_EINVAL;
10580 ret = get_errno(sched_getparam(arg1, &schp));
10581 if (!is_error(ret)) {
10582 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10584 target_schp->sched_priority = tswap32(schp.sched_priority);
10585 unlock_user_struct(target_schp, arg2, 1);
10589 case TARGET_NR_sched_setscheduler:
10591 struct sched_param *target_schp;
10592 struct sched_param schp;
10594 return -TARGET_EINVAL;
10596 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10598 schp.sched_priority = tswap32(target_schp->sched_priority);
10599 unlock_user_struct(target_schp, arg3, 0);
10600 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10603 case TARGET_NR_sched_getscheduler:
10604 ret = get_errno(sched_getscheduler(arg1));
10606 case TARGET_NR_sched_yield:
10607 ret = get_errno(sched_yield());
10609 case TARGET_NR_sched_get_priority_max:
10610 ret = get_errno(sched_get_priority_max(arg1));
10612 case TARGET_NR_sched_get_priority_min:
10613 ret = get_errno(sched_get_priority_min(arg1));
10615 case TARGET_NR_sched_rr_get_interval:
10617 struct timespec ts;
10618 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10619 if (!is_error(ret)) {
10620 ret = host_to_target_timespec(arg2, &ts);
10624 case TARGET_NR_nanosleep:
10626 struct timespec req, rem;
10627 target_to_host_timespec(&req, arg1);
10628 ret = get_errno(safe_nanosleep(&req, &rem));
10629 if (is_error(ret) && arg2) {
10630 host_to_target_timespec(arg2, &rem);
10634 #ifdef TARGET_NR_query_module
10635 case TARGET_NR_query_module:
10636 goto unimplemented;
10638 #ifdef TARGET_NR_nfsservctl
10639 case TARGET_NR_nfsservctl:
10640 goto unimplemented;
10642 case TARGET_NR_prctl:
10644 case PR_GET_PDEATHSIG:
10647 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10648 if (!is_error(ret) && arg2
10649 && put_user_ual(deathsig, arg2)) {
10657 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10661 ret = get_errno(prctl(arg1, (unsigned long)name,
10662 arg3, arg4, arg5));
10663 unlock_user(name, arg2, 16);
10668 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10672 ret = get_errno(prctl(arg1, (unsigned long)name,
10673 arg3, arg4, arg5));
10674 unlock_user(name, arg2, 0);
10678 #ifdef TARGET_AARCH64
10679 case TARGET_PR_SVE_SET_VL:
10680 /* We cannot support either PR_SVE_SET_VL_ONEXEC
10681 or PR_SVE_VL_INHERIT. Therefore, anything above
10682 ARM_MAX_VQ results in EINVAL. */
10683 ret = -TARGET_EINVAL;
10684 if (arm_feature(cpu_env, ARM_FEATURE_SVE)
10685 && arg2 >= 0 && arg2 <= ARM_MAX_VQ * 16 && !(arg2 & 15)) {
10686 CPUARMState *env = cpu_env;
10687 int old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10688 int vq = MAX(arg2 / 16, 1);
10691 aarch64_sve_narrow_vq(env, vq);
10693 env->vfp.zcr_el[1] = vq - 1;
10697 case TARGET_PR_SVE_GET_VL:
10698 ret = -TARGET_EINVAL;
10699 if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
10700 CPUARMState *env = cpu_env;
10701 ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
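/* zcr_el[1] holds the vector length as (quadwords - 1): a stored value
 * of 1 means vq == 2, i.e. a 256-bit vector, so the prctl above
 * returns (1 + 1) * 16 == 32 bytes.
 */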
10704 #endif /* AARCH64 */
10705 case PR_GET_SECCOMP:
10706 case PR_SET_SECCOMP:
10707 /* Disable seccomp to prevent the target disabling syscalls we need. */
10709 ret = -TARGET_EINVAL;
10712 /* Most prctl options have no pointer arguments */
10713 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10717 #ifdef TARGET_NR_arch_prctl
10718 case TARGET_NR_arch_prctl:
10719 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10720 ret = do_arch_prctl(cpu_env, arg1, arg2);
10723 goto unimplemented;
10726 #ifdef TARGET_NR_pread64
10727 case TARGET_NR_pread64:
10728 if (regpairs_aligned(cpu_env, num)) {
10732 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10734 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10735 unlock_user(p, arg2, ret);
10737 case TARGET_NR_pwrite64:
10738 if (regpairs_aligned(cpu_env, num)) {
10742 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10744 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10745 unlock_user(p, arg2, 0);
10748 case TARGET_NR_getcwd:
10749 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10751 ret = get_errno(sys_getcwd1(p, arg2));
10752 unlock_user(p, arg1, ret);
10754 case TARGET_NR_capget:
10755 case TARGET_NR_capset:
10757 struct target_user_cap_header *target_header;
10758 struct target_user_cap_data *target_data = NULL;
10759 struct __user_cap_header_struct header;
10760 struct __user_cap_data_struct data[2];
10761 struct __user_cap_data_struct *dataptr = NULL;
10762 int i, target_datalen;
10763 int data_items = 1;
10765 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10768 header.version = tswap32(target_header->version);
10769 header.pid = tswap32(target_header->pid);
10771 if (header.version != _LINUX_CAPABILITY_VERSION) {
10772 /* Version 2 and up takes pointer to two user_data structs */
10776 target_datalen = sizeof(*target_data) * data_items;
10779 if (num == TARGET_NR_capget) {
10780 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10782 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10784 if (!target_data) {
10785 unlock_user_struct(target_header, arg1, 0);
10789 if (num == TARGET_NR_capset) {
10790 for (i = 0; i < data_items; i++) {
10791 data[i].effective = tswap32(target_data[i].effective);
10792 data[i].permitted = tswap32(target_data[i].permitted);
10793 data[i].inheritable = tswap32(target_data[i].inheritable);
10800 if (num == TARGET_NR_capget) {
10801 ret = get_errno(capget(&header, dataptr));
10803 ret = get_errno(capset(&header, dataptr));
10806 /* The kernel always updates version for both capget and capset */
10807 target_header->version = tswap32(header.version);
10808 unlock_user_struct(target_header, arg1, 1);
10811 if (num == TARGET_NR_capget) {
10812 for (i = 0; i < data_items; i++) {
10813 target_data[i].effective = tswap32(data[i].effective);
10814 target_data[i].permitted = tswap32(data[i].permitted);
10815 target_data[i].inheritable = tswap32(data[i].inheritable);
10817 unlock_user(target_data, arg2, target_datalen);
10819 unlock_user(target_data, arg2, 0);
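/* Capability ABI background for the version check above: the v1 ABI
 * (_LINUX_CAPABILITY_VERSION) carries each capability set in a single
 * 32-bit word, while the v2/v3 ABIs widen the sets to 64 bits and split
 * them across two user_cap_data structs, hence data_items == 2 for
 * anything newer than v1.
 */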
10824 case TARGET_NR_sigaltstack:
10825 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10828 #ifdef CONFIG_SENDFILE
10829 case TARGET_NR_sendfile:
10831 off_t *offp = NULL;
10834 ret = get_user_sal(off, arg3);
10835 if (is_error(ret)) {
10840 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10841 if (!is_error(ret) && arg3) {
10842 abi_long ret2 = put_user_sal(off, arg3);
10843 if (is_error(ret2)) {
10849 #ifdef TARGET_NR_sendfile64
10850 case TARGET_NR_sendfile64:
10852 off_t *offp = NULL;
10855 ret = get_user_s64(off, arg3);
10856 if (is_error(ret)) {
10861 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10862 if (!is_error(ret) && arg3) {
10863 abi_long ret2 = put_user_s64(off, arg3);
10864 if (is_error(ret2)) {
10872 case TARGET_NR_sendfile:
10873 #ifdef TARGET_NR_sendfile64
10874 case TARGET_NR_sendfile64:
10876 goto unimplemented;
10879 #ifdef TARGET_NR_getpmsg
10880 case TARGET_NR_getpmsg:
10881 goto unimplemented;
10883 #ifdef TARGET_NR_putpmsg
10884 case TARGET_NR_putpmsg:
10885 goto unimplemented;
10887 #ifdef TARGET_NR_vfork
10888 case TARGET_NR_vfork:
10889 ret = get_errno(do_fork(cpu_env,
10890 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10894 #ifdef TARGET_NR_ugetrlimit
10895 case TARGET_NR_ugetrlimit:
10897 struct rlimit rlim;
10898 int resource = target_to_host_resource(arg1);
10899 ret = get_errno(getrlimit(resource, &rlim));
10900 if (!is_error(ret)) {
10901 struct target_rlimit *target_rlim;
10902 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10904 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10905 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10906 unlock_user_struct(target_rlim, arg2, 1);
10911 #ifdef TARGET_NR_truncate64
10912 case TARGET_NR_truncate64:
10913 if (!(p = lock_user_string(arg1)))
10915 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10916 unlock_user(p, arg1, 0);
10919 #ifdef TARGET_NR_ftruncate64
10920 case TARGET_NR_ftruncate64:
10921 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10924 #ifdef TARGET_NR_stat64
10925 case TARGET_NR_stat64:
10926 if (!(p = lock_user_string(arg1)))
10928 ret = get_errno(stat(path(p), &st));
10929 unlock_user(p, arg1, 0);
10930 if (!is_error(ret))
10931 ret = host_to_target_stat64(cpu_env, arg2, &st);
10934 #ifdef TARGET_NR_lstat64
10935 case TARGET_NR_lstat64:
10936 if (!(p = lock_user_string(arg1)))
10938 ret = get_errno(lstat(path(p), &st));
10939 unlock_user(p, arg1, 0);
10940 if (!is_error(ret))
10941 ret = host_to_target_stat64(cpu_env, arg2, &st);
10944 #ifdef TARGET_NR_fstat64
10945 case TARGET_NR_fstat64:
10946 ret = get_errno(fstat(arg1, &st));
10947 if (!is_error(ret))
10948 ret = host_to_target_stat64(cpu_env, arg2, &st);
10951 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10952 #ifdef TARGET_NR_fstatat64
10953 case TARGET_NR_fstatat64:
10955 #ifdef TARGET_NR_newfstatat
10956 case TARGET_NR_newfstatat:
10958 if (!(p = lock_user_string(arg2)))
10960 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10961 if (!is_error(ret))
10962 ret = host_to_target_stat64(cpu_env, arg3, &st);
10965 #ifdef TARGET_NR_lchown
10966 case TARGET_NR_lchown:
10967 if (!(p = lock_user_string(arg1)))
10969 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10970 unlock_user(p, arg1, 0);
10973 #ifdef TARGET_NR_getuid
10974 case TARGET_NR_getuid:
10975 ret = get_errno(high2lowuid(getuid()));
10978 #ifdef TARGET_NR_getgid
10979 case TARGET_NR_getgid:
10980 ret = get_errno(high2lowgid(getgid()));
10983 #ifdef TARGET_NR_geteuid
10984 case TARGET_NR_geteuid:
10985 ret = get_errno(high2lowuid(geteuid()));
10988 #ifdef TARGET_NR_getegid
10989 case TARGET_NR_getegid:
10990 ret = get_errno(high2lowgid(getegid()));
10993 case TARGET_NR_setreuid:
10994 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10996 case TARGET_NR_setregid:
10997 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10999 case TARGET_NR_getgroups:
11001 int gidsetsize = arg1;
11002 target_id *target_grouplist;
11006 grouplist = alloca(gidsetsize * sizeof(gid_t));
11007 ret = get_errno(getgroups(gidsetsize, grouplist));
11008 if (gidsetsize == 0)
11010 if (!is_error(ret)) {
11011 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11012 if (!target_grouplist)
11014 for (i = 0; i < ret; i++)
11015 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11016 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11020 case TARGET_NR_setgroups:
11022 int gidsetsize = arg1;
11023 target_id *target_grouplist;
11024 gid_t *grouplist = NULL;
11027 grouplist = alloca(gidsetsize * sizeof(gid_t));
11028 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11029 if (!target_grouplist) {
11030 ret = -TARGET_EFAULT;
11033 for (i = 0; i < gidsetsize; i++) {
11034 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11036 unlock_user(target_grouplist, arg2, 0);
11038 ret = get_errno(setgroups(gidsetsize, grouplist));
11041 case TARGET_NR_fchown:
11042 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11044 #if defined(TARGET_NR_fchownat)
11045 case TARGET_NR_fchownat:
11046 if (!(p = lock_user_string(arg2)))
11048 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11049 low2highgid(arg4), arg5));
11050 unlock_user(p, arg2, 0);
11053 #ifdef TARGET_NR_setresuid
11054 case TARGET_NR_setresuid:
11055 ret = get_errno(sys_setresuid(low2highuid(arg1),
11057 low2highuid(arg3)));
11060 #ifdef TARGET_NR_getresuid
11061 case TARGET_NR_getresuid:
11063 uid_t ruid, euid, suid;
11064 ret = get_errno(getresuid(&ruid, &euid, &suid));
11065 if (!is_error(ret)) {
11066 if (put_user_id(high2lowuid(ruid), arg1)
11067 || put_user_id(high2lowuid(euid), arg2)
11068 || put_user_id(high2lowuid(suid), arg3))
11074 #ifdef TARGET_NR_getresgid
11075 case TARGET_NR_setresgid:
11076 ret = get_errno(sys_setresgid(low2highgid(arg1),
11078 low2highgid(arg3)));
11081 #ifdef TARGET_NR_getresgid
11082 case TARGET_NR_getresgid:
11084 gid_t rgid, egid, sgid;
11085 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11086 if (!is_error(ret)) {
11087 if (put_user_id(high2lowgid(rgid), arg1)
11088 || put_user_id(high2lowgid(egid), arg2)
11089 || put_user_id(high2lowgid(sgid), arg3))
11095 #ifdef TARGET_NR_chown
11096 case TARGET_NR_chown:
11097 if (!(p = lock_user_string(arg1)))
11099 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11100 unlock_user(p, arg1, 0);
11103 case TARGET_NR_setuid:
11104 ret = get_errno(sys_setuid(low2highuid(arg1)));
11106 case TARGET_NR_setgid:
11107 ret = get_errno(sys_setgid(low2highgid(arg1)));
11109 case TARGET_NR_setfsuid:
11110 ret = get_errno(setfsuid(arg1));
11112 case TARGET_NR_setfsgid:
11113 ret = get_errno(setfsgid(arg1));
11116 #ifdef TARGET_NR_lchown32
11117 case TARGET_NR_lchown32:
11118 if (!(p = lock_user_string(arg1)))
11120 ret = get_errno(lchown(p, arg2, arg3));
11121 unlock_user(p, arg1, 0);
11124 #ifdef TARGET_NR_getuid32
11125 case TARGET_NR_getuid32:
11126 ret = get_errno(getuid());
11130 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11131 /* Alpha specific */
11132 case TARGET_NR_getxuid:
11136 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11138 ret = get_errno(getuid());
11141 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11142 /* Alpha specific */
11143 case TARGET_NR_getxgid:
11147 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11149 ret = get_errno(getgid());
11152 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11153 /* Alpha specific */
11154 case TARGET_NR_osf_getsysinfo:
11155 ret = -TARGET_EOPNOTSUPP;
11157 case TARGET_GSI_IEEE_FP_CONTROL:
11159 uint64_t swcr, fpcr = cpu_alpha_load_fpcr(cpu_env);
11161 /* Copied from linux ieee_fpcr_to_swcr. */
11162 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
11163 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
11164 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
11165 | SWCR_TRAP_ENABLE_DZE
11166 | SWCR_TRAP_ENABLE_OVF);
11167 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
11168 | SWCR_TRAP_ENABLE_INE);
11169 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
11170 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
11172 if (put_user_u64(swcr, arg2))
11178 /* case GSI_IEEE_STATE_AT_SIGNAL:
11179 -- Not implemented in linux kernel.
11180 case GSI_UACPROC:
11181 -- Retrieves current unaligned access state; not much used.
11182 case GSI_PROC_TYPE:
11183 -- Retrieves implver information; surely not used.
11184 case GSI_GET_HWRPB:
11185 -- Grabs a copy of the HWRPB; surely not used.
11190 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11191 /* Alpha specific */
11192 case TARGET_NR_osf_setsysinfo:
11193 ret = -TARGET_EOPNOTSUPP;
11195 case TARGET_SSI_IEEE_FP_CONTROL:
11197 uint64_t swcr, fpcr, orig_fpcr;
11199 if (get_user_u64(swcr, arg2)) {
11202 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11203 fpcr = orig_fpcr & FPCR_DYN_MASK;
11205 /* Copied from linux ieee_swcr_to_fpcr. */
11206 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
11207 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
11208 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
11209 | SWCR_TRAP_ENABLE_DZE
11210 | SWCR_TRAP_ENABLE_OVF)) << 48;
11211 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
11212 | SWCR_TRAP_ENABLE_INE)) << 57;
11213 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
11214 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
11216 cpu_alpha_store_fpcr(cpu_env, fpcr);
11221 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11223 uint64_t exc, fpcr, orig_fpcr;
11226 if (get_user_u64(exc, arg2)) {
11230 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11232 /* We only add to the exception status here. */
11233 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
11235 cpu_alpha_store_fpcr(cpu_env, fpcr);
11238 /* Old exceptions are not signaled. */
11239 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
11241 /* If any exceptions were set by this call and are unmasked,
11242 send a signal. */
11244 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
11245 si_code = TARGET_FPE_FLTRES;
11247 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
11248 si_code = TARGET_FPE_FLTUND;
11250 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
11251 si_code = TARGET_FPE_FLTOVF;
11253 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
11254 si_code = TARGET_FPE_FLTDIV;
11256 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
11257 si_code = TARGET_FPE_FLTINV;
11259 if (si_code != 0) {
11260 target_siginfo_t info;
11261 info.si_signo = SIGFPE;
11263 info.si_code = si_code;
11264 info._sifields._sigfault._addr
11265 = ((CPUArchState *)cpu_env)->pc;
11266 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11267 QEMU_SI_FAULT, &info);
11272 /* case SSI_NVPAIRS:
11273 -- Used with SSIN_UACPROC to enable unaligned accesses.
11274 case SSI_IEEE_STATE_AT_SIGNAL:
11275 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11276 -- Not implemented in linux kernel
11281 #ifdef TARGET_NR_osf_sigprocmask
11282 /* Alpha specific. */
11283 case TARGET_NR_osf_sigprocmask:
11287 sigset_t set, oldset;
11290 case TARGET_SIG_BLOCK:
11293 case TARGET_SIG_UNBLOCK:
11296 case TARGET_SIG_SETMASK:
11300 ret = -TARGET_EINVAL;
11304 target_to_host_old_sigset(&set, &mask);
11305 ret = do_sigprocmask(how, &set, &oldset);
11307 host_to_target_old_sigset(&mask, &oldset);
11314 #ifdef TARGET_NR_getgid32
11315 case TARGET_NR_getgid32:
11316 ret = get_errno(getgid());
11319 #ifdef TARGET_NR_geteuid32
11320 case TARGET_NR_geteuid32:
11321 ret = get_errno(geteuid());
11324 #ifdef TARGET_NR_getegid32
11325 case TARGET_NR_getegid32:
11326 ret = get_errno(getegid());
11329 #ifdef TARGET_NR_setreuid32
11330 case TARGET_NR_setreuid32:
11331 ret = get_errno(setreuid(arg1, arg2));
11334 #ifdef TARGET_NR_setregid32
11335 case TARGET_NR_setregid32:
11336 ret = get_errno(setregid(arg1, arg2));
11339 #ifdef TARGET_NR_getgroups32
11340 case TARGET_NR_getgroups32:
11342 int gidsetsize = arg1;
11343 uint32_t *target_grouplist;
11347 grouplist = alloca(gidsetsize * sizeof(gid_t));
11348 ret = get_errno(getgroups(gidsetsize, grouplist));
11349 if (gidsetsize == 0)
11351 if (!is_error(ret)) {
11352 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11353 if (!target_grouplist) {
11354 ret = -TARGET_EFAULT;
11357 for (i = 0; i < ret; i++)
11358 target_grouplist[i] = tswap32(grouplist[i]);
11359 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11364 #ifdef TARGET_NR_setgroups32
11365 case TARGET_NR_setgroups32:
11367 int gidsetsize = arg1;
11368 uint32_t *target_grouplist;
11372 grouplist = alloca(gidsetsize * sizeof(gid_t));
11373 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11374 if (!target_grouplist) {
11375 ret = -TARGET_EFAULT;
11378 for (i = 0; i < gidsetsize; i++)
11379 grouplist[i] = tswap32(target_grouplist[i]);
11380 unlock_user(target_grouplist, arg2, 0);
11381 ret = get_errno(setgroups(gidsetsize, grouplist));
11385 #ifdef TARGET_NR_fchown32
11386 case TARGET_NR_fchown32:
11387 ret = get_errno(fchown(arg1, arg2, arg3));
11390 #ifdef TARGET_NR_setresuid32
11391 case TARGET_NR_setresuid32:
11392 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
11395 #ifdef TARGET_NR_getresuid32
11396 case TARGET_NR_getresuid32:
11398 uid_t ruid, euid, suid;
11399 ret = get_errno(getresuid(&ruid, &euid, &suid));
11400 if (!is_error(ret)) {
11401 if (put_user_u32(ruid, arg1)
11402 || put_user_u32(euid, arg2)
11403 || put_user_u32(suid, arg3))
11409 #ifdef TARGET_NR_setresgid32
11410 case TARGET_NR_setresgid32:
11411 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
11414 #ifdef TARGET_NR_getresgid32
11415 case TARGET_NR_getresgid32:
11417 gid_t rgid, egid, sgid;
11418 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11419 if (!is_error(ret)) {
11420 if (put_user_u32(rgid, arg1)
11421 || put_user_u32(egid, arg2)
11422 || put_user_u32(sgid, arg3))
11428 #ifdef TARGET_NR_chown32
11429 case TARGET_NR_chown32:
11430 if (!(p = lock_user_string(arg1)))
11432 ret = get_errno(chown(p, arg2, arg3));
11433 unlock_user(p, arg1, 0);
11436 #ifdef TARGET_NR_setuid32
11437 case TARGET_NR_setuid32:
11438 ret = get_errno(sys_setuid(arg1));
11441 #ifdef TARGET_NR_setgid32
11442 case TARGET_NR_setgid32:
11443 ret = get_errno(sys_setgid(arg1));
11446 #ifdef TARGET_NR_setfsuid32
11447 case TARGET_NR_setfsuid32:
11448 ret = get_errno(setfsuid(arg1));
11451 #ifdef TARGET_NR_setfsgid32
11452 case TARGET_NR_setfsgid32:
11453 ret = get_errno(setfsgid(arg1));
11457 case TARGET_NR_pivot_root:
11458 goto unimplemented;
11459 #ifdef TARGET_NR_mincore
11460 case TARGET_NR_mincore:
11463 ret = -TARGET_ENOMEM;
11464 a = lock_user(VERIFY_READ, arg1, arg2, 0);
11468 ret = -TARGET_EFAULT;
11469 p = lock_user_string(arg3);
11473 ret = get_errno(mincore(a, arg2, p));
11474 unlock_user(p, arg3, ret);
11476 unlock_user(a, arg1, 0);
11480 #ifdef TARGET_NR_arm_fadvise64_64
11481 case TARGET_NR_arm_fadvise64_64:
11482 /* arm_fadvise64_64 looks like fadvise64_64 but
11483 * with different argument order: fd, advice, offset, len
11484 * rather than the usual fd, offset, len, advice.
11485 * Note that offset and len are both 64-bit so appear as
11486 * pairs of 32-bit registers.
11488 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11489 target_offset64(arg5, arg6), arg2);
11490 ret = -host_to_target_errno(ret);
11494 #if TARGET_ABI_BITS == 32
11496 #ifdef TARGET_NR_fadvise64_64
11497 case TARGET_NR_fadvise64_64:
11498 #if defined(TARGET_PPC)
11499 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11507 /* 6 args: fd, offset (high, low), len (high, low), advice */
11508 if (regpairs_aligned(cpu_env, num)) {
11509 /* offset is in (3,4), len in (5,6) and advice in 7 */
11517 ret = -host_to_target_errno(posix_fadvise(arg1,
11518 target_offset64(arg2, arg3),
11519 target_offset64(arg4, arg5),
11524 #ifdef TARGET_NR_fadvise64
11525 case TARGET_NR_fadvise64:
11526 /* 5 args: fd, offset (high, low), len, advice */
11527 if (regpairs_aligned(cpu_env, num)) {
11528 /* offset is in (3,4), len in 5 and advice in 6 */
11534 ret = -host_to_target_errno(posix_fadvise(arg1,
11535 target_offset64(arg2, arg3),
11540 #else /* not a 32-bit ABI */
11541 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11542 #ifdef TARGET_NR_fadvise64_64
11543 case TARGET_NR_fadvise64_64:
11545 #ifdef TARGET_NR_fadvise64
11546 case TARGET_NR_fadvise64:
11548 #ifdef TARGET_S390X
11550 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11551 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11552 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11553 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11557 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11560 #endif /* end of 64-bit ABI fadvise handling */
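/* Background for the 32-bit fadvise variants above: each 64-bit offset
 * or length arrives as a pair of 32-bit registers, and some targets
 * additionally require such a pair to start on an even-numbered
 * register (regpairs_aligned), shifting every later argument by one
 * slot.  target_offset64() reassembles the halves in target word
 * order, roughly:
 *
 *     big-endian targets:    ((uint64_t)word0 << 32) | word1
 *     little-endian targets: ((uint64_t)word1 << 32) | word0
 */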
11562 #ifdef TARGET_NR_madvise
11563 case TARGET_NR_madvise:
11564 /* A straight passthrough may not be safe because qemu sometimes
11565 turns private file-backed mappings into anonymous mappings.
11566 This will break MADV_DONTNEED.
11567 This is a hint, so ignoring and returning success is ok. */
11568 ret = get_errno(0);
11571 #if TARGET_ABI_BITS == 32
11572 case TARGET_NR_fcntl64:
11576 from_flock64_fn *copyfrom = copy_from_user_flock64;
11577 to_flock64_fn *copyto = copy_to_user_flock64;
11580 if (((CPUARMState *)cpu_env)->eabi) {
11581 copyfrom = copy_from_user_eabi_flock64;
11582 copyto = copy_to_user_eabi_flock64;
11586 cmd = target_to_host_fcntl_cmd(arg2);
11587 if (cmd == -TARGET_EINVAL) {
11593 case TARGET_F_GETLK64:
11594 ret = copyfrom(&fl, arg3);
11598 ret = get_errno(fcntl(arg1, cmd, &fl));
11600 ret = copyto(arg3, &fl);
11604 case TARGET_F_SETLK64:
11605 case TARGET_F_SETLKW64:
11606 ret = copyfrom(&fl, arg3);
11610 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11613 ret = do_fcntl(arg1, arg2, arg3);
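/* The copyfrom/copyto indirection above exists because ARM EABI gives
 * the 64-bit l_start/l_len members of struct flock64 8-byte alignment,
 * inserting padding after l_whence that the old-ABI layout lacks; the
 * eabi_flock64 helpers account for that padding.
 */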
11619 #ifdef TARGET_NR_cacheflush
11620 case TARGET_NR_cacheflush:
11621 /* self-modifying code is handled automatically, so nothing needed */
11625 #ifdef TARGET_NR_security
11626 case TARGET_NR_security:
11627 goto unimplemented;
11629 #ifdef TARGET_NR_getpagesize
11630 case TARGET_NR_getpagesize:
11631 ret = TARGET_PAGE_SIZE;
11634 case TARGET_NR_gettid:
11635 ret = get_errno(gettid());
11637 #ifdef TARGET_NR_readahead
11638 case TARGET_NR_readahead:
11639 #if TARGET_ABI_BITS == 32
11640 if (regpairs_aligned(cpu_env, num)) {
11645 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11647 ret = get_errno(readahead(arg1, arg2, arg3));
11652 #ifdef TARGET_NR_setxattr
11653 case TARGET_NR_listxattr:
11654 case TARGET_NR_llistxattr:
11658 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11660 ret = -TARGET_EFAULT;
11664 p = lock_user_string(arg1);
11666 if (num == TARGET_NR_listxattr) {
11667 ret = get_errno(listxattr(p, b, arg3));
11669 ret = get_errno(llistxattr(p, b, arg3));
11672 ret = -TARGET_EFAULT;
11674 unlock_user(p, arg1, 0);
11675 unlock_user(b, arg2, arg3);
11678 case TARGET_NR_flistxattr:
11682 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11684 ret = -TARGET_EFAULT;
11688 ret = get_errno(flistxattr(arg1, b, arg3));
11689 unlock_user(b, arg2, arg3);
11692 case TARGET_NR_setxattr:
11693 case TARGET_NR_lsetxattr:
11695 void *p, *n, *v = 0;
11697 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11699 ret = -TARGET_EFAULT;
11703 p = lock_user_string(arg1);
11704 n = lock_user_string(arg2);
11706 if (num == TARGET_NR_setxattr) {
11707 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11709 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11712 ret = -TARGET_EFAULT;
11714 unlock_user(p, arg1, 0);
11715 unlock_user(n, arg2, 0);
11716 unlock_user(v, arg3, 0);
11719 case TARGET_NR_fsetxattr:
11723 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11725 ret = -TARGET_EFAULT;
11729 n = lock_user_string(arg2);
11731 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11733 ret = -TARGET_EFAULT;
11735 unlock_user(n, arg2, 0);
11736 unlock_user(v, arg3, 0);
11739 case TARGET_NR_getxattr:
11740 case TARGET_NR_lgetxattr:
11742 void *p, *n, *v = 0;
11744 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11746 ret = -TARGET_EFAULT;
11750 p = lock_user_string(arg1);
11751 n = lock_user_string(arg2);
11753 if (num == TARGET_NR_getxattr) {
11754 ret = get_errno(getxattr(p, n, v, arg4));
11756 ret = get_errno(lgetxattr(p, n, v, arg4));
11759 ret = -TARGET_EFAULT;
11761 unlock_user(p, arg1, 0);
11762 unlock_user(n, arg2, 0);
11763 unlock_user(v, arg3, arg4);
11766 case TARGET_NR_fgetxattr:
11770 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11772 ret = -TARGET_EFAULT;
11776 n = lock_user_string(arg2);
11778 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11780 ret = -TARGET_EFAULT;
11782 unlock_user(n, arg2, 0);
11783 unlock_user(v, arg3, arg4);
11786 case TARGET_NR_removexattr:
11787 case TARGET_NR_lremovexattr:
11790 p = lock_user_string(arg1);
11791 n = lock_user_string(arg2);
11793 if (num == TARGET_NR_removexattr) {
11794 ret = get_errno(removexattr(p, n));
11796 ret = get_errno(lremovexattr(p, n));
11799 ret = -TARGET_EFAULT;
11801 unlock_user(p, arg1, 0);
11802 unlock_user(n, arg2, 0);
11805 case TARGET_NR_fremovexattr:
11808 n = lock_user_string(arg2);
11810 ret = get_errno(fremovexattr(arg1, n));
11812 ret = -TARGET_EFAULT;
11814 unlock_user(n, arg2, 0);
11818 #endif /* CONFIG_ATTR */
11819 #ifdef TARGET_NR_set_thread_area
11820 case TARGET_NR_set_thread_area:
11821 #if defined(TARGET_MIPS)
11822 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11825 #elif defined(TARGET_CRIS)
11827 ret = -TARGET_EINVAL;
11829 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11833 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11834 ret = do_set_thread_area(cpu_env, arg1);
11836 #elif defined(TARGET_M68K)
11838 TaskState *ts = cpu->opaque;
11839 ts->tp_value = arg1;
11844 goto unimplemented_nowarn;
11847 #ifdef TARGET_NR_get_thread_area
11848 case TARGET_NR_get_thread_area:
11849 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11850 ret = do_get_thread_area(cpu_env, arg1);
11852 #elif defined(TARGET_M68K)
11854 TaskState *ts = cpu->opaque;
11855 ret = ts->tp_value;
11859 goto unimplemented_nowarn;
11862 #ifdef TARGET_NR_getdomainname
11863 case TARGET_NR_getdomainname:
11864 goto unimplemented_nowarn;
11867 #ifdef TARGET_NR_clock_gettime
11868 case TARGET_NR_clock_gettime:
11870 struct timespec ts;
11871 ret = get_errno(clock_gettime(arg1, &ts));
11872 if (!is_error(ret)) {
11873 host_to_target_timespec(arg2, &ts);
11878 #ifdef TARGET_NR_clock_getres
11879 case TARGET_NR_clock_getres:
11881 struct timespec ts;
11882 ret = get_errno(clock_getres(arg1, &ts));
11883 if (!is_error(ret)) {
11884 host_to_target_timespec(arg2, &ts);
11889 #ifdef TARGET_NR_clock_nanosleep
11890 case TARGET_NR_clock_nanosleep:
11892 struct timespec ts;
11893 target_to_host_timespec(&ts, arg3);
11894 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11895 &ts, arg4 ? &ts : NULL));
11897 host_to_target_timespec(arg4, &ts);
11899 #if defined(TARGET_PPC)
11900 /* clock_nanosleep is odd in that it returns positive errno values.
11901 * On PPC, CR0 bit 3 should be set in such a situation. */
11902 if (ret && ret != -TARGET_ERESTARTSYS) {
11903 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11910 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11911 case TARGET_NR_set_tid_address:
11912 ret = get_errno(set_tid_address((int *)g2h(arg1)));
11916 case TARGET_NR_tkill:
11917 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11920 case TARGET_NR_tgkill:
11921 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11922 target_to_host_signal(arg3)));
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        goto unimplemented_nowarn;
#endif
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                target_to_host_timespec(ts, arg3);
                target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
                tsp = ts;
            }
            if (!arg2) {
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            } else {
                if (!(p = lock_user_string(arg2))) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        break;
#endif
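    /* All futex operations funnel through do_futex(), defined earlier
     * in this file, which translates the op code and, for the timed
     * waits, converts the guest timespec before invoking the host
     * futex syscall.
     */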
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
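    /* inotify descriptors yield structures whose layout differs between
     * host and target, so on success the new fd is registered with the
     * fd_trans machinery; target_inotify_trans rewrites the
     * inotify_event records as the guest reads them.
     */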
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        break;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        break;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif
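    /* POSIX message queues are emulated with the host's mq_* API; only
     * the open flags, the mq_attr structure and the timespecs need
     * converting. Note that the queue name is locked starting at
     * arg1 - 1, a long-standing quirk of this code path, while the
     * matching unlock_user() uses arg1 itself.
     */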
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    goto efault;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            p = lock_user_string(arg1 - 1);
            if (!p) {
                goto efault;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user(p, arg1, 0);
        }
        break;
    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            ret = -TARGET_EFAULT;
            break;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        break;
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        break;
#endif
    /* Not implemented for now... */
/*     case TARGET_NR_mq_notify: */
/*         break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }
        }
        break;
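    /* For tee/splice/vmsplice the only marshalling needed is for the
     * optional loff_t offsets, which are read from and written back to
     * guest memory as 64-bit values around the host call.
     */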
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    goto efault;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    goto efault;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    goto efault;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    goto efault;
                }
            }
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        break;
    }
#endif
#endif /* CONFIG_EVENTFD */
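    /* On 32-bit ABIs a 64-bit file offset arrives split across two
     * syscall arguments, so target_offset64() reassembles the low and
     * high halves before the host call; 64-bit ABIs pass the offsets
     * through unchanged.
     */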
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif /* CONFIG_SYNC_FILE_RANGE */
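    /* Both signalfd variants share do_signalfd4(), which converts the
     * guest sigset to the host layout and arranges for the returned
     * signalfd_siginfo records to be translated as the guest reads
     * them.
     */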
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        ret = do_signalfd4(arg1, arg2, arg4);
        break;
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        ret = do_signalfd4(arg1, arg2, 0);
        break;
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif
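    /* epoll_wait and epoll_pwait share one implementation: the guest's
     * event buffer is bounced through a host-side array (bounded by
     * TARGET_EP_MAX_EVENTS), and only the inner switch differs in
     * whether a converted sigset is handed to safe_epoll_pwait().
     */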
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            ret = -TARGET_EINVAL;
            break;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            ret = -TARGET_ENOMEM;
            break;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        break;
    }
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        break;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        ret = mem_value;
        break;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
    {
        /* Like the kernel implementation and the qemu arm barrier, no-op this? */
        ret = 0;
        break;
    }
#endif
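    /* Host timer ids for the timer_* cases live in the g_posix_timers
     * table; the value handed back to the guest is TIMER_MAGIC | index,
     * and get_timer_id() validates the magic and recovers the index
     * (returning a negative errno for a bogus id).
     */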
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    goto efault;
                }
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                goto efault;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                goto efault;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        fd_trans_unregister(ret);
        break;
    }
#endif
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[timerid] = 0;
        }
        break;
    }
#endif
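    /* The timerfd cases only convert the flag bitmask and the
     * itimerspec arguments; the file descriptor itself comes straight
     * from the host.
     */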
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        break;
#endif
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                goto efault;
            }
        }
        break;
#endif
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    goto efault;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                goto efault;
            }
        }
        break;
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        ret = get_errno(ioprio_get(arg1, arg2));
        break;
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        ret = get_errno(ioprio_set(arg1, arg2, arg3));
        break;
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        ret = get_errno(setns(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        ret = get_errno(unshare(arg1));
        break;
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
        break;
#endif
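    /* Anything not handled above is logged via gemu_log() and fails
     * with -TARGET_ENOSYS; unimplemented_nowarn is for syscalls we
     * deliberately decline (see the robust_list comment above) without
     * spamming the log.
     */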
    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if (do_strace) {
        print_syscall_ret(num, ret);
    }
    trace_guest_user_syscall_ret(cpu, num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;
    goto fail;
}