/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include <sys/mount.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <linux/capability.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#include "qemu-common.h"
#include <sys/timerfd.h>
#include <sys/eventfd.h>
#include <sys/epoll.h>
#include "qemu/xattr.h"
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */
#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#ifdef CONFIG_RTNETLINK
#include <linux/rtnetlink.h>
#include <linux/if_bridge.h>
#endif
#include <linux/audit.h>
#include "linux_loop.h"
#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */

/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))
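/* Example: glibc's pthread_create() passes CLONE_THREAD_FLAGS together
 * with CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID, so it
 * has no bits set in CLONE_INVALID_THREAD_FLAGS and is accepted as a
 * thread-creation request; a plain fork() passes only a signal number in
 * CSIGNAL and matches the fork path.
 */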
/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
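/* These two ioctl numbers are spelled out locally because pulling in
 * <linux/msdos_fs.h> (the commented-out include above) tends to clash
 * with other headers this file needs; the values must stay in sync with
 * the kernel's _IOR() definitions for the VFAT readdir ioctls.
 */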
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
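/* For example, the _syscall2(int, capget, ...) declaration further down
 * expands to a static wrapper
 *     static int capget(struct __user_cap_header_struct *header,
 *                       struct __user_cap_data_struct *data)
 *     { return syscall(__NR_capget, header, data); }
 * i.e. a raw syscall(2) invocation that, like libc, returns -1 and sets
 * errno on failure.  The __NR_sys_* aliases below let the same macros
 * define sys_*-named wrappers where libc already claims the plain name.
 */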
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
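/* Each bitmask_transtbl row is { src_mask, src_bits, dst_mask, dst_bits }:
 * when (flags & src_mask) == src_bits, the translation helper ORs
 * dst_bits into the result; the host-to-target direction walks the same
 * rows with the roles of the two pairs swapped.  That is how one table
 * serves both target_to_host_bitmask() and host_to_target_bitmask() for
 * open() flags.
 */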
enum {
    QEMU_IFLA_BR_UNSPEC,
    QEMU_IFLA_BR_FORWARD_DELAY,
    QEMU_IFLA_BR_HELLO_TIME,
    QEMU_IFLA_BR_MAX_AGE,
    QEMU_IFLA_BR_AGEING_TIME,
    QEMU_IFLA_BR_STP_STATE,
    QEMU_IFLA_BR_PRIORITY,
    QEMU_IFLA_BR_VLAN_FILTERING,
    QEMU_IFLA_BR_VLAN_PROTOCOL,
    QEMU_IFLA_BR_GROUP_FWD_MASK,
    QEMU_IFLA_BR_ROOT_ID,
    QEMU_IFLA_BR_BRIDGE_ID,
    QEMU_IFLA_BR_ROOT_PORT,
    QEMU_IFLA_BR_ROOT_PATH_COST,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
    QEMU_IFLA_BR_HELLO_TIMER,
    QEMU_IFLA_BR_TCN_TIMER,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
    QEMU_IFLA_BR_GC_TIMER,
    QEMU_IFLA_BR_GROUP_ADDR,
    QEMU_IFLA_BR_FDB_FLUSH,
    QEMU_IFLA_BR_MCAST_ROUTER,
    QEMU_IFLA_BR_MCAST_SNOOPING,
    QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
    QEMU_IFLA_BR_MCAST_QUERIER,
    QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
    QEMU_IFLA_BR_MCAST_HASH_MAX,
    QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
    QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
    QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
    QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
    QEMU_IFLA_BR_MCAST_QUERY_INTVL,
    QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
    QEMU_IFLA_BR_NF_CALL_IPTABLES,
    QEMU_IFLA_BR_NF_CALL_IP6TABLES,
    QEMU_IFLA_BR_NF_CALL_ARPTABLES,
    QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
    QEMU_IFLA_BR_PAD,
    QEMU_IFLA_BR_VLAN_STATS_ENABLED,
    QEMU_IFLA_BR_MCAST_STATS_ENABLED,
    QEMU_IFLA_BR_MCAST_IGMP_VERSION,
    QEMU_IFLA_BR_MCAST_MLD_VERSION,
    QEMU___IFLA_BR_MAX,
};

enum {
    QEMU_IFLA_UNSPEC,
    QEMU_IFLA_ADDRESS,
    QEMU_IFLA_BROADCAST,
    QEMU_IFLA_IFNAME,
    QEMU_IFLA_MTU,
    QEMU_IFLA_LINK,
    QEMU_IFLA_QDISC,
    QEMU_IFLA_STATS,
    QEMU_IFLA_COST,
    QEMU_IFLA_PRIORITY,
    QEMU_IFLA_MASTER,
    QEMU_IFLA_WIRELESS,
    QEMU_IFLA_PROTINFO,
    QEMU_IFLA_TXQLEN,
    QEMU_IFLA_MAP,
    QEMU_IFLA_WEIGHT,
    QEMU_IFLA_OPERSTATE,
    QEMU_IFLA_LINKMODE,
    QEMU_IFLA_LINKINFO,
    QEMU_IFLA_NET_NS_PID,
    QEMU_IFLA_IFALIAS,
    QEMU_IFLA_NUM_VF,
    QEMU_IFLA_VFINFO_LIST,
    QEMU_IFLA_STATS64,
    QEMU_IFLA_VF_PORTS,
    QEMU_IFLA_PORT_SELF,
    QEMU_IFLA_AF_SPEC,
    QEMU_IFLA_GROUP,
    QEMU_IFLA_NET_NS_FD,
    QEMU_IFLA_EXT_MASK,
    QEMU_IFLA_PROMISCUITY,
    QEMU_IFLA_NUM_TX_QUEUES,
    QEMU_IFLA_NUM_RX_QUEUES,
    QEMU_IFLA_CARRIER,
    QEMU_IFLA_PHYS_PORT_ID,
    QEMU_IFLA_CARRIER_CHANGES,
    QEMU_IFLA_PHYS_SWITCH_ID,
    QEMU_IFLA_LINK_NETNSID,
    QEMU_IFLA_PHYS_PORT_NAME,
    QEMU_IFLA_PROTO_DOWN,
    QEMU_IFLA_GSO_MAX_SEGS,
    QEMU_IFLA_GSO_MAX_SIZE,
    QEMU_IFLA_PAD,
    QEMU_IFLA_XDP,
    QEMU_IFLA_EVENT,
    QEMU_IFLA_NEW_NETNSID,
    QEMU_IFLA_IF_NETNSID,
    QEMU_IFLA_CARRIER_UP_COUNT,
    QEMU_IFLA_CARRIER_DOWN_COUNT,
    QEMU_IFLA_NEW_IFINDEX,
    QEMU___IFLA_MAX,
};
enum {
    QEMU_IFLA_BRPORT_UNSPEC,
    QEMU_IFLA_BRPORT_STATE,
    QEMU_IFLA_BRPORT_PRIORITY,
    QEMU_IFLA_BRPORT_COST,
    QEMU_IFLA_BRPORT_MODE,
    QEMU_IFLA_BRPORT_GUARD,
    QEMU_IFLA_BRPORT_PROTECT,
    QEMU_IFLA_BRPORT_FAST_LEAVE,
    QEMU_IFLA_BRPORT_LEARNING,
    QEMU_IFLA_BRPORT_UNICAST_FLOOD,
    QEMU_IFLA_BRPORT_PROXYARP,
    QEMU_IFLA_BRPORT_LEARNING_SYNC,
    QEMU_IFLA_BRPORT_PROXYARP_WIFI,
    QEMU_IFLA_BRPORT_ROOT_ID,
    QEMU_IFLA_BRPORT_BRIDGE_ID,
    QEMU_IFLA_BRPORT_DESIGNATED_PORT,
    QEMU_IFLA_BRPORT_DESIGNATED_COST,
    QEMU_IFLA_BRPORT_ID,
    QEMU_IFLA_BRPORT_NO,
    QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
    QEMU_IFLA_BRPORT_CONFIG_PENDING,
    QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
    QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
    QEMU_IFLA_BRPORT_HOLD_TIMER,
    QEMU_IFLA_BRPORT_FLUSH,
    QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
    QEMU_IFLA_BRPORT_PAD,
    QEMU_IFLA_BRPORT_MCAST_FLOOD,
    QEMU_IFLA_BRPORT_MCAST_TO_UCAST,
    QEMU_IFLA_BRPORT_VLAN_TUNNEL,
    QEMU_IFLA_BRPORT_BCAST_FLOOD,
    QEMU_IFLA_BRPORT_GROUP_FWD_MASK,
    QEMU_IFLA_BRPORT_NEIGH_SUPPRESS,
    QEMU___IFLA_BRPORT_MAX
};
enum {
    QEMU_IFLA_INFO_UNSPEC,
    QEMU_IFLA_INFO_KIND,
    QEMU_IFLA_INFO_DATA,
    QEMU_IFLA_INFO_XSTATS,
    QEMU_IFLA_INFO_SLAVE_KIND,
    QEMU_IFLA_INFO_SLAVE_DATA,
    QEMU___IFLA_INFO_MAX,
};

enum {
    QEMU_IFLA_INET_UNSPEC,
    QEMU_IFLA_INET_CONF,
    QEMU___IFLA_INET_MAX,
};

enum {
    QEMU_IFLA_INET6_UNSPEC,
    QEMU_IFLA_INET6_FLAGS,
    QEMU_IFLA_INET6_CONF,
    QEMU_IFLA_INET6_STATS,
    QEMU_IFLA_INET6_MCAST,
    QEMU_IFLA_INET6_CACHEINFO,
    QEMU_IFLA_INET6_ICMP6STATS,
    QEMU_IFLA_INET6_TOKEN,
    QEMU_IFLA_INET6_ADDR_GEN_MODE,
    QEMU___IFLA_INET6_MAX
};

enum {
    QEMU_IFLA_XDP_UNSPEC,
    QEMU_IFLA_XDP_FD,
    QEMU_IFLA_XDP_ATTACHED,
    QEMU_IFLA_XDP_FLAGS,
    QEMU_IFLA_XDP_PROG_ID,
    QEMU___IFLA_XDP_MAX,
};
typedef abi_long (*TargetFdDataFunc)(void *, size_t);
typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
typedef struct TargetFdTrans {
    TargetFdDataFunc host_to_target_data;
    TargetFdDataFunc target_to_host_data;
    TargetFdAddrFunc target_to_host_addr;
} TargetFdTrans;

static TargetFdTrans **target_fd_trans;

static unsigned int target_fd_max;
static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
{
    if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
        return target_fd_trans[fd]->target_to_host_data;
    }
    return NULL;
}

static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
{
    if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
        return target_fd_trans[fd]->host_to_target_data;
    }
    return NULL;
}

static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
{
    if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
        return target_fd_trans[fd]->target_to_host_addr;
    }
    return NULL;
}
static void fd_trans_register(int fd, TargetFdTrans *trans)
{
    unsigned int oldmax;

    if (fd >= target_fd_max) {
        oldmax = target_fd_max;
        target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
        target_fd_trans = g_renew(TargetFdTrans *,
                                  target_fd_trans, target_fd_max);
        memset((void *)(target_fd_trans + oldmax), 0,
               (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
    }
    target_fd_trans[fd] = trans;
}

static void fd_trans_unregister(int fd)
{
    if (fd >= 0 && fd < target_fd_max) {
        target_fd_trans[fd] = NULL;
    }
}

static void fd_trans_dup(int oldfd, int newfd)
{
    fd_trans_unregister(newfd);
    if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
        fd_trans_register(newfd, target_fd_trans[oldfd]);
    }
}
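/* The table above grows in slices of 64 pointers: ((fd >> 6) + 1) << 6
 * rounds fd + 1 up to the next multiple of 64, so registering fd 70
 * grows target_fd_max from 64 to 128, and the memset() zeroes only the
 * newly added entries so unrelated descriptors keep a NULL translator.
 */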
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
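/* Storing (timer_t) 1 claims the slot before the real host timer id is
 * written by timer_create(), so two threads scanning concurrently are
 * unlikely to pick the same index (hence the FIXME about locking).  The
 * guest-visible timer id is derived from this array index rather than
 * from the host timer_t value.
 */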
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
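/* The syscall dispatcher consults regpairs_aligned() when marshalling
 * 64-bit arguments passed in two 32-bit registers: if it returns 1 for
 * the current syscall number, an odd-numbered argument register is
 * skipped so the low/high halves land on an even/odd register pair,
 * matching the target ABI's alignment rule.
 */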
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN] = TARGET_EAGAIN,
    [EIDRM] = TARGET_EIDRM,
    [ECHRNG] = TARGET_ECHRNG,
    [EL2NSYNC] = TARGET_EL2NSYNC,
    [EL3HLT] = TARGET_EL3HLT,
    [EL3RST] = TARGET_EL3RST,
    [ELNRNG] = TARGET_ELNRNG,
    [EUNATCH] = TARGET_EUNATCH,
    [ENOCSI] = TARGET_ENOCSI,
    [EL2HLT] = TARGET_EL2HLT,
    [EDEADLK] = TARGET_EDEADLK,
    [ENOLCK] = TARGET_ENOLCK,
    [EBADE] = TARGET_EBADE,
    [EBADR] = TARGET_EBADR,
    [EXFULL] = TARGET_EXFULL,
    [ENOANO] = TARGET_ENOANO,
    [EBADRQC] = TARGET_EBADRQC,
    [EBADSLT] = TARGET_EBADSLT,
    [EBFONT] = TARGET_EBFONT,
    [ENOSTR] = TARGET_ENOSTR,
    [ENODATA] = TARGET_ENODATA,
    [ETIME] = TARGET_ETIME,
    [ENOSR] = TARGET_ENOSR,
    [ENONET] = TARGET_ENONET,
    [ENOPKG] = TARGET_ENOPKG,
    [EREMOTE] = TARGET_EREMOTE,
    [ENOLINK] = TARGET_ENOLINK,
    [EADV] = TARGET_EADV,
    [ESRMNT] = TARGET_ESRMNT,
    [ECOMM] = TARGET_ECOMM,
    [EPROTO] = TARGET_EPROTO,
    [EDOTDOT] = TARGET_EDOTDOT,
    [EMULTIHOP] = TARGET_EMULTIHOP,
    [EBADMSG] = TARGET_EBADMSG,
    [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
    [EOVERFLOW] = TARGET_EOVERFLOW,
    [ENOTUNIQ] = TARGET_ENOTUNIQ,
    [EBADFD] = TARGET_EBADFD,
    [EREMCHG] = TARGET_EREMCHG,
    [ELIBACC] = TARGET_ELIBACC,
    [ELIBBAD] = TARGET_ELIBBAD,
    [ELIBSCN] = TARGET_ELIBSCN,
    [ELIBMAX] = TARGET_ELIBMAX,
    [ELIBEXEC] = TARGET_ELIBEXEC,
    [EILSEQ] = TARGET_EILSEQ,
    [ENOSYS] = TARGET_ENOSYS,
    [ELOOP] = TARGET_ELOOP,
    [ERESTART] = TARGET_ERESTART,
    [ESTRPIPE] = TARGET_ESTRPIPE,
    [ENOTEMPTY] = TARGET_ENOTEMPTY,
    [EUSERS] = TARGET_EUSERS,
    [ENOTSOCK] = TARGET_ENOTSOCK,
    [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
    [EMSGSIZE] = TARGET_EMSGSIZE,
    [EPROTOTYPE] = TARGET_EPROTOTYPE,
    [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
    [EADDRINUSE] = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
    [ENETDOWN] = TARGET_ENETDOWN,
    [ENETUNREACH] = TARGET_ENETUNREACH,
    [ENETRESET] = TARGET_ENETRESET,
    [ECONNABORTED] = TARGET_ECONNABORTED,
    [ECONNRESET] = TARGET_ECONNRESET,
    [ENOBUFS] = TARGET_ENOBUFS,
    [EISCONN] = TARGET_EISCONN,
    [ENOTCONN] = TARGET_ENOTCONN,
    [EUCLEAN] = TARGET_EUCLEAN,
    [ENOTNAM] = TARGET_ENOTNAM,
    [ENAVAIL] = TARGET_ENAVAIL,
    [EISNAM] = TARGET_EISNAM,
    [EREMOTEIO] = TARGET_EREMOTEIO,
    [EDQUOT] = TARGET_EDQUOT,
    [ESHUTDOWN] = TARGET_ESHUTDOWN,
    [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
    [ETIMEDOUT] = TARGET_ETIMEDOUT,
    [ECONNREFUSED] = TARGET_ECONNREFUSED,
    [EHOSTDOWN] = TARGET_EHOSTDOWN,
    [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
    [EALREADY] = TARGET_EALREADY,
    [EINPROGRESS] = TARGET_EINPROGRESS,
    [ESTALE] = TARGET_ESTALE,
    [ECANCELED] = TARGET_ECANCELED,
    [ENOMEDIUM] = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY] = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED] = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED] = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD] = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
#ifdef ENOMSG
    [ENOMSG] = TARGET_ENOMSG,
#endif
#ifdef ERFKILL
    [ERFKILL] = TARGET_ERFKILL,
#endif
#ifdef EHWPOISON
    [EHWPOISON] = TARGET_EHWPOISON,
#endif
};
static inline int host_to_target_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        host_to_target_errno_table[err]) {
        return host_to_target_errno_table[err];
    }
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1) {
        return -host_to_target_errno(errno);
    } else {
        return ret;
    }
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
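/* safe_syscall() behaves like syscall() (returning -1 and setting errno
 * on failure), with one extra guarantee: if a guest signal becomes
 * pending before the host syscall instruction is executed, the syscall
 * is not started and the wrapper fails with ERESTARTSYS semantics, so
 * the emulated syscall can be restarted after the signal is handled.
 * That is why the blocking calls below go through safe_* wrappers.
 */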
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#else
/* This host kernel architecture uses a single ipc syscall; fake up
 * wrappers for the sub-operations to hide this implementation detail.
 * Annoyingly we can't include linux/ipc.h to get the constant definitions
 * for the call parameter because some structs in there conflict with the
 * sys/ipc.h ones. So we just define them here, and rely on them being
 * the same for all host architectures.
 */
#define Q_SEMTIMEDOP 4
#define Q_MSGSND 11
#define Q_MSGRCV 12
#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))

safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}
static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
                           const struct timespec *timeout)
{
    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
                    (long)timeout);
}
#endif
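/* Q_IPCCALL() packs the sub-operation number into the low 16 bits and an
 * ABI version into the high bits, mirroring the kernel's ipc(2)
 * multiplexer: safe_msgrcv() passes version 1 to select the calling
 * convention where the message type travels in the fifth argument rather
 * than through an indirection structure.
 */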
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break.  */
    return target_brk;
}
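/* Worked example with 4 KiB host pages: after target_set_brk(0x10000),
 * target_brk == brk_page == 0x10000.  do_brk(0x10800) then takes the
 * allocation path (0x10800 > brk_page), maps one page at 0x10000, and
 * leaves target_brk == 0x10800 with brk_page rounded up to 0x11000; a
 * later do_brk(0x10c00) stays below brk_page and only zeroes and moves
 * the break.
 */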
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
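/* The bit-by-bit repack is required because the guest fd_set is a plain
 * array of abi_ulong in guest byte order while the host fd_set layout is
 * opaque.  For a 32-bit guest, fd 33 sits in bit 1 of the second guest
 * word (i == 1, j == 1 above); only FD_ISSET()/FD_SET() know where the
 * host keeps it.
 */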
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}

static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                                       (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
2091 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
2093 abi_long (*target_to_host_nlmsg)
2094 (struct nlmsghdr *))
2098 while (len > sizeof(struct nlmsghdr)) {
2099 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
2100 tswap32(nlh->nlmsg_len) > len) {
2103 tswap_nlmsghdr(nlh);
2104 switch (nlh->nlmsg_type) {
2111 struct nlmsgerr *e = NLMSG_DATA(nlh);
2112 e->error = tswap32(e->error);
2113 tswap_nlmsghdr(&e->msg);
2117 ret = target_to_host_nlmsg(nlh);
2122 len -= NLMSG_ALIGN(nlh->nlmsg_len);
2123 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
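/*
 * Both walkers hand their callback one nlmsghdr at a time with the
 * header fields already in host byte order, so the callback only has to
 * convert the message payload.  A do-nothing callback is therefore a
 * valid way to pass string-based protocols through untouched
 * (hypothetical sketch, not compiled):
 */
#if 0
static abi_long nlmsg_passthrough(struct nlmsghdr *nlh)
{
    return 0;   /* payload carries no multi-byte fields to swap */
}
/* ... target_to_host_for_each_nlmsg(buf, len, nlmsg_passthrough); */
#endif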
2128 #ifdef CONFIG_RTNETLINK
2129 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
2130 size_t len, void *context,
2131 abi_long (*host_to_target_nlattr)
2135 unsigned short nla_len;
2138 while (len > sizeof(struct nlattr)) {
2139 nla_len = nlattr->nla_len;
2140 if (nla_len < sizeof(struct nlattr) ||
2144 ret = host_to_target_nlattr(nlattr, context);
2145 nlattr->nla_len = tswap16(nlattr->nla_len);
2146 nlattr->nla_type = tswap16(nlattr->nla_type);
2150 len -= NLA_ALIGN(nla_len);
2151 nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
2156 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
2158 abi_long (*host_to_target_rtattr)
2161 unsigned short rta_len;
2164 while (len > sizeof(struct rtattr)) {
2165 rta_len = rtattr->rta_len;
2166 if (rta_len < sizeof(struct rtattr) ||
2170 ret = host_to_target_rtattr(rtattr);
2171 rtattr->rta_len = tswap16(rtattr->rta_len);
2172 rtattr->rta_type = tswap16(rtattr->rta_type);
2176 len -= RTA_ALIGN(rta_len);
2177 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
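/*
 * Note the ordering: this host-to-target walker runs the callback while
 * rta_len/rta_type are still in host byte order and swaps the header
 * afterwards; target_to_host_for_each_rtattr() further down must do the
 * reverse and swap the header first, because the length it needs to
 * advance the cursor arrives in target byte order.
 */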
2182 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2184 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
2191 switch (nlattr->nla_type) {
2193 case QEMU_IFLA_BR_FDB_FLUSH:
2196 case QEMU_IFLA_BR_GROUP_ADDR:
2199 case QEMU_IFLA_BR_VLAN_FILTERING:
2200 case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
2201 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
2202 case QEMU_IFLA_BR_MCAST_ROUTER:
2203 case QEMU_IFLA_BR_MCAST_SNOOPING:
2204 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
2205 case QEMU_IFLA_BR_MCAST_QUERIER:
2206 case QEMU_IFLA_BR_NF_CALL_IPTABLES:
2207 case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
2208 case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
2209 case QEMU_IFLA_BR_VLAN_STATS_ENABLED:
2210 case QEMU_IFLA_BR_MCAST_STATS_ENABLED:
2211 case QEMU_IFLA_BR_MCAST_IGMP_VERSION:
2212 case QEMU_IFLA_BR_MCAST_MLD_VERSION:
2215 case QEMU_IFLA_BR_PRIORITY:
2216 case QEMU_IFLA_BR_VLAN_PROTOCOL:
2217 case QEMU_IFLA_BR_GROUP_FWD_MASK:
2218 case QEMU_IFLA_BR_ROOT_PORT:
2219 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
2220 u16 = NLA_DATA(nlattr);
2221 *u16 = tswap16(*u16);
2224 case QEMU_IFLA_BR_FORWARD_DELAY:
2225 case QEMU_IFLA_BR_HELLO_TIME:
2226 case QEMU_IFLA_BR_MAX_AGE:
2227 case QEMU_IFLA_BR_AGEING_TIME:
2228 case QEMU_IFLA_BR_STP_STATE:
2229 case QEMU_IFLA_BR_ROOT_PATH_COST:
2230 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
2231 case QEMU_IFLA_BR_MCAST_HASH_MAX:
2232 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
2233 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
2234 u32 = NLA_DATA(nlattr);
2235 *u32 = tswap32(*u32);
2238 case QEMU_IFLA_BR_HELLO_TIMER:
2239 case QEMU_IFLA_BR_TCN_TIMER:
2240 case QEMU_IFLA_BR_GC_TIMER:
2241 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
2242 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
2243 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
2244 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
2245 case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
2246 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
2247 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
2248 u64 = NLA_DATA(nlattr);
2249 *u64 = tswap64(*u64);
2251 /* ifla_bridge_id: uint8_t[] */
2252 case QEMU_IFLA_BR_ROOT_ID:
2253 case QEMU_IFLA_BR_BRIDGE_ID:
2256 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
2262 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2269 switch (nlattr->nla_type) {
2271 case QEMU_IFLA_BRPORT_STATE:
2272 case QEMU_IFLA_BRPORT_MODE:
2273 case QEMU_IFLA_BRPORT_GUARD:
2274 case QEMU_IFLA_BRPORT_PROTECT:
2275 case QEMU_IFLA_BRPORT_FAST_LEAVE:
2276 case QEMU_IFLA_BRPORT_LEARNING:
2277 case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2278 case QEMU_IFLA_BRPORT_PROXYARP:
2279 case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2280 case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2281 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2282 case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2283 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
2284 case QEMU_IFLA_BRPORT_MCAST_FLOOD:
2285 case QEMU_IFLA_BRPORT_MCAST_TO_UCAST:
2286 case QEMU_IFLA_BRPORT_VLAN_TUNNEL:
2287 case QEMU_IFLA_BRPORT_BCAST_FLOOD:
2288 case QEMU_IFLA_BRPORT_NEIGH_SUPPRESS:
2291 case QEMU_IFLA_BRPORT_PRIORITY:
2292 case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2293 case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2294 case QEMU_IFLA_BRPORT_ID:
2295 case QEMU_IFLA_BRPORT_NO:
2296 case QEMU_IFLA_BRPORT_GROUP_FWD_MASK:
2297 u16 = NLA_DATA(nlattr);
2298 *u16 = tswap16(*u16);
2301 case QEMU_IFLA_BRPORT_COST:
2302 u32 = NLA_DATA(nlattr);
2303 *u32 = tswap32(*u32);
2306 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2307 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2308 case QEMU_IFLA_BRPORT_HOLD_TIMER:
2309 u64 = NLA_DATA(nlattr);
2310 *u64 = tswap64(*u64);
2312 /* ifla_bridge_id: uint8_t[] */
2313 case QEMU_IFLA_BRPORT_ROOT_ID:
2314 case QEMU_IFLA_BRPORT_BRIDGE_ID:
2317 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
2323 struct linkinfo_context {
2330 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2333 struct linkinfo_context *li_context = context;
2335 switch (nlattr->nla_type) {
2337 case QEMU_IFLA_INFO_KIND:
2338 li_context->name = NLA_DATA(nlattr);
2339 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2341 case QEMU_IFLA_INFO_SLAVE_KIND:
2342 li_context->slave_name = NLA_DATA(nlattr);
2343 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2346 case QEMU_IFLA_INFO_XSTATS:
2347 /* FIXME: only used by CAN */
2350 case QEMU_IFLA_INFO_DATA:
2351 if (strncmp(li_context->name, "bridge",
2352 li_context->len) == 0) {
2353 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2356 host_to_target_data_bridge_nlattr);
2358 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2361 case QEMU_IFLA_INFO_SLAVE_DATA:
2362 if (strncmp(li_context->slave_name, "bridge",
2363 li_context->slave_len) == 0) {
2364 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2367 host_to_target_slave_data_bridge_nlattr);
2369 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2370 li_context->slave_name);
2374 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
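/*
 * Dispatching QEMU_IFLA_INFO_DATA/_SLAVE_DATA on li_context relies on
 * the kernel emitting QEMU_IFLA_INFO_KIND/_SLAVE_KIND ahead of the
 * nested data attribute, so the kind name is already recorded by the
 * time the nested attributes have to be interpreted.
 */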
2381 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2387 switch (nlattr->nla_type) {
2388 case QEMU_IFLA_INET_CONF:
2389 u32 = NLA_DATA(nlattr);
2390 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2392 u32[i] = tswap32(u32[i]);
2396 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2401 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2406 struct ifla_cacheinfo *ci;
2409 switch (nlattr->nla_type) {
2411 case QEMU_IFLA_INET6_TOKEN:
2414 case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2417 case QEMU_IFLA_INET6_FLAGS:
2418 u32 = NLA_DATA(nlattr);
2419 *u32 = tswap32(*u32);
2422 case QEMU_IFLA_INET6_CONF:
2423 u32 = NLA_DATA(nlattr);
2424 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2426 u32[i] = tswap32(u32[i]);
2429 /* ifla_cacheinfo */
2430 case QEMU_IFLA_INET6_CACHEINFO:
2431 ci = NLA_DATA(nlattr);
2432 ci->max_reasm_len = tswap32(ci->max_reasm_len);
2433 ci->tstamp = tswap32(ci->tstamp);
2434 ci->reachable_time = tswap32(ci->reachable_time);
2435 ci->retrans_time = tswap32(ci->retrans_time);
2438 case QEMU_IFLA_INET6_STATS:
2439 case QEMU_IFLA_INET6_ICMP6STATS:
2440 u64 = NLA_DATA(nlattr);
2441 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2443 u64[i] = tswap64(u64[i]);
2447 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2452 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2455 switch (nlattr->nla_type) {
2457 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2459 host_to_target_data_inet_nlattr);
2461 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2463 host_to_target_data_inet6_nlattr);
2465 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2471 static abi_long host_to_target_data_xdp_nlattr(struct nlattr *nlattr,
2476 switch (nlattr->nla_type) {
2478 case QEMU_IFLA_XDP_ATTACHED:
2481 case QEMU_IFLA_XDP_PROG_ID:
2482 u32 = NLA_DATA(nlattr);
2483 *u32 = tswap32(*u32);
2486 gemu_log("Unknown host XDP type: %d\n", nlattr->nla_type);
2492 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2495 struct rtnl_link_stats *st;
2496 struct rtnl_link_stats64 *st64;
2497 struct rtnl_link_ifmap *map;
2498 struct linkinfo_context li_context;
2500 switch (rtattr->rta_type) {
2502 case QEMU_IFLA_ADDRESS:
2503 case QEMU_IFLA_BROADCAST:
2505 case QEMU_IFLA_IFNAME:
2506 case QEMU_IFLA_QDISC:
2509 case QEMU_IFLA_OPERSTATE:
2510 case QEMU_IFLA_LINKMODE:
2511 case QEMU_IFLA_CARRIER:
2512 case QEMU_IFLA_PROTO_DOWN:
2516 case QEMU_IFLA_LINK:
2517 case QEMU_IFLA_WEIGHT:
2518 case QEMU_IFLA_TXQLEN:
2519 case QEMU_IFLA_CARRIER_CHANGES:
2520 case QEMU_IFLA_NUM_RX_QUEUES:
2521 case QEMU_IFLA_NUM_TX_QUEUES:
2522 case QEMU_IFLA_PROMISCUITY:
2523 case QEMU_IFLA_EXT_MASK:
2524 case QEMU_IFLA_LINK_NETNSID:
2525 case QEMU_IFLA_GROUP:
2526 case QEMU_IFLA_MASTER:
2527 case QEMU_IFLA_NUM_VF:
2528 case QEMU_IFLA_GSO_MAX_SEGS:
2529 case QEMU_IFLA_GSO_MAX_SIZE:
2530 u32 = RTA_DATA(rtattr);
2531 *u32 = tswap32(*u32);
2533 /* struct rtnl_link_stats */
2534 case QEMU_IFLA_STATS:
2535 st = RTA_DATA(rtattr);
2536 st->rx_packets = tswap32(st->rx_packets);
2537 st->tx_packets = tswap32(st->tx_packets);
2538 st->rx_bytes = tswap32(st->rx_bytes);
2539 st->tx_bytes = tswap32(st->tx_bytes);
2540 st->rx_errors = tswap32(st->rx_errors);
2541 st->tx_errors = tswap32(st->tx_errors);
2542 st->rx_dropped = tswap32(st->rx_dropped);
2543 st->tx_dropped = tswap32(st->tx_dropped);
2544 st->multicast = tswap32(st->multicast);
2545 st->collisions = tswap32(st->collisions);
2547 /* detailed rx_errors: */
2548 st->rx_length_errors = tswap32(st->rx_length_errors);
2549 st->rx_over_errors = tswap32(st->rx_over_errors);
2550 st->rx_crc_errors = tswap32(st->rx_crc_errors);
2551 st->rx_frame_errors = tswap32(st->rx_frame_errors);
2552 st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2553 st->rx_missed_errors = tswap32(st->rx_missed_errors);
2555 /* detailed tx_errors: */
2556 st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2557 st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2558 st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2559 st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2560 st->tx_window_errors = tswap32(st->tx_window_errors);
2563 st->rx_compressed = tswap32(st->rx_compressed);
2564 st->tx_compressed = tswap32(st->tx_compressed);
2566 /* struct rtnl_link_stats64 */
2567 case QEMU_IFLA_STATS64:
2568 st64 = RTA_DATA(rtattr);
2569 st64->rx_packets = tswap64(st64->rx_packets);
2570 st64->tx_packets = tswap64(st64->tx_packets);
2571 st64->rx_bytes = tswap64(st64->rx_bytes);
2572 st64->tx_bytes = tswap64(st64->tx_bytes);
2573 st64->rx_errors = tswap64(st64->rx_errors);
2574 st64->tx_errors = tswap64(st64->tx_errors);
2575 st64->rx_dropped = tswap64(st64->rx_dropped);
2576 st64->tx_dropped = tswap64(st64->tx_dropped);
2577 st64->multicast = tswap64(st64->multicast);
2578 st64->collisions = tswap64(st64->collisions);
2580 /* detailed rx_errors: */
2581 st64->rx_length_errors = tswap64(st64->rx_length_errors);
2582 st64->rx_over_errors = tswap64(st64->rx_over_errors);
2583 st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2584 st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2585 st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2586 st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2588 /* detailed tx_errors: */
2589 st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2590 st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2591 st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2592 st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2593 st64->tx_window_errors = tswap64(st64->tx_window_errors);
2596 st64->rx_compressed = tswap64(st64->rx_compressed);
2597 st64->tx_compressed = tswap64(st64->tx_compressed);
2599 /* struct rtnl_link_ifmap */
2601 map = RTA_DATA(rtattr);
2602 map->mem_start = tswap64(map->mem_start);
2603 map->mem_end = tswap64(map->mem_end);
2604 map->base_addr = tswap64(map->base_addr);
2605 map->irq = tswap16(map->irq);
2608 case QEMU_IFLA_LINKINFO:
2609 memset(&li_context, 0, sizeof(li_context));
2610 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2612 host_to_target_data_linkinfo_nlattr);
2613 case QEMU_IFLA_AF_SPEC:
2614 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2616 host_to_target_data_spec_nlattr);
2618 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2620 host_to_target_data_xdp_nlattr);
2622 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
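/*
 * The QEMU_IFLA_* constants are QEMU-local copies of the kernel IFLA_*
 * values: carrying a private copy lets this code recognise attributes
 * newer than the build host's headers, at the price of extending these
 * switch statements by hand whenever the kernel grows new attributes.
 */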
2628 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2631 struct ifa_cacheinfo *ci;
2633 switch (rtattr->rta_type) {
2634 /* binary: depends on family type */
2644 u32 = RTA_DATA(rtattr);
2645 *u32 = tswap32(*u32);
2647 /* struct ifa_cacheinfo */
2649 ci = RTA_DATA(rtattr);
2650 ci->ifa_prefered = tswap32(ci->ifa_prefered);
2651 ci->ifa_valid = tswap32(ci->ifa_valid);
2652 ci->cstamp = tswap32(ci->cstamp);
2653 ci->tstamp = tswap32(ci->tstamp);
2656 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2662 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2665 switch (rtattr->rta_type) {
2666 /* binary: depends on family type */
2675 u32 = RTA_DATA(rtattr);
2676 *u32 = tswap32(*u32);
2679 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2685 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2686 uint32_t rtattr_len)
2688 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2689 host_to_target_data_link_rtattr);
2692 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2693 uint32_t rtattr_len)
2695 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2696 host_to_target_data_addr_rtattr);
2699 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2700 uint32_t rtattr_len)
2702 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2703 host_to_target_data_route_rtattr);
2706 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2709 struct ifinfomsg *ifi;
2710 struct ifaddrmsg *ifa;
2713 nlmsg_len = nlh->nlmsg_len;
2714 switch (nlh->nlmsg_type) {
2718 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2719 ifi = NLMSG_DATA(nlh);
2720 ifi->ifi_type = tswap16(ifi->ifi_type);
2721 ifi->ifi_index = tswap32(ifi->ifi_index);
2722 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2723 ifi->ifi_change = tswap32(ifi->ifi_change);
2724 host_to_target_link_rtattr(IFLA_RTA(ifi),
2725 nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
2731 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2732 ifa = NLMSG_DATA(nlh);
2733 ifa->ifa_index = tswap32(ifa->ifa_index);
2734 host_to_target_addr_rtattr(IFA_RTA(ifa),
2735 nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
2741 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2742 rtm = NLMSG_DATA(nlh);
2743 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2744 host_to_target_route_rtattr(RTM_RTA(rtm),
2745 nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2749 return -TARGET_EINVAL;
2754 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2757 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
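/*
 * host_to_target_nlmsg_route() and its target-to-host counterpart below
 * are not invoked by the syscall handlers directly: they are installed
 * as per-fd data translators (see target_netlink_route_trans further
 * down) and run over every buffer sent to or received from a
 * NETLINK_ROUTE socket.
 */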
2760 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2762 abi_long (*target_to_host_rtattr)
2767 while (len >= sizeof(struct rtattr)) {
2768 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2769 tswap16(rtattr->rta_len) > len) {
2772 rtattr->rta_len = tswap16(rtattr->rta_len);
2773 rtattr->rta_type = tswap16(rtattr->rta_type);
2774 ret = target_to_host_rtattr(rtattr);
2778 len -= RTA_ALIGN(rtattr->rta_len);
2779 rtattr = (struct rtattr *)(((char *)rtattr) +
2780 RTA_ALIGN(rtattr->rta_len));
2785 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2787 switch (rtattr->rta_type) {
2789 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2795 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2797 switch (rtattr->rta_type) {
2798 /* binary: depends on family type */
2803 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2809 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2812 switch (rtattr->rta_type) {
2813 /* binary: depends on family type */
2821 u32 = RTA_DATA(rtattr);
2822 *u32 = tswap32(*u32);
2825 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2831 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2832 uint32_t rtattr_len)
2834 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2835 target_to_host_data_link_rtattr);
2838 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2839 uint32_t rtattr_len)
2841 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2842 target_to_host_data_addr_rtattr);
2845 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2846 uint32_t rtattr_len)
2848 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2849 target_to_host_data_route_rtattr);
2852 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2854 struct ifinfomsg *ifi;
2855 struct ifaddrmsg *ifa;
2858 switch (nlh->nlmsg_type) {
2863 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2864 ifi = NLMSG_DATA(nlh);
2865 ifi->ifi_type = tswap16(ifi->ifi_type);
2866 ifi->ifi_index = tswap32(ifi->ifi_index);
2867 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2868 ifi->ifi_change = tswap32(ifi->ifi_change);
2869 target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2870 NLMSG_LENGTH(sizeof(*ifi)));
2876 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2877 ifa = NLMSG_DATA(nlh);
2878 ifa->ifa_index = tswap32(ifa->ifa_index);
2879 target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2880 NLMSG_LENGTH(sizeof(*ifa)));
2887 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2888 rtm = NLMSG_DATA(nlh);
2889 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2890 target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2891 NLMSG_LENGTH(sizeof(*rtm)));
2895 return -TARGET_EOPNOTSUPP;
2900 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2902 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2904 #endif /* CONFIG_RTNETLINK */
2906 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2908 switch (nlh->nlmsg_type) {
2910 gemu_log("Unknown host audit message type %d\n",
2912 return -TARGET_EINVAL;
2917 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2920 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2923 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2925 switch (nlh->nlmsg_type) {
2927 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2928 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2931 gemu_log("Unknown target audit message type %d\n",
2933 return -TARGET_EINVAL;
2939 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2941 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
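/*
 * Audit payloads in the AUDIT_FIRST_USER_MSG ranges are plain text, so
 * beyond the nlmsghdr handled by the generic walkers there is nothing
 * to byte-swap; the per-type switches above essentially whitelist the
 * message types known to be safe to pass through.
 */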
2944 /* do_setsockopt() must return target values and target errnos. */
2945 static abi_long do_setsockopt(int sockfd, int level, int optname,
2946 abi_ulong optval_addr, socklen_t optlen)
2950 struct ip_mreqn *ip_mreq;
2951 struct ip_mreq_source *ip_mreq_source;
2955 /* TCP options all take an 'int' value. */
2956 if (optlen < sizeof(uint32_t))
2957 return -TARGET_EINVAL;
2959 if (get_user_u32(val, optval_addr))
2960 return -TARGET_EFAULT;
2961 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2968 case IP_ROUTER_ALERT:
2972 case IP_MTU_DISCOVER:
2979 case IP_MULTICAST_TTL:
2980 case IP_MULTICAST_LOOP:
2982 if (optlen >= sizeof(uint32_t)) {
2983 if (get_user_u32(val, optval_addr))
2984 return -TARGET_EFAULT;
2985 } else if (optlen >= 1) {
2986 if (get_user_u8(val, optval_addr))
2987 return -TARGET_EFAULT;
2989 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2991 case IP_ADD_MEMBERSHIP:
2992 case IP_DROP_MEMBERSHIP:
2993 if (optlen < sizeof (struct target_ip_mreq) ||
2994 optlen > sizeof (struct target_ip_mreqn))
2995 return -TARGET_EINVAL;
2997 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2998 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2999 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
3002 case IP_BLOCK_SOURCE:
3003 case IP_UNBLOCK_SOURCE:
3004 case IP_ADD_SOURCE_MEMBERSHIP:
3005 case IP_DROP_SOURCE_MEMBERSHIP:
3006 if (optlen != sizeof (struct target_ip_mreq_source))
3007 return -TARGET_EINVAL;
3009 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3010 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
3011 unlock_user (ip_mreq_source, optval_addr, 0);
3020 case IPV6_MTU_DISCOVER:
3023 case IPV6_RECVPKTINFO:
3024 case IPV6_UNICAST_HOPS:
3026 case IPV6_RECVHOPLIMIT:
3027 case IPV6_2292HOPLIMIT:
3030 if (optlen < sizeof(uint32_t)) {
3031 return -TARGET_EINVAL;
3033 if (get_user_u32(val, optval_addr)) {
3034 return -TARGET_EFAULT;
3036 ret = get_errno(setsockopt(sockfd, level, optname,
3037 &val, sizeof(val)));
3041 struct in6_pktinfo pki;
3043 if (optlen < sizeof(pki)) {
3044 return -TARGET_EINVAL;
3047 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
3048 return -TARGET_EFAULT;
3051 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
3053 ret = get_errno(setsockopt(sockfd, level, optname,
3054 &pki, sizeof(pki)));
3065 struct icmp6_filter icmp6f;
3067 if (optlen > sizeof(icmp6f)) {
3068 optlen = sizeof(icmp6f);
3071 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
3072 return -TARGET_EFAULT;
3075 for (val = 0; val < 8; val++) {
3076 icmp6f.data[val] = tswap32(icmp6f.data[val]);
3079 ret = get_errno(setsockopt(sockfd, level, optname,
3091 /* these take a u32 value */
3092 if (optlen < sizeof(uint32_t)) {
3093 return -TARGET_EINVAL;
3096 if (get_user_u32(val, optval_addr)) {
3097 return -TARGET_EFAULT;
3099 ret = get_errno(setsockopt(sockfd, level, optname,
3100 &val, sizeof(val)));
3107 case TARGET_SOL_SOCKET:
3109 case TARGET_SO_RCVTIMEO:
3113 optname = SO_RCVTIMEO;
3116 if (optlen != sizeof(struct target_timeval)) {
3117 return -TARGET_EINVAL;
3120 if (copy_from_user_timeval(&tv, optval_addr)) {
3121 return -TARGET_EFAULT;
3124 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3128 case TARGET_SO_SNDTIMEO:
3129 optname = SO_SNDTIMEO;
3131 case TARGET_SO_ATTACH_FILTER:
3133 struct target_sock_fprog *tfprog;
3134 struct target_sock_filter *tfilter;
3135 struct sock_fprog fprog;
3136 struct sock_filter *filter;
3139 if (optlen != sizeof(*tfprog)) {
3140 return -TARGET_EINVAL;
3142 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
3143 return -TARGET_EFAULT;
3145 if (!lock_user_struct(VERIFY_READ, tfilter,
3146 tswapal(tfprog->filter), 0)) {
3147 unlock_user_struct(tfprog, optval_addr, 1);
3148 return -TARGET_EFAULT;
3151 fprog.len = tswap16(tfprog->len);
3152 filter = g_try_new(struct sock_filter, fprog.len);
3153 if (filter == NULL) {
3154 unlock_user_struct(tfilter, tfprog->filter, 1);
3155 unlock_user_struct(tfprog, optval_addr, 1);
3156 return -TARGET_ENOMEM;
3158 for (i = 0; i < fprog.len; i++) {
3159 filter[i].code = tswap16(tfilter[i].code);
3160 filter[i].jt = tfilter[i].jt;
3161 filter[i].jf = tfilter[i].jf;
3162 filter[i].k = tswap32(tfilter[i].k);
3164 fprog.filter = filter;
3166 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
3167 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
3170 unlock_user_struct(tfilter, tfprog->filter, 1);
3171 unlock_user_struct(tfprog, optval_addr, 1);
3174 case TARGET_SO_BINDTODEVICE:
3176 char *dev_ifname, *addr_ifname;
3178 if (optlen > IFNAMSIZ - 1) {
3179 optlen = IFNAMSIZ - 1;
3181 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3183 return -TARGET_EFAULT;
3185 optname = SO_BINDTODEVICE;
3186 addr_ifname = alloca(IFNAMSIZ);
3187 memcpy(addr_ifname, dev_ifname, optlen);
3188 addr_ifname[optlen] = 0;
3189 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3190 addr_ifname, optlen));
3191 unlock_user (dev_ifname, optval_addr, 0);
3194 /* Options with 'int' argument. */
3195 case TARGET_SO_DEBUG:
3198 case TARGET_SO_REUSEADDR:
3199 optname = SO_REUSEADDR;
3201 case TARGET_SO_TYPE:
3204 case TARGET_SO_ERROR:
3207 case TARGET_SO_DONTROUTE:
3208 optname = SO_DONTROUTE;
3210 case TARGET_SO_BROADCAST:
3211 optname = SO_BROADCAST;
3213 case TARGET_SO_SNDBUF:
3214 optname = SO_SNDBUF;
3216 case TARGET_SO_SNDBUFFORCE:
3217 optname = SO_SNDBUFFORCE;
3219 case TARGET_SO_RCVBUF:
3220 optname = SO_RCVBUF;
3222 case TARGET_SO_RCVBUFFORCE:
3223 optname = SO_RCVBUFFORCE;
3225 case TARGET_SO_KEEPALIVE:
3226 optname = SO_KEEPALIVE;
3228 case TARGET_SO_OOBINLINE:
3229 optname = SO_OOBINLINE;
3231 case TARGET_SO_NO_CHECK:
3232 optname = SO_NO_CHECK;
3234 case TARGET_SO_PRIORITY:
3235 optname = SO_PRIORITY;
3238 case TARGET_SO_BSDCOMPAT:
3239 optname = SO_BSDCOMPAT;
3242 case TARGET_SO_PASSCRED:
3243 optname = SO_PASSCRED;
3245 case TARGET_SO_PASSSEC:
3246 optname = SO_PASSSEC;
3248 case TARGET_SO_TIMESTAMP:
3249 optname = SO_TIMESTAMP;
3251 case TARGET_SO_RCVLOWAT:
3252 optname = SO_RCVLOWAT;
3257 if (optlen < sizeof(uint32_t))
3258 return -TARGET_EINVAL;
3260 if (get_user_u32(val, optval_addr))
3261 return -TARGET_EFAULT;
3262 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3266 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3267 ret = -TARGET_ENOPROTOOPT;
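/*
 * The pattern for simple integer-valued socket options is uniform: map
 * the TARGET_SO_* constant to the host SO_* constant and fall through
 * to the shared tail that fetches a u32 from guest memory and calls the
 * host setsockopt().  A hypothetical new option would be wired up like
 * this (TARGET_SO_EXAMPLE/SO_EXAMPLE are placeholder names, not real
 * constants):
 */
#if 0
        case TARGET_SO_EXAMPLE:
            optname = SO_EXAMPLE;
            break;
#endif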
3272 /* do_getsockopt() must return target values and target errnos. */
3273 static abi_long do_getsockopt(int sockfd, int level, int optname,
3274 abi_ulong optval_addr, abi_ulong optlen)
3281 case TARGET_SOL_SOCKET:
3284 /* These don't just return a single integer */
3285 case TARGET_SO_LINGER:
3286 case TARGET_SO_RCVTIMEO:
3287 case TARGET_SO_SNDTIMEO:
3288 case TARGET_SO_PEERNAME:
3290 case TARGET_SO_PEERCRED: {
3293 struct target_ucred *tcr;
3295 if (get_user_u32(len, optlen)) {
3296 return -TARGET_EFAULT;
3299 return -TARGET_EINVAL;
3303 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
3311 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
3312 return -TARGET_EFAULT;
3314 __put_user(cr.pid, &tcr->pid);
3315 __put_user(cr.uid, &tcr->uid);
3316 __put_user(cr.gid, &tcr->gid);
3317 unlock_user_struct(tcr, optval_addr, 1);
3318 if (put_user_u32(len, optlen)) {
3319 return -TARGET_EFAULT;
3323 /* Options with 'int' argument. */
3324 case TARGET_SO_DEBUG:
3327 case TARGET_SO_REUSEADDR:
3328 optname = SO_REUSEADDR;
3330 case TARGET_SO_TYPE:
3333 case TARGET_SO_ERROR:
3336 case TARGET_SO_DONTROUTE:
3337 optname = SO_DONTROUTE;
3339 case TARGET_SO_BROADCAST:
3340 optname = SO_BROADCAST;
3342 case TARGET_SO_SNDBUF:
3343 optname = SO_SNDBUF;
3345 case TARGET_SO_RCVBUF:
3346 optname = SO_RCVBUF;
3348 case TARGET_SO_KEEPALIVE:
3349 optname = SO_KEEPALIVE;
3351 case TARGET_SO_OOBINLINE:
3352 optname = SO_OOBINLINE;
3354 case TARGET_SO_NO_CHECK:
3355 optname = SO_NO_CHECK;
3357 case TARGET_SO_PRIORITY:
3358 optname = SO_PRIORITY;
3361 case TARGET_SO_BSDCOMPAT:
3362 optname = SO_BSDCOMPAT;
3365 case TARGET_SO_PASSCRED:
3366 optname = SO_PASSCRED;
3368 case TARGET_SO_TIMESTAMP:
3369 optname = SO_TIMESTAMP;
3371 case TARGET_SO_RCVLOWAT:
3372 optname = SO_RCVLOWAT;
3374 case TARGET_SO_ACCEPTCONN:
3375 optname = SO_ACCEPTCONN;
3382 /* TCP options all take an 'int' value. */
3384 if (get_user_u32(len, optlen))
3385 return -TARGET_EFAULT;
3387 return -TARGET_EINVAL;
3389 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3392 if (optname == SO_TYPE) {
3393 val = host_to_target_sock_type(val);
3398 if (put_user_u32(val, optval_addr))
3399 return -TARGET_EFAULT;
3401 if (put_user_u8(val, optval_addr))
3402 return -TARGET_EFAULT;
3404 if (put_user_u32(len, optlen))
3405 return -TARGET_EFAULT;
3412 case IP_ROUTER_ALERT:
3416 case IP_MTU_DISCOVER:
3422 case IP_MULTICAST_TTL:
3423 case IP_MULTICAST_LOOP:
3424 if (get_user_u32(len, optlen))
3425 return -TARGET_EFAULT;
3427 return -TARGET_EINVAL;
3429 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3432 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3434 if (put_user_u32(len, optlen)
3435 || put_user_u8(val, optval_addr))
3436 return -TARGET_EFAULT;
3438 if (len > sizeof(int))
3440 if (put_user_u32(len, optlen)
3441 || put_user_u32(val, optval_addr))
3442 return -TARGET_EFAULT;
3446 ret = -TARGET_ENOPROTOOPT;
3452 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3454 ret = -TARGET_EOPNOTSUPP;
3460 /* Convert a target low/high pair representing a file offset into the host
3461 * low/high pair. This function doesn't handle offsets bigger than 64 bits
3462 * as the kernel doesn't handle them either.
3464 static void target_to_host_low_high(abi_ulong tlow,
3466 unsigned long *hlow,
3467 unsigned long *hhigh)
3469 uint64_t off = tlow |
3470 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3471 TARGET_LONG_BITS / 2;
3474 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
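/*
 * Worked example, assuming a 32-bit target on a 64-bit host: with
 * tlow = 0x89abcdef and thigh = 0x01234567, the two half-width shifts
 * (a single shift by TARGET_LONG_BITS would be undefined for 64-bit
 * targets) combine them into off = 0x0123456789abcdef; *hlow then
 * receives the full 64-bit value and *hhigh becomes 0, which is what
 * the 64-bit host kernel expects.
 */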
3477 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3478 abi_ulong count, int copy)
3480 struct target_iovec *target_vec;
3482 abi_ulong total_len, max_len;
3485 bool bad_address = false;
3491 if (count > IOV_MAX) {
3496 vec = g_try_new0(struct iovec, count);
3502 target_vec = lock_user(VERIFY_READ, target_addr,
3503 count * sizeof(struct target_iovec), 1);
3504 if (target_vec == NULL) {
3509 /* ??? If host page size > target page size, this will result in a
3510 value larger than what we can actually support. */
3511 max_len = 0x7fffffff & TARGET_PAGE_MASK;
3514 for (i = 0; i < count; i++) {
3515 abi_ulong base = tswapal(target_vec[i].iov_base);
3516 abi_long len = tswapal(target_vec[i].iov_len);
3521 } else if (len == 0) {
3522 /* A zero-length pointer is ignored. */
3523 vec[i].iov_base = 0;
3525 vec[i].iov_base = lock_user(type, base, len, copy);
3526 /* If the first buffer pointer is bad, this is a fault. But
3527 * subsequent bad buffers will result in a partial write; this
3528 * is realized by filling the vector with null pointers and zero lengths. */
3530 if (!vec[i].iov_base) {
3541 if (len > max_len - total_len) {
3542 len = max_len - total_len;
3545 vec[i].iov_len = len;
3549 unlock_user(target_vec, target_addr, 0);
3554 if (tswapal(target_vec[i].iov_len) > 0) {
3555 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3558 unlock_user(target_vec, target_addr, 0);
3565 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3566 abi_ulong count, int copy)
3568 struct target_iovec *target_vec;
3571 target_vec = lock_user(VERIFY_READ, target_addr,
3572 count * sizeof(struct target_iovec), 1);
3574 for (i = 0; i < count; i++) {
3575 abi_ulong base = tswapal(target_vec[i].iov_base);
3576 abi_long len = tswapal(target_vec[i].iov_len);
3580 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3582 unlock_user(target_vec, target_addr, 0);
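/*
 * lock_iovec()/unlock_iovec() always travel in pairs, with 'copy'
 * selecting the data direction: copy-in on lock for writes, copy-out on
 * unlock for reads.  A typical read-side caller, sketched assuming fd
 * and count were validated by the caller (illustrative only, not
 * compiled):
 */
#if 0
    struct iovec *vec = lock_iovec(VERIFY_WRITE, target_vec_addr, count, 0);
    if (vec == NULL) {
        return -host_to_target_errno(errno);
    }
    ret = get_errno(safe_readv(fd, vec, count));
    unlock_iovec(vec, target_vec_addr, count, 1);   /* copy data back out */
#endif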
3588 static inline int target_to_host_sock_type(int *type)
3591 int target_type = *type;
3593 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3594 case TARGET_SOCK_DGRAM:
3595 host_type = SOCK_DGRAM;
3597 case TARGET_SOCK_STREAM:
3598 host_type = SOCK_STREAM;
3601 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3604 if (target_type & TARGET_SOCK_CLOEXEC) {
3605 #if defined(SOCK_CLOEXEC)
3606 host_type |= SOCK_CLOEXEC;
3608 return -TARGET_EINVAL;
3611 if (target_type & TARGET_SOCK_NONBLOCK) {
3612 #if defined(SOCK_NONBLOCK)
3613 host_type |= SOCK_NONBLOCK;
3614 #elif !defined(O_NONBLOCK)
3615 return -TARGET_EINVAL;
3622 /* Try to emulate socket type flags after socket creation. */
3623 static int sock_flags_fixup(int fd, int target_type)
3625 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3626 if (target_type & TARGET_SOCK_NONBLOCK) {
3627 int flags = fcntl(fd, F_GETFL);
3628 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3630 return -TARGET_EINVAL;
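/*
 * On hosts lacking SOCK_NONBLOCK the flag is emulated after socket
 * creation with fcntl(F_SETFL, O_NONBLOCK); SOCK_CLOEXEC has no such
 * fallback, which is why target_to_host_sock_type() rejects it outright
 * on hosts that do not define it.
 */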
3637 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3638 abi_ulong target_addr,
3641 struct sockaddr *addr = host_addr;
3642 struct target_sockaddr *target_saddr;
3644 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3645 if (!target_saddr) {
3646 return -TARGET_EFAULT;
3649 memcpy(addr, target_saddr, len);
3650 addr->sa_family = tswap16(target_saddr->sa_family);
3651 /* spkt_protocol is big-endian */
3653 unlock_user(target_saddr, target_addr, 0);
3657 static TargetFdTrans target_packet_trans = {
3658 .target_to_host_addr = packet_target_to_host_sockaddr,
3661 #ifdef CONFIG_RTNETLINK
3662 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3666 ret = target_to_host_nlmsg_route(buf, len);
3674 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3678 ret = host_to_target_nlmsg_route(buf, len);
3686 static TargetFdTrans target_netlink_route_trans = {
3687 .target_to_host_data = netlink_route_target_to_host,
3688 .host_to_target_data = netlink_route_host_to_target,
3690 #endif /* CONFIG_RTNETLINK */
3692 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3696 ret = target_to_host_nlmsg_audit(buf, len);
3704 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3708 ret = host_to_target_nlmsg_audit(buf, len);
3716 static TargetFdTrans target_netlink_audit_trans = {
3717 .target_to_host_data = netlink_audit_target_to_host,
3718 .host_to_target_data = netlink_audit_host_to_target,
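/*
 * TargetFdTrans bundles optional per-fd conversion hooks: once a
 * translator is registered for a descriptor, the generic send/recv
 * paths look it up via fd_trans_target_to_host_data() and
 * fd_trans_host_to_target_data() and run every buffer through the
 * protocol-specific converter.  Registration happens at socket
 * creation, e.g.:
 *
 *     fd_trans_register(fd, &target_netlink_audit_trans);
 */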
3721 /* do_socket() must return target values and target errnos. */
3722 static abi_long do_socket(int domain, int type, int protocol)
3724 int target_type = type;
3727 ret = target_to_host_sock_type(&type);
3732 if (domain == PF_NETLINK && !(
3733 #ifdef CONFIG_RTNETLINK
3734 protocol == NETLINK_ROUTE ||
3736 protocol == NETLINK_KOBJECT_UEVENT ||
3737 protocol == NETLINK_AUDIT)) {
3738 return -EPFNOSUPPORT;
3741 if (domain == AF_PACKET ||
3742 (domain == AF_INET && type == SOCK_PACKET)) {
3743 protocol = tswap16(protocol);
3746 ret = get_errno(socket(domain, type, protocol));
3748 ret = sock_flags_fixup(ret, target_type);
3749 if (type == SOCK_PACKET) {
3750 /* Handle an obsolete case:
3751 * if the socket type is SOCK_PACKET, bind by name
3753 fd_trans_register(ret, &target_packet_trans);
3754 } else if (domain == PF_NETLINK) {
3756 #ifdef CONFIG_RTNETLINK
3758 fd_trans_register(ret, &target_netlink_route_trans);
3761 case NETLINK_KOBJECT_UEVENT:
3762 /* nothing to do: messages are strings */
3765 fd_trans_register(ret, &target_netlink_audit_trans);
3768 g_assert_not_reached();
3775 /* do_bind() must return target values and target errnos. */
3776 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3782 if ((int)addrlen < 0) {
3783 return -TARGET_EINVAL;
3786 addr = alloca(addrlen+1);
3788 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3792 return get_errno(bind(sockfd, addr, addrlen));
3795 /* do_connect() must return target values and target errnos. */
3796 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3802 if ((int)addrlen < 0) {
3803 return -TARGET_EINVAL;
3806 addr = alloca(addrlen+1);
3808 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3812 return get_errno(safe_connect(sockfd, addr, addrlen));
3815 /* do_sendrecvmsg_locked() must return target values and target errnos. */
3816 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3817 int flags, int send)
3823 abi_ulong target_vec;
3825 if (msgp->msg_name) {
3826 msg.msg_namelen = tswap32(msgp->msg_namelen);
3827 msg.msg_name = alloca(msg.msg_namelen+1);
3828 ret = target_to_host_sockaddr(fd, msg.msg_name,
3829 tswapal(msgp->msg_name),
3831 if (ret == -TARGET_EFAULT) {
3832 /* For connected sockets msg_name and msg_namelen must
3833 * be ignored, so returning EFAULT immediately is wrong.
3834 * Instead, pass a bad msg_name to the host kernel, and
3835 * let it decide whether to return EFAULT or not.
3837 msg.msg_name = (void *)-1;
3842 msg.msg_name = NULL;
3843 msg.msg_namelen = 0;
3845 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3846 msg.msg_control = alloca(msg.msg_controllen);
3847 msg.msg_flags = tswap32(msgp->msg_flags);
3849 count = tswapal(msgp->msg_iovlen);
3850 target_vec = tswapal(msgp->msg_iov);
3852 if (count > IOV_MAX) {
3853 /* sendmsg/recvmsg return a different errno for this condition than
3854 * readv/writev, so we must catch it here before lock_iovec() does.
3856 ret = -TARGET_EMSGSIZE;
3860 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3861 target_vec, count, send);
3863 ret = -host_to_target_errno(errno);
3866 msg.msg_iovlen = count;
3870 if (fd_trans_target_to_host_data(fd)) {
3873 host_msg = g_malloc(msg.msg_iov->iov_len);
3874 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3875 ret = fd_trans_target_to_host_data(fd)(host_msg,
3876 msg.msg_iov->iov_len);
3878 msg.msg_iov->iov_base = host_msg;
3879 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3883 ret = target_to_host_cmsg(&msg, msgp);
3885 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3889 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3890 if (!is_error(ret)) {
3892 if (fd_trans_host_to_target_data(fd)) {
3893 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3896 ret = host_to_target_cmsg(msgp, &msg);
3898 if (!is_error(ret)) {
3899 msgp->msg_namelen = tswap32(msg.msg_namelen);
3900 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3901 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3902 msg.msg_name, msg.msg_namelen);
3914 unlock_iovec(vec, target_vec, count, !send);
3919 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3920 int flags, int send)
3923 struct target_msghdr *msgp;
3925 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3929 return -TARGET_EFAULT;
3931 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3932 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3936 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3937 * so it might not have this *mmsg-specific flag either.
3939 #ifndef MSG_WAITFORONE
3940 #define MSG_WAITFORONE 0x10000
3943 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3944 unsigned int vlen, unsigned int flags,
3947 struct target_mmsghdr *mmsgp;
3951 if (vlen > UIO_MAXIOV) {
3955 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3957 return -TARGET_EFAULT;
3960 for (i = 0; i < vlen; i++) {
3961 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3962 if (is_error(ret)) {
3965 mmsgp[i].msg_len = tswap32(ret);
3966 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3967 if (flags & MSG_WAITFORONE) {
3968 flags |= MSG_DONTWAIT;
3972 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3974 /* Return number of datagrams sent if we sent any at all;
3975 * otherwise return the error.
3983 /* do_accept4() must return target values and target errnos. */
3984 static abi_long do_accept4(int fd, abi_ulong target_addr,
3985 abi_ulong target_addrlen_addr, int flags)
3992 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3994 if (target_addr == 0) {
3995 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3998 /* Linux returns EINVAL if the addrlen pointer is invalid */
3999 if (get_user_u32(addrlen, target_addrlen_addr))
4000 return -TARGET_EINVAL;
4002 if ((int)addrlen < 0) {
4003 return -TARGET_EINVAL;
4006 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4007 return -TARGET_EINVAL;
4009 addr = alloca(addrlen);
4011 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
4012 if (!is_error(ret)) {
4013 host_to_target_sockaddr(target_addr, addr, addrlen);
4014 if (put_user_u32(addrlen, target_addrlen_addr))
4015 ret = -TARGET_EFAULT;
4020 /* do_getpeername() must return target values and target errnos. */
4021 static abi_long do_getpeername(int fd, abi_ulong target_addr,
4022 abi_ulong target_addrlen_addr)
4028 if (get_user_u32(addrlen, target_addrlen_addr))
4029 return -TARGET_EFAULT;
4031 if ((int)addrlen < 0) {
4032 return -TARGET_EINVAL;
4035 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4036 return -TARGET_EFAULT;
4038 addr = alloca(addrlen);
4040 ret = get_errno(getpeername(fd, addr, &addrlen));
4041 if (!is_error(ret)) {
4042 host_to_target_sockaddr(target_addr, addr, addrlen);
4043 if (put_user_u32(addrlen, target_addrlen_addr))
4044 ret = -TARGET_EFAULT;
4049 /* do_getsockname() must return target values and target errnos. */
4050 static abi_long do_getsockname(int fd, abi_ulong target_addr,
4051 abi_ulong target_addrlen_addr)
4057 if (get_user_u32(addrlen, target_addrlen_addr))
4058 return -TARGET_EFAULT;
4060 if ((int)addrlen < 0) {
4061 return -TARGET_EINVAL;
4064 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4065 return -TARGET_EFAULT;
4067 addr = alloca(addrlen);
4069 ret = get_errno(getsockname(fd, addr, &addrlen));
4070 if (!is_error(ret)) {
4071 host_to_target_sockaddr(target_addr, addr, addrlen);
4072 if (put_user_u32(addrlen, target_addrlen_addr))
4073 ret = -TARGET_EFAULT;
4078 /* do_socketpair() must return target values and target errnos. */
4079 static abi_long do_socketpair(int domain, int type, int protocol,
4080 abi_ulong target_tab_addr)
4085 target_to_host_sock_type(&type);
4087 ret = get_errno(socketpair(domain, type, protocol, tab));
4088 if (!is_error(ret)) {
4089 if (put_user_s32(tab[0], target_tab_addr)
4090 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
4091 ret = -TARGET_EFAULT;
4096 /* do_sendto() must return target values and target errnos. */
4097 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
4098 abi_ulong target_addr, socklen_t addrlen)
4102 void *copy_msg = NULL;
4105 if ((int)addrlen < 0) {
4106 return -TARGET_EINVAL;
4109 host_msg = lock_user(VERIFY_READ, msg, len, 1);
4111 return -TARGET_EFAULT;
4112 if (fd_trans_target_to_host_data(fd)) {
4113 copy_msg = host_msg;
4114 host_msg = g_malloc(len);
4115 memcpy(host_msg, copy_msg, len);
4116 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
4122 addr = alloca(addrlen+1);
4123 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
4127 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
4129 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
4134 host_msg = copy_msg;
4136 unlock_user(host_msg, msg, 0);
4140 /* do_recvfrom() must return target values and target errnos. */
4141 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
4142 abi_ulong target_addr,
4143 abi_ulong target_addrlen)
4150 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
4152 return -TARGET_EFAULT;
4154 if (get_user_u32(addrlen, target_addrlen)) {
4155 ret = -TARGET_EFAULT;
4158 if ((int)addrlen < 0) {
4159 ret = -TARGET_EINVAL;
4162 addr = alloca(addrlen);
4163 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
4166 addr = NULL; /* To keep compiler quiet. */
4167 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
4169 if (!is_error(ret)) {
4170 if (fd_trans_host_to_target_data(fd)) {
4171 ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
4174 host_to_target_sockaddr(target_addr, addr, addrlen);
4175 if (put_user_u32(addrlen, target_addrlen)) {
4176 ret = -TARGET_EFAULT;
4180 unlock_user(host_msg, msg, len);
4183 unlock_user(host_msg, msg, 0);
4188 #ifdef TARGET_NR_socketcall
4189 /* do_socketcall() must return target values and target errnos. */
4190 static abi_long do_socketcall(int num, abi_ulong vptr)
4192 static const unsigned nargs[] = { /* number of arguments per operation */
4193 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
4194 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
4195 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
4196 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
4197 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
4198 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
4199 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
4200 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
4201 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
4202 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
4203 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
4204 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
4205 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
4206 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
4207 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
4208 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
4209 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
4210 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
4211 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
4212 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
4214 abi_long a[6]; /* max 6 args */
4217 /* check the range of the first argument num */
4218 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
4219 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
4220 return -TARGET_EINVAL;
4222 /* ensure we have space for args */
4223 if (nargs[num] > ARRAY_SIZE(a)) {
4224 return -TARGET_EINVAL;
4226 /* collect the arguments in a[] according to nargs[] */
4227 for (i = 0; i < nargs[num]; ++i) {
4228 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
4229 return -TARGET_EFAULT;
4232 /* now that we have the args, invoke the appropriate underlying function */
4234 case TARGET_SYS_SOCKET: /* domain, type, protocol */
4235 return do_socket(a[0], a[1], a[2]);
4236 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
4237 return do_bind(a[0], a[1], a[2]);
4238 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
4239 return do_connect(a[0], a[1], a[2]);
4240 case TARGET_SYS_LISTEN: /* sockfd, backlog */
4241 return get_errno(listen(a[0], a[1]));
4242 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
4243 return do_accept4(a[0], a[1], a[2], 0);
4244 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
4245 return do_getsockname(a[0], a[1], a[2]);
4246 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
4247 return do_getpeername(a[0], a[1], a[2]);
4248 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
4249 return do_socketpair(a[0], a[1], a[2], a[3]);
4250 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
4251 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
4252 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
4253 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
4254 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
4255 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
4256 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
4257 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
4258 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
4259 return get_errno(shutdown(a[0], a[1]));
4260 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4261 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
4262 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4263 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
4264 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
4265 return do_sendrecvmsg(a[0], a[1], a[2], 1);
4266 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
4267 return do_sendrecvmsg(a[0], a[1], a[2], 0);
4268 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
4269 return do_accept4(a[0], a[1], a[2], a[3]);
4270 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
4271 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
4272 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
4273 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
4275 gemu_log("Unsupported socketcall: %d\n", num);
4276 return -TARGET_EINVAL;
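/*
 * Example of the dispatch above: a guest connect(fd, addr, addrlen)
 * issued via socketcall arrives as num == TARGET_SYS_CONNECT with vptr
 * pointing at three consecutive abi_longs in guest memory; the argument
 * loop fetches them into a[0..2], and do_connect() then performs its
 * own pointer validation and sockaddr conversion.
 */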
4281 #define N_SHM_REGIONS 32
4283 static struct shm_region {
4287 } shm_regions[N_SHM_REGIONS];
4289 #ifndef TARGET_SEMID64_DS
4290 /* asm-generic version of this struct */
4291 struct target_semid64_ds
4293 struct target_ipc_perm sem_perm;
4294 abi_ulong sem_otime;
4295 #if TARGET_ABI_BITS == 32
4296 abi_ulong __unused1;
4298 abi_ulong sem_ctime;
4299 #if TARGET_ABI_BITS == 32
4300 abi_ulong __unused2;
4302 abi_ulong sem_nsems;
4303 abi_ulong __unused3;
4304 abi_ulong __unused4;
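/*
 * On 32-bit ABIs the __unused1/__unused2 padding above widens sem_otime
 * and sem_ctime to 64-bit slots, matching the asm-generic semid64_ds
 * layout that 64-bit ABIs get natively.
 */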
4308 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4309 abi_ulong target_addr)
4311 struct target_ipc_perm *target_ip;
4312 struct target_semid64_ds *target_sd;
4314 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4315 return -TARGET_EFAULT;
4316 target_ip = &(target_sd->sem_perm);
4317 host_ip->__key = tswap32(target_ip->__key);
4318 host_ip->uid = tswap32(target_ip->uid);
4319 host_ip->gid = tswap32(target_ip->gid);
4320 host_ip->cuid = tswap32(target_ip->cuid);
4321 host_ip->cgid = tswap32(target_ip->cgid);
4322 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4323 host_ip->mode = tswap32(target_ip->mode);
4325 host_ip->mode = tswap16(target_ip->mode);
4327 #if defined(TARGET_PPC)
4328 host_ip->__seq = tswap32(target_ip->__seq);
4330 host_ip->__seq = tswap16(target_ip->__seq);
4332 unlock_user_struct(target_sd, target_addr, 0);
4336 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4337 struct ipc_perm *host_ip)
4339 struct target_ipc_perm *target_ip;
4340 struct target_semid64_ds *target_sd;
4342 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4343 return -TARGET_EFAULT;
4344 target_ip = &(target_sd->sem_perm);
4345 target_ip->__key = tswap32(host_ip->__key);
4346 target_ip->uid = tswap32(host_ip->uid);
4347 target_ip->gid = tswap32(host_ip->gid);
4348 target_ip->cuid = tswap32(host_ip->cuid);
4349 target_ip->cgid = tswap32(host_ip->cgid);
4350 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4351 target_ip->mode = tswap32(host_ip->mode);
4353 target_ip->mode = tswap16(host_ip->mode);
4355 #if defined(TARGET_PPC)
4356 target_ip->__seq = tswap32(host_ip->__seq);
4358 target_ip->__seq = tswap16(host_ip->__seq);
4360 unlock_user_struct(target_sd, target_addr, 1);
4364 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4365 abi_ulong target_addr)
4367 struct target_semid64_ds *target_sd;
4369 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4370 return -TARGET_EFAULT;
4371 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4372 return -TARGET_EFAULT;
4373 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4374 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4375 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4376 unlock_user_struct(target_sd, target_addr, 0);
4380 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4381 struct semid_ds *host_sd)
4383 struct target_semid64_ds *target_sd;
4385 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4386 return -TARGET_EFAULT;
4387 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4388 return -TARGET_EFAULT;
4389 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4390 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4391 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4392 unlock_user_struct(target_sd, target_addr, 1);
4396 struct target_seminfo {
4409 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4410 struct seminfo *host_seminfo)
4412 struct target_seminfo *target_seminfo;
4413 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4414 return -TARGET_EFAULT;
4415 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4416 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4417 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4418 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4419 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4420 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4421 __put_user(host_seminfo->semume, &target_seminfo->semume);
4422 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4423 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4424 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4425 unlock_user_struct(target_seminfo, target_addr, 1);
4431 struct semid_ds *buf;
4432 unsigned short *array;
4433 struct seminfo *__buf;
4436 union target_semun {
4443 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4444 abi_ulong target_addr)
4447 unsigned short *array;
4449 struct semid_ds semid_ds;
4452 semun.buf = &semid_ds;
4454 ret = semctl(semid, 0, IPC_STAT, semun);
4456 return get_errno(ret);
4458 nsems = semid_ds.sem_nsems;
4460 *host_array = g_try_new(unsigned short, nsems);
4462 return -TARGET_ENOMEM;
4464 array = lock_user(VERIFY_READ, target_addr,
4465 nsems*sizeof(unsigned short), 1);
4467 g_free(*host_array);
4468 return -TARGET_EFAULT;
4471 for(i=0; i<nsems; i++) {
4472 __get_user((*host_array)[i], &array[i]);
4474 unlock_user(array, target_addr, 0);
4479 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4480 unsigned short **host_array)
4483 unsigned short *array;
4485 struct semid_ds semid_ds;
4488 semun.buf = &semid_ds;
4490 ret = semctl(semid, 0, IPC_STAT, semun);
4492 return get_errno(ret);
4494 nsems = semid_ds.sem_nsems;
4496 array = lock_user(VERIFY_WRITE, target_addr,
4497 nsems*sizeof(unsigned short), 0);
4499 return -TARGET_EFAULT;
4501 for(i=0; i<nsems; i++) {
4502 __put_user((*host_array)[i], &array[i]);
4504 g_free(*host_array);
4505 unlock_user(array, target_addr, 1);
4510 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4511 abi_ulong target_arg)
4513 union target_semun target_su = { .buf = target_arg };
4515 struct semid_ds dsarg;
4516 unsigned short *array = NULL;
4517 struct seminfo seminfo;
4518 abi_long ret = -TARGET_EINVAL;
4525 /* In 64 bit cross-endian situations, we will erroneously pick up
4526 * the wrong half of the union for the "val" element. To rectify
4527 * this, the entire 8-byte structure is byteswapped, followed by
4528 * a swap of the 4 byte val field. In other cases, the data is
4529 * already in proper host byte order. */
4530 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4531 target_su.buf = tswapal(target_su.buf);
4532 arg.val = tswap32(target_su.val);
4534 arg.val = target_su.val;
4536 ret = get_errno(semctl(semid, semnum, cmd, arg));
4540 err = target_to_host_semarray(semid, &array, target_su.array);
4544 ret = get_errno(semctl(semid, semnum, cmd, arg));
4545 err = host_to_target_semarray(semid, target_su.array, &array);
4552 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4556 ret = get_errno(semctl(semid, semnum, cmd, arg));
4557 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4563 arg.__buf = &seminfo;
4564 ret = get_errno(semctl(semid, semnum, cmd, arg));
4565 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4573 ret = get_errno(semctl(semid, semnum, cmd, NULL));
4580 struct target_sembuf {
4581 unsigned short sem_num;
4586 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4587 abi_ulong target_addr,
4590 struct target_sembuf *target_sembuf;
4593 target_sembuf = lock_user(VERIFY_READ, target_addr,
4594 nsops*sizeof(struct target_sembuf), 1);
4596 return -TARGET_EFAULT;
4598 for(i=0; i<nsops; i++) {
4599 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4600 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4601 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4604 unlock_user(target_sembuf, target_addr, 0);
4609 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4611 struct sembuf sops[nsops];
4613 if (target_to_host_sembuf(sops, ptr, nsops))
4614 return -TARGET_EFAULT;
4616 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
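/*
 * There is no separate safe_semop(): semop() is defined as semtimedop()
 * with a NULL timeout, so the restartable safe_semtimedop() wrapper
 * covers both entry points.
 */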
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg, ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}

struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
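
/* Illustrative sketch (not built anywhere in the tree): a hypothetical
 * target whose kernel forces SHMLBA to four pages would define, in its
 * target headers:
 *
 *   #define TARGET_FORCE_SHMLBA 1
 *
 * and supply its own replacement for the default above, e.g.:
 *
 *   static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *   {
 *       return 4 * TARGET_PAGE_SIZE;
 *   }
 *
 * so that do_shmat() below rounds or rejects attach addresses with the
 * larger alignment, matching the target kernel's behaviour.
 */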
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr = h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY) ? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    mmap_lock();

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h(shmaddr)));

    mmap_unlock();

    return rv;
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
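
/* Illustrative example of the encoding do_ipc() unpacks: on targets with
 * the multiplexed ipc syscall, a guest shmat() is typically issued by the
 * guest libc roughly as (hypothetical wrapper, values for illustration):
 *
 *   syscall(__NR_ipc, IPCOP_shmat, shmid, shmflg, &raddr, shmaddr);
 *
 * i.e. call carries IPCOP_shmat in its low 16 bits (the version in the
 * high 16 bits), "third" points at where the attach address is written
 * back, and "ptr" carries the requested address.
 */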
/* kernel structure types definitions */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry IOCTLEntry;

typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

struct IOCTLEntry {
    int target_cmd;
    unsigned int host_cmd;
    const char *name;
    int access;
    do_ioctl_fn *do_ioctl;
    const argtype arg_type[5];
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
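
/* For reference, the guest-side pattern this handler services looks
 * roughly like the following (illustrative, error handling omitted):
 *
 *   struct fiemap *fm = malloc(sizeof(*fm) + n * sizeof(struct fiemap_extent));
 *   memset(fm, 0, sizeof(*fm));
 *   fm->fm_length = ~0ULL;        // map the whole file
 *   fm->fm_extent_count = n;      // 0 means "just count the extents"
 *   ioctl(fd, FS_IOC_FIEMAP, fm);
 *   // on return, fm->fm_mapped_extents extents follow the header
 *
 * which is why the conversion above must size its output buffer from
 * fm_extent_count / fm_mapped_extents rather than from the struct alone.
 */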
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_len = host_ifconf->ifc_len;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;

    target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
    nb_ifreq = target_ifc_len / target_ifreq_size;
    host_ifc_len = nb_ifreq * sizeof(struct ifreq);

    outbufsz = sizeof(*host_ifconf) + host_ifc_len;
    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the ifreqs into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        host_ifconf = malloc(outbufsz);
        if (!host_ifconf) {
            return -TARGET_ENOMEM;
        }
        memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
        free_buf = 1;
    }
    host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);

    host_ifconf->ifc_len = host_ifc_len;
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr)
            return -TARGET_EFAULT;
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        /* copy ifreq[] to target user */

        argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
        for (i = 0; i < nb_ifreq; i++) {
            thunk_convert(argptr + i * target_ifreq_size,
                          host_ifc_buf + i * sizeof(struct ifreq),
                          ifreq_arg_type, THUNK_TARGET);
        }
        unlock_user(argptr, target_ifc_buf, target_ifc_len);
    }

    if (free_buf) {
        free(host_ifconf);
    }

    return ret;
}
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl *)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char *)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t *)host_data = tswap64(*(uint64_t *)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char *)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char *)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void *)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void *)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void *)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char *)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char *)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void *)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            void *hdata = (void *)host_dm + host_dm->data_start;
            int count = *(uint32_t *)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t *)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers = (void *)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void *)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void *)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr;
    unsigned long *host_rt_dev_ptr;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
}

#ifdef TIOCGPTPEER
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
    return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
}
#endif
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
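
/* Each IOCTL() line in ioctls.h expands to one table entry; for example a
 * line declared (illustrative; see ioctls.h for the real list) as
 *
 *   IOCTL(BLKRASET, IOC_W, TYPE_INT)
 *
 * becomes { TARGET_BLKRASET, BLKRASET, "BLKRASET", IOC_W, 0, { TYPE_INT } },
 * letting do_ioctl() below match the target command number, convert the
 * argument according to arg_type[], and issue the host ioctl.
 */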
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for (;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOSYS;
    }

    switch (arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch (ie->access) {
        case IOC_R:
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
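
/* Example flow for a TYPE_PTR command with IOC_RW access (illustrative):
 * the guest struct is locked and converted into buf_temp with THUNK_HOST,
 * the host ioctl runs on buf_temp, and on success the result is converted
 * back with THUNK_TARGET into guest memory.  Only commands whose target
 * ioctl number matches a table entry ever reach the host.
 */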
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }
};
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};
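
/* The four columns of a bitmask_transtbl entry are
 *   { target_mask, target_bits, host_mask, host_bits }
 * target_to_host_bitmask() sets host_bits in the result whenever
 * (value & target_mask) == target_bits, and host_to_target_bitmask()
 * does the reverse, so multi-bit fields work too; e.g. the TARGET_CSIZE
 * rows above translate the two-bit character-size field one encoding at
 * a time.
 */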
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}

static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK has been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0)) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
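
/* Layout reminder for entry_1/entry_2 above (standard i386 segment
 * descriptor; illustrative values): entry_1 packs base[15:0] and
 * limit[15:0]; entry_2 packs base[31:24], G at bit 23, D/B at 22, L at 21,
 * AVL at 20, limit[19:16], P at bit 15, DPL=3 and S=1 from the 0x7000
 * constant, the type bits at 9-11, and base[23:16].  E.g. base 0x12345678
 * with limit 0xffff gives entry_1 == 0x5678ffff.
 */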
/* specific and weird i386 syscalls */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#if defined(TARGET_I386) && defined(TARGET_ABI32)
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        for (i = TARGET_GDT_ENTRY_TLS_MIN; i <= TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0)) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
#endif /* TARGET_I386 && TARGET_ABI32 */
#ifndef TARGET_ABI32
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch (code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif

#endif /* defined(TARGET_I386) */
#define NEW_STACK_SIZE 0x40000


static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;

static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
/* warning: doesn't handle Linux-specific flags... */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch (cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        return cmd;
    case TARGET_F_GETLK:
        return F_GETLK64;
    case TARGET_F_SETLK:
        return F_SETLK64;
    case TARGET_F_SETLKW:
        return F_SETLKW64;
    case TARGET_F_GETOWN:
        return F_GETOWN;
    case TARGET_F_SETOWN:
        return F_SETOWN;
    case TARGET_F_GETSIG:
        return F_GETSIG;
    case TARGET_F_SETSIG:
        return F_SETSIG;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        return F_GETLK64;
    case TARGET_F_SETLK64:
        return F_SETLK64;
    case TARGET_F_SETLKW64:
        return F_SETLKW64;
#endif
    case TARGET_F_SETLEASE:
        return F_SETLEASE;
    case TARGET_F_GETLEASE:
        return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
#endif
    case TARGET_F_NOTIFY:
        return F_NOTIFY;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        return F_SETOWN_EX;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        return F_SETPIPE_SZ;
    case TARGET_F_GETPIPE_SZ:
        return F_GETPIPE_SZ;
#endif
    default:
        return -TARGET_EINVAL;
    }
    return -TARGET_EINVAL;
}
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    TRANSTBL_CONVERT(F_EXLCK); \
    TRANSTBL_CONVERT(F_SHLCK); \
    }

static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}

static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
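
/* After preprocessing, target_to_host_flock() above is just a switch;
 * with TRANSTBL_CONVERT(a) defined as "case TARGET_##a: return a",
 * FLOCK_TRANSTBL expands to:
 *
 *   switch (type) {
 *   case TARGET_F_RDLCK: return F_RDLCK;
 *   case TARGET_F_WRLCK: return F_WRLCK;
 *   ...and likewise for F_UNLCK, F_EXLCK, F_SHLCK...
 *   }
 *
 * host_to_target_flock() redefines the macro with the operands swapped to
 * get the inverse mapping from the same table.
 */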
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);

#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch (cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
6883 static inline int high2lowuid(int uid)
6891 static inline int high2lowgid(int gid)
6899 static inline int low2highuid(int uid)
6901 if ((int16_t)uid == -1)
6907 static inline int low2highgid(int gid)
6909 if ((int16_t)gid == -1)
6914 static inline int tswapid(int id)
6919 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6921 #else /* !USE_UID16 */
6922 static inline int high2lowuid(int uid)
6926 static inline int high2lowgid(int gid)
6930 static inline int low2highuid(int uid)
6934 static inline int low2highgid(int gid)
6938 static inline int tswapid(int id)
6943 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6945 #endif /* USE_UID16 */
6947 /* We must do direct syscalls for setting UID/GID, because we want to
6948 * implement the Linux system call semantics of "change only for this thread",
6949 * not the libc/POSIX semantics of "change for all threads in process".
6950 * (See http://ewontfix.com/17/ for more details.)
6951 * We use the 32-bit version of the syscalls if present; if it is not
6952 * then either the host architecture supports 32-bit UIDs natively with
6953 * the standard syscall, or the 16-bit UID is the best we can do.
6955 #ifdef __NR_setuid32
6956 #define __NR_sys_setuid __NR_setuid32
6958 #define __NR_sys_setuid __NR_setuid
6960 #ifdef __NR_setgid32
6961 #define __NR_sys_setgid __NR_setgid32
6963 #define __NR_sys_setgid __NR_setgid
6965 #ifdef __NR_setresuid32
6966 #define __NR_sys_setresuid __NR_setresuid32
6968 #define __NR_sys_setresuid __NR_setresuid
6970 #ifdef __NR_setresgid32
6971 #define __NR_sys_setresgid __NR_setresgid32
6973 #define __NR_sys_setresgid __NR_setresgid
6976 _syscall1(int, sys_setuid, uid_t, uid)
6977 _syscall1(int, sys_setgid, gid_t, gid)
6978 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6979 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
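/* Illustration: under glibc's NPTL, a library call such as setuid(0)
 * is broadcast to every thread of the process (via an internal signal)
 * to get the POSIX process-wide behaviour, whereas sys_setuid() above
 * is the raw syscall and changes credentials for the calling thread
 * only, which is the semantics the guest kernel ABI requires.
 */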
6981 void syscall_init(void)
6984 const argtype *arg_type;
6988 thunk_init(STRUCT_MAX);
6990 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6991 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6992 #include "syscall_types.h"
6994 #undef STRUCT_SPECIAL
6996 /* Build the target_to_host_errno_table[] from
6997 * host_to_target_errno_table[]. */
6998 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6999 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
7002 /* We patch the ioctl size if necessary. We rely on the fact that
7003 no ioctl has all the size-field bits set to '1'. */
7005 while (ie->target_cmd != 0) {
7006 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7007 TARGET_IOC_SIZEMASK) {
7008 arg_type = ie->arg_type;
7009 if (arg_type[0] != TYPE_PTR) {
7010 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7015 size = thunk_type_size(arg_type, 0);
7016 ie->target_cmd = (ie->target_cmd &
7017 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7018 (size << TARGET_IOC_SIZESHIFT);
7021 /* automatic consistency check if same arch */
7022 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7023 (defined(__x86_64__) && defined(TARGET_X86_64))
7024 if (unlikely(ie->target_cmd != ie->host_cmd)) {
7025 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7026 ie->name, ie->target_cmd, ie->host_cmd);
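/* For reference, the asm-generic ioctl number layout assumed by the
 * size patching above is:
 *
 *   bits  0..7   command number (nr)
 *   bits  8..15  driver type
 *   bits 16..29  argument size   <-- the field rewritten here
 *   bits 30..31  direction
 *
 * (A few architectures, e.g. MIPS and PowerPC, instead use a 13-bit
 * size field and a 3-bit direction field.)
 */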
7033 #if TARGET_ABI_BITS == 32
7034 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
7036 #ifdef TARGET_WORDS_BIGENDIAN
7037 return ((uint64_t)word0 << 32) | word1;
7039 return ((uint64_t)word1 << 32) | word0;
7042 #else /* TARGET_ABI_BITS == 32 */
7043 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
7047 #endif /* TARGET_ABI_BITS != 32 */
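/* Example: on a 32-bit ABI a 64-bit file offset arrives in two
 * registers; a little-endian guest passing offset 0x100000000 supplies
 * word0 = 0x0 (low half) and word1 = 0x1 (high half), which
 * target_offset64() reassembles according to target byte order.
 */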
7049 #ifdef TARGET_NR_truncate64
7050 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7055 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7059 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7063 #ifdef TARGET_NR_ftruncate64
7064 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7069 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7073 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
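/* regpairs_aligned() accounts for ABIs (e.g. 32-bit ARM EABI, MIPS
 * O32) that require 64-bit syscall arguments to start in an even
 * register, inserting a padding argument; in that case the offset
 * halves arrive in arg3/arg4 instead of arg2/arg3.
 */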
7077 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
7078 abi_ulong target_addr)
7080 struct target_timespec *target_ts;
7082 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
7083 return -TARGET_EFAULT;
7084 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
7085 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7086 unlock_user_struct(target_ts, target_addr, 0);
7090 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
7091 struct timespec *host_ts)
7093 struct target_timespec *target_ts;
7095 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
7096 return -TARGET_EFAULT;
7097 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
7098 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7099 unlock_user_struct(target_ts, target_addr, 1);
7103 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
7104 abi_ulong target_addr)
7106 struct target_itimerspec *target_itspec;
7108 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
7109 return -TARGET_EFAULT;
7112 host_itspec->it_interval.tv_sec =
7113 tswapal(target_itspec->it_interval.tv_sec);
7114 host_itspec->it_interval.tv_nsec =
7115 tswapal(target_itspec->it_interval.tv_nsec);
7116 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
7117 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
7119 unlock_user_struct(target_itspec, target_addr, 1);
7123 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7124 struct itimerspec *host_its)
7126 struct target_itimerspec *target_itspec;
7128 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
7129 return -TARGET_EFAULT;
7132 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
7133 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
7135 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
7136 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
7138 unlock_user_struct(target_itspec, target_addr, 0);
7142 static inline abi_long target_to_host_timex(struct timex *host_tx,
7143 abi_long target_addr)
7145 struct target_timex *target_tx;
7147 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7148 return -TARGET_EFAULT;
7151 __get_user(host_tx->modes, &target_tx->modes);
7152 __get_user(host_tx->offset, &target_tx->offset);
7153 __get_user(host_tx->freq, &target_tx->freq);
7154 __get_user(host_tx->maxerror, &target_tx->maxerror);
7155 __get_user(host_tx->esterror, &target_tx->esterror);
7156 __get_user(host_tx->status, &target_tx->status);
7157 __get_user(host_tx->constant, &target_tx->constant);
7158 __get_user(host_tx->precision, &target_tx->precision);
7159 __get_user(host_tx->tolerance, &target_tx->tolerance);
7160 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7161 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7162 __get_user(host_tx->tick, &target_tx->tick);
7163 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7164 __get_user(host_tx->jitter, &target_tx->jitter);
7165 __get_user(host_tx->shift, &target_tx->shift);
7166 __get_user(host_tx->stabil, &target_tx->stabil);
7167 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7168 __get_user(host_tx->calcnt, &target_tx->calcnt);
7169 __get_user(host_tx->errcnt, &target_tx->errcnt);
7170 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7171 __get_user(host_tx->tai, &target_tx->tai);
7173 unlock_user_struct(target_tx, target_addr, 0);
7177 static inline abi_long host_to_target_timex(abi_long target_addr,
7178 struct timex *host_tx)
7180 struct target_timex *target_tx;
7182 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7183 return -TARGET_EFAULT;
7186 __put_user(host_tx->modes, &target_tx->modes);
7187 __put_user(host_tx->offset, &target_tx->offset);
7188 __put_user(host_tx->freq, &target_tx->freq);
7189 __put_user(host_tx->maxerror, &target_tx->maxerror);
7190 __put_user(host_tx->esterror, &target_tx->esterror);
7191 __put_user(host_tx->status, &target_tx->status);
7192 __put_user(host_tx->constant, &target_tx->constant);
7193 __put_user(host_tx->precision, &target_tx->precision);
7194 __put_user(host_tx->tolerance, &target_tx->tolerance);
7195 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7196 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7197 __put_user(host_tx->tick, &target_tx->tick);
7198 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7199 __put_user(host_tx->jitter, &target_tx->jitter);
7200 __put_user(host_tx->shift, &target_tx->shift);
7201 __put_user(host_tx->stabil, &target_tx->stabil);
7202 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7203 __put_user(host_tx->calcnt, &target_tx->calcnt);
7204 __put_user(host_tx->errcnt, &target_tx->errcnt);
7205 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7206 __put_user(host_tx->tai, &target_tx->tai);
7208 unlock_user_struct(target_tx, target_addr, 1);
7213 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7214 abi_ulong target_addr)
7216 struct target_sigevent *target_sevp;
7218 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7219 return -TARGET_EFAULT;
7222 /* This union is awkward on 64 bit systems because it has a 32 bit
7223 * integer and a pointer in it; we follow the conversion approach
7224 * used for handling sigval types in signal.c so the guest should get
7225 * the correct value back even if we did a 64 bit byteswap and it's
7226 * using the 32 bit integer.
7228 host_sevp->sigev_value.sival_ptr =
7229 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7230 host_sevp->sigev_signo =
7231 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7232 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7233 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7235 unlock_user_struct(target_sevp, target_addr, 1);
7239 #if defined(TARGET_NR_mlockall)
7240 static inline int target_to_host_mlockall_arg(int arg)
7244 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
7245 result |= MCL_CURRENT;
7247 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
7248 result |= MCL_FUTURE;
7254 static inline abi_long host_to_target_stat64(void *cpu_env,
7255 abi_ulong target_addr,
7256 struct stat *host_st)
7258 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7259 if (((CPUARMState *)cpu_env)->eabi) {
7260 struct target_eabi_stat64 *target_st;
7262 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7263 return -TARGET_EFAULT;
7264 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7265 __put_user(host_st->st_dev, &target_st->st_dev);
7266 __put_user(host_st->st_ino, &target_st->st_ino);
7267 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7268 __put_user(host_st->st_ino, &target_st->__st_ino);
7270 __put_user(host_st->st_mode, &target_st->st_mode);
7271 __put_user(host_st->st_nlink, &target_st->st_nlink);
7272 __put_user(host_st->st_uid, &target_st->st_uid);
7273 __put_user(host_st->st_gid, &target_st->st_gid);
7274 __put_user(host_st->st_rdev, &target_st->st_rdev);
7275 __put_user(host_st->st_size, &target_st->st_size);
7276 __put_user(host_st->st_blksize, &target_st->st_blksize);
7277 __put_user(host_st->st_blocks, &target_st->st_blocks);
7278 __put_user(host_st->st_atime, &target_st->target_st_atime);
7279 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7280 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7281 unlock_user_struct(target_st, target_addr, 1);
7285 #if defined(TARGET_HAS_STRUCT_STAT64)
7286 struct target_stat64 *target_st;
7288 struct target_stat *target_st;
7291 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7292 return -TARGET_EFAULT;
7293 memset(target_st, 0, sizeof(*target_st));
7294 __put_user(host_st->st_dev, &target_st->st_dev);
7295 __put_user(host_st->st_ino, &target_st->st_ino);
7296 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7297 __put_user(host_st->st_ino, &target_st->__st_ino);
7299 __put_user(host_st->st_mode, &target_st->st_mode);
7300 __put_user(host_st->st_nlink, &target_st->st_nlink);
7301 __put_user(host_st->st_uid, &target_st->st_uid);
7302 __put_user(host_st->st_gid, &target_st->st_gid);
7303 __put_user(host_st->st_rdev, &target_st->st_rdev);
7304 /* XXX: better use of kernel struct */
7305 __put_user(host_st->st_size, &target_st->st_size);
7306 __put_user(host_st->st_blksize, &target_st->st_blksize);
7307 __put_user(host_st->st_blocks, &target_st->st_blocks);
7308 __put_user(host_st->st_atime, &target_st->target_st_atime);
7309 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7310 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7311 unlock_user_struct(target_st, target_addr, 1);
7317 /* ??? Using host futex calls even when target atomic operations
7318 are not really atomic probably breaks things. However, implementing
7319 futexes locally would make futexes shared between multiple processes
7320 tricky. In any case such shared futexes would probably be useless,
7321 because guest atomic operations won't work across processes either. */
7322 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7323 target_ulong uaddr2, int val3)
7325 struct timespec ts, *pts;
7328 /* ??? We assume FUTEX_* constants are the same on both host
7329 and target. */
7330 #ifdef FUTEX_CMD_MASK
7331 base_op = op & FUTEX_CMD_MASK;
7337 case FUTEX_WAIT_BITSET:
7340 target_to_host_timespec(pts, timeout);
7344 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7347 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7349 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7351 case FUTEX_CMP_REQUEUE:
7353 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7354 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7355 But the prototype takes a `struct timespec *'; insert casts
7356 to satisfy the compiler. We do not need to tswap TIMEOUT
7357 since it's not compared to guest memory. */
7358 pts = (struct timespec *)(uintptr_t) timeout;
7359 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7361 (base_op == FUTEX_CMP_REQUEUE
7365 return -TARGET_ENOSYS;
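/* Note on the FUTEX_WAIT path above: the kernel compares the 32-bit
 * word at uaddr with 'val', and guest memory holds that word in target
 * byte order, so it is 'val' that gets tswap32()ed, not the memory.
 */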
7368 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7369 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7370 abi_long handle, abi_long mount_id,
7373 struct file_handle *target_fh;
7374 struct file_handle *fh;
7378 unsigned int size, total_size;
7380 if (get_user_s32(size, handle)) {
7381 return -TARGET_EFAULT;
7384 name = lock_user_string(pathname);
7386 return -TARGET_EFAULT;
7389 total_size = sizeof(struct file_handle) + size;
7390 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7392 unlock_user(name, pathname, 0);
7393 return -TARGET_EFAULT;
7396 fh = g_malloc0(total_size);
7397 fh->handle_bytes = size;
7399 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7400 unlock_user(name, pathname, 0);
7402 /* man name_to_handle_at(2):
7403 * Other than the use of the handle_bytes field, the caller should treat
7404 * the file_handle structure as an opaque data type
7407 memcpy(target_fh, fh, total_size);
7408 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7409 target_fh->handle_type = tswap32(fh->handle_type);
7411 unlock_user(target_fh, handle, total_size);
7413 if (put_user_s32(mid, mount_id)) {
7414 return -TARGET_EFAULT;
7422 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7423 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7426 struct file_handle *target_fh;
7427 struct file_handle *fh;
7428 unsigned int size, total_size;
7431 if (get_user_s32(size, handle)) {
7432 return -TARGET_EFAULT;
7435 total_size = sizeof(struct file_handle) + size;
7436 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7438 return -TARGET_EFAULT;
7441 fh = g_memdup(target_fh, total_size);
7442 fh->handle_bytes = size;
7443 fh->handle_type = tswap32(target_fh->handle_type);
7445 ret = get_errno(open_by_handle_at(mount_fd, fh,
7446 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7450 unlock_user(target_fh, handle, total_size);
7456 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7458 /* signalfd siginfo conversion */
7461 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
7462 const struct signalfd_siginfo *info)
7464 int sig = host_to_target_signal(info->ssi_signo);
7466 /* linux/signalfd.h defines an ssi_addr_lsb field that is
7467 * not defined in sys/signalfd.h but is used by some kernels.
7468 */
7470 #ifdef BUS_MCEERR_AO
7471 if (tinfo->ssi_signo == SIGBUS &&
7472 (tinfo->ssi_code == BUS_MCEERR_AR ||
7473 tinfo->ssi_code == BUS_MCEERR_AO)) {
7474 uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
7475 uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
7476 *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
7480 tinfo->ssi_signo = tswap32(sig);
7481 tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
7482 tinfo->ssi_code = tswap32(info->ssi_code);
7483 tinfo->ssi_pid = tswap32(info->ssi_pid);
7484 tinfo->ssi_uid = tswap32(info->ssi_uid);
7485 tinfo->ssi_fd = tswap32(info->ssi_fd);
7486 tinfo->ssi_tid = tswap32(info->ssi_tid);
7487 tinfo->ssi_band = tswap32(info->ssi_band);
7488 tinfo->ssi_overrun = tswap32(info->ssi_overrun);
7489 tinfo->ssi_trapno = tswap32(info->ssi_trapno);
7490 tinfo->ssi_status = tswap32(info->ssi_status);
7491 tinfo->ssi_int = tswap32(info->ssi_int);
7492 tinfo->ssi_ptr = tswap64(info->ssi_ptr);
7493 tinfo->ssi_utime = tswap64(info->ssi_utime);
7494 tinfo->ssi_stime = tswap64(info->ssi_stime);
7495 tinfo->ssi_addr = tswap64(info->ssi_addr);
7498 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7502 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7503 host_to_target_signalfd_siginfo(buf + i, buf + i);
7509 static TargetFdTrans target_signalfd_trans = {
7510 .host_to_target_data = host_to_target_data_signalfd,
7513 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7516 target_sigset_t *target_mask;
7520 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7521 return -TARGET_EINVAL;
7523 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7524 return -TARGET_EFAULT;
7527 target_to_host_sigset(&host_mask, target_mask);
7529 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7531 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7533 fd_trans_register(ret, &target_signalfd_trans);
7536 unlock_user_struct(target_mask, mask, 0);
7542 /* Map host to target signal numbers for the wait family of syscalls.
7543 Assume all other status bits are the same. */
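/* For reference, the status encoding being translated: the low 7 bits
 * hold the terminating signal (the value 0x7f means "stopped"), bit 7
 * is the core-dump flag, and bits 8..15 hold the exit code or the stop
 * signal.
 */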
7544 int host_to_target_waitstatus(int status)
7546 if (WIFSIGNALED(status)) {
7547 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7549 if (WIFSTOPPED(status)) {
7550 return (host_to_target_signal(WSTOPSIG(status)) << 8)
7556 static int open_self_cmdline(void *cpu_env, int fd)
7558 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7559 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7562 for (i = 0; i < bprm->argc; i++) {
7563 size_t len = strlen(bprm->argv[i]) + 1;
7565 if (write(fd, bprm->argv[i], len) != len) {
7573 static int open_self_maps(void *cpu_env, int fd)
7575 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7576 TaskState *ts = cpu->opaque;
7582 fp = fopen("/proc/self/maps", "r");
7587 while ((read = getline(&line, &len, fp)) != -1) {
7588 int fields, dev_maj, dev_min, inode;
7589 uint64_t min, max, offset;
7590 char flag_r, flag_w, flag_x, flag_p;
7591 char path[512] = "";
7592 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7593 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7594 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7596 if ((fields < 10) || (fields > 11)) {
7599 if (h2g_valid(min)) {
7600 int flags = page_get_flags(h2g(min));
7601 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7602 if (page_check_range(h2g(min), max - min, flags) == -1) {
7605 if (h2g(min) == ts->info->stack_limit) {
7606 pstrcpy(path, sizeof(path), " [stack]");
7608 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7609 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7610 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7611 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7612 path[0] ? " " : "", path);
7622 static int open_self_stat(void *cpu_env, int fd)
7624 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7625 TaskState *ts = cpu->opaque;
7626 abi_ulong start_stack = ts->info->start_stack;
7629 for (i = 0; i < 44; i++) {
7637 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7638 } else if (i == 1) {
7640 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7641 } else if (i == 27) {
7644 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7646 /* for the rest, there is MasterCard: every remaining field reads as 0 */
7647 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7651 if (write(fd, buf, len) != len) {
7659 static int open_self_auxv(void *cpu_env, int fd)
7661 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7662 TaskState *ts = cpu->opaque;
7663 abi_ulong auxv = ts->info->saved_auxv;
7664 abi_ulong len = ts->info->auxv_len;
7668 * The auxiliary vector is stored in the target process's stack.
7669 * Read in the whole auxv vector and copy it to the file.
7671 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7675 r = write(fd, ptr, len);
7682 lseek(fd, 0, SEEK_SET);
7683 unlock_user(ptr, auxv, len);
7689 static int is_proc_myself(const char *filename, const char *entry)
7691 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7692 filename += strlen("/proc/");
7693 if (!strncmp(filename, "self/", strlen("self/"))) {
7694 filename += strlen("self/");
7695 } else if (*filename >= '1' && *filename <= '9') {
7697 snprintf(myself, sizeof(myself), "%d/", getpid());
7698 if (!strncmp(filename, myself, strlen(myself))) {
7699 filename += strlen(myself);
7706 if (!strcmp(filename, entry)) {
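/* Example: is_proc_myself("/proc/self/maps", "maps") matches, as does
 * "/proc/<our own pid>/maps"; paths naming any other process fall
 * through to the host filesystem.
 */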
7713 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7714 static int is_proc(const char *filename, const char *entry)
7716 return strcmp(filename, entry) == 0;
7719 static int open_net_route(void *cpu_env, int fd)
7726 fp = fopen("/proc/net/route", "r");
7733 read = getline(&line, &len, fp);
7734 dprintf(fd, "%s", line);
7738 while ((read = getline(&line, &len, fp)) != -1) {
7740 uint32_t dest, gw, mask;
7741 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7742 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7743 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7744 &mask, &mtu, &window, &irtt);
7745 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7746 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7747 metric, tswap32(mask), mtu, window, irtt);
7757 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7760 const char *filename;
7761 int (*fill)(void *cpu_env, int fd);
7762 int (*cmp)(const char *s1, const char *s2);
7764 const struct fake_open *fake_open;
7765 static const struct fake_open fakes[] = {
7766 { "maps", open_self_maps, is_proc_myself },
7767 { "stat", open_self_stat, is_proc_myself },
7768 { "auxv", open_self_auxv, is_proc_myself },
7769 { "cmdline", open_self_cmdline, is_proc_myself },
7770 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7771 { "/proc/net/route", open_net_route, is_proc },
7773 { NULL, NULL, NULL }
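/* These entries must be synthesized because the host's own /proc files
 * would describe the QEMU process itself (host mappings, host auxv),
 * not the guest. /proc/net/route only needs rewriting when host and
 * target disagree on endianness, as it contains hex-encoded 32-bit
 * addresses in kernel byte order.
 */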
7776 if (is_proc_myself(pathname, "exe")) {
7777 int execfd = qemu_getauxval(AT_EXECFD);
7778 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7781 for (fake_open = fakes; fake_open->filename; fake_open++) {
7782 if (fake_open->cmp(pathname, fake_open->filename)) {
7787 if (fake_open->filename) {
7789 char filename[PATH_MAX];
7792 /* create temporary file to map stat to */
7793 tmpdir = getenv("TMPDIR");
7796 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7797 fd = mkstemp(filename);
7803 if ((r = fake_open->fill(cpu_env, fd))) {
7809 lseek(fd, 0, SEEK_SET);
7814 return safe_openat(dirfd, path(pathname), flags, mode);
7817 #define TIMER_MAGIC 0x0caf0000
7818 #define TIMER_MAGIC_MASK 0xffff0000
7820 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7821 static target_timer_t get_timer_id(abi_long arg)
7823 target_timer_t timerid = arg;
7825 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7826 return -TARGET_EINVAL;
7831 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7832 return -TARGET_EINVAL;
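/* Example: guest-visible timer IDs are (TIMER_MAGIC | index), so the
 * timer in slot 2 appears to the guest as 0x0caf0002; get_timer_id()
 * strips the magic to recover the index and rejects any value whose
 * upper half is not the magic.
 */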
7838 static abi_long swap_data_eventfd(void *buf, size_t len)
7840 uint64_t *counter = buf;
7843 if (len < sizeof(uint64_t)) {
7847 for (i = 0; i < len; i += sizeof(uint64_t)) {
7848 *counter = tswap64(*counter);
7855 static TargetFdTrans target_eventfd_trans = {
7856 .host_to_target_data = swap_data_eventfd,
7857 .target_to_host_data = swap_data_eventfd,
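/* An eventfd read() or write() always transfers a single 8-byte
 * host-endian counter, so one byteswap routine serves both directions.
 */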
7860 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7861 (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7862 defined(__NR_inotify_init1))
7863 static abi_long host_to_target_data_inotify(void *buf, size_t len)
7865 struct inotify_event *ev;
7869 for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
7870 ev = (struct inotify_event *)((char *)buf + i);
7873 ev->wd = tswap32(ev->wd);
7874 ev->mask = tswap32(ev->mask);
7875 ev->cookie = tswap32(ev->cookie);
7876 ev->len = tswap32(name_len);
7882 static TargetFdTrans target_inotify_trans = {
7883 .host_to_target_data = host_to_target_data_inotify,
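/* inotify records are variable length (sizeof(struct inotify_event)
 * plus ev->len name bytes), and only the fixed header fields need
 * swapping; the name is a byte string and passes through untouched.
 */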
7887 static int target_to_host_cpu_mask(unsigned long *host_mask,
7889 abi_ulong target_addr,
7892 unsigned target_bits = sizeof(abi_ulong) * 8;
7893 unsigned host_bits = sizeof(*host_mask) * 8;
7894 abi_ulong *target_mask;
7897 assert(host_size >= target_size);
7899 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7901 return -TARGET_EFAULT;
7903 memset(host_mask, 0, host_size);
7905 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7906 unsigned bit = i * target_bits;
7909 __get_user(val, &target_mask[i]);
7910 for (j = 0; j < target_bits; j++, bit++) {
7911 if (val & (1UL << j)) {
7912 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7917 unlock_user(target_mask, target_addr, 0);
7921 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7923 abi_ulong target_addr,
7926 unsigned target_bits = sizeof(abi_ulong) * 8;
7927 unsigned host_bits = sizeof(*host_mask) * 8;
7928 abi_ulong *target_mask;
7931 assert(host_size >= target_size);
7933 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7935 return -TARGET_EFAULT;
7938 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7939 unsigned bit = i * target_bits;
7942 for (j = 0; j < target_bits; j++, bit++) {
7943 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7947 __put_user(val, &target_mask[i]);
7950 unlock_user(target_mask, target_addr, target_size);
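/* The bit-by-bit repacking in the two helpers above is needed because
 * the guest's abi_ulong and the host's unsigned long may differ in
 * width (e.g. a 32-bit guest on a 64-bit host), so affinity masks
 * cannot simply be copied between the two representations.
 */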
7954 /* do_syscall() should always have a single exit point at the end so
7955 that actions, such as logging of syscall results, can be performed.
7956 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7957 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7958 abi_long arg2, abi_long arg3, abi_long arg4,
7959 abi_long arg5, abi_long arg6, abi_long arg7,
7962 CPUState *cpu = ENV_GET_CPU(cpu_env);
7968 #if defined(DEBUG_ERESTARTSYS)
7969 /* Debug-only code for exercising the syscall-restart code paths
7970 * in the per-architecture cpu main loops: restart every syscall
7971 * the guest makes once before letting it through.
7978 return -TARGET_ERESTARTSYS;
7984 gemu_log("syscall %d", num);
7986 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7988 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7991 case TARGET_NR_exit:
7992 /* In old applications this may be used to implement _exit(2).
7993 However, in threaded applications it is used for thread termination,
7994 and _exit_group is used for application termination.
7995 Do thread termination if we have more than one thread. */
7997 if (block_signals()) {
7998 ret = -TARGET_ERESTARTSYS;
8004 if (CPU_NEXT(first_cpu)) {
8007 /* Remove the CPU from the list. */
8008 QTAILQ_REMOVE(&cpus, cpu, node);
8013 if (ts->child_tidptr) {
8014 put_user_u32(0, ts->child_tidptr);
8015 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
8019 object_unref(OBJECT(cpu));
8021 rcu_unregister_thread();
8029 gdb_exit(cpu_env, arg1);
8031 ret = 0; /* avoid warning */
8033 case TARGET_NR_read:
8037 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8039 ret = get_errno(safe_read(arg1, p, arg3));
8041 fd_trans_host_to_target_data(arg1)) {
8042 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8044 unlock_user(p, arg2, ret);
8047 case TARGET_NR_write:
8048 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8050 if (fd_trans_target_to_host_data(arg1)) {
8051 void *copy = g_malloc(arg3);
8052 memcpy(copy, p, arg3);
8053 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8055 ret = get_errno(safe_write(arg1, copy, ret));
8059 ret = get_errno(safe_write(arg1, p, arg3));
8061 unlock_user(p, arg2, 0);
8063 #ifdef TARGET_NR_open
8064 case TARGET_NR_open:
8065 if (!(p = lock_user_string(arg1)))
8067 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8068 target_to_host_bitmask(arg2, fcntl_flags_tbl),
8070 fd_trans_unregister(ret);
8071 unlock_user(p, arg1, 0);
8074 case TARGET_NR_openat:
8075 if (!(p = lock_user_string(arg2)))
8077 ret = get_errno(do_openat(cpu_env, arg1, p,
8078 target_to_host_bitmask(arg3, fcntl_flags_tbl),
8080 fd_trans_unregister(ret);
8081 unlock_user(p, arg2, 0);
8083 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8084 case TARGET_NR_name_to_handle_at:
8085 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8088 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8089 case TARGET_NR_open_by_handle_at:
8090 ret = do_open_by_handle_at(arg1, arg2, arg3);
8091 fd_trans_unregister(ret);
8094 case TARGET_NR_close:
8095 fd_trans_unregister(arg1);
8096 ret = get_errno(close(arg1));
8101 #ifdef TARGET_NR_fork
8102 case TARGET_NR_fork:
8103 ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8106 #ifdef TARGET_NR_waitpid
8107 case TARGET_NR_waitpid:
8110 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8111 if (!is_error(ret) && arg2 && ret
8112 && put_user_s32(host_to_target_waitstatus(status), arg2))
8117 #ifdef TARGET_NR_waitid
8118 case TARGET_NR_waitid:
8122 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8123 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8124 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8126 host_to_target_siginfo(p, &info);
8127 unlock_user(p, arg3, sizeof(target_siginfo_t));
8132 #ifdef TARGET_NR_creat /* not on alpha */
8133 case TARGET_NR_creat:
8134 if (!(p = lock_user_string(arg1)))
8136 ret = get_errno(creat(p, arg2));
8137 fd_trans_unregister(ret);
8138 unlock_user(p, arg1, 0);
8141 #ifdef TARGET_NR_link
8142 case TARGET_NR_link:
8145 p = lock_user_string(arg1);
8146 p2 = lock_user_string(arg2);
8148 ret = -TARGET_EFAULT;
8150 ret = get_errno(link(p, p2));
8151 unlock_user(p2, arg2, 0);
8152 unlock_user(p, arg1, 0);
8156 #if defined(TARGET_NR_linkat)
8157 case TARGET_NR_linkat:
8162 p = lock_user_string(arg2);
8163 p2 = lock_user_string(arg4);
8165 ret = -TARGET_EFAULT;
8167 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8168 unlock_user(p, arg2, 0);
8169 unlock_user(p2, arg4, 0);
8173 #ifdef TARGET_NR_unlink
8174 case TARGET_NR_unlink:
8175 if (!(p = lock_user_string(arg1)))
8177 ret = get_errno(unlink(p));
8178 unlock_user(p, arg1, 0);
8181 #if defined(TARGET_NR_unlinkat)
8182 case TARGET_NR_unlinkat:
8183 if (!(p = lock_user_string(arg2)))
8185 ret = get_errno(unlinkat(arg1, p, arg3));
8186 unlock_user(p, arg2, 0);
8189 case TARGET_NR_execve:
8191 char **argp, **envp;
8194 abi_ulong guest_argp;
8195 abi_ulong guest_envp;
8202 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8203 if (get_user_ual(addr, gp))
8211 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8212 if (get_user_ual(addr, gp))
8219 argp = g_new0(char *, argc + 1);
8220 envp = g_new0(char *, envc + 1);
8222 for (gp = guest_argp, q = argp; gp;
8223 gp += sizeof(abi_ulong), q++) {
8224 if (get_user_ual(addr, gp))
8228 if (!(*q = lock_user_string(addr)))
8230 total_size += strlen(*q) + 1;
8234 for (gp = guest_envp, q = envp; gp;
8235 gp += sizeof(abi_ulong), q++) {
8236 if (get_user_ual(addr, gp))
8240 if (!(*q = lock_user_string(addr)))
8242 total_size += strlen(*q) + 1;
8246 if (!(p = lock_user_string(arg1)))
8248 /* Although execve() is not an interruptible syscall it is
8249 * a special case where we must use the safe_syscall wrapper:
8250 * if we allow a signal to happen before we make the host
8251 * syscall then we will 'lose' it, because at the point of
8252 * execve the process leaves QEMU's control. So we use the
8253 * safe syscall wrapper to ensure that we either take the
8254 * signal as a guest signal, or else it does not happen
8255 * before the execve completes and makes it the other
8256 * program's problem.
8258 ret = get_errno(safe_execve(p, argp, envp));
8259 unlock_user(p, arg1, 0);
8264 ret = -TARGET_EFAULT;
8267 for (gp = guest_argp, q = argp; *q;
8268 gp += sizeof(abi_ulong), q++) {
8269 if (get_user_ual(addr, gp)
8272 unlock_user(*q, addr, 0);
8274 for (gp = guest_envp, q = envp; *q;
8275 gp += sizeof(abi_ulong), q++) {
8276 if (get_user_ual(addr, gp)
8279 unlock_user(*q, addr, 0);
8286 case TARGET_NR_chdir:
8287 if (!(p = lock_user_string(arg1)))
8289 ret = get_errno(chdir(p));
8290 unlock_user(p, arg1, 0);
8292 #ifdef TARGET_NR_time
8293 case TARGET_NR_time:
8296 ret = get_errno(time(&host_time));
8299 && put_user_sal(host_time, arg1))
8304 #ifdef TARGET_NR_mknod
8305 case TARGET_NR_mknod:
8306 if (!(p = lock_user_string(arg1)))
8308 ret = get_errno(mknod(p, arg2, arg3));
8309 unlock_user(p, arg1, 0);
8312 #if defined(TARGET_NR_mknodat)
8313 case TARGET_NR_mknodat:
8314 if (!(p = lock_user_string(arg2)))
8316 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8317 unlock_user(p, arg2, 0);
8320 #ifdef TARGET_NR_chmod
8321 case TARGET_NR_chmod:
8322 if (!(p = lock_user_string(arg1)))
8324 ret = get_errno(chmod(p, arg2));
8325 unlock_user(p, arg1, 0);
8328 #ifdef TARGET_NR_break
8329 case TARGET_NR_break:
8332 #ifdef TARGET_NR_oldstat
8333 case TARGET_NR_oldstat:
8336 case TARGET_NR_lseek:
8337 ret = get_errno(lseek(arg1, arg2, arg3));
8339 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8340 /* Alpha specific */
8341 case TARGET_NR_getxpid:
8342 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8343 ret = get_errno(getpid());
8346 #ifdef TARGET_NR_getpid
8347 case TARGET_NR_getpid:
8348 ret = get_errno(getpid());
8351 case TARGET_NR_mount:
8353 /* need to look at the data field */
8357 p = lock_user_string(arg1);
8365 p2 = lock_user_string(arg2);
8368 unlock_user(p, arg1, 0);
8374 p3 = lock_user_string(arg3);
8377 unlock_user(p, arg1, 0);
8379 unlock_user(p2, arg2, 0);
8386 /* FIXME - arg5 should be locked, but it isn't clear how to
8387 * do that since it's not guaranteed to be a NULL-terminated
8388 * type */
8391 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8393 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8395 ret = get_errno(ret);
8398 unlock_user(p, arg1, 0);
8400 unlock_user(p2, arg2, 0);
8402 unlock_user(p3, arg3, 0);
8406 #ifdef TARGET_NR_umount
8407 case TARGET_NR_umount:
8408 if (!(p = lock_user_string(arg1)))
8410 ret = get_errno(umount(p));
8411 unlock_user(p, arg1, 0);
8414 #ifdef TARGET_NR_stime /* not on alpha */
8415 case TARGET_NR_stime:
8418 if (get_user_sal(host_time, arg1))
8420 ret = get_errno(stime(&host_time));
8424 case TARGET_NR_ptrace:
8426 #ifdef TARGET_NR_alarm /* not on alpha */
8427 case TARGET_NR_alarm:
8431 #ifdef TARGET_NR_oldfstat
8432 case TARGET_NR_oldfstat:
8435 #ifdef TARGET_NR_pause /* not on alpha */
8436 case TARGET_NR_pause:
8437 if (!block_signals()) {
8438 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8440 ret = -TARGET_EINTR;
8443 #ifdef TARGET_NR_utime
8444 case TARGET_NR_utime:
8446 struct utimbuf tbuf, *host_tbuf;
8447 struct target_utimbuf *target_tbuf;
8449 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8451 tbuf.actime = tswapal(target_tbuf->actime);
8452 tbuf.modtime = tswapal(target_tbuf->modtime);
8453 unlock_user_struct(target_tbuf, arg2, 0);
8458 if (!(p = lock_user_string(arg1)))
8460 ret = get_errno(utime(p, host_tbuf));
8461 unlock_user(p, arg1, 0);
8465 #ifdef TARGET_NR_utimes
8466 case TARGET_NR_utimes:
8468 struct timeval *tvp, tv[2];
8470 if (copy_from_user_timeval(&tv[0], arg2)
8471 || copy_from_user_timeval(&tv[1],
8472 arg2 + sizeof(struct target_timeval)))
8478 if (!(p = lock_user_string(arg1)))
8480 ret = get_errno(utimes(p, tvp));
8481 unlock_user(p, arg1, 0);
8485 #if defined(TARGET_NR_futimesat)
8486 case TARGET_NR_futimesat:
8488 struct timeval *tvp, tv[2];
8490 if (copy_from_user_timeval(&tv[0], arg3)
8491 || copy_from_user_timeval(&tv[1],
8492 arg3 + sizeof(struct target_timeval)))
8498 if (!(p = lock_user_string(arg2)))
8500 ret = get_errno(futimesat(arg1, path(p), tvp));
8501 unlock_user(p, arg2, 0);
8505 #ifdef TARGET_NR_stty
8506 case TARGET_NR_stty:
8509 #ifdef TARGET_NR_gtty
8510 case TARGET_NR_gtty:
8513 #ifdef TARGET_NR_access
8514 case TARGET_NR_access:
8515 if (!(p = lock_user_string(arg1)))
8517 ret = get_errno(access(path(p), arg2));
8518 unlock_user(p, arg1, 0);
8521 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8522 case TARGET_NR_faccessat:
8523 if (!(p = lock_user_string(arg2)))
8525 ret = get_errno(faccessat(arg1, p, arg3, 0));
8526 unlock_user(p, arg2, 0);
8529 #ifdef TARGET_NR_nice /* not on alpha */
8530 case TARGET_NR_nice:
8531 ret = get_errno(nice(arg1));
8534 #ifdef TARGET_NR_ftime
8535 case TARGET_NR_ftime:
8538 case TARGET_NR_sync:
8542 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8543 case TARGET_NR_syncfs:
8544 ret = get_errno(syncfs(arg1));
8547 case TARGET_NR_kill:
8548 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8550 #ifdef TARGET_NR_rename
8551 case TARGET_NR_rename:
8554 p = lock_user_string(arg1);
8555 p2 = lock_user_string(arg2);
8557 ret = -TARGET_EFAULT;
8559 ret = get_errno(rename(p, p2));
8560 unlock_user(p2, arg2, 0);
8561 unlock_user(p, arg1, 0);
8565 #if defined(TARGET_NR_renameat)
8566 case TARGET_NR_renameat:
8569 p = lock_user_string(arg2);
8570 p2 = lock_user_string(arg4);
8572 ret = -TARGET_EFAULT;
8574 ret = get_errno(renameat(arg1, p, arg3, p2));
8575 unlock_user(p2, arg4, 0);
8576 unlock_user(p, arg2, 0);
8580 #if defined(TARGET_NR_renameat2)
8581 case TARGET_NR_renameat2:
8584 p = lock_user_string(arg2);
8585 p2 = lock_user_string(arg4);
8587 ret = -TARGET_EFAULT;
8589 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8591 unlock_user(p2, arg4, 0);
8592 unlock_user(p, arg2, 0);
8596 #ifdef TARGET_NR_mkdir
8597 case TARGET_NR_mkdir:
8598 if (!(p = lock_user_string(arg1)))
8600 ret = get_errno(mkdir(p, arg2));
8601 unlock_user(p, arg1, 0);
8604 #if defined(TARGET_NR_mkdirat)
8605 case TARGET_NR_mkdirat:
8606 if (!(p = lock_user_string(arg2)))
8608 ret = get_errno(mkdirat(arg1, p, arg3));
8609 unlock_user(p, arg2, 0);
8612 #ifdef TARGET_NR_rmdir
8613 case TARGET_NR_rmdir:
8614 if (!(p = lock_user_string(arg1)))
8616 ret = get_errno(rmdir(p));
8617 unlock_user(p, arg1, 0);
8621 ret = get_errno(dup(arg1));
8623 fd_trans_dup(arg1, ret);
8626 #ifdef TARGET_NR_pipe
8627 case TARGET_NR_pipe:
8628 ret = do_pipe(cpu_env, arg1, 0, 0);
8631 #ifdef TARGET_NR_pipe2
8632 case TARGET_NR_pipe2:
8633 ret = do_pipe(cpu_env, arg1,
8634 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8637 case TARGET_NR_times:
8639 struct target_tms *tmsp;
8641 ret = get_errno(times(&tms));
8643 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8646 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8647 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8648 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8649 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8652 ret = host_to_target_clock_t(ret);
8655 #ifdef TARGET_NR_prof
8656 case TARGET_NR_prof:
8659 #ifdef TARGET_NR_signal
8660 case TARGET_NR_signal:
8663 case TARGET_NR_acct:
8665 ret = get_errno(acct(NULL));
8667 if (!(p = lock_user_string(arg1)))
8669 ret = get_errno(acct(path(p)));
8670 unlock_user(p, arg1, 0);
8673 #ifdef TARGET_NR_umount2
8674 case TARGET_NR_umount2:
8675 if (!(p = lock_user_string(arg1)))
8677 ret = get_errno(umount2(p, arg2));
8678 unlock_user(p, arg1, 0);
8681 #ifdef TARGET_NR_lock
8682 case TARGET_NR_lock:
8685 case TARGET_NR_ioctl:
8686 ret = do_ioctl(arg1, arg2, arg3);
8688 #ifdef TARGET_NR_fcntl
8689 case TARGET_NR_fcntl:
8690 ret = do_fcntl(arg1, arg2, arg3);
8693 #ifdef TARGET_NR_mpx
8697 case TARGET_NR_setpgid:
8698 ret = get_errno(setpgid(arg1, arg2));
8700 #ifdef TARGET_NR_ulimit
8701 case TARGET_NR_ulimit:
8704 #ifdef TARGET_NR_oldolduname
8705 case TARGET_NR_oldolduname:
8708 case TARGET_NR_umask:
8709 ret = get_errno(umask(arg1));
8711 case TARGET_NR_chroot:
8712 if (!(p = lock_user_string(arg1)))
8714 ret = get_errno(chroot(p));
8715 unlock_user(p, arg1, 0);
8717 #ifdef TARGET_NR_ustat
8718 case TARGET_NR_ustat:
8721 #ifdef TARGET_NR_dup2
8722 case TARGET_NR_dup2:
8723 ret = get_errno(dup2(arg1, arg2));
8725 fd_trans_dup(arg1, arg2);
8729 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8730 case TARGET_NR_dup3:
8734 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8737 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8738 ret = get_errno(dup3(arg1, arg2, host_flags));
8740 fd_trans_dup(arg1, arg2);
8745 #ifdef TARGET_NR_getppid /* not on alpha */
8746 case TARGET_NR_getppid:
8747 ret = get_errno(getppid());
8750 #ifdef TARGET_NR_getpgrp
8751 case TARGET_NR_getpgrp:
8752 ret = get_errno(getpgrp());
8755 case TARGET_NR_setsid:
8756 ret = get_errno(setsid());
8758 #ifdef TARGET_NR_sigaction
8759 case TARGET_NR_sigaction:
8761 #if defined(TARGET_ALPHA)
8762 struct target_sigaction act, oact, *pact = 0;
8763 struct target_old_sigaction *old_act;
8765 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8767 act._sa_handler = old_act->_sa_handler;
8768 target_siginitset(&act.sa_mask, old_act->sa_mask);
8769 act.sa_flags = old_act->sa_flags;
8770 act.sa_restorer = 0;
8771 unlock_user_struct(old_act, arg2, 0);
8774 ret = get_errno(do_sigaction(arg1, pact, &oact));
8775 if (!is_error(ret) && arg3) {
8776 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8778 old_act->_sa_handler = oact._sa_handler;
8779 old_act->sa_mask = oact.sa_mask.sig[0];
8780 old_act->sa_flags = oact.sa_flags;
8781 unlock_user_struct(old_act, arg3, 1);
8783 #elif defined(TARGET_MIPS)
8784 struct target_sigaction act, oact, *pact, *old_act;
8787 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8789 act._sa_handler = old_act->_sa_handler;
8790 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8791 act.sa_flags = old_act->sa_flags;
8792 unlock_user_struct(old_act, arg2, 0);
8798 ret = get_errno(do_sigaction(arg1, pact, &oact));
8800 if (!is_error(ret) && arg3) {
8801 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8803 old_act->_sa_handler = oact._sa_handler;
8804 old_act->sa_flags = oact.sa_flags;
8805 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8806 old_act->sa_mask.sig[1] = 0;
8807 old_act->sa_mask.sig[2] = 0;
8808 old_act->sa_mask.sig[3] = 0;
8809 unlock_user_struct(old_act, arg3, 1);
8812 struct target_old_sigaction *old_act;
8813 struct target_sigaction act, oact, *pact;
8815 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8817 act._sa_handler = old_act->_sa_handler;
8818 target_siginitset(&act.sa_mask, old_act->sa_mask);
8819 act.sa_flags = old_act->sa_flags;
8820 act.sa_restorer = old_act->sa_restorer;
8821 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8822 act.ka_restorer = 0;
8824 unlock_user_struct(old_act, arg2, 0);
8829 ret = get_errno(do_sigaction(arg1, pact, &oact));
8830 if (!is_error(ret) && arg3) {
8831 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8833 old_act->_sa_handler = oact._sa_handler;
8834 old_act->sa_mask = oact.sa_mask.sig[0];
8835 old_act->sa_flags = oact.sa_flags;
8836 old_act->sa_restorer = oact.sa_restorer;
8837 unlock_user_struct(old_act, arg3, 1);
8843 case TARGET_NR_rt_sigaction:
8845 #if defined(TARGET_ALPHA)
8846 /* For Alpha and SPARC this is a 5 argument syscall, with
8847 * a 'restorer' parameter which must be copied into the
8848 * sa_restorer field of the sigaction struct.
8849 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8850 * and arg5 is the sigsetsize.
8851 * Alpha also has a separate rt_sigaction struct that it uses
8852 * here; SPARC uses the usual sigaction struct.
8854 struct target_rt_sigaction *rt_act;
8855 struct target_sigaction act, oact, *pact = 0;
8857 if (arg4 != sizeof(target_sigset_t)) {
8858 ret = -TARGET_EINVAL;
8862 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8864 act._sa_handler = rt_act->_sa_handler;
8865 act.sa_mask = rt_act->sa_mask;
8866 act.sa_flags = rt_act->sa_flags;
8867 act.sa_restorer = arg5;
8868 unlock_user_struct(rt_act, arg2, 0);
8871 ret = get_errno(do_sigaction(arg1, pact, &oact));
8872 if (!is_error(ret) && arg3) {
8873 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8875 rt_act->_sa_handler = oact._sa_handler;
8876 rt_act->sa_mask = oact.sa_mask;
8877 rt_act->sa_flags = oact.sa_flags;
8878 unlock_user_struct(rt_act, arg3, 1);
8882 target_ulong restorer = arg4;
8883 target_ulong sigsetsize = arg5;
8885 target_ulong sigsetsize = arg4;
8887 struct target_sigaction *act;
8888 struct target_sigaction *oact;
8890 if (sigsetsize != sizeof(target_sigset_t)) {
8891 ret = -TARGET_EINVAL;
8895 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8898 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8899 act->ka_restorer = restorer;
8905 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8906 ret = -TARGET_EFAULT;
8907 goto rt_sigaction_fail;
8911 ret = get_errno(do_sigaction(arg1, act, oact));
8914 unlock_user_struct(act, arg2, 0);
8916 unlock_user_struct(oact, arg3, 1);
8920 #ifdef TARGET_NR_sgetmask /* not on alpha */
8921 case TARGET_NR_sgetmask:
8924 abi_ulong target_set;
8925 ret = do_sigprocmask(0, NULL, &cur_set);
8927 host_to_target_old_sigset(&target_set, &cur_set);
8933 #ifdef TARGET_NR_ssetmask /* not on alpha */
8934 case TARGET_NR_ssetmask:
8937 abi_ulong target_set = arg1;
8938 target_to_host_old_sigset(&set, &target_set);
8939 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8941 host_to_target_old_sigset(&target_set, &oset);
8947 #ifdef TARGET_NR_sigprocmask
8948 case TARGET_NR_sigprocmask:
8950 #if defined(TARGET_ALPHA)
8951 sigset_t set, oldset;
8956 case TARGET_SIG_BLOCK:
8959 case TARGET_SIG_UNBLOCK:
8962 case TARGET_SIG_SETMASK:
8966 ret = -TARGET_EINVAL;
8970 target_to_host_old_sigset(&set, &mask);
8972 ret = do_sigprocmask(how, &set, &oldset);
8973 if (!is_error(ret)) {
8974 host_to_target_old_sigset(&mask, &oldset);
8976 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8979 sigset_t set, oldset, *set_ptr;
8984 case TARGET_SIG_BLOCK:
8987 case TARGET_SIG_UNBLOCK:
8990 case TARGET_SIG_SETMASK:
8994 ret = -TARGET_EINVAL;
8997 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8999 target_to_host_old_sigset(&set, p);
9000 unlock_user(p, arg2, 0);
9006 ret = do_sigprocmask(how, set_ptr, &oldset);
9007 if (!is_error(ret) && arg3) {
9008 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9010 host_to_target_old_sigset(p, &oldset);
9011 unlock_user(p, arg3, sizeof(target_sigset_t));
9017 case TARGET_NR_rt_sigprocmask:
9020 sigset_t set, oldset, *set_ptr;
9022 if (arg4 != sizeof(target_sigset_t)) {
9023 ret = -TARGET_EINVAL;
9029 case TARGET_SIG_BLOCK:
9032 case TARGET_SIG_UNBLOCK:
9035 case TARGET_SIG_SETMASK:
9039 ret = -TARGET_EINVAL;
9042 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9044 target_to_host_sigset(&set, p);
9045 unlock_user(p, arg2, 0);
9051 ret = do_sigprocmask(how, set_ptr, &oldset);
9052 if (!is_error(ret) && arg3) {
9053 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9055 host_to_target_sigset(p, &oldset);
9056 unlock_user(p, arg3, sizeof(target_sigset_t));
9060 #ifdef TARGET_NR_sigpending
9061 case TARGET_NR_sigpending:
9064 ret = get_errno(sigpending(&set));
9065 if (!is_error(ret)) {
9066 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9068 host_to_target_old_sigset(p, &set);
9069 unlock_user(p, arg1, sizeof(target_sigset_t));
9074 case TARGET_NR_rt_sigpending:
9078 /* Yes, this check is >, not != like most. We follow the kernel's
9079 * logic and it does it like this because it implements
9080 * NR_sigpending through the same code path, and in that case
9081 * the old_sigset_t is smaller in size.
9083 if (arg2 > sizeof(target_sigset_t)) {
9084 ret = -TARGET_EINVAL;
9088 ret = get_errno(sigpending(&set));
9089 if (!is_error(ret)) {
9090 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9092 host_to_target_sigset(p, &set);
9093 unlock_user(p, arg1, sizeof(target_sigset_t));
9097 #ifdef TARGET_NR_sigsuspend
9098 case TARGET_NR_sigsuspend:
9100 TaskState *ts = cpu->opaque;
9101 #if defined(TARGET_ALPHA)
9102 abi_ulong mask = arg1;
9103 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9105 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9107 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9108 unlock_user(p, arg1, 0);
9110 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9112 if (ret != -TARGET_ERESTARTSYS) {
9113 ts->in_sigsuspend = 1;
9118 case TARGET_NR_rt_sigsuspend:
9120 TaskState *ts = cpu->opaque;
9122 if (arg2 != sizeof(target_sigset_t)) {
9123 ret = -TARGET_EINVAL;
9126 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9128 target_to_host_sigset(&ts->sigsuspend_mask, p);
9129 unlock_user(p, arg1, 0);
9130 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9132 if (ret != -TARGET_ERESTARTSYS) {
9133 ts->in_sigsuspend = 1;
9137 case TARGET_NR_rt_sigtimedwait:
9140 struct timespec uts, *puts;
9143 if (arg4 != sizeof(target_sigset_t)) {
9144 ret = -TARGET_EINVAL;
9148 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9150 target_to_host_sigset(&set, p);
9151 unlock_user(p, arg1, 0);
9154 target_to_host_timespec(puts, arg3);
9158 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9160 if (!is_error(ret)) {
9162 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9167 host_to_target_siginfo(p, &uinfo);
9168 unlock_user(p, arg2, sizeof(target_siginfo_t));
9170 ret = host_to_target_signal(ret);
9174 case TARGET_NR_rt_sigqueueinfo:
9178 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9182 target_to_host_siginfo(&uinfo, p);
9183 unlock_user(p, arg3, 0);
9184 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9187 case TARGET_NR_rt_tgsigqueueinfo:
9191 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9195 target_to_host_siginfo(&uinfo, p);
9196 unlock_user(p, arg4, 0);
9197 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9200 #ifdef TARGET_NR_sigreturn
9201 case TARGET_NR_sigreturn:
9202 if (block_signals()) {
9203 ret = -TARGET_ERESTARTSYS;
9205 ret = do_sigreturn(cpu_env);
9209 case TARGET_NR_rt_sigreturn:
9210 if (block_signals()) {
9211 ret = -TARGET_ERESTARTSYS;
9213 ret = do_rt_sigreturn(cpu_env);
9216 case TARGET_NR_sethostname:
9217 if (!(p = lock_user_string(arg1)))
9219 ret = get_errno(sethostname(p, arg2));
9220 unlock_user(p, arg1, 0);
9222 case TARGET_NR_setrlimit:
9224 int resource = target_to_host_resource(arg1);
9225 struct target_rlimit *target_rlim;
9227 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9229 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9230 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9231 unlock_user_struct(target_rlim, arg2, 0);
9232 ret = get_errno(setrlimit(resource, &rlim));
9235 case TARGET_NR_getrlimit:
9237 int resource = target_to_host_resource(arg1);
9238 struct target_rlimit *target_rlim;
9241 ret = get_errno(getrlimit(resource, &rlim));
9242 if (!is_error(ret)) {
9243 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9245 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9246 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9247 unlock_user_struct(target_rlim, arg2, 1);
9251 case TARGET_NR_getrusage:
9253 struct rusage rusage;
9254 ret = get_errno(getrusage(arg1, &rusage));
9255 if (!is_error(ret)) {
9256 ret = host_to_target_rusage(arg2, &rusage);
9260 case TARGET_NR_gettimeofday:
9263 ret = get_errno(gettimeofday(&tv, NULL));
9264 if (!is_error(ret)) {
9265 if (copy_to_user_timeval(arg1, &tv))
9270 case TARGET_NR_settimeofday:
9272 struct timeval tv, *ptv = NULL;
9273 struct timezone tz, *ptz = NULL;
9276 if (copy_from_user_timeval(&tv, arg1)) {
9283 if (copy_from_user_timezone(&tz, arg2)) {
9289 ret = get_errno(settimeofday(ptv, ptz));
9292 #if defined(TARGET_NR_select)
9293 case TARGET_NR_select:
9294 #if defined(TARGET_WANT_NI_OLD_SELECT)
9295 /* some architectures used to have old_select here
9296 * but now ENOSYS it.
9298 ret = -TARGET_ENOSYS;
9299 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9300 ret = do_old_select(arg1);
9302 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9306 #ifdef TARGET_NR_pselect6
9307 case TARGET_NR_pselect6:
9309 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9310 fd_set rfds, wfds, efds;
9311 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9312 struct timespec ts, *ts_ptr;
9315 * The 6th arg is actually two args smashed together,
9316 * so we cannot use the C library.
9324 abi_ulong arg_sigset, arg_sigsize, *arg7;
9325 target_sigset_t *target_sigset;
9333 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9337 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9341 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9347 * This takes a timespec, and not a timeval, so we cannot
9348 * use the do_select() helper ...
9351 if (target_to_host_timespec(&ts, ts_addr)) {
9359 /* Extract the two packed args for the sigset */
9362 sig.size = SIGSET_T_SIZE;
9364 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9368 arg_sigset = tswapal(arg7[0]);
9369 arg_sigsize = tswapal(arg7[1]);
9370 unlock_user(arg7, arg6, 0);
9374 if (arg_sigsize != sizeof(*target_sigset)) {
9375 /* Like the kernel, we enforce correct size sigsets */
9376 ret = -TARGET_EINVAL;
9379 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9380 sizeof(*target_sigset), 1);
9381 if (!target_sigset) {
9384 target_to_host_sigset(&set, target_sigset);
9385 unlock_user(target_sigset, arg_sigset, 0);
9393 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9396 if (!is_error(ret)) {
9397 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9399 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9401 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9404 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
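/* For reference, the packed sixth argument unpacked above follows the
 * kernel's own pselect6() convention: a two-word block holding a
 * pointer to the sigset and its size, needed because the syscall ABI
 * carries at most six register arguments.
 */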
9410 #ifdef TARGET_NR_symlink
9411 case TARGET_NR_symlink:
9414 p = lock_user_string(arg1);
9415 p2 = lock_user_string(arg2);
9417 ret = -TARGET_EFAULT;
9419 ret = get_errno(symlink(p, p2));
9420 unlock_user(p2, arg2, 0);
9421 unlock_user(p, arg1, 0);
9425 #if defined(TARGET_NR_symlinkat)
9426 case TARGET_NR_symlinkat:
9429 p = lock_user_string(arg1);
9430 p2 = lock_user_string(arg3);
9432 ret = -TARGET_EFAULT;
9434 ret = get_errno(symlinkat(p, arg2, p2));
9435 unlock_user(p2, arg3, 0);
9436 unlock_user(p, arg1, 0);
9440 #ifdef TARGET_NR_oldlstat
9441 case TARGET_NR_oldlstat:
9444 #ifdef TARGET_NR_readlink
9445 case TARGET_NR_readlink:
9448 p = lock_user_string(arg1);
9449 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9451 ret = -TARGET_EFAULT;
9453 /* Short circuit this for the magic exe check. */
9454 ret = -TARGET_EINVAL;
9455 } else if (is_proc_myself((const char *)p, "exe")) {
9456 char real[PATH_MAX], *temp;
9457 temp = realpath(exec_path, real);
9458 /* Return value is # of bytes that we wrote to the buffer. */
9460 ret = get_errno(-1);
9462 /* Don't worry about sign mismatch as earlier mapping
9463 * logic would have thrown a bad address error. */
9464 ret = MIN(strlen(real), arg3);
9465 /* We cannot NUL terminate the string. */
9466 memcpy(p2, real, ret);
9469 ret = get_errno(readlink(path(p), p2, arg3));
9471 unlock_user(p2, arg2, ret);
9472 unlock_user(p, arg1, 0);
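/* A sketch of why the magic-exe case above matters: without it a guest
 * doing
 *
 *     char buf[PATH_MAX];
 *     ssize_t n = readlink("/proc/self/exe", buf, sizeof(buf));
 *
 * would see the path of the qemu binary rather than of the emulated
 * program, so we substitute realpath(exec_path), the guest executable
 * recorded at startup. Like the kernel we copy at most arg3 bytes and do
 * not NUL-terminate.
 */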
9476 #if defined(TARGET_NR_readlinkat)
9477 case TARGET_NR_readlinkat:
9480 p = lock_user_string(arg2);
9481 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9483 ret = -TARGET_EFAULT;
9484 } else if (is_proc_myself((const char *)p, "exe")) {
9485 char real[PATH_MAX], *temp;
9486 temp = realpath(exec_path, real);
9487 ret = temp == NULL ? get_errno(-1) : strlen(real);
9488 snprintf((char *)p2, arg4, "%s", real);
9490 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9492 unlock_user(p2, arg3, ret);
9493 unlock_user(p, arg2, 0);
9497 #ifdef TARGET_NR_uselib
9498 case TARGET_NR_uselib:
9501 #ifdef TARGET_NR_swapon
9502 case TARGET_NR_swapon:
9503 if (!(p = lock_user_string(arg1)))
9505 ret = get_errno(swapon(p, arg2));
9506 unlock_user(p, arg1, 0);
9509 case TARGET_NR_reboot:
9510 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9511 /* arg4 must be ignored in all other cases */
9512 p = lock_user_string(arg4);
9516 ret = get_errno(reboot(arg1, arg2, arg3, p));
9517 unlock_user(p, arg4, 0);
9519 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9522 #ifdef TARGET_NR_readdir
9523 case TARGET_NR_readdir:
9526 #ifdef TARGET_NR_mmap
9527 case TARGET_NR_mmap:
9528 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9529 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9530 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9531 || defined(TARGET_S390X)
9534 abi_ulong v1, v2, v3, v4, v5, v6;
9535 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9543 unlock_user(v, arg1, 0);
9544 ret = get_errno(target_mmap(v1, v2, v3,
9545 target_to_host_bitmask(v4, mmap_flags_tbl),
9549 ret = get_errno(target_mmap(arg1, arg2, arg3,
9550 target_to_host_bitmask(arg4, mmap_flags_tbl),
9556 #ifdef TARGET_NR_mmap2
9557 case TARGET_NR_mmap2:
9559 #define MMAP_SHIFT 12
9561 ret = get_errno(target_mmap(arg1, arg2, arg3,
9562 target_to_host_bitmask(arg4, mmap_flags_tbl),
9564 arg6 << MMAP_SHIFT));
9567 case TARGET_NR_munmap:
9568 ret = get_errno(target_munmap(arg1, arg2));
9570 case TARGET_NR_mprotect:
9572 TaskState *ts = cpu->opaque;
9573 /* Special hack to detect libc making the stack executable. */
9574 if ((arg3 & PROT_GROWSDOWN)
9575 && arg1 >= ts->info->stack_limit
9576 && arg1 <= ts->info->start_stack) {
9577 arg3 &= ~PROT_GROWSDOWN;
9578 arg2 = arg2 + arg1 - ts->info->stack_limit;
9579 arg1 = ts->info->stack_limit;
9582 ret = get_errno(target_mprotect(arg1, arg2, arg3));
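/* A sketch of the sequence being detected, with made-up values: glibc,
 * asked to make the stack executable, issues roughly
 *
 *     mprotect(stack_page, pagesize,
 *              PROT_READ | PROT_WRITE | PROT_EXEC | PROT_GROWSDOWN);
 *
 * and relies on the kernel extending the change down the whole stack
 * VMA. Our guest mappings carry no grows-down attribute, so the code
 * above widens the request by hand to cover [stack_limit, arg1 + arg2).
 */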
9584 #ifdef TARGET_NR_mremap
9585 case TARGET_NR_mremap:
9586 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9589 /* ??? msync/mlock/munlock are broken for softmmu. */
9590 #ifdef TARGET_NR_msync
9591 case TARGET_NR_msync:
9592 ret = get_errno(msync(g2h(arg1), arg2, arg3));
9595 #ifdef TARGET_NR_mlock
9596 case TARGET_NR_mlock:
9597 ret = get_errno(mlock(g2h(arg1), arg2));
9600 #ifdef TARGET_NR_munlock
9601 case TARGET_NR_munlock:
9602 ret = get_errno(munlock(g2h(arg1), arg2));
9605 #ifdef TARGET_NR_mlockall
9606 case TARGET_NR_mlockall:
9607 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9610 #ifdef TARGET_NR_munlockall
9611 case TARGET_NR_munlockall:
9612 ret = get_errno(munlockall());
9615 case TARGET_NR_truncate:
9616 if (!(p = lock_user_string(arg1)))
9618 ret = get_errno(truncate(p, arg2));
9619 unlock_user(p, arg1, 0);
9621 case TARGET_NR_ftruncate:
9622 ret = get_errno(ftruncate(arg1, arg2));
9624 case TARGET_NR_fchmod:
9625 ret = get_errno(fchmod(arg1, arg2));
9627 #if defined(TARGET_NR_fchmodat)
9628 case TARGET_NR_fchmodat:
9629 if (!(p = lock_user_string(arg2)))
9631 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9632 unlock_user(p, arg2, 0);
9635 case TARGET_NR_getpriority:
9636 /* Note that negative values are valid for getpriority, so we must
9637 differentiate based on errno settings. */
9639 ret = getpriority(arg1, arg2);
9640 if (ret == -1 && errno != 0) {
9641 ret = -host_to_target_errno(errno);
9645 /* Return value is the unbiased priority. Signal no error. */
9646 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9648 /* Return value is a biased priority to avoid negative numbers. */
9649 ret = 20 - ret;
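/* A worked example of the bias, for a process at nice -5: the raw
 * syscall (unlike the libc wrapper) returns 20 - nice, here 25, so
 * successful results always fall in 1..40 and cannot collide with a
 * negative errno. Genuine failures such as EPERM are caught by the
 * errno test above, which is why errno is inspected rather than the
 * return value.
 */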
9652 case TARGET_NR_setpriority:
9653 ret = get_errno(setpriority(arg1, arg2, arg3));
9655 #ifdef TARGET_NR_profil
9656 case TARGET_NR_profil:
9659 case TARGET_NR_statfs:
9660 if (!(p = lock_user_string(arg1)))
9662 ret = get_errno(statfs(path(p), &stfs));
9663 unlock_user(p, arg1, 0);
9665 if (!is_error(ret)) {
9666 struct target_statfs *target_stfs;
9668 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9670 __put_user(stfs.f_type, &target_stfs->f_type);
9671 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9672 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9673 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9674 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9675 __put_user(stfs.f_files, &target_stfs->f_files);
9676 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9677 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9678 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9679 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9680 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9681 #ifdef _STATFS_F_FLAGS
9682 __put_user(stfs.f_flags, &target_stfs->f_flags);
9684 __put_user(0, &target_stfs->f_flags);
9686 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9687 unlock_user_struct(target_stfs, arg2, 1);
9690 case TARGET_NR_fstatfs:
9691 ret = get_errno(fstatfs(arg1, &stfs));
9692 goto convert_statfs;
9693 #ifdef TARGET_NR_statfs64
9694 case TARGET_NR_statfs64:
9695 if (!(p = lock_user_string(arg1)))
9697 ret = get_errno(statfs(path(p), &stfs));
9698 unlock_user(p, arg1, 0);
9700 if (!is_error(ret)) {
9701 struct target_statfs64 *target_stfs;
9703 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9705 __put_user(stfs.f_type, &target_stfs->f_type);
9706 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9707 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9708 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9709 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9710 __put_user(stfs.f_files, &target_stfs->f_files);
9711 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9712 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9713 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9714 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9715 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9716 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9717 unlock_user_struct(target_stfs, arg3, 1);
9720 case TARGET_NR_fstatfs64:
9721 ret = get_errno(fstatfs(arg1, &stfs));
9722 goto convert_statfs64;
9724 #ifdef TARGET_NR_ioperm
9725 case TARGET_NR_ioperm:
9728 #ifdef TARGET_NR_socketcall
9729 case TARGET_NR_socketcall:
9730 ret = do_socketcall(arg1, arg2);
9733 #ifdef TARGET_NR_accept
9734 case TARGET_NR_accept:
9735 ret = do_accept4(arg1, arg2, arg3, 0);
9738 #ifdef TARGET_NR_accept4
9739 case TARGET_NR_accept4:
9740 ret = do_accept4(arg1, arg2, arg3, arg4);
9743 #ifdef TARGET_NR_bind
9744 case TARGET_NR_bind:
9745 ret = do_bind(arg1, arg2, arg3);
9748 #ifdef TARGET_NR_connect
9749 case TARGET_NR_connect:
9750 ret = do_connect(arg1, arg2, arg3);
9753 #ifdef TARGET_NR_getpeername
9754 case TARGET_NR_getpeername:
9755 ret = do_getpeername(arg1, arg2, arg3);
9758 #ifdef TARGET_NR_getsockname
9759 case TARGET_NR_getsockname:
9760 ret = do_getsockname(arg1, arg2, arg3);
9763 #ifdef TARGET_NR_getsockopt
9764 case TARGET_NR_getsockopt:
9765 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9768 #ifdef TARGET_NR_listen
9769 case TARGET_NR_listen:
9770 ret = get_errno(listen(arg1, arg2));
9773 #ifdef TARGET_NR_recv
9774 case TARGET_NR_recv:
9775 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9778 #ifdef TARGET_NR_recvfrom
9779 case TARGET_NR_recvfrom:
9780 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9783 #ifdef TARGET_NR_recvmsg
9784 case TARGET_NR_recvmsg:
9785 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9788 #ifdef TARGET_NR_send
9789 case TARGET_NR_send:
9790 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9793 #ifdef TARGET_NR_sendmsg
9794 case TARGET_NR_sendmsg:
9795 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9798 #ifdef TARGET_NR_sendmmsg
9799 case TARGET_NR_sendmmsg:
9800 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9802 case TARGET_NR_recvmmsg:
9803 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9806 #ifdef TARGET_NR_sendto
9807 case TARGET_NR_sendto:
9808 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9811 #ifdef TARGET_NR_shutdown
9812 case TARGET_NR_shutdown:
9813 ret = get_errno(shutdown(arg1, arg2));
9816 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9817 case TARGET_NR_getrandom:
9818 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9822 ret = get_errno(getrandom(p, arg2, arg3));
9823 unlock_user(p, arg1, ret);
9826 #ifdef TARGET_NR_socket
9827 case TARGET_NR_socket:
9828 ret = do_socket(arg1, arg2, arg3);
9831 #ifdef TARGET_NR_socketpair
9832 case TARGET_NR_socketpair:
9833 ret = do_socketpair(arg1, arg2, arg3, arg4);
9836 #ifdef TARGET_NR_setsockopt
9837 case TARGET_NR_setsockopt:
9838 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9841 #if defined(TARGET_NR_syslog)
9842 case TARGET_NR_syslog:
9847 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9848 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9849 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9850 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9851 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9852 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9853 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9854 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9856 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9859 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9860 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9861 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9863 ret = -TARGET_EINVAL;
9871 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9873 ret = -TARGET_EFAULT;
9876 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9877 unlock_user(p, arg2, arg3);
9887 case TARGET_NR_setitimer:
9889 struct itimerval value, ovalue, *pvalue;
9893 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9894 || copy_from_user_timeval(&pvalue->it_value,
9895 arg2 + sizeof(struct target_timeval)))
9900 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9901 if (!is_error(ret) && arg3) {
9902 if (copy_to_user_timeval(arg3,
9903 &ovalue.it_interval)
9904 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9910 case TARGET_NR_getitimer:
9912 struct itimerval value;
9914 ret = get_errno(getitimer(arg1, &value));
9915 if (!is_error(ret) && arg2) {
9916 if (copy_to_user_timeval(arg2,
9918 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9924 #ifdef TARGET_NR_stat
9925 case TARGET_NR_stat:
9926 if (!(p = lock_user_string(arg1)))
9928 ret = get_errno(stat(path(p), &st));
9929 unlock_user(p, arg1, 0);
9932 #ifdef TARGET_NR_lstat
9933 case TARGET_NR_lstat:
9934 if (!(p = lock_user_string(arg1)))
9936 ret = get_errno(lstat(path(p), &st));
9937 unlock_user(p, arg1, 0);
9940 case TARGET_NR_fstat:
9942 ret = get_errno(fstat(arg1, &st));
9943 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9946 if (!is_error(ret)) {
9947 struct target_stat *target_st;
9949 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9951 memset(target_st, 0, sizeof(*target_st));
9952 __put_user(st.st_dev, &target_st->st_dev);
9953 __put_user(st.st_ino, &target_st->st_ino);
9954 __put_user(st.st_mode, &target_st->st_mode);
9955 __put_user(st.st_uid, &target_st->st_uid);
9956 __put_user(st.st_gid, &target_st->st_gid);
9957 __put_user(st.st_nlink, &target_st->st_nlink);
9958 __put_user(st.st_rdev, &target_st->st_rdev);
9959 __put_user(st.st_size, &target_st->st_size);
9960 __put_user(st.st_blksize, &target_st->st_blksize);
9961 __put_user(st.st_blocks, &target_st->st_blocks);
9962 __put_user(st.st_atime, &target_st->target_st_atime);
9963 __put_user(st.st_mtime, &target_st->target_st_mtime);
9964 __put_user(st.st_ctime, &target_st->target_st_ctime);
9965 unlock_user_struct(target_st, arg2, 1);
9969 #ifdef TARGET_NR_olduname
9970 case TARGET_NR_olduname:
9973 #ifdef TARGET_NR_iopl
9974 case TARGET_NR_iopl:
9977 case TARGET_NR_vhangup:
9978 ret = get_errno(vhangup());
9980 #ifdef TARGET_NR_idle
9981 case TARGET_NR_idle:
9984 #ifdef TARGET_NR_syscall
9985 case TARGET_NR_syscall:
9986 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9987 arg6, arg7, arg8, 0);
9990 case TARGET_NR_wait4:
9993 abi_long status_ptr = arg2;
9994 struct rusage rusage, *rusage_ptr;
9995 abi_ulong target_rusage = arg4;
9996 abi_long rusage_err;
9998 rusage_ptr = &rusage;
10001 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10002 if (!is_error(ret)) {
10003 if (status_ptr && ret) {
10004 status = host_to_target_waitstatus(status);
10005 if (put_user_s32(status, status_ptr))
10008 if (target_rusage) {
10009 rusage_err = host_to_target_rusage(target_rusage, &rusage);
10017 #ifdef TARGET_NR_swapoff
10018 case TARGET_NR_swapoff:
10019 if (!(p = lock_user_string(arg1)))
10021 ret = get_errno(swapoff(p));
10022 unlock_user(p, arg1, 0);
10025 case TARGET_NR_sysinfo:
10027 struct target_sysinfo *target_value;
10028 struct sysinfo value;
10029 ret = get_errno(sysinfo(&value));
10030 if (!is_error(ret) && arg1)
10032 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10034 __put_user(value.uptime, &target_value->uptime);
10035 __put_user(value.loads[0], &target_value->loads[0]);
10036 __put_user(value.loads[1], &target_value->loads[1]);
10037 __put_user(value.loads[2], &target_value->loads[2]);
10038 __put_user(value.totalram, &target_value->totalram);
10039 __put_user(value.freeram, &target_value->freeram);
10040 __put_user(value.sharedram, &target_value->sharedram);
10041 __put_user(value.bufferram, &target_value->bufferram);
10042 __put_user(value.totalswap, &target_value->totalswap);
10043 __put_user(value.freeswap, &target_value->freeswap);
10044 __put_user(value.procs, &target_value->procs);
10045 __put_user(value.totalhigh, &target_value->totalhigh);
10046 __put_user(value.freehigh, &target_value->freehigh);
10047 __put_user(value.mem_unit, &target_value->mem_unit);
10048 unlock_user_struct(target_value, arg1, 1);
10052 #ifdef TARGET_NR_ipc
10053 case TARGET_NR_ipc:
10054 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10057 #ifdef TARGET_NR_semget
10058 case TARGET_NR_semget:
10059 ret = get_errno(semget(arg1, arg2, arg3));
10062 #ifdef TARGET_NR_semop
10063 case TARGET_NR_semop:
10064 ret = do_semop(arg1, arg2, arg3);
10067 #ifdef TARGET_NR_semctl
10068 case TARGET_NR_semctl:
10069 ret = do_semctl(arg1, arg2, arg3, arg4);
10072 #ifdef TARGET_NR_msgctl
10073 case TARGET_NR_msgctl:
10074 ret = do_msgctl(arg1, arg2, arg3);
10077 #ifdef TARGET_NR_msgget
10078 case TARGET_NR_msgget:
10079 ret = get_errno(msgget(arg1, arg2));
10082 #ifdef TARGET_NR_msgrcv
10083 case TARGET_NR_msgrcv:
10084 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10087 #ifdef TARGET_NR_msgsnd
10088 case TARGET_NR_msgsnd:
10089 ret = do_msgsnd(arg1, arg2, arg3, arg4);
10092 #ifdef TARGET_NR_shmget
10093 case TARGET_NR_shmget:
10094 ret = get_errno(shmget(arg1, arg2, arg3));
10097 #ifdef TARGET_NR_shmctl
10098 case TARGET_NR_shmctl:
10099 ret = do_shmctl(arg1, arg2, arg3);
10102 #ifdef TARGET_NR_shmat
10103 case TARGET_NR_shmat:
10104 ret = do_shmat(cpu_env, arg1, arg2, arg3);
10107 #ifdef TARGET_NR_shmdt
10108 case TARGET_NR_shmdt:
10109 ret = do_shmdt(arg1);
10112 case TARGET_NR_fsync:
10113 ret = get_errno(fsync(arg1));
10115 case TARGET_NR_clone:
10116 /* Linux manages to have three different orderings for its
10117 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10118 * match the kernel's CONFIG_CLONE_* settings.
10119 * Microblaze is further special in that it uses a sixth
10120 * implicit argument to clone for the TLS pointer.
10121 */
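/* As a sketch, the three kernel argument orders being reconciled here
 * (the authoritative list is the kernel's CONFIG_CLONE_BACKWARDS*
 * configury):
 *
 *     default:    clone(flags, newsp, parent_tidptr, child_tidptr, tls)
 *     BACKWARDS:  clone(flags, newsp, parent_tidptr, tls, child_tidptr)
 *     BACKWARDS2: clone(newsp, flags, parent_tidptr, child_tidptr, tls)
 *
 * do_fork() expects (env, flags, newsp, parent_tidptr, tls,
 * child_tidptr), so each branch below merely shuffles arg1..arg6 into
 * that shape; Microblaze additionally supplies tls in arg6.
 */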
10122 #if defined(TARGET_MICROBLAZE)
10123 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10124 #elif defined(TARGET_CLONE_BACKWARDS)
10125 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10126 #elif defined(TARGET_CLONE_BACKWARDS2)
10127 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10129 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10132 #ifdef __NR_exit_group
10133 /* new thread calls */
10134 case TARGET_NR_exit_group:
10135 #ifdef TARGET_GPROF
10138 gdb_exit(cpu_env, arg1);
10139 ret = get_errno(exit_group(arg1));
10142 case TARGET_NR_setdomainname:
10143 if (!(p = lock_user_string(arg1)))
10145 ret = get_errno(setdomainname(p, arg2));
10146 unlock_user(p, arg1, 0);
10148 case TARGET_NR_uname:
10149 /* no need to transcode because we use the linux syscall */
10151 struct new_utsname * buf;
10153 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10155 ret = get_errno(sys_uname(buf));
10156 if (!is_error(ret)) {
10157 /* Overwrite the native machine name with whatever is being
10158 emulated. */
10159 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10160 sizeof(buf->machine));
10161 /* Allow the user to override the reported release. */
10162 if (qemu_uname_release && *qemu_uname_release) {
10163 g_strlcpy(buf->release, qemu_uname_release,
10164 sizeof(buf->release));
10167 unlock_user_struct(buf, arg1, 1);
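/* Usage note (illustrative invocation): qemu_uname_release is set from
 * the -r command line option or the QEMU_UNAME environment variable,
 * e.g.
 *
 *     QEMU_UNAME=4.9.0 qemu-arm ./guest-binary
 *
 * while the machine field is always rewritten to match the emulated CPU
 * regardless.
 */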
10171 case TARGET_NR_modify_ldt:
10172 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
10174 #if !defined(TARGET_X86_64)
10175 case TARGET_NR_vm86old:
10176 goto unimplemented;
10177 case TARGET_NR_vm86:
10178 ret = do_vm86(cpu_env, arg1, arg2);
10182 case TARGET_NR_adjtimex:
10184 struct timex host_buf;
10186 if (target_to_host_timex(&host_buf, arg1) != 0) {
10189 ret = get_errno(adjtimex(&host_buf));
10190 if (!is_error(ret)) {
10191 if (host_to_target_timex(arg1, &host_buf) != 0) {
10197 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10198 case TARGET_NR_clock_adjtime:
10200 struct timex htx, *phtx = &htx;
10202 if (target_to_host_timex(phtx, arg2) != 0) {
10205 ret = get_errno(clock_adjtime(arg1, phtx));
10206 if (!is_error(ret) && phtx) {
10207 if (host_to_target_timex(arg2, phtx) != 0) {
10214 #ifdef TARGET_NR_create_module
10215 case TARGET_NR_create_module:
10217 case TARGET_NR_init_module:
10218 case TARGET_NR_delete_module:
10219 #ifdef TARGET_NR_get_kernel_syms
10220 case TARGET_NR_get_kernel_syms:
10222 goto unimplemented;
10223 case TARGET_NR_quotactl:
10224 goto unimplemented;
10225 case TARGET_NR_getpgid:
10226 ret = get_errno(getpgid(arg1));
10228 case TARGET_NR_fchdir:
10229 ret = get_errno(fchdir(arg1));
10231 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10232 case TARGET_NR_bdflush:
10233 goto unimplemented;
10235 #ifdef TARGET_NR_sysfs
10236 case TARGET_NR_sysfs:
10237 goto unimplemented;
10239 case TARGET_NR_personality:
10240 ret = get_errno(personality(arg1));
10242 #ifdef TARGET_NR_afs_syscall
10243 case TARGET_NR_afs_syscall:
10244 goto unimplemented;
10246 #ifdef TARGET_NR__llseek /* Not on alpha */
10247 case TARGET_NR__llseek:
10250 #if !defined(__NR_llseek)
10251 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10253 ret = get_errno(res);
10258 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10260 if ((ret == 0) && put_user_s64(res, arg4)) {
10266 #ifdef TARGET_NR_getdents
10267 case TARGET_NR_getdents:
10268 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10269 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10271 struct target_dirent *target_dirp;
10272 struct linux_dirent *dirp;
10273 abi_long count = arg3;
10275 dirp = g_try_malloc(count);
10277 ret = -TARGET_ENOMEM;
10281 ret = get_errno(sys_getdents(arg1, dirp, count));
10282 if (!is_error(ret)) {
10283 struct linux_dirent *de;
10284 struct target_dirent *tde;
10286 int reclen, treclen;
10287 int count1, tnamelen;
10291 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10295 reclen = de->d_reclen;
10296 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10297 assert(tnamelen >= 0);
10298 treclen = tnamelen + offsetof(struct target_dirent, d_name);
10299 assert(count1 + treclen <= count);
10300 tde->d_reclen = tswap16(treclen);
10301 tde->d_ino = tswapal(de->d_ino);
10302 tde->d_off = tswapal(de->d_off);
10303 memcpy(tde->d_name, de->d_name, tnamelen);
10304 de = (struct linux_dirent *)((char *)de + reclen);
10306 tde = (struct target_dirent *)((char *)tde + treclen);
10310 unlock_user(target_dirp, arg2, ret);
10316 struct linux_dirent *dirp;
10317 abi_long count = arg3;
10319 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10321 ret = get_errno(sys_getdents(arg1, dirp, count));
10322 if (!is_error(ret)) {
10323 struct linux_dirent *de;
10328 reclen = de->d_reclen;
10331 de->d_reclen = tswap16(reclen);
10332 tswapls(&de->d_ino);
10333 tswapls(&de->d_off);
10334 de = (struct linux_dirent *)((char *)de + reclen);
10338 unlock_user(dirp, arg2, ret);
10342 /* Implement getdents in terms of getdents64 */
10344 struct linux_dirent64 *dirp;
10345 abi_long count = arg3;
10347 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10351 ret = get_errno(sys_getdents64(arg1, dirp, count));
10352 if (!is_error(ret)) {
10353 /* Convert the dirent64 structs to target dirent. We do this
10354 * in-place, since we can guarantee that a target_dirent is no
10355 * larger than a dirent64; however this means we have to be
10356 * careful to read everything before writing in the new format.
10357 */
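/* A size sketch of why the in-place conversion cannot overrun, per
 * record:
 *
 *     linux_dirent64: 8 (d_ino) + 8 (d_off) + 2 (d_reclen) + 1 (d_type)
 *                     + name + NUL, padded to 8 bytes
 *     target_dirent:  2 abi_longs (d_ino, d_off) + 2 (d_reclen)
 *                     + name + NUL + 1 type byte, padded to abi_long
 *
 * Since abi_long is at most 8 bytes, treclen never exceeds reclen, so
 * tde cannot catch up with de while both walk the same buffer, provided
 * each source record is read in full before its slot is rewritten.
 */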
10358 struct linux_dirent64 *de;
10359 struct target_dirent *tde;
10364 tde = (struct target_dirent *)dirp;
10366 int namelen, treclen;
10367 int reclen = de->d_reclen;
10368 uint64_t ino = de->d_ino;
10369 int64_t off = de->d_off;
10370 uint8_t type = de->d_type;
10372 namelen = strlen(de->d_name);
10373 treclen = offsetof(struct target_dirent, d_name)
10375 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10377 memmove(tde->d_name, de->d_name, namelen + 1);
10378 tde->d_ino = tswapal(ino);
10379 tde->d_off = tswapal(off);
10380 tde->d_reclen = tswap16(treclen);
10381 /* The target_dirent type is in what was formerly a padding
10382 * byte at the end of the structure:
10383 */
10384 *(((char *)tde) + treclen - 1) = type;
10386 de = (struct linux_dirent64 *)((char *)de + reclen);
10387 tde = (struct target_dirent *)((char *)tde + treclen);
10393 unlock_user(dirp, arg2, ret);
10397 #endif /* TARGET_NR_getdents */
10398 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10399 case TARGET_NR_getdents64:
10401 struct linux_dirent64 *dirp;
10402 abi_long count = arg3;
10403 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10405 ret = get_errno(sys_getdents64(arg1, dirp, count));
10406 if (!is_error(ret)) {
10407 struct linux_dirent64 *de;
10412 reclen = de->d_reclen;
10415 de->d_reclen = tswap16(reclen);
10416 tswap64s((uint64_t *)&de->d_ino);
10417 tswap64s((uint64_t *)&de->d_off);
10418 de = (struct linux_dirent64 *)((char *)de + reclen);
10422 unlock_user(dirp, arg2, ret);
10425 #endif /* TARGET_NR_getdents64 */
10426 #if defined(TARGET_NR__newselect)
10427 case TARGET_NR__newselect:
10428 ret = do_select(arg1, arg2, arg3, arg4, arg5);
10431 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10432 # ifdef TARGET_NR_poll
10433 case TARGET_NR_poll:
10435 # ifdef TARGET_NR_ppoll
10436 case TARGET_NR_ppoll:
10439 struct target_pollfd *target_pfd;
10440 unsigned int nfds = arg2;
10441 struct pollfd *pfd;
10447 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10448 ret = -TARGET_EINVAL;
10452 target_pfd = lock_user(VERIFY_WRITE, arg1,
10453 sizeof(struct target_pollfd) * nfds, 1);
10458 pfd = alloca(sizeof(struct pollfd) * nfds);
10459 for (i = 0; i < nfds; i++) {
10460 pfd[i].fd = tswap32(target_pfd[i].fd);
10461 pfd[i].events = tswap16(target_pfd[i].events);
10466 # ifdef TARGET_NR_ppoll
10467 case TARGET_NR_ppoll:
10469 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10470 target_sigset_t *target_set;
10471 sigset_t _set, *set = &_set;
10474 if (target_to_host_timespec(timeout_ts, arg3)) {
10475 unlock_user(target_pfd, arg1, 0);
10483 if (arg5 != sizeof(target_sigset_t)) {
10484 unlock_user(target_pfd, arg1, 0);
10485 ret = -TARGET_EINVAL;
10489 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10491 unlock_user(target_pfd, arg1, 0);
10494 target_to_host_sigset(set, target_set);
10499 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10500 set, SIGSET_T_SIZE));
10502 if (!is_error(ret) && arg3) {
10503 host_to_target_timespec(arg3, timeout_ts);
10506 unlock_user(target_set, arg4, 0);
10511 # ifdef TARGET_NR_poll
10512 case TARGET_NR_poll:
10514 struct timespec ts, *pts;
10517 /* Convert ms to secs, ns */
10518 ts.tv_sec = arg3 / 1000;
10519 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10522 /* -ve poll() timeout means "infinite" */
10525 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10530 g_assert_not_reached();
10533 if (!is_error(ret)) {
10534 for (i = 0; i < nfds; i++) {
10535 target_pfd[i].revents = tswap16(pfd[i].revents);
10538 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10542 case TARGET_NR_flock:
10543 /* NOTE: the flock constant seems to be the same for every
10544 Linux platform */
10545 ret = get_errno(safe_flock(arg1, arg2));
10547 case TARGET_NR_readv:
10549 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10551 ret = get_errno(safe_readv(arg1, vec, arg3));
10552 unlock_iovec(vec, arg2, arg3, 1);
10554 ret = -host_to_target_errno(errno);
10558 case TARGET_NR_writev:
10560 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10562 ret = get_errno(safe_writev(arg1, vec, arg3));
10563 unlock_iovec(vec, arg2, arg3, 0);
10565 ret = -host_to_target_errno(errno);
10569 #if defined(TARGET_NR_preadv)
10570 case TARGET_NR_preadv:
10572 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10574 unsigned long low, high;
10576 target_to_host_low_high(arg4, arg5, &low, &high);
10577 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10578 unlock_iovec(vec, arg2, arg3, 1);
10580 ret = -host_to_target_errno(errno);
10585 #if defined(TARGET_NR_pwritev)
10586 case TARGET_NR_pwritev:
10588 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10590 unsigned long low, high;
10592 target_to_host_low_high(arg4, arg5, &low, &high);
10593 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10594 unlock_iovec(vec, arg2, arg3, 0);
10596 ret = -host_to_target_errno(errno);
10601 case TARGET_NR_getsid:
10602 ret = get_errno(getsid(arg1));
10604 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10605 case TARGET_NR_fdatasync:
10606 ret = get_errno(fdatasync(arg1));
10609 #ifdef TARGET_NR__sysctl
10610 case TARGET_NR__sysctl:
10611 /* We don't implement this, but ENOTDIR is always a safe
10612 return value. */
10613 ret = -TARGET_ENOTDIR;
10616 case TARGET_NR_sched_getaffinity:
10618 unsigned int mask_size;
10619 unsigned long *mask;
10621 /*
10622 * sched_getaffinity needs multiples of ulong, so we need to take
10623 * care of mismatches between target ulong and host ulong sizes.
10624 */
10625 if (arg2 & (sizeof(abi_ulong) - 1)) {
10626 ret = -TARGET_EINVAL;
10629 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10631 mask = alloca(mask_size);
10632 memset(mask, 0, mask_size);
10633 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10635 if (!is_error(ret)) {
10637 /* More data returned than the caller's buffer will fit.
10638 * This only happens if sizeof(abi_long) < sizeof(long)
10639 * and the caller passed us a buffer holding an odd number
10640 * of abi_longs. If the host kernel is actually using the
10641 * extra 4 bytes then fail EINVAL; otherwise we can just
10642 * ignore them and only copy the interesting part.
10643 */
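/* A concrete case of the mismatch, assuming a 32-bit guest on a 64-bit
 * host: the guest may legitimately call
 *
 *     sched_getaffinity(0, 4, &mask);    4 bytes = one abi_ulong
 *
 * but the host kernel insists on multiples of sizeof(unsigned long) = 8,
 * so mask_size was rounded up to 8 above; we then only fail if CPUs
 * beyond the guest's arg2 * 8 mask bits could actually exist.
 */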
10644 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10645 if (numcpus > arg2 * 8) {
10646 ret = -TARGET_EINVAL;
10652 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10658 case TARGET_NR_sched_setaffinity:
10660 unsigned int mask_size;
10661 unsigned long *mask;
10663 /*
10664 * sched_setaffinity needs multiples of ulong, so we need to take
10665 * care of mismatches between target ulong and host ulong sizes.
10666 */
10667 if (arg2 & (sizeof(abi_ulong) - 1)) {
10668 ret = -TARGET_EINVAL;
10671 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10672 mask = alloca(mask_size);
10674 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10679 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10682 case TARGET_NR_getcpu:
10684 unsigned cpu, node;
10685 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10686 arg2 ? &node : NULL,
10688 if (is_error(ret)) {
10691 if (arg1 && put_user_u32(cpu, arg1)) {
10694 if (arg2 && put_user_u32(node, arg2)) {
10699 case TARGET_NR_sched_setparam:
10701 struct sched_param *target_schp;
10702 struct sched_param schp;
10705 return -TARGET_EINVAL;
10707 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10709 schp.sched_priority = tswap32(target_schp->sched_priority);
10710 unlock_user_struct(target_schp, arg2, 0);
10711 ret = get_errno(sched_setparam(arg1, &schp));
10714 case TARGET_NR_sched_getparam:
10716 struct sched_param *target_schp;
10717 struct sched_param schp;
10720 return -TARGET_EINVAL;
10722 ret = get_errno(sched_getparam(arg1, &schp));
10723 if (!is_error(ret)) {
10724 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10726 target_schp->sched_priority = tswap32(schp.sched_priority);
10727 unlock_user_struct(target_schp, arg2, 1);
10731 case TARGET_NR_sched_setscheduler:
10733 struct sched_param *target_schp;
10734 struct sched_param schp;
10736 return -TARGET_EINVAL;
10738 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10740 schp.sched_priority = tswap32(target_schp->sched_priority);
10741 unlock_user_struct(target_schp, arg3, 0);
10742 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10745 case TARGET_NR_sched_getscheduler:
10746 ret = get_errno(sched_getscheduler(arg1));
10748 case TARGET_NR_sched_yield:
10749 ret = get_errno(sched_yield());
10751 case TARGET_NR_sched_get_priority_max:
10752 ret = get_errno(sched_get_priority_max(arg1));
10754 case TARGET_NR_sched_get_priority_min:
10755 ret = get_errno(sched_get_priority_min(arg1));
10757 case TARGET_NR_sched_rr_get_interval:
10759 struct timespec ts;
10760 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10761 if (!is_error(ret)) {
10762 ret = host_to_target_timespec(arg2, &ts);
10766 case TARGET_NR_nanosleep:
10768 struct timespec req, rem;
10769 target_to_host_timespec(&req, arg1);
10770 ret = get_errno(safe_nanosleep(&req, &rem));
10771 if (is_error(ret) && arg2) {
10772 host_to_target_timespec(arg2, &rem);
10776 #ifdef TARGET_NR_query_module
10777 case TARGET_NR_query_module:
10778 goto unimplemented;
10780 #ifdef TARGET_NR_nfsservctl
10781 case TARGET_NR_nfsservctl:
10782 goto unimplemented;
10784 case TARGET_NR_prctl:
10786 case PR_GET_PDEATHSIG:
10789 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10790 if (!is_error(ret) && arg2
10791 && put_user_ual(deathsig, arg2)) {
10799 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10803 ret = get_errno(prctl(arg1, (unsigned long)name,
10804 arg3, arg4, arg5));
10805 unlock_user(name, arg2, 16);
10810 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10814 ret = get_errno(prctl(arg1, (unsigned long)name,
10815 arg3, arg4, arg5));
10816 unlock_user(name, arg2, 0);
10820 #ifdef TARGET_AARCH64
10821 case TARGET_PR_SVE_SET_VL:
10822 /* We cannot support either PR_SVE_SET_VL_ONEXEC
10823 or PR_SVE_VL_INHERIT. Therefore, anything above
10824 ARM_MAX_VQ results in EINVAL. */
10825 ret = -TARGET_EINVAL;
10826 if (arm_feature(cpu_env, ARM_FEATURE_SVE)
10827 && arg2 >= 0 && arg2 <= ARM_MAX_VQ * 16 && !(arg2 & 15)) {
10828 CPUARMState *env = cpu_env;
10829 int old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10830 int vq = MAX(arg2 / 16, 1);
10833 aarch64_sve_narrow_vq(env, vq);
10835 env->vfp.zcr_el[1] = vq - 1;
10839 case TARGET_PR_SVE_GET_VL:
10840 ret = -TARGET_EINVAL;
10841 if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
10842 CPUARMState *env = cpu_env;
10843 ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
10846 #endif /* AARCH64 */
10847 case PR_GET_SECCOMP:
10848 case PR_SET_SECCOMP:
10849 /* Disable seccomp to prevent the target disabling syscalls we
10850 * need. */
10851 ret = -TARGET_EINVAL;
10854 /* Most prctl options have no pointer arguments */
10855 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10859 #ifdef TARGET_NR_arch_prctl
10860 case TARGET_NR_arch_prctl:
10861 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10862 ret = do_arch_prctl(cpu_env, arg1, arg2);
10865 goto unimplemented;
10868 #ifdef TARGET_NR_pread64
10869 case TARGET_NR_pread64:
10870 if (regpairs_aligned(cpu_env, num)) {
10874 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10876 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10877 unlock_user(p, arg2, ret);
10879 case TARGET_NR_pwrite64:
10880 if (regpairs_aligned(cpu_env, num)) {
10884 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10886 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10887 unlock_user(p, arg2, 0);
10890 case TARGET_NR_getcwd:
10891 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10893 ret = get_errno(sys_getcwd1(p, arg2));
10894 unlock_user(p, arg1, ret);
10896 case TARGET_NR_capget:
10897 case TARGET_NR_capset:
10899 struct target_user_cap_header *target_header;
10900 struct target_user_cap_data *target_data = NULL;
10901 struct __user_cap_header_struct header;
10902 struct __user_cap_data_struct data[2];
10903 struct __user_cap_data_struct *dataptr = NULL;
10904 int i, target_datalen;
10905 int data_items = 1;
10907 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10910 header.version = tswap32(target_header->version);
10911 header.pid = tswap32(target_header->pid);
10913 if (header.version != _LINUX_CAPABILITY_VERSION) {
10914 /* Version 2 and up takes pointer to two user_data structs */
10918 target_datalen = sizeof(*target_data) * data_items;
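/* The version handling in brief (a sketch of the kernel ABI): the
 * header's version field selects the payload shape,
 *
 *     _LINUX_CAPABILITY_VERSION (v1): one data struct, 32 capability bits
 *     later versions (v2/v3):         two structs back to back, 64 bits
 *
 * hence data_items stays 1 only for a v1 header, and the kernel writes
 * its preferred version back into the header in either direction.
 */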
10921 if (num == TARGET_NR_capget) {
10922 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10924 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10926 if (!target_data) {
10927 unlock_user_struct(target_header, arg1, 0);
10931 if (num == TARGET_NR_capset) {
10932 for (i = 0; i < data_items; i++) {
10933 data[i].effective = tswap32(target_data[i].effective);
10934 data[i].permitted = tswap32(target_data[i].permitted);
10935 data[i].inheritable = tswap32(target_data[i].inheritable);
10942 if (num == TARGET_NR_capget) {
10943 ret = get_errno(capget(&header, dataptr));
10945 ret = get_errno(capset(&header, dataptr));
10948 /* The kernel always updates version for both capget and capset */
10949 target_header->version = tswap32(header.version);
10950 unlock_user_struct(target_header, arg1, 1);
10953 if (num == TARGET_NR_capget) {
10954 for (i = 0; i < data_items; i++) {
10955 target_data[i].effective = tswap32(data[i].effective);
10956 target_data[i].permitted = tswap32(data[i].permitted);
10957 target_data[i].inheritable = tswap32(data[i].inheritable);
10959 unlock_user(target_data, arg2, target_datalen);
10961 unlock_user(target_data, arg2, 0);
10966 case TARGET_NR_sigaltstack:
10967 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10970 #ifdef CONFIG_SENDFILE
10971 case TARGET_NR_sendfile:
10973 off_t *offp = NULL;
10976 ret = get_user_sal(off, arg3);
10977 if (is_error(ret)) {
10982 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10983 if (!is_error(ret) && arg3) {
10984 abi_long ret2 = put_user_sal(off, arg3);
10985 if (is_error(ret2)) {
10991 #ifdef TARGET_NR_sendfile64
10992 case TARGET_NR_sendfile64:
10994 off_t *offp = NULL;
10997 ret = get_user_s64(off, arg3);
10998 if (is_error(ret)) {
11003 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11004 if (!is_error(ret) && arg3) {
11005 abi_long ret2 = put_user_s64(off, arg3);
11006 if (is_error(ret2)) {
11014 case TARGET_NR_sendfile:
11015 #ifdef TARGET_NR_sendfile64
11016 case TARGET_NR_sendfile64:
11018 goto unimplemented;
11021 #ifdef TARGET_NR_getpmsg
11022 case TARGET_NR_getpmsg:
11023 goto unimplemented;
11025 #ifdef TARGET_NR_putpmsg
11026 case TARGET_NR_putpmsg:
11027 goto unimplemented;
11029 #ifdef TARGET_NR_vfork
11030 case TARGET_NR_vfork:
11031 ret = get_errno(do_fork(cpu_env,
11032 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11036 #ifdef TARGET_NR_ugetrlimit
11037 case TARGET_NR_ugetrlimit:
11039 struct rlimit rlim;
11040 int resource = target_to_host_resource(arg1);
11041 ret = get_errno(getrlimit(resource, &rlim));
11042 if (!is_error(ret)) {
11043 struct target_rlimit *target_rlim;
11044 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11046 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11047 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11048 unlock_user_struct(target_rlim, arg2, 1);
11053 #ifdef TARGET_NR_truncate64
11054 case TARGET_NR_truncate64:
11055 if (!(p = lock_user_string(arg1)))
11057 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11058 unlock_user(p, arg1, 0);
11061 #ifdef TARGET_NR_ftruncate64
11062 case TARGET_NR_ftruncate64:
11063 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11066 #ifdef TARGET_NR_stat64
11067 case TARGET_NR_stat64:
11068 if (!(p = lock_user_string(arg1)))
11070 ret = get_errno(stat(path(p), &st));
11071 unlock_user(p, arg1, 0);
11072 if (!is_error(ret))
11073 ret = host_to_target_stat64(cpu_env, arg2, &st);
11076 #ifdef TARGET_NR_lstat64
11077 case TARGET_NR_lstat64:
11078 if (!(p = lock_user_string(arg1)))
11080 ret = get_errno(lstat(path(p), &st));
11081 unlock_user(p, arg1, 0);
11082 if (!is_error(ret))
11083 ret = host_to_target_stat64(cpu_env, arg2, &st);
11086 #ifdef TARGET_NR_fstat64
11087 case TARGET_NR_fstat64:
11088 ret = get_errno(fstat(arg1, &st));
11089 if (!is_error(ret))
11090 ret = host_to_target_stat64(cpu_env, arg2, &st);
11093 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11094 #ifdef TARGET_NR_fstatat64
11095 case TARGET_NR_fstatat64:
11097 #ifdef TARGET_NR_newfstatat
11098 case TARGET_NR_newfstatat:
11100 if (!(p = lock_user_string(arg2)))
11102 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11103 if (!is_error(ret))
11104 ret = host_to_target_stat64(cpu_env, arg3, &st);
11107 #ifdef TARGET_NR_lchown
11108 case TARGET_NR_lchown:
11109 if (!(p = lock_user_string(arg1)))
11111 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11112 unlock_user(p, arg1, 0);
11115 #ifdef TARGET_NR_getuid
11116 case TARGET_NR_getuid:
11117 ret = get_errno(high2lowuid(getuid()));
11120 #ifdef TARGET_NR_getgid
11121 case TARGET_NR_getgid:
11122 ret = get_errno(high2lowgid(getgid()));
11125 #ifdef TARGET_NR_geteuid
11126 case TARGET_NR_geteuid:
11127 ret = get_errno(high2lowuid(geteuid()));
11130 #ifdef TARGET_NR_getegid
11131 case TARGET_NR_getegid:
11132 ret = get_errno(high2lowgid(getegid()));
11135 case TARGET_NR_setreuid:
11136 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11138 case TARGET_NR_setregid:
11139 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11141 case TARGET_NR_getgroups:
11143 int gidsetsize = arg1;
11144 target_id *target_grouplist;
11148 grouplist = alloca(gidsetsize * sizeof(gid_t));
11149 ret = get_errno(getgroups(gidsetsize, grouplist));
11150 if (gidsetsize == 0)
11152 if (!is_error(ret)) {
11153 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11154 if (!target_grouplist)
11156 for (i = 0; i < ret; i++)
11157 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11158 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11162 case TARGET_NR_setgroups:
11164 int gidsetsize = arg1;
11165 target_id *target_grouplist;
11166 gid_t *grouplist = NULL;
11169 grouplist = alloca(gidsetsize * sizeof(gid_t));
11170 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11171 if (!target_grouplist) {
11172 ret = -TARGET_EFAULT;
11175 for (i = 0; i < gidsetsize; i++) {
11176 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11178 unlock_user(target_grouplist, arg2, 0);
11180 ret = get_errno(setgroups(gidsetsize, grouplist));
11183 case TARGET_NR_fchown:
11184 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11186 #if defined(TARGET_NR_fchownat)
11187 case TARGET_NR_fchownat:
11188 if (!(p = lock_user_string(arg2)))
11190 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11191 low2highgid(arg4), arg5));
11192 unlock_user(p, arg2, 0);
11195 #ifdef TARGET_NR_setresuid
11196 case TARGET_NR_setresuid:
11197 ret = get_errno(sys_setresuid(low2highuid(arg1),
11199 low2highuid(arg3)));
11202 #ifdef TARGET_NR_getresuid
11203 case TARGET_NR_getresuid:
11205 uid_t ruid, euid, suid;
11206 ret = get_errno(getresuid(&ruid, &euid, &suid));
11207 if (!is_error(ret)) {
11208 if (put_user_id(high2lowuid(ruid), arg1)
11209 || put_user_id(high2lowuid(euid), arg2)
11210 || put_user_id(high2lowuid(suid), arg3))
11216 #ifdef TARGET_NR_getresgid
11217 case TARGET_NR_setresgid:
11218 ret = get_errno(sys_setresgid(low2highgid(arg1),
11220 low2highgid(arg3)));
11223 #ifdef TARGET_NR_getresgid
11224 case TARGET_NR_getresgid:
11226 gid_t rgid, egid, sgid;
11227 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11228 if (!is_error(ret)) {
11229 if (put_user_id(high2lowgid(rgid), arg1)
11230 || put_user_id(high2lowgid(egid), arg2)
11231 || put_user_id(high2lowgid(sgid), arg3))
11237 #ifdef TARGET_NR_chown
11238 case TARGET_NR_chown:
11239 if (!(p = lock_user_string(arg1)))
11241 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11242 unlock_user(p, arg1, 0);
11245 case TARGET_NR_setuid:
11246 ret = get_errno(sys_setuid(low2highuid(arg1)));
11248 case TARGET_NR_setgid:
11249 ret = get_errno(sys_setgid(low2highgid(arg1)));
11251 case TARGET_NR_setfsuid:
11252 ret = get_errno(setfsuid(arg1));
11254 case TARGET_NR_setfsgid:
11255 ret = get_errno(setfsgid(arg1));
11258 #ifdef TARGET_NR_lchown32
11259 case TARGET_NR_lchown32:
11260 if (!(p = lock_user_string(arg1)))
11262 ret = get_errno(lchown(p, arg2, arg3));
11263 unlock_user(p, arg1, 0);
11266 #ifdef TARGET_NR_getuid32
11267 case TARGET_NR_getuid32:
11268 ret = get_errno(getuid());
11272 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11273 /* Alpha specific */
11274 case TARGET_NR_getxuid:
11275 {
11276 uid_t euid;
11277 euid = geteuid();
11278 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11279 }
11280 ret = get_errno(getuid());
11283 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11284 /* Alpha specific */
11285 case TARGET_NR_getxgid:
11286 {
11287 gid_t egid;
11288 egid = getegid();
11289 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11290 }
11291 ret = get_errno(getgid());
11294 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11295 /* Alpha specific */
11296 case TARGET_NR_osf_getsysinfo:
11297 ret = -TARGET_EOPNOTSUPP;
11299 case TARGET_GSI_IEEE_FP_CONTROL:
11301 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
11303 /* Copied from linux ieee_fpcr_to_swcr. */
11304 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
11305 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
11306 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
11307 | SWCR_TRAP_ENABLE_DZE
11308 | SWCR_TRAP_ENABLE_OVF);
11309 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
11310 | SWCR_TRAP_ENABLE_INE);
11311 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
11312 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
11314 if (put_user_u64 (swcr, arg2))
11320 /* case GSI_IEEE_STATE_AT_SIGNAL:
11321 -- Not implemented in linux kernel.
11322 case GSI_UACPROC:
11323 -- Retrieves current unaligned access state; not much used.
11324 case GSI_PROC_TYPE:
11325 -- Retrieves implver information; surely not used.
11326 case GSI_GET_HWRPB:
11327 -- Grabs a copy of the HWRPB; surely not used.
11328 */
11332 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11333 /* Alpha specific */
11334 case TARGET_NR_osf_setsysinfo:
11335 ret = -TARGET_EOPNOTSUPP;
11337 case TARGET_SSI_IEEE_FP_CONTROL:
11339 uint64_t swcr, fpcr, orig_fpcr;
11341 if (get_user_u64 (swcr, arg2)) {
11344 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11345 fpcr = orig_fpcr & FPCR_DYN_MASK;
11347 /* Copied from linux ieee_swcr_to_fpcr. */
11348 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
11349 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
11350 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
11351 | SWCR_TRAP_ENABLE_DZE
11352 | SWCR_TRAP_ENABLE_OVF)) << 48;
11353 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
11354 | SWCR_TRAP_ENABLE_INE)) << 57;
11355 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
11356 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
11358 cpu_alpha_store_fpcr(cpu_env, fpcr);
11363 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11365 uint64_t exc, fpcr, orig_fpcr;
11368 if (get_user_u64(exc, arg2)) {
11372 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11374 /* We only add to the exception status here. */
11375 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
11377 cpu_alpha_store_fpcr(cpu_env, fpcr);
11380 /* Old exceptions are not signaled. */
11381 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
11383 /* If any exceptions were set by this call,
11384 and are unmasked, send a signal. */
11386 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
11387 si_code = TARGET_FPE_FLTRES;
11389 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
11390 si_code = TARGET_FPE_FLTUND;
11392 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
11393 si_code = TARGET_FPE_FLTOVF;
11395 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
11396 si_code = TARGET_FPE_FLTDIV;
11398 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
11399 si_code = TARGET_FPE_FLTINV;
11401 if (si_code != 0) {
11402 target_siginfo_t info;
11403 info.si_signo = SIGFPE;
11405 info.si_code = si_code;
11406 info._sifields._sigfault._addr
11407 = ((CPUArchState *)cpu_env)->pc;
11408 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11409 QEMU_SI_FAULT, &info);
11414 /* case SSI_NVPAIRS:
11415 -- Used with SSIN_UACPROC to enable unaligned accesses.
11416 case SSI_IEEE_STATE_AT_SIGNAL:
11417 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11418 -- Not implemented in linux kernel
11419 */
11423 #ifdef TARGET_NR_osf_sigprocmask
11424 /* Alpha specific. */
11425 case TARGET_NR_osf_sigprocmask:
11429 sigset_t set, oldset;
11432 case TARGET_SIG_BLOCK:
11435 case TARGET_SIG_UNBLOCK:
11438 case TARGET_SIG_SETMASK:
11442 ret = -TARGET_EINVAL;
11446 target_to_host_old_sigset(&set, &mask);
11447 ret = do_sigprocmask(how, &set, &oldset);
11449 host_to_target_old_sigset(&mask, &oldset);
11456 #ifdef TARGET_NR_getgid32
11457 case TARGET_NR_getgid32:
11458 ret = get_errno(getgid());
11461 #ifdef TARGET_NR_geteuid32
11462 case TARGET_NR_geteuid32:
11463 ret = get_errno(geteuid());
11466 #ifdef TARGET_NR_getegid32
11467 case TARGET_NR_getegid32:
11468 ret = get_errno(getegid());
11471 #ifdef TARGET_NR_setreuid32
11472 case TARGET_NR_setreuid32:
11473 ret = get_errno(setreuid(arg1, arg2));
11476 #ifdef TARGET_NR_setregid32
11477 case TARGET_NR_setregid32:
11478 ret = get_errno(setregid(arg1, arg2));
11481 #ifdef TARGET_NR_getgroups32
11482 case TARGET_NR_getgroups32:
11484 int gidsetsize = arg1;
11485 uint32_t *target_grouplist;
11489 grouplist = alloca(gidsetsize * sizeof(gid_t));
11490 ret = get_errno(getgroups(gidsetsize, grouplist));
11491 if (gidsetsize == 0)
11493 if (!is_error(ret)) {
11494 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11495 if (!target_grouplist) {
11496 ret = -TARGET_EFAULT;
11499 for (i = 0; i < ret; i++)
11500 target_grouplist[i] = tswap32(grouplist[i]);
11501 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11506 #ifdef TARGET_NR_setgroups32
11507 case TARGET_NR_setgroups32:
11509 int gidsetsize = arg1;
11510 uint32_t *target_grouplist;
11514 grouplist = alloca(gidsetsize * sizeof(gid_t));
11515 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11516 if (!target_grouplist) {
11517 ret = -TARGET_EFAULT;
11520 for (i = 0; i < gidsetsize; i++)
11521 grouplist[i] = tswap32(target_grouplist[i]);
11522 unlock_user(target_grouplist, arg2, 0);
11523 ret = get_errno(setgroups(gidsetsize, grouplist));
11527 #ifdef TARGET_NR_fchown32
11528 case TARGET_NR_fchown32:
11529 ret = get_errno(fchown(arg1, arg2, arg3));
11532 #ifdef TARGET_NR_setresuid32
11533 case TARGET_NR_setresuid32:
11534 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
11537 #ifdef TARGET_NR_getresuid32
11538 case TARGET_NR_getresuid32:
11540 uid_t ruid, euid, suid;
11541 ret = get_errno(getresuid(&ruid, &euid, &suid));
11542 if (!is_error(ret)) {
11543 if (put_user_u32(ruid, arg1)
11544 || put_user_u32(euid, arg2)
11545 || put_user_u32(suid, arg3))
11551 #ifdef TARGET_NR_setresgid32
11552 case TARGET_NR_setresgid32:
11553 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
11556 #ifdef TARGET_NR_getresgid32
11557 case TARGET_NR_getresgid32:
11559 gid_t rgid, egid, sgid;
11560 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11561 if (!is_error(ret)) {
11562 if (put_user_u32(rgid, arg1)
11563 || put_user_u32(egid, arg2)
11564 || put_user_u32(sgid, arg3))
11570 #ifdef TARGET_NR_chown32
11571 case TARGET_NR_chown32:
11572 if (!(p = lock_user_string(arg1)))
11574 ret = get_errno(chown(p, arg2, arg3));
11575 unlock_user(p, arg1, 0);
11578 #ifdef TARGET_NR_setuid32
11579 case TARGET_NR_setuid32:
11580 ret = get_errno(sys_setuid(arg1));
11583 #ifdef TARGET_NR_setgid32
11584 case TARGET_NR_setgid32:
11585 ret = get_errno(sys_setgid(arg1));
11588 #ifdef TARGET_NR_setfsuid32
11589 case TARGET_NR_setfsuid32:
11590 ret = get_errno(setfsuid(arg1));
11593 #ifdef TARGET_NR_setfsgid32
11594 case TARGET_NR_setfsgid32:
11595 ret = get_errno(setfsgid(arg1));
11599 case TARGET_NR_pivot_root:
11600 goto unimplemented;
11601 #ifdef TARGET_NR_mincore
11602 case TARGET_NR_mincore:
11605 ret = -TARGET_ENOMEM;
11606 a = lock_user(VERIFY_READ, arg1, arg2, 0);
11610 ret = -TARGET_EFAULT;
11611 p = lock_user_string(arg3);
11615 ret = get_errno(mincore(a, arg2, p));
11616 unlock_user(p, arg3, ret);
11618 unlock_user(a, arg1, 0);
11622 #ifdef TARGET_NR_arm_fadvise64_64
11623 case TARGET_NR_arm_fadvise64_64:
11624 /* arm_fadvise64_64 looks like fadvise64_64 but
11625 * with different argument order: fd, advice, offset, len
11626 * rather than the usual fd, offset, len, advice.
11627 * Note that offset and len are both 64-bit so appear as
11628 * pairs of 32-bit registers.
11629 */
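/* A register sketch, roughly, for an EABI call such as
 * fadvise(fd, POSIX_FADV_WILLNEED, 0x100000000, 4096):
 *
 *     r0 = fd, r1 = advice, (r2, r3) = offset pair, (r4, r5) = len pair
 *
 * so offset is rebuilt from (arg3, arg4) and len from (arg5, arg6)
 * below. Note posix_fadvise() returns the error number directly rather
 * than setting errno, hence the bare host_to_target_errno(ret).
 */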
11630 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11631 target_offset64(arg5, arg6), arg2);
11632 ret = -host_to_target_errno(ret);
11636 #if TARGET_ABI_BITS == 32
11638 #ifdef TARGET_NR_fadvise64_64
11639 case TARGET_NR_fadvise64_64:
11640 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11641 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11649 /* 6 args: fd, offset (high, low), len (high, low), advice */
11650 if (regpairs_aligned(cpu_env, num)) {
11651 /* offset is in (3,4), len in (5,6) and advice in 7 */
11659 ret = -host_to_target_errno(posix_fadvise(arg1,
11660 target_offset64(arg2, arg3),
11661 target_offset64(arg4, arg5),
11666 #ifdef TARGET_NR_fadvise64
11667 case TARGET_NR_fadvise64:
11668 /* 5 args: fd, offset (high, low), len, advice */
11669 if (regpairs_aligned(cpu_env, num)) {
11670 /* offset is in (3,4), len in 5 and advice in 6 */
11676 ret = -host_to_target_errno(posix_fadvise(arg1,
11677 target_offset64(arg2, arg3),
11682 #else /* not a 32-bit ABI */
11683 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11684 #ifdef TARGET_NR_fadvise64_64
11685 case TARGET_NR_fadvise64_64:
11687 #ifdef TARGET_NR_fadvise64
11688 case TARGET_NR_fadvise64:
11690 #ifdef TARGET_S390X
11691 switch (arg4) {
11692 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11693 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11694 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11695 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11696 }
11697 #endif
11699 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11702 #endif /* end of 64-bit ABI fadvise handling */
11704 #ifdef TARGET_NR_madvise
11705 case TARGET_NR_madvise:
11706 /* A straight passthrough may not be safe because qemu sometimes
11707 turns private file-backed mappings into anonymous mappings.
11708 This will break MADV_DONTNEED.
11709 This is a hint, so ignoring and returning success is ok. */
11710 ret = get_errno(0);
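/* An example of what a passthrough would break, assuming a guest that
 * maps a file MAP_PRIVATE and then calls
 *
 *     madvise(addr, len, MADV_DONTNEED);
 *
 * the pages should refill from the file on the next touch, but if qemu
 * has quietly remapped the region as anonymous memory the kernel would
 * hand back zero pages instead. As madvise is only advisory, reporting
 * success without acting on it is the conservative choice.
 */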
11713 #if TARGET_ABI_BITS == 32
11714 case TARGET_NR_fcntl64:
11718 from_flock64_fn *copyfrom = copy_from_user_flock64;
11719 to_flock64_fn *copyto = copy_to_user_flock64;
11721 #ifdef TARGET_ARM
11722 if (!((CPUARMState *)cpu_env)->eabi) {
11723 copyfrom = copy_from_user_oabi_flock64;
11724 copyto = copy_to_user_oabi_flock64;
11725 }
11726 #endif
11728 cmd = target_to_host_fcntl_cmd(arg2);
11729 if (cmd == -TARGET_EINVAL) {
11735 case TARGET_F_GETLK64:
11736 ret = copyfrom(&fl, arg3);
11740 ret = get_errno(fcntl(arg1, cmd, &fl));
11742 ret = copyto(arg3, &fl);
11746 case TARGET_F_SETLK64:
11747 case TARGET_F_SETLKW64:
11748 ret = copyfrom(&fl, arg3);
11752 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11755 ret = do_fcntl(arg1, arg2, arg3);
11761 #ifdef TARGET_NR_cacheflush
11762 case TARGET_NR_cacheflush:
11763 /* self-modifying code is handled automatically, so nothing needed */
11767 #ifdef TARGET_NR_security
11768 case TARGET_NR_security:
11769 goto unimplemented;
11771 #ifdef TARGET_NR_getpagesize
11772 case TARGET_NR_getpagesize:
11773 ret = TARGET_PAGE_SIZE;
11776 case TARGET_NR_gettid:
11777 ret = get_errno(gettid());
11779 #ifdef TARGET_NR_readahead
11780 case TARGET_NR_readahead:
11781 #if TARGET_ABI_BITS == 32
11782 if (regpairs_aligned(cpu_env, num)) {
11787 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11789 ret = get_errno(readahead(arg1, arg2, arg3));
11794 #ifdef TARGET_NR_setxattr
11795 case TARGET_NR_listxattr:
11796 case TARGET_NR_llistxattr:
11800 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11802 ret = -TARGET_EFAULT;
11806 p = lock_user_string(arg1);
11808 if (num == TARGET_NR_listxattr) {
11809 ret = get_errno(listxattr(p, b, arg3));
11811 ret = get_errno(llistxattr(p, b, arg3));
11814 ret = -TARGET_EFAULT;
11816 unlock_user(p, arg1, 0);
11817 unlock_user(b, arg2, arg3);
11820 case TARGET_NR_flistxattr:
11824 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11826 ret = -TARGET_EFAULT;
11830 ret = get_errno(flistxattr(arg1, b, arg3));
11831 unlock_user(b, arg2, arg3);
11834 case TARGET_NR_setxattr:
11835 case TARGET_NR_lsetxattr:
11837 void *p, *n, *v = 0;
11839 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11841 ret = -TARGET_EFAULT;
11845 p = lock_user_string(arg1);
11846 n = lock_user_string(arg2);
11848 if (num == TARGET_NR_setxattr) {
11849 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11851 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11854 ret = -TARGET_EFAULT;
11856 unlock_user(p, arg1, 0);
11857 unlock_user(n, arg2, 0);
11858 unlock_user(v, arg3, 0);
11861 case TARGET_NR_fsetxattr:
11865 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11867 ret = -TARGET_EFAULT;
11871 n = lock_user_string(arg2);
11873 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11875 ret = -TARGET_EFAULT;
11877 unlock_user(n, arg2, 0);
11878 unlock_user(v, arg3, 0);
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
    {
        void *p, *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!v) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (p && n) {
            if (num == TARGET_NR_getxattr) {
                ret = get_errno(getxattr(p, n, v, arg4));
            } else {
                ret = get_errno(lgetxattr(p, n, v, arg4));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, arg4);
        break;
    }
    case TARGET_NR_fgetxattr:
    {
        void *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!v) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        n = lock_user_string(arg2);
        if (n) {
            ret = get_errno(fgetxattr(arg1, n, v, arg4));
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, arg4);
        break;
    }
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
    {
        void *p, *n;
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (p && n) {
            if (num == TARGET_NR_removexattr) {
                ret = get_errno(removexattr(p, n));
            } else {
                ret = get_errno(lremovexattr(p, n));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
        break;
    }
    case TARGET_NR_fremovexattr:
    {
        void *n;
        n = lock_user_string(arg2);
        if (n) {
            ret = get_errno(fremovexattr(arg1, n));
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(n, arg2, 0);
        break;
    }
#endif
#endif /* CONFIG_ATTR */
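    /* set_thread_area has no single host equivalent here: each target
     * keeps its TLS pointer in its own architectural state (the MIPS
     * UserLocal register, the CRIS PR_PID special register, an x86 GDT
     * entry via do_set_thread_area(), or the per-thread TaskState on
     * m68k), so the cases below update the emulated CPU state directly.
     */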
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *)cpu_env)->active_tc.CP0_UserLocal = arg1;
        ret = 0;
        break;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff) {
            ret = -TARGET_EINVAL;
        } else {
            ((CPUCRISState *)cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        break;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_set_thread_area(cpu_env, arg1);
        break;
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            ts->tp_value = arg1;
            ret = 0;
            break;
        }
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_get_thread_area(cpu_env, arg1);
        break;
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            ret = ts->tp_value;
            break;
        }
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        goto unimplemented_nowarn;
#endif

#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        if (arg4) {
            host_to_target_timespec(arg4, &ts);
        }

#if defined(TARGET_PPC)
        /* clock_nanosleep is odd in that it returns positive errno values.
         * On PPC, CR0 bit 3 should be set in such a situation. */
        if (ret && ret != -TARGET_ERESTARTSYS) {
            ((CPUPPCState *)cpu_env)->crf[0] |= 1;
        }
#endif
        break;
    }
#endif
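    /* set_tid_address can be handed straight to the host kernel: it only
     * records a pointer that the kernel zeroes (and futex-wakes) when the
     * thread exits, and g2h() translates the guest address to the host
     * mapping of the same memory, so that write lands in guest memory.
     */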
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
        break;
#endif

    case TARGET_NR_tkill:
        ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
        break;

    case TARGET_NR_tgkill:
        ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
        break;

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        goto unimplemented_nowarn;
#endif
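    /* For reference, a guest C library would typically register its list
     * head once per thread with something like
     *     syscall(__NR_set_robust_list, &head, sizeof(head));
     * and the ENOSYS returned above is what tells it to fall back to
     * non-robust mutexes.
     */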
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
    {
        struct timespec *tsp, ts[2];

        /* A NULL times pointer means "set both timestamps to now". */
        if (!arg3) {
            tsp = NULL;
        } else {
            target_to_host_timespec(ts, arg3);
            target_to_host_timespec(ts + 1,
                                    arg3 + sizeof(struct target_timespec));
            tsp = ts;
        }
        if (!arg2) {
            ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
        } else {
            if (!(p = lock_user_string(arg2))) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
            ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
            unlock_user(p, arg2, 0);
        }
        break;
    }
#endif
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        break;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        break;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
    {
        struct mq_attr posix_mq_attr;
        struct mq_attr *pposix_mq_attr;
        int host_flags;

        host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
        pposix_mq_attr = NULL;
        if (arg4) {
            if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                goto efault;
            }
            pposix_mq_attr = &posix_mq_attr;
        }
        p = lock_user_string(arg1 - 1);
        if (!p) {
            goto efault;
        }
        ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
        unlock_user(p, arg1, 0);
        break;
    }

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            ret = -TARGET_EFAULT;
            break;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        break;

    case TARGET_NR_mq_timedsend:
    {
        struct timespec ts;

        p = lock_user(VERIFY_READ, arg2, arg3, 1);
        if (arg5 != 0) {
            target_to_host_timespec(&ts, arg5);
            ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
            host_to_target_timespec(arg5, &ts);
        } else {
            ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
        }
        unlock_user(p, arg2, arg3);
        break;
    }

    case TARGET_NR_mq_timedreceive:
    {
        struct timespec ts;
        unsigned int prio;

        p = lock_user(VERIFY_READ, arg2, arg3, 1);
        if (arg5 != 0) {
            target_to_host_timespec(&ts, arg5);
            ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                 &prio, &ts));
            host_to_target_timespec(arg5, &ts);
        } else {
            ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                 &prio, NULL));
        }
        unlock_user(p, arg2, arg3);
        if (arg4 != 0) {
            put_user_u32(prio, arg4);
        }
        break;
    }

    /* Not implemented for now... */
/*     case TARGET_NR_mq_notify: */
/*         break; */

    case TARGET_NR_mq_getsetattr:
    {
        struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
        ret = 0;
        if (arg2 != 0) {
            copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
            ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                       &posix_mq_attr_out));
        } else if (arg3 != 0) {
            ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
        }
        if (ret == 0 && arg3 != 0) {
            copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
        }
        break;
    }
#endif
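    /* For splice(), a NULL offset pointer means "use (and update) the
     * file descriptor's own offset", so the handler below only reads and
     * writes back the 64-bit loff_t slots the guest actually supplied,
     * leaving NULL for the ones it did not.
     */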
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
    {
        ret = get_errno(tee(arg1, arg2, arg3, arg4));
        break;
    }
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
    {
        loff_t loff_in, loff_out;
        loff_t *ploff_in = NULL, *ploff_out = NULL;
        if (arg2) {
            if (get_user_u64(loff_in, arg2)) {
                goto efault;
            }
            ploff_in = &loff_in;
        }
        if (arg4) {
            if (get_user_u64(loff_out, arg4)) {
                goto efault;
            }
            ploff_out = &loff_out;
        }
        ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        if (arg2) {
            if (put_user_u64(loff_in, arg2)) {
                goto efault;
            }
        }
        if (arg4) {
            if (put_user_u64(loff_out, arg4)) {
                goto efault;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
    {
        struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
        if (vec != NULL) {
            ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
            unlock_iovec(vec, arg2, arg3, 0);
        } else {
            ret = -host_to_target_errno(errno);
        }
        break;
    }
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        break;
    }
#endif
#endif /* CONFIG_EVENTFD */
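    /* fallocate() takes two 64-bit values. As with readahead above, a
     * 32-bit guest passes each one as a register pair that
     * target_offset64() reassembles; a 64-bit guest passes them directly.
     */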
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        ret = do_signalfd4(arg1, arg2, arg4);
        break;
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        ret = do_signalfd4(arg1, arg2, 0);
        break;
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif
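    /* epoll_wait/epoll_pwait bounce the returned events through a host
     * array so each struct can be byte-swapped individually. maxevents is
     * bounded by TARGET_EP_MAX_EVENTS before the g_try_new() allocation,
     * mirroring the kernel's own EP_MAX_EVENTS limit on the same argument.
     */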
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            ret = -TARGET_EINVAL;
            break;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            ret = -TARGET_ENOMEM;
            break;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        break;
    }
#endif
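    /* struct rlimit64 uses fixed-width 64-bit fields on every ABI, so the
     * only conversion prlimit64 needs is the tswap64() byte-swapping
     * below; there is no layout translation to do.
     */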
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        break;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        ret = mem_value;
        break;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
    {
        /* Like the kernel implementation and the qemu arm barrier, no-op this? */
        ret = 0;
        break;
    }
#endif
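    /* Guest-visible POSIX timer ids encode an index into the
     * g_posix_timers[] table tagged with TIMER_MAGIC; get_timer_id()
     * checks the tag and recovers the index. This keeps the guest's
     * timer_t a small, stable integer regardless of what the host
     * library uses for timer_t.
     */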
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    goto efault;
                }
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                goto efault;
            }
            ret = get_errno(
                    timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                goto efault;
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        fd_trans_unregister(ret);
        break;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[timerid] = 0;
        }
        break;
    }
#endif
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        break;
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
    {
        struct itimerspec its_curr;

        ret = get_errno(timerfd_gettime(arg1, &its_curr));

        if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
            goto efault;
        }
        break;
    }
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
    {
        struct itimerspec its_new, its_old, *p_new;

        if (arg3) {
            if (target_to_host_itimerspec(&its_new, arg3)) {
                goto efault;
            }
            p_new = &its_new;
        } else {
            p_new = NULL;
        }

        ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

        if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
            goto efault;
        }
        break;
    }
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        ret = get_errno(ioprio_get(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        ret = get_errno(ioprio_set(arg1, arg2, arg3));
        break;
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        ret = get_errno(setns(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        ret = get_errno(unshare(arg1));
        break;
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
        break;
#endif
    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || \
    defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if (do_strace) {
        print_syscall_ret(num, ret);
    }
    trace_guest_user_syscall_ret(cpu, num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;
    goto fail;