/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#include "qemu-common.h"
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef TARGET_GPROF
#include <sys/gmon.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#ifdef CONFIG_RTNETLINK
#include <linux/rtnetlink.h>
#include <linux/if_bridge.h>
#endif
#include <linux/audit.h>
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"
#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))
/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
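/* Illustrative sketch (not part of the original source): how the masks
 * above classify a guest clone flags value. A pthread_create()-style
 * clone must contain all of CLONE_THREAD_FLAGS and no bits outside the
 * supported and ignored sets.
 */
static inline int clone_flags_look_like_pthread_create(unsigned int flags)
{
    return (flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS &&
           (flags & CLONE_INVALID_THREAD_FLAGS) == 0;
}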
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)        \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)                  \
{                                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                        \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,        \
                  type5,arg5)                                                   \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)       \
{                                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);                  \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,        \
                  type5,arg5,type6,arg6)                                        \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,       \
                  type6 arg6)                                                   \
{                                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);            \
}
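/* For example, the wrapper declared below via
 * _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp,
 *           uint, count)
 * expands to:
 *
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 *
 * which is why the __NR_sys_* aliases that follow must exist.
 */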
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
#if defined(TARGET_NR_getdents) && defined(__NR_getdents)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if !defined(__NR_getdents) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
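/* The table is consumed by the generic bitmask translators, e.g. (sketch):
 *
 *     host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *
 * Lookups stop at the first all-zero entry, which explains the guard above:
 * on a 64-bit host and guest O_LARGEFILE is 0 on both sides, so an
 * unguarded O_LARGEFILE row would be indistinguishable from the terminator
 * and would cut the table short.
 */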
enum {
    QEMU_IFLA_BR_UNSPEC,
    QEMU_IFLA_BR_FORWARD_DELAY,
    QEMU_IFLA_BR_HELLO_TIME,
    QEMU_IFLA_BR_MAX_AGE,
    QEMU_IFLA_BR_AGEING_TIME,
    QEMU_IFLA_BR_STP_STATE,
    QEMU_IFLA_BR_PRIORITY,
    QEMU_IFLA_BR_VLAN_FILTERING,
    QEMU_IFLA_BR_VLAN_PROTOCOL,
    QEMU_IFLA_BR_GROUP_FWD_MASK,
    QEMU_IFLA_BR_ROOT_ID,
    QEMU_IFLA_BR_BRIDGE_ID,
    QEMU_IFLA_BR_ROOT_PORT,
    QEMU_IFLA_BR_ROOT_PATH_COST,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
    QEMU_IFLA_BR_HELLO_TIMER,
    QEMU_IFLA_BR_TCN_TIMER,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
    QEMU_IFLA_BR_GC_TIMER,
    QEMU_IFLA_BR_GROUP_ADDR,
    QEMU_IFLA_BR_FDB_FLUSH,
    QEMU_IFLA_BR_MCAST_ROUTER,
    QEMU_IFLA_BR_MCAST_SNOOPING,
    QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
    QEMU_IFLA_BR_MCAST_QUERIER,
    QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
    QEMU_IFLA_BR_MCAST_HASH_MAX,
    QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
    QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
    QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
    QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
    QEMU_IFLA_BR_MCAST_QUERY_INTVL,
    QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
    QEMU_IFLA_BR_NF_CALL_IPTABLES,
    QEMU_IFLA_BR_NF_CALL_IP6TABLES,
    QEMU_IFLA_BR_NF_CALL_ARPTABLES,
    QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
    QEMU_IFLA_BR_PAD,
    QEMU_IFLA_BR_VLAN_STATS_ENABLED,
    QEMU_IFLA_BR_MCAST_STATS_ENABLED,
    QEMU___IFLA_BR_MAX,
};

enum {
    QEMU_IFLA_UNSPEC,
    QEMU_IFLA_ADDRESS,
    QEMU_IFLA_BROADCAST,
    QEMU_IFLA_IFNAME,
    QEMU_IFLA_MTU,
    QEMU_IFLA_LINK,
    QEMU_IFLA_QDISC,
    QEMU_IFLA_STATS,
    QEMU_IFLA_COST,
    QEMU_IFLA_PRIORITY,
    QEMU_IFLA_MASTER,
    QEMU_IFLA_WIRELESS,
    QEMU_IFLA_PROTINFO,
    QEMU_IFLA_TXQLEN,
    QEMU_IFLA_MAP,
    QEMU_IFLA_WEIGHT,
    QEMU_IFLA_OPERSTATE,
    QEMU_IFLA_LINKMODE,
    QEMU_IFLA_LINKINFO,
    QEMU_IFLA_NET_NS_PID,
    QEMU_IFLA_IFALIAS,
    QEMU_IFLA_NUM_VF,
    QEMU_IFLA_VFINFO_LIST,
    QEMU_IFLA_STATS64,
    QEMU_IFLA_VF_PORTS,
    QEMU_IFLA_PORT_SELF,
    QEMU_IFLA_AF_SPEC,
    QEMU_IFLA_GROUP,
    QEMU_IFLA_NET_NS_FD,
    QEMU_IFLA_EXT_MASK,
    QEMU_IFLA_PROMISCUITY,
    QEMU_IFLA_NUM_TX_QUEUES,
    QEMU_IFLA_NUM_RX_QUEUES,
    QEMU_IFLA_CARRIER,
    QEMU_IFLA_PHYS_PORT_ID,
    QEMU_IFLA_CARRIER_CHANGES,
    QEMU_IFLA_PHYS_SWITCH_ID,
    QEMU_IFLA_LINK_NETNSID,
    QEMU_IFLA_PHYS_PORT_NAME,
    QEMU_IFLA_PROTO_DOWN,
    QEMU_IFLA_GSO_MAX_SEGS,
    QEMU_IFLA_GSO_MAX_SIZE,
    QEMU_IFLA_PAD,
    QEMU_IFLA_XDP,
    QEMU___IFLA_MAX,
};
enum {
    QEMU_IFLA_BRPORT_UNSPEC,
    QEMU_IFLA_BRPORT_STATE,
    QEMU_IFLA_BRPORT_PRIORITY,
    QEMU_IFLA_BRPORT_COST,
    QEMU_IFLA_BRPORT_MODE,
    QEMU_IFLA_BRPORT_GUARD,
    QEMU_IFLA_BRPORT_PROTECT,
    QEMU_IFLA_BRPORT_FAST_LEAVE,
    QEMU_IFLA_BRPORT_LEARNING,
    QEMU_IFLA_BRPORT_UNICAST_FLOOD,
    QEMU_IFLA_BRPORT_PROXYARP,
    QEMU_IFLA_BRPORT_LEARNING_SYNC,
    QEMU_IFLA_BRPORT_PROXYARP_WIFI,
    QEMU_IFLA_BRPORT_ROOT_ID,
    QEMU_IFLA_BRPORT_BRIDGE_ID,
    QEMU_IFLA_BRPORT_DESIGNATED_PORT,
    QEMU_IFLA_BRPORT_DESIGNATED_COST,
    QEMU_IFLA_BRPORT_ID,
    QEMU_IFLA_BRPORT_NO,
    QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
    QEMU_IFLA_BRPORT_CONFIG_PENDING,
    QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
    QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
    QEMU_IFLA_BRPORT_HOLD_TIMER,
    QEMU_IFLA_BRPORT_FLUSH,
    QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
    QEMU_IFLA_BRPORT_PAD,
    QEMU___IFLA_BRPORT_MAX
};
enum {
    QEMU_IFLA_INFO_UNSPEC,
    QEMU_IFLA_INFO_KIND,
    QEMU_IFLA_INFO_DATA,
    QEMU_IFLA_INFO_XSTATS,
    QEMU_IFLA_INFO_SLAVE_KIND,
    QEMU_IFLA_INFO_SLAVE_DATA,
    QEMU___IFLA_INFO_MAX,
};

enum {
    QEMU_IFLA_INET_UNSPEC,
    QEMU_IFLA_INET_CONF,
    QEMU___IFLA_INET_MAX,
};

enum {
    QEMU_IFLA_INET6_UNSPEC,
    QEMU_IFLA_INET6_FLAGS,
    QEMU_IFLA_INET6_CONF,
    QEMU_IFLA_INET6_STATS,
    QEMU_IFLA_INET6_MCAST,
    QEMU_IFLA_INET6_CACHEINFO,
    QEMU_IFLA_INET6_ICMP6STATS,
    QEMU_IFLA_INET6_TOKEN,
    QEMU_IFLA_INET6_ADDR_GEN_MODE,
    QEMU___IFLA_INET6_MAX
};
typedef abi_long (*TargetFdDataFunc)(void *, size_t);
typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
typedef struct TargetFdTrans {
    TargetFdDataFunc host_to_target_data;
    TargetFdDataFunc target_to_host_data;
    TargetFdAddrFunc target_to_host_addr;
} TargetFdTrans;

static TargetFdTrans **target_fd_trans;

static unsigned int target_fd_max;

static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
{
    if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
        return target_fd_trans[fd]->target_to_host_data;
    }
    return NULL;
}

static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
{
    if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
        return target_fd_trans[fd]->host_to_target_data;
    }
    return NULL;
}

static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
{
    if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
        return target_fd_trans[fd]->target_to_host_addr;
    }
    return NULL;
}

static void fd_trans_register(int fd, TargetFdTrans *trans)
{
    unsigned int oldmax;

    if (fd >= target_fd_max) {
        oldmax = target_fd_max;
        target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
        target_fd_trans = g_renew(TargetFdTrans *,
                                  target_fd_trans, target_fd_max);
        memset((void *)(target_fd_trans + oldmax), 0,
               (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
    }
    target_fd_trans[fd] = trans;
}

static void fd_trans_unregister(int fd)
{
    if (fd >= 0 && fd < target_fd_max) {
        target_fd_trans[fd] = NULL;
    }
}

static void fd_trans_dup(int oldfd, int newfd)
{
    fd_trans_unregister(newfd);
    if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
        fd_trans_register(newfd, target_fd_trans[oldfd]);
    }
}
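/* Usage sketch (hypothetical translator, not from this file): a subsystem
 * that creates a host fd whose data needs conversion for the guest
 * registers a TargetFdTrans for it, e.g.:
 *
 *     static TargetFdTrans example_trans = {
 *         .host_to_target_data = host_to_target_data_example,
 *     };
 *     ...
 *     fd = get_errno(some_host_syscall(...));
 *     if (fd >= 0) {
 *         fd_trans_register(fd, &example_trans);
 *     }
 */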
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
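/* One possible lock-free answer to the FIXME above (a sketch, not the code
 * this file uses): claim a slot with an atomic compare-and-swap so that two
 * guest threads cannot both take slot k:
 *
 *     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
 *         timer_t expected = 0;
 *         if (__atomic_compare_exchange_n(&g_posix_timers[k], &expected,
 *                                         (timer_t) 1, false,
 *                                         __ATOMIC_SEQ_CST,
 *                                         __ATOMIC_SEQ_CST)) {
 *             return k;
 *         }
 *     }
 *     return -1;
 */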
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
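/* Worked example of what the helper above controls: on a 32-bit ABI that
 * requires alignment, a guest pread64(fd, buf, count, offset) passes the
 * 64-bit offset in an even/odd register pair, so the arguments arrive as
 * (fd, buf, count, <pad>, offset-half, offset-half) rather than
 * (fd, buf, count, offset-half, offset-half). The syscall dispatcher uses
 * regpairs_aligned() to decide whether such a padding slot must be skipped
 * before reassembling the 64-bit value from the pair.
 */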
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN] = TARGET_EAGAIN,
    [EIDRM] = TARGET_EIDRM,
    [ECHRNG] = TARGET_ECHRNG,
    [EL2NSYNC] = TARGET_EL2NSYNC,
    [EL3HLT] = TARGET_EL3HLT,
    [EL3RST] = TARGET_EL3RST,
    [ELNRNG] = TARGET_ELNRNG,
    [EUNATCH] = TARGET_EUNATCH,
    [ENOCSI] = TARGET_ENOCSI,
    [EL2HLT] = TARGET_EL2HLT,
    [EDEADLK] = TARGET_EDEADLK,
    [ENOLCK] = TARGET_ENOLCK,
    [EBADE] = TARGET_EBADE,
    [EBADR] = TARGET_EBADR,
    [EXFULL] = TARGET_EXFULL,
    [ENOANO] = TARGET_ENOANO,
    [EBADRQC] = TARGET_EBADRQC,
    [EBADSLT] = TARGET_EBADSLT,
    [EBFONT] = TARGET_EBFONT,
    [ENOSTR] = TARGET_ENOSTR,
    [ENODATA] = TARGET_ENODATA,
    [ETIME] = TARGET_ETIME,
    [ENOSR] = TARGET_ENOSR,
    [ENONET] = TARGET_ENONET,
    [ENOPKG] = TARGET_ENOPKG,
    [EREMOTE] = TARGET_EREMOTE,
    [ENOLINK] = TARGET_ENOLINK,
    [EADV] = TARGET_EADV,
    [ESRMNT] = TARGET_ESRMNT,
    [ECOMM] = TARGET_ECOMM,
    [EPROTO] = TARGET_EPROTO,
    [EDOTDOT] = TARGET_EDOTDOT,
    [EMULTIHOP] = TARGET_EMULTIHOP,
    [EBADMSG] = TARGET_EBADMSG,
    [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
    [EOVERFLOW] = TARGET_EOVERFLOW,
    [ENOTUNIQ] = TARGET_ENOTUNIQ,
    [EBADFD] = TARGET_EBADFD,
    [EREMCHG] = TARGET_EREMCHG,
    [ELIBACC] = TARGET_ELIBACC,
    [ELIBBAD] = TARGET_ELIBBAD,
    [ELIBSCN] = TARGET_ELIBSCN,
    [ELIBMAX] = TARGET_ELIBMAX,
    [ELIBEXEC] = TARGET_ELIBEXEC,
    [EILSEQ] = TARGET_EILSEQ,
    [ENOSYS] = TARGET_ENOSYS,
    [ELOOP] = TARGET_ELOOP,
    [ERESTART] = TARGET_ERESTART,
    [ESTRPIPE] = TARGET_ESTRPIPE,
    [ENOTEMPTY] = TARGET_ENOTEMPTY,
    [EUSERS] = TARGET_EUSERS,
    [ENOTSOCK] = TARGET_ENOTSOCK,
    [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
    [EMSGSIZE] = TARGET_EMSGSIZE,
    [EPROTOTYPE] = TARGET_EPROTOTYPE,
    [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
    [EADDRINUSE] = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
    [ENETDOWN] = TARGET_ENETDOWN,
    [ENETUNREACH] = TARGET_ENETUNREACH,
    [ENETRESET] = TARGET_ENETRESET,
    [ECONNABORTED] = TARGET_ECONNABORTED,
    [ECONNRESET] = TARGET_ECONNRESET,
    [ENOBUFS] = TARGET_ENOBUFS,
    [EISCONN] = TARGET_EISCONN,
    [ENOTCONN] = TARGET_ENOTCONN,
    [EUCLEAN] = TARGET_EUCLEAN,
    [ENOTNAM] = TARGET_ENOTNAM,
    [ENAVAIL] = TARGET_ENAVAIL,
    [EISNAM] = TARGET_EISNAM,
    [EREMOTEIO] = TARGET_EREMOTEIO,
    [EDQUOT] = TARGET_EDQUOT,
    [ESHUTDOWN] = TARGET_ESHUTDOWN,
    [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
    [ETIMEDOUT] = TARGET_ETIMEDOUT,
    [ECONNREFUSED] = TARGET_ECONNREFUSED,
    [EHOSTDOWN] = TARGET_EHOSTDOWN,
    [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
    [EALREADY] = TARGET_EALREADY,
    [EINPROGRESS] = TARGET_EINPROGRESS,
    [ESTALE] = TARGET_ESTALE,
    [ECANCELED] = TARGET_ECANCELED,
    [ENOMEDIUM] = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY] = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED] = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED] = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD] = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
#ifdef ENOMSG
    [ENOMSG] = TARGET_ENOMSG,
#endif
#ifdef ERFKILL
    [ERFKILL] = TARGET_ERFKILL,
#endif
#ifdef EHWPOISON
    [EHWPOISON] = TARGET_EHWPOISON,
#endif
};
static inline int host_to_target_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        host_to_target_errno_table[err]) {
        return host_to_target_errno_table[err];
    }
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1) {
        return -host_to_target_errno(errno);
    } else {
        return ret;
    }
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
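/* Typical use of the helpers above (a sketch of the pattern used throughout
 * this file): run a host syscall, fold the host errno into the target's
 * negative-errno convention, then test the result with is_error():
 *
 *     abi_long ret = get_errno(open(path, host_flags, mode));
 *     if (is_error(ret)) {
 *         return ret;            // already a -TARGET_Exxx value
 *     }
 */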
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
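/* For example, the declaration of safe_read below expands to:
 *
 *     static ssize_t safe_read(int fd, void *buff, size_t count)
 *     {
 *         return safe_syscall(__NR_read, fd, buff, count);
 *     }
 *
 * i.e. a drop-in replacement for read() that goes through safe_syscall(),
 * so a guest signal arriving just before the call makes it fail with
 * TARGET_ERESTARTSYS instead of racing with the signal handler.
 */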
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#else
/* This host kernel architecture uses a single ipc syscall; fake up
 * wrappers for the sub-operations to hide this implementation detail.
 * Annoyingly we can't include linux/ipc.h to get the constant definitions
 * for the call parameter because some structs in there conflict with the
 * sys/ipc.h ones. So we just define them here, and rely on them being
 * the same for all host architectures.
 */
#define Q_SEMTIMEDOP 4
#define Q_MSGSND 11
#define Q_MSGRCV 12
#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))

safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}
static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
                           const struct timespec *timeout)
{
    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
                    (long)timeout);
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
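/* Usage sketch for the rule above: even on a 32-bit host, a lock request
 * goes through the 64-bit interface:
 *
 *     struct flock64 fl64;
 *     ...
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * so file offsets keep their full 64-bit width on every host.
 */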
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
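/* Worked example for the two converters above: with TARGET_ABI_BITS == 32,
 * guest fd 33 lives in target_fds[1] bit 1 (33 = 1 * 32 + 1). The host
 * fd_set layout may differ (word size, endianness), which is why the bits
 * are copied one at a time through FD_SET()/FD_ISSET() rather than with a
 * memcpy of the bitmap.
 */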
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
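/* Worked example of the sun_path fix above: a guest calling
 * connect(fd, addr, offsetof(struct sockaddr_un, sun_path) + strlen(path))
 * has left the terminating '\0' outside the reported length. If the byte
 * at cp[len-1] is non-zero but cp[len] is the terminator, len is extended
 * by one so the host kernel sees a properly terminated path.
 */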
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                tgt_len = len;
                break;
            }
            break;
        default:
            tgt_len = len;
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                              (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                              (struct nlmsghdr *))
{
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
        }
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}
#ifdef CONFIG_RTNETLINK
static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
                                               size_t len, void *context,
                                               abi_long (*host_to_target_nlattr)
                                               (struct nlattr *,
                                                void *context))
{
    unsigned short nla_len;
    abi_long ret;

    while (len > sizeof(struct nlattr)) {
        nla_len = nlattr->nla_len;
        if (nla_len < sizeof(struct nlattr) ||
            nla_len > len) {
            break;
        }
        ret = host_to_target_nlattr(nlattr, context);
        nlattr->nla_len = tswap16(nlattr->nla_len);
        nlattr->nla_type = tswap16(nlattr->nla_type);
        if (ret < 0) {
            return ret;
        }
        len -= NLA_ALIGN(nla_len);
        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
    }
    return 0;
}
2119 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
2121 abi_long (*host_to_target_rtattr)
2124 unsigned short rta_len;
2127 while (len > sizeof(struct rtattr)) {
2128 rta_len = rtattr->rta_len;
2129 if (rta_len < sizeof(struct rtattr) ||
2133 ret = host_to_target_rtattr(rtattr);
2134 rtattr->rta_len = tswap16(rtattr->rta_len);
2135 rtattr->rta_type = tswap16(rtattr->rta_type);
2139 len -= RTA_ALIGN(rta_len);
2140 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
2145 #define NLA_DATA(nla) ((void *)(((char *)(nla)) + NLA_HDRLEN))
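/*
 * An attribute is a { struct nlattr, payload } pair, so the payload of
 * attribute "nla" starts NLA_HDRLEN bytes in, mirroring the kernel's
 * own nla_data()/nla_len() helpers:
 *
 *     uint16_t *u16 = NLA_DATA(nla);                  /+ payload start +/
 *     size_t payload_len = nla->nla_len - NLA_HDRLEN; /+ payload size +/
 */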
2147 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
2154 switch (nlattr->nla_type) {
2156 case QEMU_IFLA_BR_FDB_FLUSH:
2159 case QEMU_IFLA_BR_GROUP_ADDR:
2162 case QEMU_IFLA_BR_VLAN_FILTERING:
2163 case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
2164 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
2165 case QEMU_IFLA_BR_MCAST_ROUTER:
2166 case QEMU_IFLA_BR_MCAST_SNOOPING:
2167 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
2168 case QEMU_IFLA_BR_MCAST_QUERIER:
2169 case QEMU_IFLA_BR_NF_CALL_IPTABLES:
2170 case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
2171 case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
2174 case QEMU_IFLA_BR_PRIORITY:
2175 case QEMU_IFLA_BR_VLAN_PROTOCOL:
2176 case QEMU_IFLA_BR_GROUP_FWD_MASK:
2177 case QEMU_IFLA_BR_ROOT_PORT:
2178 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
2179 u16 = NLA_DATA(nlattr);
2180 *u16 = tswap16(*u16);
2183 case QEMU_IFLA_BR_FORWARD_DELAY:
2184 case QEMU_IFLA_BR_HELLO_TIME:
2185 case QEMU_IFLA_BR_MAX_AGE:
2186 case QEMU_IFLA_BR_AGEING_TIME:
2187 case QEMU_IFLA_BR_STP_STATE:
2188 case QEMU_IFLA_BR_ROOT_PATH_COST:
2189 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
2190 case QEMU_IFLA_BR_MCAST_HASH_MAX:
2191 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
2192 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
2193 u32 = NLA_DATA(nlattr);
2194 *u32 = tswap32(*u32);
2197 case QEMU_IFLA_BR_HELLO_TIMER:
2198 case QEMU_IFLA_BR_TCN_TIMER:
2199 case QEMU_IFLA_BR_GC_TIMER:
2200 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
2201 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
2202 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
2203 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
2204 case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
2205 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
2206 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
2207 u64 = NLA_DATA(nlattr);
2208 *u64 = tswap64(*u64);
2210 /* ifla_bridge_id: uint8_t[] */
2211 case QEMU_IFLA_BR_ROOT_ID:
2212 case QEMU_IFLA_BR_BRIDGE_ID:
2215 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
2221 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2228 switch (nlattr->nla_type) {
2230 case QEMU_IFLA_BRPORT_STATE:
2231 case QEMU_IFLA_BRPORT_MODE:
2232 case QEMU_IFLA_BRPORT_GUARD:
2233 case QEMU_IFLA_BRPORT_PROTECT:
2234 case QEMU_IFLA_BRPORT_FAST_LEAVE:
2235 case QEMU_IFLA_BRPORT_LEARNING:
2236 case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2237 case QEMU_IFLA_BRPORT_PROXYARP:
2238 case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2239 case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2240 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2241 case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2242 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
2245 case QEMU_IFLA_BRPORT_PRIORITY:
2246 case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2247 case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2248 case QEMU_IFLA_BRPORT_ID:
2249 case QEMU_IFLA_BRPORT_NO:
2250 u16 = NLA_DATA(nlattr);
2251 *u16 = tswap16(*u16);
2254 case QEMU_IFLA_BRPORT_COST:
2255 u32 = NLA_DATA(nlattr);
2256 *u32 = tswap32(*u32);
2259 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2260 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2261 case QEMU_IFLA_BRPORT_HOLD_TIMER:
2262 u64 = NLA_DATA(nlattr);
2263 *u64 = tswap64(*u64);
2265 /* ifla_bridge_id: uint8_t[] */
2266 case QEMU_IFLA_BRPORT_ROOT_ID:
2267 case QEMU_IFLA_BRPORT_BRIDGE_ID:
2270 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
2276 struct linkinfo_context {
2283 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2286 struct linkinfo_context *li_context = context;
2288 switch (nlattr->nla_type) {
2290 case QEMU_IFLA_INFO_KIND:
2291 li_context->name = NLA_DATA(nlattr);
2292 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2294 case QEMU_IFLA_INFO_SLAVE_KIND:
2295 li_context->slave_name = NLA_DATA(nlattr);
2296 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2299 case QEMU_IFLA_INFO_XSTATS:
2300 /* FIXME: only used by CAN */
2303 case QEMU_IFLA_INFO_DATA:
2304 if (strncmp(li_context->name, "bridge",
2305 li_context->len) == 0) {
2306 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2309 host_to_target_data_bridge_nlattr);
2311 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2314 case QEMU_IFLA_INFO_SLAVE_DATA:
2315 if (strncmp(li_context->slave_name, "bridge",
2316 li_context->slave_len) == 0) {
2317 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2320 host_to_target_slave_data_bridge_nlattr);
2322 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2323 li_context->slave_name);
2327 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
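/*
 * The linkinfo handler above depends on QEMU_IFLA_INFO_KIND (and
 * _SLAVE_KIND) appearing in the attribute stream before the matching
 * _DATA/_SLAVE_DATA blob: the kind string is cached in the
 * linkinfo_context so the nested data can be dispatched to the right
 * per-type converter (currently only "bridge" is handled).
 */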
2334 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2340 switch (nlattr->nla_type) {
2341 case QEMU_IFLA_INET_CONF:
2342 u32 = NLA_DATA(nlattr);
2343 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2345 u32[i] = tswap32(u32[i]);
2349 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2354 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2359 struct ifla_cacheinfo *ci;
2362 switch (nlattr->nla_type) {
2364 case QEMU_IFLA_INET6_TOKEN:
2367 case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2370 case QEMU_IFLA_INET6_FLAGS:
2371 u32 = NLA_DATA(nlattr);
2372 *u32 = tswap32(*u32);
2375 case QEMU_IFLA_INET6_CONF:
2376 u32 = NLA_DATA(nlattr);
2377 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2379 u32[i] = tswap32(u32[i]);
2382 /* ifla_cacheinfo */
2383 case QEMU_IFLA_INET6_CACHEINFO:
2384 ci = NLA_DATA(nlattr);
2385 ci->max_reasm_len = tswap32(ci->max_reasm_len);
2386 ci->tstamp = tswap32(ci->tstamp);
2387 ci->reachable_time = tswap32(ci->reachable_time);
2388 ci->retrans_time = tswap32(ci->retrans_time);
2391 case QEMU_IFLA_INET6_STATS:
2392 case QEMU_IFLA_INET6_ICMP6STATS:
2393 u64 = NLA_DATA(nlattr);
2394 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2396 u64[i] = tswap64(u64[i]);
2400 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2405 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2408 switch (nlattr->nla_type) {
2410 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2412 host_to_target_data_inet_nlattr);
2414 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2416 host_to_target_data_inet6_nlattr);
2418 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2424 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2427 struct rtnl_link_stats *st;
2428 struct rtnl_link_stats64 *st64;
2429 struct rtnl_link_ifmap *map;
2430 struct linkinfo_context li_context;
2432 switch (rtattr->rta_type) {
2434 case QEMU_IFLA_ADDRESS:
2435 case QEMU_IFLA_BROADCAST:
2437 case QEMU_IFLA_IFNAME:
2438 case QEMU_IFLA_QDISC:
2441 case QEMU_IFLA_OPERSTATE:
2442 case QEMU_IFLA_LINKMODE:
2443 case QEMU_IFLA_CARRIER:
2444 case QEMU_IFLA_PROTO_DOWN:
2448 case QEMU_IFLA_LINK:
2449 case QEMU_IFLA_WEIGHT:
2450 case QEMU_IFLA_TXQLEN:
2451 case QEMU_IFLA_CARRIER_CHANGES:
2452 case QEMU_IFLA_NUM_RX_QUEUES:
2453 case QEMU_IFLA_NUM_TX_QUEUES:
2454 case QEMU_IFLA_PROMISCUITY:
2455 case QEMU_IFLA_EXT_MASK:
2456 case QEMU_IFLA_LINK_NETNSID:
2457 case QEMU_IFLA_GROUP:
2458 case QEMU_IFLA_MASTER:
2459 case QEMU_IFLA_NUM_VF:
2460 case QEMU_IFLA_GSO_MAX_SEGS:
2461 case QEMU_IFLA_GSO_MAX_SIZE:
2462 u32 = RTA_DATA(rtattr);
2463 *u32 = tswap32(*u32);
2465 /* struct rtnl_link_stats */
2466 case QEMU_IFLA_STATS:
2467 st = RTA_DATA(rtattr);
2468 st->rx_packets = tswap32(st->rx_packets);
2469 st->tx_packets = tswap32(st->tx_packets);
2470 st->rx_bytes = tswap32(st->rx_bytes);
2471 st->tx_bytes = tswap32(st->tx_bytes);
2472 st->rx_errors = tswap32(st->rx_errors);
2473 st->tx_errors = tswap32(st->tx_errors);
2474 st->rx_dropped = tswap32(st->rx_dropped);
2475 st->tx_dropped = tswap32(st->tx_dropped);
2476 st->multicast = tswap32(st->multicast);
2477 st->collisions = tswap32(st->collisions);
2479 /* detailed rx_errors: */
2480 st->rx_length_errors = tswap32(st->rx_length_errors);
2481 st->rx_over_errors = tswap32(st->rx_over_errors);
2482 st->rx_crc_errors = tswap32(st->rx_crc_errors);
2483 st->rx_frame_errors = tswap32(st->rx_frame_errors);
2484 st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2485 st->rx_missed_errors = tswap32(st->rx_missed_errors);
2487 /* detailed tx_errors */
2488 st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2489 st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2490 st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2491 st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2492 st->tx_window_errors = tswap32(st->tx_window_errors);
2495 st->rx_compressed = tswap32(st->rx_compressed);
2496 st->tx_compressed = tswap32(st->tx_compressed);
2498 /* struct rtnl_link_stats64 */
2499 case QEMU_IFLA_STATS64:
2500 st64 = RTA_DATA(rtattr);
2501 st64->rx_packets = tswap64(st64->rx_packets);
2502 st64->tx_packets = tswap64(st64->tx_packets);
2503 st64->rx_bytes = tswap64(st64->rx_bytes);
2504 st64->tx_bytes = tswap64(st64->tx_bytes);
2505 st64->rx_errors = tswap64(st64->rx_errors);
2506 st64->tx_errors = tswap64(st64->tx_errors);
2507 st64->rx_dropped = tswap64(st64->rx_dropped);
2508 st64->tx_dropped = tswap64(st64->tx_dropped);
2509 st64->multicast = tswap64(st64->multicast);
2510 st64->collisions = tswap64(st64->collisions);
2512 /* detailed rx_errors: */
2513 st64->rx_length_errors = tswap64(st64->rx_length_errors);
2514 st64->rx_over_errors = tswap64(st64->rx_over_errors);
2515 st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2516 st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2517 st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2518 st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2520 /* detailed tx_errors */
2521 st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2522 st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2523 st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2524 st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2525 st64->tx_window_errors = tswap64(st64->tx_window_errors);
2528 st64->rx_compressed = tswap64(st64->rx_compressed);
2529 st64->tx_compressed = tswap64(st64->tx_compressed);
2531 /* struct rtnl_link_ifmap */
2533 map = RTA_DATA(rtattr);
2534 map->mem_start = tswap64(map->mem_start);
2535 map->mem_end = tswap64(map->mem_end);
2536 map->base_addr = tswap64(map->base_addr);
2537 map->irq = tswap16(map->irq);
2540 case QEMU_IFLA_LINKINFO:
2541 memset(&li_context, 0, sizeof(li_context));
2542 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2544 host_to_target_data_linkinfo_nlattr);
2545 case QEMU_IFLA_AF_SPEC:
2546 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2548 host_to_target_data_spec_nlattr);
2550 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
2556 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2559 struct ifa_cacheinfo *ci;
2561 switch (rtattr->rta_type) {
2562 /* binary: depends on family type */
2572 u32 = RTA_DATA(rtattr);
2573 *u32 = tswap32(*u32);
2575 /* struct ifa_cacheinfo */
2577 ci = RTA_DATA(rtattr);
2578 ci->ifa_prefered = tswap32(ci->ifa_prefered);
2579 ci->ifa_valid = tswap32(ci->ifa_valid);
2580 ci->cstamp = tswap32(ci->cstamp);
2581 ci->tstamp = tswap32(ci->tstamp);
2584 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2590 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2593 switch (rtattr->rta_type) {
2594 /* binary: depends on family type */
2603 u32 = RTA_DATA(rtattr);
2604 *u32 = tswap32(*u32);
2607 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2613 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2614 uint32_t rtattr_len)
2616 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2617 host_to_target_data_link_rtattr);
2620 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2621 uint32_t rtattr_len)
2623 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2624 host_to_target_data_addr_rtattr);
2627 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2628 uint32_t rtattr_len)
2630 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2631 host_to_target_data_route_rtattr);
2634 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2637 struct ifinfomsg *ifi;
2638 struct ifaddrmsg *ifa;
2641 nlmsg_len = nlh->nlmsg_len;
2642 switch (nlh->nlmsg_type) {
2646 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2647 ifi = NLMSG_DATA(nlh);
2648 ifi->ifi_type = tswap16(ifi->ifi_type);
2649 ifi->ifi_index = tswap32(ifi->ifi_index);
2650 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2651 ifi->ifi_change = tswap32(ifi->ifi_change);
2652 host_to_target_link_rtattr(IFLA_RTA(ifi),
2653 nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
2659 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2660 ifa = NLMSG_DATA(nlh);
2661 ifa->ifa_index = tswap32(ifa->ifa_index);
2662 host_to_target_addr_rtattr(IFA_RTA(ifa),
2663 nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
2669 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2670 rtm = NLMSG_DATA(nlh);
2671 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2672 host_to_target_route_rtattr(RTM_RTA(rtm),
2673 nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2677 return -TARGET_EINVAL;
2682 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2685 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
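/*
 * For NETLINK_ROUTE each message type implies a fixed header followed
 * by a chain of rtattrs: RTM_*LINK carries an ifinfomsg with IFLA_*
 * attributes, RTM_*ADDR an ifaddrmsg with IFA_* attributes, and
 * RTM_*ROUTE an rtmsg with RTA_* attributes.  host_to_target_data_route()
 * above swaps the header in place and then walks the attribute chain
 * starting at the matching *_RTA() offset.
 */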
2688 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2690 abi_long (*target_to_host_rtattr)
2695 while (len >= sizeof(struct rtattr)) {
2696 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2697 tswap16(rtattr->rta_len) > len) {
2700 rtattr->rta_len = tswap16(rtattr->rta_len);
2701 rtattr->rta_type = tswap16(rtattr->rta_type);
2702 ret = target_to_host_rtattr(rtattr);
2706 len -= RTA_ALIGN(rtattr->rta_len);
2707 rtattr = (struct rtattr *)(((char *)rtattr) +
2708 RTA_ALIGN(rtattr->rta_len));
2713 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2715 switch (rtattr->rta_type) {
2717 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2723 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2725 switch (rtattr->rta_type) {
2726 /* binary: depends on family type */
2731 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2737 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2740 switch (rtattr->rta_type) {
2741 /* binary: depends on family type */
2749 u32 = RTA_DATA(rtattr);
2750 *u32 = tswap32(*u32);
2753 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2759 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2760 uint32_t rtattr_len)
2762 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2763 target_to_host_data_link_rtattr);
2766 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2767 uint32_t rtattr_len)
2769 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2770 target_to_host_data_addr_rtattr);
2773 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2774 uint32_t rtattr_len)
2776 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2777 target_to_host_data_route_rtattr);
2780 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2782 struct ifinfomsg *ifi;
2783 struct ifaddrmsg *ifa;
2786 switch (nlh->nlmsg_type) {
2791 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2792 ifi = NLMSG_DATA(nlh);
2793 ifi->ifi_type = tswap16(ifi->ifi_type);
2794 ifi->ifi_index = tswap32(ifi->ifi_index);
2795 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2796 ifi->ifi_change = tswap32(ifi->ifi_change);
2797 target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2798 NLMSG_LENGTH(sizeof(*ifi)));
2804 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2805 ifa = NLMSG_DATA(nlh);
2806 ifa->ifa_index = tswap32(ifa->ifa_index);
2807 target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2808 NLMSG_LENGTH(sizeof(*ifa)));
2815 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2816 rtm = NLMSG_DATA(nlh);
2817 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2818 target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2819 NLMSG_LENGTH(sizeof(*rtm)));
2823 return -TARGET_EOPNOTSUPP;
2828 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2830 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2832 #endif /* CONFIG_RTNETLINK */
2834 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2836 switch (nlh->nlmsg_type) {
2838 gemu_log("Unknown host audit message type %d\n",
2840 return -TARGET_EINVAL;
2845 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2848 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2851 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2853 switch (nlh->nlmsg_type) {
2855 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2856 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2859 gemu_log("Unknown target audit message type %d\n",
2861 return -TARGET_EINVAL;
2867 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2869 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2872 /* do_setsockopt() must return target values and target errnos. */
2873 static abi_long do_setsockopt(int sockfd, int level, int optname,
2874 abi_ulong optval_addr, socklen_t optlen)
2878 struct ip_mreqn *ip_mreq;
2879 struct ip_mreq_source *ip_mreq_source;
2883 /* TCP options all take an 'int' value. */
2884 if (optlen < sizeof(uint32_t))
2885 return -TARGET_EINVAL;
2887 if (get_user_u32(val, optval_addr))
2888 return -TARGET_EFAULT;
2889 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2896 case IP_ROUTER_ALERT:
2900 case IP_MTU_DISCOVER:
2907 case IP_MULTICAST_TTL:
2908 case IP_MULTICAST_LOOP:
2910 if (optlen >= sizeof(uint32_t)) {
2911 if (get_user_u32(val, optval_addr))
2912 return -TARGET_EFAULT;
2913 } else if (optlen >= 1) {
2914 if (get_user_u8(val, optval_addr))
2915 return -TARGET_EFAULT;
2917 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2919 case IP_ADD_MEMBERSHIP:
2920 case IP_DROP_MEMBERSHIP:
2921 if (optlen < sizeof (struct target_ip_mreq) ||
2922 optlen > sizeof (struct target_ip_mreqn))
2923 return -TARGET_EINVAL;
2925 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2926 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2927 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2930 case IP_BLOCK_SOURCE:
2931 case IP_UNBLOCK_SOURCE:
2932 case IP_ADD_SOURCE_MEMBERSHIP:
2933 case IP_DROP_SOURCE_MEMBERSHIP:
2934 if (optlen != sizeof (struct target_ip_mreq_source))
2935 return -TARGET_EINVAL;
2937 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2938 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2939 unlock_user (ip_mreq_source, optval_addr, 0);
2948 case IPV6_MTU_DISCOVER:
2951 case IPV6_RECVPKTINFO:
2952 case IPV6_UNICAST_HOPS:
2954 case IPV6_RECVHOPLIMIT:
2955 case IPV6_2292HOPLIMIT:
2958 if (optlen < sizeof(uint32_t)) {
2959 return -TARGET_EINVAL;
2961 if (get_user_u32(val, optval_addr)) {
2962 return -TARGET_EFAULT;
2964 ret = get_errno(setsockopt(sockfd, level, optname,
2965 &val, sizeof(val)));
2969 struct in6_pktinfo pki;
2971 if (optlen < sizeof(pki)) {
2972 return -TARGET_EINVAL;
2975 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2976 return -TARGET_EFAULT;
2979 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2981 ret = get_errno(setsockopt(sockfd, level, optname,
2982 &pki, sizeof(pki)));
2993 struct icmp6_filter icmp6f;
2995 if (optlen > sizeof(icmp6f)) {
2996 optlen = sizeof(icmp6f);
2999 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
3000 return -TARGET_EFAULT;
3003 for (val = 0; val < 8; val++) {
3004 icmp6f.data[val] = tswap32(icmp6f.data[val]);
3007 ret = get_errno(setsockopt(sockfd, level, optname,
3019 /* these options take a u32 value */
3020 if (optlen < sizeof(uint32_t)) {
3021 return -TARGET_EINVAL;
3024 if (get_user_u32(val, optval_addr)) {
3025 return -TARGET_EFAULT;
3027 ret = get_errno(setsockopt(sockfd, level, optname,
3028 &val, sizeof(val)));
3035 case TARGET_SOL_SOCKET:
3037 case TARGET_SO_RCVTIMEO:
3041 optname = SO_RCVTIMEO;
3044 if (optlen != sizeof(struct target_timeval)) {
3045 return -TARGET_EINVAL;
3048 if (copy_from_user_timeval(&tv, optval_addr)) {
3049 return -TARGET_EFAULT;
3052 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3056 case TARGET_SO_SNDTIMEO:
3057 optname = SO_SNDTIMEO;
3059 case TARGET_SO_ATTACH_FILTER:
3061 struct target_sock_fprog *tfprog;
3062 struct target_sock_filter *tfilter;
3063 struct sock_fprog fprog;
3064 struct sock_filter *filter;
3067 if (optlen != sizeof(*tfprog)) {
3068 return -TARGET_EINVAL;
3070 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
3071 return -TARGET_EFAULT;
3073 if (!lock_user_struct(VERIFY_READ, tfilter,
3074 tswapal(tfprog->filter), 0)) {
3075 unlock_user_struct(tfprog, optval_addr, 1);
3076 return -TARGET_EFAULT;
3079 fprog.len = tswap16(tfprog->len);
3080 filter = g_try_new(struct sock_filter, fprog.len);
3081 if (filter == NULL) {
3082 unlock_user_struct(tfilter, tfprog->filter, 1);
3083 unlock_user_struct(tfprog, optval_addr, 1);
3084 return -TARGET_ENOMEM;
3086 for (i = 0; i < fprog.len; i++) {
3087 filter[i].code = tswap16(tfilter[i].code);
3088 filter[i].jt = tfilter[i].jt;
3089 filter[i].jf = tfilter[i].jf;
3090 filter[i].k = tswap32(tfilter[i].k);
3092 fprog.filter = filter;
3094 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
3095 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
3098 unlock_user_struct(tfilter, tfprog->filter, 1);
3099 unlock_user_struct(tfprog, optval_addr, 1);
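/* Note on the conversion above: struct sock_filter packs its fields as
 * u16 code, u8 jt, u8 jf, u32 k, so only the 16- and 32-bit members
 * need byte-swapping; the single-byte jump offsets copy across as-is. */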
3102 case TARGET_SO_BINDTODEVICE:
3104 char *dev_ifname, *addr_ifname;
3106 if (optlen > IFNAMSIZ - 1) {
3107 optlen = IFNAMSIZ - 1;
3109 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3111 return -TARGET_EFAULT;
3113 optname = SO_BINDTODEVICE;
3114 addr_ifname = alloca(IFNAMSIZ);
3115 memcpy(addr_ifname, dev_ifname, optlen);
3116 addr_ifname[optlen] = 0;
3117 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3118 addr_ifname, optlen));
3119 unlock_user (dev_ifname, optval_addr, 0);
3122 /* Options with 'int' argument. */
3123 case TARGET_SO_DEBUG:
3126 case TARGET_SO_REUSEADDR:
3127 optname = SO_REUSEADDR;
3129 case TARGET_SO_TYPE:
3132 case TARGET_SO_ERROR:
3135 case TARGET_SO_DONTROUTE:
3136 optname = SO_DONTROUTE;
3138 case TARGET_SO_BROADCAST:
3139 optname = SO_BROADCAST;
3141 case TARGET_SO_SNDBUF:
3142 optname = SO_SNDBUF;
3144 case TARGET_SO_SNDBUFFORCE:
3145 optname = SO_SNDBUFFORCE;
3147 case TARGET_SO_RCVBUF:
3148 optname = SO_RCVBUF;
3150 case TARGET_SO_RCVBUFFORCE:
3151 optname = SO_RCVBUFFORCE;
3153 case TARGET_SO_KEEPALIVE:
3154 optname = SO_KEEPALIVE;
3156 case TARGET_SO_OOBINLINE:
3157 optname = SO_OOBINLINE;
3159 case TARGET_SO_NO_CHECK:
3160 optname = SO_NO_CHECK;
3162 case TARGET_SO_PRIORITY:
3163 optname = SO_PRIORITY;
3166 case TARGET_SO_BSDCOMPAT:
3167 optname = SO_BSDCOMPAT;
3170 case TARGET_SO_PASSCRED:
3171 optname = SO_PASSCRED;
3173 case TARGET_SO_PASSSEC:
3174 optname = SO_PASSSEC;
3176 case TARGET_SO_TIMESTAMP:
3177 optname = SO_TIMESTAMP;
3179 case TARGET_SO_RCVLOWAT:
3180 optname = SO_RCVLOWAT;
3185 if (optlen < sizeof(uint32_t))
3186 return -TARGET_EINVAL;
3188 if (get_user_u32(val, optval_addr))
3189 return -TARGET_EFAULT;
3190 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3194 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3195 ret = -TARGET_ENOPROTOOPT;
3200 /* do_getsockopt() must return target values and target errnos. */
3201 static abi_long do_getsockopt(int sockfd, int level, int optname,
3202 abi_ulong optval_addr, abi_ulong optlen)
3209 case TARGET_SOL_SOCKET:
3212 /* These don't just return a single integer */
3213 case TARGET_SO_LINGER:
3214 case TARGET_SO_RCVTIMEO:
3215 case TARGET_SO_SNDTIMEO:
3216 case TARGET_SO_PEERNAME:
3218 case TARGET_SO_PEERCRED: {
3221 struct target_ucred *tcr;
3223 if (get_user_u32(len, optlen)) {
3224 return -TARGET_EFAULT;
3227 return -TARGET_EINVAL;
3231 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
3239 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
3240 return -TARGET_EFAULT;
3242 __put_user(cr.pid, &tcr->pid);
3243 __put_user(cr.uid, &tcr->uid);
3244 __put_user(cr.gid, &tcr->gid);
3245 unlock_user_struct(tcr, optval_addr, 1);
3246 if (put_user_u32(len, optlen)) {
3247 return -TARGET_EFAULT;
3251 /* Options with 'int' argument. */
3252 case TARGET_SO_DEBUG:
3255 case TARGET_SO_REUSEADDR:
3256 optname = SO_REUSEADDR;
3258 case TARGET_SO_TYPE:
3261 case TARGET_SO_ERROR:
3264 case TARGET_SO_DONTROUTE:
3265 optname = SO_DONTROUTE;
3267 case TARGET_SO_BROADCAST:
3268 optname = SO_BROADCAST;
3270 case TARGET_SO_SNDBUF:
3271 optname = SO_SNDBUF;
3273 case TARGET_SO_RCVBUF:
3274 optname = SO_RCVBUF;
3276 case TARGET_SO_KEEPALIVE:
3277 optname = SO_KEEPALIVE;
3279 case TARGET_SO_OOBINLINE:
3280 optname = SO_OOBINLINE;
3282 case TARGET_SO_NO_CHECK:
3283 optname = SO_NO_CHECK;
3285 case TARGET_SO_PRIORITY:
3286 optname = SO_PRIORITY;
3289 case TARGET_SO_BSDCOMPAT:
3290 optname = SO_BSDCOMPAT;
3293 case TARGET_SO_PASSCRED:
3294 optname = SO_PASSCRED;
3296 case TARGET_SO_TIMESTAMP:
3297 optname = SO_TIMESTAMP;
3299 case TARGET_SO_RCVLOWAT:
3300 optname = SO_RCVLOWAT;
3302 case TARGET_SO_ACCEPTCONN:
3303 optname = SO_ACCEPTCONN;
3310 /* TCP options all take an 'int' value. */
3312 if (get_user_u32(len, optlen))
3313 return -TARGET_EFAULT;
3315 return -TARGET_EINVAL;
3317 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3320 if (optname == SO_TYPE) {
3321 val = host_to_target_sock_type(val);
3326 if (put_user_u32(val, optval_addr))
3327 return -TARGET_EFAULT;
3329 if (put_user_u8(val, optval_addr))
3330 return -TARGET_EFAULT;
3332 if (put_user_u32(len, optlen))
3333 return -TARGET_EFAULT;
3340 case IP_ROUTER_ALERT:
3344 case IP_MTU_DISCOVER:
3350 case IP_MULTICAST_TTL:
3351 case IP_MULTICAST_LOOP:
3352 if (get_user_u32(len, optlen))
3353 return -TARGET_EFAULT;
3355 return -TARGET_EINVAL;
3357 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3360 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3362 if (put_user_u32(len, optlen)
3363 || put_user_u8(val, optval_addr))
3364 return -TARGET_EFAULT;
3366 if (len > sizeof(int))
3368 if (put_user_u32(len, optlen)
3369 || put_user_u32(val, optval_addr))
3370 return -TARGET_EFAULT;
3374 ret = -TARGET_ENOPROTOOPT;
3380 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3382 ret = -TARGET_EOPNOTSUPP;
3388 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3389 abi_ulong count, int copy)
3391 struct target_iovec *target_vec;
3393 abi_ulong total_len, max_len;
3396 bool bad_address = false;
3402 if (count > IOV_MAX) {
3407 vec = g_try_new0(struct iovec, count);
3413 target_vec = lock_user(VERIFY_READ, target_addr,
3414 count * sizeof(struct target_iovec), 1);
3415 if (target_vec == NULL) {
3420 /* ??? If host page size > target page size, this will result in a
3421 value larger than what we can actually support. */
3422 max_len = 0x7fffffff & TARGET_PAGE_MASK;
3425 for (i = 0; i < count; i++) {
3426 abi_ulong base = tswapal(target_vec[i].iov_base);
3427 abi_long len = tswapal(target_vec[i].iov_len);
3432 } else if (len == 0) {
3433 /* Zero length pointer is ignored. */
3434 vec[i].iov_base = 0;
3436 vec[i].iov_base = lock_user(type, base, len, copy);
3437 /* If the first buffer pointer is bad, this is a fault. But
3438 * subsequent bad buffers will result in a partial write; this
3439 * is realized by filling the vector with null pointers and zero lengths. */
3441 if (!vec[i].iov_base) {
3452 if (len > max_len - total_len) {
3453 len = max_len - total_len;
3456 vec[i].iov_len = len;
3460 unlock_user(target_vec, target_addr, 0);
3465 if (tswapal(target_vec[i].iov_len) > 0) {
3466 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3469 unlock_user(target_vec, target_addr, 0);
3476 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3477 abi_ulong count, int copy)
3479 struct target_iovec *target_vec;
3482 target_vec = lock_user(VERIFY_READ, target_addr,
3483 count * sizeof(struct target_iovec), 1);
3485 for (i = 0; i < count; i++) {
3486 abi_ulong base = tswapal(target_vec[i].iov_base);
3487 abi_long len = tswapal(target_vec[i].iov_len);
3491 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3493 unlock_user(target_vec, target_addr, 0);
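/* unlock_iovec() re-reads the target iovec to recover each buffer's
 * guest address; the copy flag selects whether the locked buffers are
 * copied back to guest memory (buffers the host wrote into) or simply
 * released (buffers the host only read from). */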
3499 static inline int target_to_host_sock_type(int *type)
3502 int target_type = *type;
3504 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3505 case TARGET_SOCK_DGRAM:
3506 host_type = SOCK_DGRAM;
3508 case TARGET_SOCK_STREAM:
3509 host_type = SOCK_STREAM;
3512 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3515 if (target_type & TARGET_SOCK_CLOEXEC) {
3516 #if defined(SOCK_CLOEXEC)
3517 host_type |= SOCK_CLOEXEC;
3519 return -TARGET_EINVAL;
3522 if (target_type & TARGET_SOCK_NONBLOCK) {
3523 #if defined(SOCK_NONBLOCK)
3524 host_type |= SOCK_NONBLOCK;
3525 #elif !defined(O_NONBLOCK)
3526 return -TARGET_EINVAL;
3533 /* Try to emulate socket type flags after socket creation. */
3534 static int sock_flags_fixup(int fd, int target_type)
3536 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3537 if (target_type & TARGET_SOCK_NONBLOCK) {
3538 int flags = fcntl(fd, F_GETFL);
3539 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3541 return -TARGET_EINVAL;
3548 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3549 abi_ulong target_addr,
3552 struct sockaddr *addr = host_addr;
3553 struct target_sockaddr *target_saddr;
3555 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3556 if (!target_saddr) {
3557 return -TARGET_EFAULT;
3560 memcpy(addr, target_saddr, len);
3561 addr->sa_family = tswap16(target_saddr->sa_family);
3562 /* spkt_protocol is big-endian */
3564 unlock_user(target_saddr, target_addr, 0);
3568 static TargetFdTrans target_packet_trans = {
3569 .target_to_host_addr = packet_target_to_host_sockaddr,
3572 #ifdef CONFIG_RTNETLINK
3573 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3577 ret = target_to_host_nlmsg_route(buf, len);
3585 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3589 ret = host_to_target_nlmsg_route(buf, len);
3597 static TargetFdTrans target_netlink_route_trans = {
3598 .target_to_host_data = netlink_route_target_to_host,
3599 .host_to_target_data = netlink_route_host_to_target,
3601 #endif /* CONFIG_RTNETLINK */
3603 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3607 ret = target_to_host_nlmsg_audit(buf, len);
3615 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3619 ret = host_to_target_nlmsg_audit(buf, len);
3627 static TargetFdTrans target_netlink_audit_trans = {
3628 .target_to_host_data = netlink_audit_target_to_host,
3629 .host_to_target_data = netlink_audit_host_to_target,
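/*
 * The TargetFdTrans objects above are per-descriptor translation hooks:
 * when do_socket() below creates a socket whose wire format is endian-
 * or layout-sensitive (netlink route/audit, packet sockets), it
 * registers the matching translator, and the generic send/receive paths
 * then call target_to_host_data/host_to_target_data on the payload
 * without needing to know which protocol the descriptor speaks.
 */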
3632 /* do_socket() must return target values and target errnos. */
3633 static abi_long do_socket(int domain, int type, int protocol)
3635 int target_type = type;
3638 ret = target_to_host_sock_type(&type);
3643 if (domain == PF_NETLINK && !(
3644 #ifdef CONFIG_RTNETLINK
3645 protocol == NETLINK_ROUTE ||
3647 protocol == NETLINK_KOBJECT_UEVENT ||
3648 protocol == NETLINK_AUDIT)) {
3649 return -EPFNOSUPPORT;
3652 if (domain == AF_PACKET ||
3653 (domain == AF_INET && type == SOCK_PACKET)) {
3654 protocol = tswap16(protocol);
3657 ret = get_errno(socket(domain, type, protocol));
3659 ret = sock_flags_fixup(ret, target_type);
3660 if (type == SOCK_PACKET) {
3661 /* Handle an obsolete case:
3662 * if socket type is SOCK_PACKET, bind by name
3664 fd_trans_register(ret, &target_packet_trans);
3665 } else if (domain == PF_NETLINK) {
3667 #ifdef CONFIG_RTNETLINK
3669 fd_trans_register(ret, &target_netlink_route_trans);
3672 case NETLINK_KOBJECT_UEVENT:
3673 /* nothing to do: messages are strings */
3676 fd_trans_register(ret, &target_netlink_audit_trans);
3679 g_assert_not_reached();
3687 /* do_bind() must return target values and target errnos. */
3687 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3693 if ((int)addrlen < 0) {
3694 return -TARGET_EINVAL;
3697 addr = alloca(addrlen+1);
3699 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3703 return get_errno(bind(sockfd, addr, addrlen));
3707 /* do_connect() must return target values and target errnos. */
3707 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3713 if ((int)addrlen < 0) {
3714 return -TARGET_EINVAL;
3717 addr = alloca(addrlen+1);
3719 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3723 return get_errno(safe_connect(sockfd, addr, addrlen));
3727 /* do_sendrecvmsg_locked() must return target values and target errnos. */
3727 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3728 int flags, int send)
3734 abi_ulong target_vec;
3736 if (msgp->msg_name) {
3737 msg.msg_namelen = tswap32(msgp->msg_namelen);
3738 msg.msg_name = alloca(msg.msg_namelen+1);
3739 ret = target_to_host_sockaddr(fd, msg.msg_name,
3740 tswapal(msgp->msg_name),
3742 if (ret == -TARGET_EFAULT) {
3743 /* For connected sockets msg_name and msg_namelen must
3744 * be ignored, so returning EFAULT immediately is wrong.
3745 * Instead, pass a bad msg_name to the host kernel, and
3746 * let it decide whether to return EFAULT or not.
3748 msg.msg_name = (void *)-1;
3753 msg.msg_name = NULL;
3754 msg.msg_namelen = 0;
3756 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3757 msg.msg_control = alloca(msg.msg_controllen);
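/* The host control buffer is sized at twice the target's
 * msg_controllen, presumably to leave headroom for control messages
 * whose host encoding is larger than the target one; the cmsg
 * conversion routines cope with length mismatches in both directions. */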
3758 msg.msg_flags = tswap32(msgp->msg_flags);
3760 count = tswapal(msgp->msg_iovlen);
3761 target_vec = tswapal(msgp->msg_iov);
3763 if (count > IOV_MAX) {
3764 /* sendmsg/recvmsg return a different errno for this condition than
3765 * readv/writev, so we must catch it here before lock_iovec() does.
3767 ret = -TARGET_EMSGSIZE;
3771 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3772 target_vec, count, send);
3774 ret = -host_to_target_errno(errno);
3777 msg.msg_iovlen = count;
3781 if (fd_trans_target_to_host_data(fd)) {
3784 host_msg = g_malloc(msg.msg_iov->iov_len);
3785 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3786 ret = fd_trans_target_to_host_data(fd)(host_msg,
3787 msg.msg_iov->iov_len);
3789 msg.msg_iov->iov_base = host_msg;
3790 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3794 ret = target_to_host_cmsg(&msg, msgp);
3796 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3800 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3801 if (!is_error(ret)) {
3803 if (fd_trans_host_to_target_data(fd)) {
3804 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3807 ret = host_to_target_cmsg(msgp, &msg);
3809 if (!is_error(ret)) {
3810 msgp->msg_namelen = tswap32(msg.msg_namelen);
3811 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3812 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3813 msg.msg_name, msg.msg_namelen);
3825 unlock_iovec(vec, target_vec, count, !send);
3830 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3831 int flags, int send)
3834 struct target_msghdr *msgp;
3836 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3840 return -TARGET_EFAULT;
3842 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3843 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3847 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3848 * so it might not have this *mmsg-specific flag either.
3850 #ifndef MSG_WAITFORONE
3851 #define MSG_WAITFORONE 0x10000
3854 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3855 unsigned int vlen, unsigned int flags,
3858 struct target_mmsghdr *mmsgp;
3862 if (vlen > UIO_MAXIOV) {
3866 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3868 return -TARGET_EFAULT;
3871 for (i = 0; i < vlen; i++) {
3872 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3873 if (is_error(ret)) {
3876 mmsgp[i].msg_len = tswap32(ret);
3877 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3878 if (flags & MSG_WAITFORONE) {
3879 flags |= MSG_DONTWAIT;
3883 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3885 /* Return number of datagrams sent if we sent any at all;
3886 * otherwise return the error.
3894 /* do_accept4() must return target values and target errnos. */
3895 static abi_long do_accept4(int fd, abi_ulong target_addr,
3896 abi_ulong target_addrlen_addr, int flags)
3903 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3905 if (target_addr == 0) {
3906 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3909 /* Linux returns EINVAL if the addrlen pointer is invalid */
3910 if (get_user_u32(addrlen, target_addrlen_addr))
3911 return -TARGET_EINVAL;
3913 if ((int)addrlen < 0) {
3914 return -TARGET_EINVAL;
3917 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3918 return -TARGET_EINVAL;
3920 addr = alloca(addrlen);
3922 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3923 if (!is_error(ret)) {
3924 host_to_target_sockaddr(target_addr, addr, addrlen);
3925 if (put_user_u32(addrlen, target_addrlen_addr))
3926 ret = -TARGET_EFAULT;
3931 /* do_getpeername() must return target values and target errnos. */
3932 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3933 abi_ulong target_addrlen_addr)
3939 if (get_user_u32(addrlen, target_addrlen_addr))
3940 return -TARGET_EFAULT;
3942 if ((int)addrlen < 0) {
3943 return -TARGET_EINVAL;
3946 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3947 return -TARGET_EFAULT;
3949 addr = alloca(addrlen);
3951 ret = get_errno(getpeername(fd, addr, &addrlen));
3952 if (!is_error(ret)) {
3953 host_to_target_sockaddr(target_addr, addr, addrlen);
3954 if (put_user_u32(addrlen, target_addrlen_addr))
3955 ret = -TARGET_EFAULT;
3960 /* do_getsockname() must return target values and target errnos. */
3961 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3962 abi_ulong target_addrlen_addr)
3968 if (get_user_u32(addrlen, target_addrlen_addr))
3969 return -TARGET_EFAULT;
3971 if ((int)addrlen < 0) {
3972 return -TARGET_EINVAL;
3975 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3976 return -TARGET_EFAULT;
3978 addr = alloca(addrlen);
3980 ret = get_errno(getsockname(fd, addr, &addrlen));
3981 if (!is_error(ret)) {
3982 host_to_target_sockaddr(target_addr, addr, addrlen);
3983 if (put_user_u32(addrlen, target_addrlen_addr))
3984 ret = -TARGET_EFAULT;
3989 /* do_socketpair() must return target values and target errnos. */
3990 static abi_long do_socketpair(int domain, int type, int protocol,
3991 abi_ulong target_tab_addr)
3996 target_to_host_sock_type(&type);
3998 ret = get_errno(socketpair(domain, type, protocol, tab));
3999 if (!is_error(ret)) {
4000 if (put_user_s32(tab[0], target_tab_addr)
4001 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
4002 ret = -TARGET_EFAULT;
4007 /* do_sendto() must return target values and target errnos. */
4008 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
4009 abi_ulong target_addr, socklen_t addrlen)
4013 void *copy_msg = NULL;
4016 if ((int)addrlen < 0) {
4017 return -TARGET_EINVAL;
4020 host_msg = lock_user(VERIFY_READ, msg, len, 1);
4022 return -TARGET_EFAULT;
4023 if (fd_trans_target_to_host_data(fd)) {
4024 copy_msg = host_msg;
4025 host_msg = g_malloc(len);
4026 memcpy(host_msg, copy_msg, len);
4027 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
4033 addr = alloca(addrlen+1);
4034 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
4038 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
4040 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
4045 host_msg = copy_msg;
4047 unlock_user(host_msg, msg, 0);
4051 /* do_recvfrom() must return target values and target errnos. */
4052 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
4053 abi_ulong target_addr,
4054 abi_ulong target_addrlen)
4061 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
4063 return -TARGET_EFAULT;
4065 if (get_user_u32(addrlen, target_addrlen)) {
4066 ret = -TARGET_EFAULT;
4069 if ((int)addrlen < 0) {
4070 ret = -TARGET_EINVAL;
4073 addr = alloca(addrlen);
4074 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
4077 addr = NULL; /* To keep compiler quiet. */
4078 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
4080 if (!is_error(ret)) {
4081 if (fd_trans_host_to_target_data(fd)) {
4082 ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
4085 host_to_target_sockaddr(target_addr, addr, addrlen);
4086 if (put_user_u32(addrlen, target_addrlen)) {
4087 ret = -TARGET_EFAULT;
4091 unlock_user(host_msg, msg, len);
4094 unlock_user(host_msg, msg, 0);
4099 #ifdef TARGET_NR_socketcall
4100 /* do_socketcall() must return target values and target errnos. */
4101 static abi_long do_socketcall(int num, abi_ulong vptr)
4103 static const unsigned nargs[] = { /* number of arguments per operation */
4104 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
4105 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
4106 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
4107 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
4108 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
4109 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
4110 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
4111 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
4112 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
4113 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
4114 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
4115 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
4116 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
4117 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
4118 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
4119 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
4120 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
4121 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
4122 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
4123 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
4125 abi_long a[6]; /* max 6 args */
4128 /* check the range of the first argument num */
4129 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
4130 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
4131 return -TARGET_EINVAL;
4133 /* ensure we have space for args */
4134 if (nargs[num] > ARRAY_SIZE(a)) {
4135 return -TARGET_EINVAL;
4137 /* collect the arguments in a[] according to nargs[] */
4138 for (i = 0; i < nargs[num]; ++i) {
4139 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
4140 return -TARGET_EFAULT;
4143 /* now that we have the args, invoke the appropriate underlying function */
4145 case TARGET_SYS_SOCKET: /* domain, type, protocol */
4146 return do_socket(a[0], a[1], a[2]);
4147 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
4148 return do_bind(a[0], a[1], a[2]);
4149 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
4150 return do_connect(a[0], a[1], a[2]);
4151 case TARGET_SYS_LISTEN: /* sockfd, backlog */
4152 return get_errno(listen(a[0], a[1]));
4153 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
4154 return do_accept4(a[0], a[1], a[2], 0);
4155 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
4156 return do_getsockname(a[0], a[1], a[2]);
4157 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
4158 return do_getpeername(a[0], a[1], a[2]);
4159 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
4160 return do_socketpair(a[0], a[1], a[2], a[3]);
4161 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
4162 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
4163 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
4164 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
4165 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
4166 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
4167 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
4168 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
4169 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
4170 return get_errno(shutdown(a[0], a[1]));
4171 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4172 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
4173 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4174 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
4175 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
4176 return do_sendrecvmsg(a[0], a[1], a[2], 1);
4177 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
4178 return do_sendrecvmsg(a[0], a[1], a[2], 0);
4179 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
4180 return do_accept4(a[0], a[1], a[2], a[3]);
4181 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
4182 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
4183 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
4184 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
4186 gemu_log("Unsupported socketcall: %d\n", num);
4187 return -TARGET_EINVAL;
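/*
 * Worked example (illustrative): a guest connect() arriving via
 * socketcall has num == TARGET_SYS_CONNECT and vptr pointing at three
 * abi_longs in guest memory; nargs[] says to fetch three words, the
 * get_user_ual() loop loads { sockfd, addr, addrlen } into a[], and
 * the call is forwarded as do_connect(a[0], a[1], a[2]).
 */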
4192 #define N_SHM_REGIONS 32
4194 static struct shm_region {
4198 } shm_regions[N_SHM_REGIONS];
4200 #ifndef TARGET_SEMID64_DS
4201 /* asm-generic version of this struct */
4202 struct target_semid64_ds
4204 struct target_ipc_perm sem_perm;
4205 abi_ulong sem_otime;
4206 #if TARGET_ABI_BITS == 32
4207 abi_ulong __unused1;
4209 abi_ulong sem_ctime;
4210 #if TARGET_ABI_BITS == 32
4211 abi_ulong __unused2;
4213 abi_ulong sem_nsems;
4214 abi_ulong __unused3;
4215 abi_ulong __unused4;
4219 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4220 abi_ulong target_addr)
4222 struct target_ipc_perm *target_ip;
4223 struct target_semid64_ds *target_sd;
4225 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4226 return -TARGET_EFAULT;
4227 target_ip = &(target_sd->sem_perm);
4228 host_ip->__key = tswap32(target_ip->__key);
4229 host_ip->uid = tswap32(target_ip->uid);
4230 host_ip->gid = tswap32(target_ip->gid);
4231 host_ip->cuid = tswap32(target_ip->cuid);
4232 host_ip->cgid = tswap32(target_ip->cgid);
4233 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4234 host_ip->mode = tswap32(target_ip->mode);
4236 host_ip->mode = tswap16(target_ip->mode);
4238 #if defined(TARGET_PPC)
4239 host_ip->__seq = tswap32(target_ip->__seq);
4241 host_ip->__seq = tswap16(target_ip->__seq);
4243 unlock_user_struct(target_sd, target_addr, 0);
4247 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4248 struct ipc_perm *host_ip)
4250 struct target_ipc_perm *target_ip;
4251 struct target_semid64_ds *target_sd;
4253 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4254 return -TARGET_EFAULT;
4255 target_ip = &(target_sd->sem_perm);
4256 target_ip->__key = tswap32(host_ip->__key);
4257 target_ip->uid = tswap32(host_ip->uid);
4258 target_ip->gid = tswap32(host_ip->gid);
4259 target_ip->cuid = tswap32(host_ip->cuid);
4260 target_ip->cgid = tswap32(host_ip->cgid);
4261 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4262 target_ip->mode = tswap32(host_ip->mode);
4264 target_ip->mode = tswap16(host_ip->mode);
4266 #if defined(TARGET_PPC)
4267 target_ip->__seq = tswap32(host_ip->__seq);
4269 target_ip->__seq = tswap16(host_ip->__seq);
4271 unlock_user_struct(target_sd, target_addr, 1);
4275 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4276 abi_ulong target_addr)
4278 struct target_semid64_ds *target_sd;
4280 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4281 return -TARGET_EFAULT;
4282 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4283 return -TARGET_EFAULT;
4284 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4285 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4286 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4287 unlock_user_struct(target_sd, target_addr, 0);
4291 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4292 struct semid_ds *host_sd)
4294 struct target_semid64_ds *target_sd;
4296 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4297 return -TARGET_EFAULT;
4298 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4299 return -TARGET_EFAULT;
4300 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4301 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4302 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4303 unlock_user_struct(target_sd, target_addr, 1);
4307 struct target_seminfo {
4320 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4321 struct seminfo *host_seminfo)
4323 struct target_seminfo *target_seminfo;
4324 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4325 return -TARGET_EFAULT;
4326 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4327 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4328 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4329 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4330 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4331 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4332 __put_user(host_seminfo->semume, &target_seminfo->semume);
4333 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4334 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4335 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4336 unlock_user_struct(target_seminfo, target_addr, 1);
4342 struct semid_ds *buf;
4343 unsigned short *array;
4344 struct seminfo *__buf;
4347 union target_semun {
4354 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4355 abi_ulong target_addr)
4358 unsigned short *array;
4360 struct semid_ds semid_ds;
4363 semun.buf = &semid_ds;
4365 ret = semctl(semid, 0, IPC_STAT, semun);
4367 return get_errno(ret);
4369 nsems = semid_ds.sem_nsems;
4371 *host_array = g_try_new(unsigned short, nsems);
4373 return -TARGET_ENOMEM;
4375 array = lock_user(VERIFY_READ, target_addr,
4376 nsems*sizeof(unsigned short), 1);
4378 g_free(*host_array);
4379 return -TARGET_EFAULT;
4382 for (i = 0; i < nsems; i++) {
4383 __get_user((*host_array)[i], &array[i]);
4385 unlock_user(array, target_addr, 0);
4390 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4391 unsigned short **host_array)
4394 unsigned short *array;
4396 struct semid_ds semid_ds;
4399 semun.buf = &semid_ds;
4401 ret = semctl(semid, 0, IPC_STAT, semun);
4403 return get_errno(ret);
4405 nsems = semid_ds.sem_nsems;
4407 array = lock_user(VERIFY_WRITE, target_addr,
4408 nsems*sizeof(unsigned short), 0);
4410 return -TARGET_EFAULT;
4412 for (i = 0; i < nsems; i++) {
4413 __put_user((*host_array)[i], &array[i]);
4415 g_free(*host_array);
4416 unlock_user(array, target_addr, 1);
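/*
 * GETALL/SETALL round trip: both helpers above first issue a host
 * IPC_STAT to learn sem_nsems, since the semun argument carries no
 * length of its own.  target_to_host_semarray() allocates the host
 * array and fills it from guest memory; host_to_target_semarray()
 * copies the values back out and frees that same allocation.
 */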
4421 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4422 abi_ulong target_arg)
4424 union target_semun target_su = { .buf = target_arg };
4426 struct semid_ds dsarg;
4427 unsigned short *array = NULL;
4428 struct seminfo seminfo;
4429 abi_long ret = -TARGET_EINVAL;
4436 /* In 64 bit cross-endian situations, we will erroneously pick up
4437 * the wrong half of the union for the "val" element. To rectify
4438 * this, the entire 8-byte structure is byteswapped, followed by
4439 * a swap of the 4 byte val field. In other cases, the data is
4440 * already in proper host byte order. */
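/* Illustration: with a 64-bit big-endian guest on a little-endian
 * host, the guest's 32-bit val ends up in the opposite half of the
 * 8-byte union when it is loaded as an abi_ulong; the full-width
 * tswapal() moves it into the half that "val" aliases, and the
 * following tswap32() restores its byte order. */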
4441 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4442 target_su.buf = tswapal(target_su.buf);
4443 arg.val = tswap32(target_su.val);
4445 arg.val = target_su.val;
4447 ret = get_errno(semctl(semid, semnum, cmd, arg));
4451 err = target_to_host_semarray(semid, &array, target_su.array);
4455 ret = get_errno(semctl(semid, semnum, cmd, arg));
4456 err = host_to_target_semarray(semid, target_su.array, &array);
4463 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4467 ret = get_errno(semctl(semid, semnum, cmd, arg));
4468 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4474 arg.__buf = &seminfo;
4475 ret = get_errno(semctl(semid, semnum, cmd, arg));
4476 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4484 ret = get_errno(semctl(semid, semnum, cmd, NULL));
4491 struct target_sembuf {
4492 unsigned short sem_num;
4497 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4498 abi_ulong target_addr,
4501 struct target_sembuf *target_sembuf;
4504 target_sembuf = lock_user(VERIFY_READ, target_addr,
4505 nsops*sizeof(struct target_sembuf), 1);
4507 return -TARGET_EFAULT;
4509 for (i = 0; i < nsops; i++) {
4510 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4511 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4512 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4515 unlock_user(target_sembuf, target_addr, 0);
4520 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4522 struct sembuf sops[nsops];
4524 if (target_to_host_sembuf(sops, ptr, nsops))
4525 return -TARGET_EFAULT;
4527 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
4530 struct target_msqid_ds
4532 struct target_ipc_perm msg_perm;
4533 abi_ulong msg_stime;
4534 #if TARGET_ABI_BITS == 32
4535 abi_ulong __unused1;
4537 abi_ulong msg_rtime;
4538 #if TARGET_ABI_BITS == 32
4539 abi_ulong __unused2;
4541 abi_ulong msg_ctime;
4542 #if TARGET_ABI_BITS == 32
4543 abi_ulong __unused3;
4545 abi_ulong __msg_cbytes;
4547 abi_ulong msg_qbytes;
4548 abi_ulong msg_lspid;
4549 abi_ulong msg_lrpid;
4550 abi_ulong __unused4;
4551 abi_ulong __unused5;
4554 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4555 abi_ulong target_addr)
4557 struct target_msqid_ds *target_md;
4559 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4560 return -TARGET_EFAULT;
4561 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4562 return -TARGET_EFAULT;
4563 host_md->msg_stime = tswapal(target_md->msg_stime);
4564 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4565 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4566 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4567 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4568 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4569 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4570 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4571 unlock_user_struct(target_md, target_addr, 0);
4575 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4576 struct msqid_ds *host_md)
4578 struct target_msqid_ds *target_md;
4580 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4581 return -TARGET_EFAULT;
4582 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4583 return -TARGET_EFAULT;
4584 target_md->msg_stime = tswapal(host_md->msg_stime);
4585 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4586 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4587 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4588 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4589 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4590 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4591 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4592 unlock_user_struct(target_md, target_addr, 1);
4596 struct target_msginfo {
4604 unsigned short int msgseg;
4607 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4608 struct msginfo *host_msginfo)
4610 struct target_msginfo *target_msginfo;
4611 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4612 return -TARGET_EFAULT;
4613 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4614 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4615 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4616 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4617 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4618 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4619 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4620 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4621     unlock_user_struct(target_msginfo, target_addr, 1);
4622
4623     return 0;
4624 }
4625 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4626 {
4627 struct msqid_ds dsarg;
4628 struct msginfo msginfo;
4629     abi_long ret = -TARGET_EINVAL;
4630
4631     cmd &= 0xff;
4632
4633     switch (cmd) {
4634     case IPC_STAT:
4635     case IPC_SET:
4636     case MSG_STAT:
4637         if (target_to_host_msqid_ds(&dsarg, ptr))
4638             return -TARGET_EFAULT;
4639         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4640         if (host_to_target_msqid_ds(ptr, &dsarg))
4641             return -TARGET_EFAULT;
4642         break;
4643     case IPC_RMID:
4644         ret = get_errno(msgctl(msgid, cmd, NULL));
4645         break;
4646     case IPC_INFO:
4647     case MSG_INFO:
4648         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4649         if (host_to_target_msginfo(ptr, &msginfo))
4650             return -TARGET_EFAULT;
4651         break;
4652     }
4653
4654     return ret;
4655 }
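/* Sketch of the guest-side view (assumption: the guest libc maps
 * msgctl(2) straight through to this handler):
 *
 *     struct msqid_ds ds;
 *     msgctl(id, IPC_STAT, &ds);    -> target_to_host/host_to_target
 *                                      msqid_ds round trip above
 *     msgctl(id, IPC_RMID, NULL);   -> passed through with a NULL buf
 */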
4657 struct target_msgbuf {
4658     abi_long mtype;
4659     char     mtext[1];
4660 };
4662 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4663                                  ssize_t msgsz, int msgflg)
4664 {
4665     struct target_msgbuf *target_mb;
4666     struct msgbuf *host_mb;
4667     abi_long ret = 0;
4668
4669     if (msgsz < 0) {
4670         return -TARGET_EINVAL;
4671     }
4672
4673     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4674         return -TARGET_EFAULT;
4675     host_mb = g_try_malloc(msgsz + sizeof(long));
4676     if (!host_mb) {
4677         unlock_user_struct(target_mb, msgp, 0);
4678         return -TARGET_ENOMEM;
4679     }
4680 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4681 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4682 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4683     g_free(host_mb);
4684     unlock_user_struct(target_mb, msgp, 0);
4685
4686     return ret;
4687 }
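/* Layout note: struct msgbuf is a long mtype followed by the payload,
 * so msgsz counts only the mtext bytes.  A guest call such as
 *
 *     struct { long mtype; char mtext[5]; } m = { 1, "ping" };
 *     msgsnd(id, &m, 5, 0);
 *
 * therefore needs msgsz + sizeof(long) bytes of host buffer, which is
 * exactly what the g_try_malloc() above allocates.
 */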
4689 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4690                                  ssize_t msgsz, abi_long msgtyp,
4691                                  int msgflg)
4692 {
4693     struct target_msgbuf *target_mb;
4695 struct msgbuf *host_mb;
4698     if (msgsz < 0) {
4699         return -TARGET_EINVAL;
4700     }
4702 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4703 return -TARGET_EFAULT;
4705 host_mb = g_try_malloc(msgsz + sizeof(long));
4706     if (!host_mb) {
4707         ret = -TARGET_ENOMEM;
4708         goto end;
4709     }
4710 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4712     if (ret > 0) {
4713         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4714 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4715 if (!target_mtext) {
4716             ret = -TARGET_EFAULT;
4717             goto end;
4718         }
4719 memcpy(target_mb->mtext, host_mb->mtext, ret);
4720         unlock_user(target_mtext, target_mtext_addr, ret);
4721     }
4722
4723 target_mb->mtype = tswapal(host_mb->mtype);
4725 end:
4726     if (target_mb)
4727         unlock_user_struct(target_mb, msgp, 1);
4728     g_free(host_mb);
4729
4730     return ret;
4731 }
4732 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4733                                                abi_ulong target_addr)
4734 {
4735 struct target_shmid_ds *target_sd;
4737 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4738 return -TARGET_EFAULT;
4739 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4740 return -TARGET_EFAULT;
4741 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4742 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4743 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4744 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4745 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4746 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4747 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4748     unlock_user_struct(target_sd, target_addr, 0);
4749
4750     return 0;
4751 }
4752 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4753                                                struct shmid_ds *host_sd)
4754 {
4755 struct target_shmid_ds *target_sd;
4757 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4758 return -TARGET_EFAULT;
4759 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4760 return -TARGET_EFAULT;
4761 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4762 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4763 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4764 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4765 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4766 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4767 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4768     unlock_user_struct(target_sd, target_addr, 1);
4769
4770     return 0;
4771 }
4772 struct target_shminfo {
4773     abi_ulong shmmax;
4774     abi_ulong shmmin;
4775     abi_ulong shmmni;
4776     abi_ulong shmseg;
4777     abi_ulong shmall;
4778 };
4780 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4781                                                struct shminfo *host_shminfo)
4782 {
4783 struct target_shminfo *target_shminfo;
4784 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4785 return -TARGET_EFAULT;
4786 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4787 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4788 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4789 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4790 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4791     unlock_user_struct(target_shminfo, target_addr, 1);
4792
4793     return 0;
4794 }
4795 struct target_shm_info {
4796     int used_ids;
4797     abi_ulong shm_tot;
4798     abi_ulong shm_rss;
4799     abi_ulong shm_swp;
4800 abi_ulong swap_attempts;
4801     abi_ulong swap_successes;
4802 };
4804 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4805                                                 struct shm_info *host_shm_info)
4806 {
4807 struct target_shm_info *target_shm_info;
4808 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4809 return -TARGET_EFAULT;
4810 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4811 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4812 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4813 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4814 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4815 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4816     unlock_user_struct(target_shm_info, target_addr, 1);
4817
4818     return 0;
4819 }
4820 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4821 {
4822 struct shmid_ds dsarg;
4823 struct shminfo shminfo;
4824 struct shm_info shm_info;
4825     abi_long ret = -TARGET_EINVAL;
4826
4827     cmd &= 0xff;
4828
4829     switch (cmd) {
4830     case IPC_STAT:
4831     case IPC_SET:
4832     case SHM_STAT:
4833 if (target_to_host_shmid_ds(&dsarg, buf))
4834 return -TARGET_EFAULT;
4835 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4836 if (host_to_target_shmid_ds(buf, &dsarg))
4837             return -TARGET_EFAULT;
4838         break;
4839     case IPC_INFO:
4840 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4841 if (host_to_target_shminfo(buf, &shminfo))
4842             return -TARGET_EFAULT;
4843         break;
4844     case SHM_INFO:
4845 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4846 if (host_to_target_shm_info(buf, &shm_info))
4847             return -TARGET_EFAULT;
4848         break;
4849     case IPC_RMID:
4850     case SHM_LOCK:
4851     case SHM_UNLOCK:
4852         ret = get_errno(shmctl(shmid, cmd, NULL));
4853         break;
4854     }
4855
4856     return ret;
4857 }
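/* Note on the casts above: IPC_INFO and SHM_INFO return struct shminfo
 * and struct shm_info respectively, but the shmctl() prototype takes a
 * struct shmid_ds *, hence the (struct shmid_ds *) casts on the host
 * calls; the kernel interprets the buffer according to cmd.
 */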
4859 #ifndef TARGET_FORCE_SHMLBA
4860 /* For most architectures, SHMLBA is the same as the page size;
4861  * some architectures have larger values, in which case they should
4862  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4863  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4864  * and defining its own value for SHMLBA.
4865  *
4866  * The kernel also permits SHMLBA to be set by the architecture to a
4867  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4868  * this means that addresses are rounded to the large size if
4869  * SHM_RND is set but addresses not aligned to that size are not rejected
4870  * as long as they are at least page-aligned. Since the only architecture
4871  * which uses this is ia64, this code doesn't provide for that oddity.
4872  */
4873 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4875 return TARGET_PAGE_SIZE;
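/* Worked example of the SHM_RND path below (assuming shmlba == 4096):
 * an attach address of 0x12345 is not 4k-aligned, so with SHM_RND it
 * is rounded down via shmaddr &= ~(shmlba - 1) to 0x12000; without
 * SHM_RND the same address is rejected with -TARGET_EINVAL.
 */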
4879 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4880 int shmid, abi_ulong shmaddr, int shmflg)
4884 struct shmid_ds shm_info;
4888 /* find out the length of the shared memory segment */
4889 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4890 if (is_error(ret)) {
4891         /* can't get length, bail out */
4892         return ret;
4893     }
4895 shmlba = target_shmlba(cpu_env);
4897 if (shmaddr & (shmlba - 1)) {
4898 if (shmflg & SHM_RND) {
4899 shmaddr &= ~(shmlba - 1);
4901 return -TARGET_EINVAL;
4906     mmap_lock();
4907     if (shmaddr)
4908         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4909     else {
4910 abi_ulong mmap_start;
4912 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
4914 if (mmap_start == -1) {
4915             errno = ENOMEM;
4916             host_raddr = (void *)-1;
4917         } else
4918 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4919     }
4920
4921     if (host_raddr == (void *)-1) {
4922         mmap_unlock();
4923         return get_errno((long)host_raddr);
4924     }
4925     raddr = h2g((unsigned long)host_raddr);
4927 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4928 PAGE_VALID | PAGE_READ |
4929                    ((shmflg & SHM_RDONLY) ? 0 : PAGE_WRITE));
4931 for (i = 0; i < N_SHM_REGIONS; i++) {
4932 if (!shm_regions[i].in_use) {
4933 shm_regions[i].in_use = true;
4934 shm_regions[i].start = raddr;
4935             shm_regions[i].size = shm_info.shm_segsz;
4936             break;
4937         }
4938     }
4939
4940     mmap_unlock();
4941     return raddr;
4942 }
4945 static inline abi_long do_shmdt(abi_ulong shmaddr)
4946 {
4947     int i;
4948
4949 for (i = 0; i < N_SHM_REGIONS; ++i) {
4950 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4951 shm_regions[i].in_use = false;
4952             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4953             break;
4954         }
4955     }
4956
4957     return get_errno(shmdt(g2h(shmaddr)));
4958 }
4960 #ifdef TARGET_NR_ipc
4961 /* ??? This only works with linear mappings. */
4962 /* do_ipc() must return target values and target errnos. */
4963 static abi_long do_ipc(CPUArchState *cpu_env,
4964 unsigned int call, abi_long first,
4965 abi_long second, abi_long third,
4966 abi_long ptr, abi_long fifth)
4971     version = call >> 16;
4972     call &= 0xffff;
4973
4974     switch (call) {
4975     case IPCOP_semop:
4976 ret = do_semop(first, ptr, second);
4980 ret = get_errno(semget(first, second, third));
4983 case IPCOP_semctl: {
4984         /* The semun argument to semctl is passed by value, so dereference the
4985          * ptr argument. */
4986         abi_ulong atptr;
4987         get_user_ual(atptr, ptr);
4988 ret = do_semctl(first, second, third, atptr);
4993 ret = get_errno(msgget(first, second));
4997 ret = do_msgsnd(first, ptr, second, third);
5001 ret = do_msgctl(first, second, ptr);
5008             struct target_ipc_kludge {
5009                 abi_long msgp;
5010                 abi_long msgtyp;
5011             } *tmp;
5012
5013 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
5014 ret = -TARGET_EFAULT;
5018 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
5020 unlock_user_struct(tmp, ptr, 0);
5024 ret = do_msgrcv(first, ptr, second, fifth, third);
5033 raddr = do_shmat(cpu_env, first, ptr, second);
5034 if (is_error(raddr))
5035 return get_errno(raddr);
5036 if (put_user_ual(raddr, third))
5037 return -TARGET_EFAULT;
5041 ret = -TARGET_EINVAL;
5046 ret = do_shmdt(ptr);
5050         /* IPC_* flag values are the same on all Linux platforms */
5051 ret = get_errno(shmget(first, second, third));
5054         /* IPC_* and SHM_* command values are the same on all Linux platforms */
5056 ret = do_shmctl(first, second, ptr);
5057         break;
5058     default:
5059         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
5060 ret = -TARGET_ENOSYS;
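/* Decoding sketch: the ipc(2) multiplexer packs an interface version
 * into the top 16 bits of 'call' (split out above as 'version').
 * For IPCOP_msgrcv, version-0 callers pass a pointer to a struct
 * target_ipc_kludge holding msgp and msgtyp, while newer callers pass
 * them directly, which is why both forms are handled above.
 */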
5067 /* kernel structure type definitions */
5068
5069 #define STRUCT(name, ...) STRUCT_ ## name,
5070 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5071 enum {
5072 #include "syscall_types.h"
5073 STRUCT_MAX
5074 };
5075 #undef STRUCT
5076 #undef STRUCT_SPECIAL
5078 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
5079 #define STRUCT_SPECIAL(name)
5080 #include "syscall_types.h"
5081 #undef STRUCT
5082 #undef STRUCT_SPECIAL
5084 typedef struct IOCTLEntry IOCTLEntry;
5086 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
5087 int fd, int cmd, abi_long arg);
5089 struct IOCTLEntry {
5090     int target_cmd;
5091     unsigned int host_cmd;
5092     const char *name;
5093     int access;
5094     do_ioctl_fn *do_ioctl;
5095     const argtype arg_type[5];
5096 };
5098 #define IOC_R 0x0001
5099 #define IOC_W 0x0002
5100 #define IOC_RW (IOC_R | IOC_W)
5102 #define MAX_STRUCT_SIZE 4096
5104 #ifdef CONFIG_FIEMAP
5105 /* So fiemap access checks don't overflow on 32-bit systems.
5106 * This is very slightly smaller than the limit imposed by
5107 * the underlying kernel.
5109 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
5110 / sizeof(struct fiemap_extent))
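/* Why this bound avoids overflow: the request size computed below is
 * sizeof(*fm) + fm_extent_count * sizeof(struct fiemap_extent), and
 * with fm_extent_count <= FIEMAP_MAX_EXTENTS that sum stays within
 * UINT_MAX, so the 32-bit size arithmetic in the access checks cannot
 * wrap around.
 */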
5112 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
5113 int fd, int cmd, abi_long arg)
5115 /* The parameter for this ioctl is a struct fiemap followed
5116 * by an array of struct fiemap_extent whose size is set
5117      * in fiemap->fm_extent_count. The array is filled in by the
5118      * kernel.
5119      */
5120 int target_size_in, target_size_out;
5122 const argtype *arg_type = ie->arg_type;
5123 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
5126 int i, extent_size = thunk_type_size(extent_arg_type, 0);
5130 assert(arg_type[0] == TYPE_PTR);
5131 assert(ie->access == IOC_RW);
5133 target_size_in = thunk_type_size(arg_type, 0);
5134 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
5136 return -TARGET_EFAULT;
5138 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5139 unlock_user(argptr, arg, 0);
5140 fm = (struct fiemap *)buf_temp;
5141 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
5142 return -TARGET_EINVAL;
5145 outbufsz = sizeof (*fm) +
5146 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
5148 if (outbufsz > MAX_STRUCT_SIZE) {
5149 /* We can't fit all the extents into the fixed size buffer.
5150 * Allocate one that is large enough and use it instead.
5152 fm = g_try_malloc(outbufsz);
5154 return -TARGET_ENOMEM;
5156 memcpy(fm, buf_temp, sizeof(struct fiemap));
5159 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
5160 if (!is_error(ret)) {
5161 target_size_out = target_size_in;
5162 /* An extent_count of 0 means we were only counting the extents
5163          * so there are no structs to copy.
5164          */
5165 if (fm->fm_extent_count != 0) {
5166 target_size_out += fm->fm_mapped_extents * extent_size;
5168 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
5170 ret = -TARGET_EFAULT;
5172 /* Convert the struct fiemap */
5173 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
5174 if (fm->fm_extent_count != 0) {
5175 p = argptr + target_size_in;
5176 /* ...and then all the struct fiemap_extents */
5177 for (i = 0; i < fm->fm_mapped_extents; i++) {
5178 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
5183 unlock_user(argptr, arg, target_size_out);
5193 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5194 int fd, int cmd, abi_long arg)
5196 const argtype *arg_type = ie->arg_type;
5200 struct ifconf *host_ifconf;
5202 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5203 int target_ifreq_size;
5208 abi_long target_ifc_buf;
5212 assert(arg_type[0] == TYPE_PTR);
5213 assert(ie->access == IOC_RW);
5216 target_size = thunk_type_size(arg_type, 0);
5218 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5220 return -TARGET_EFAULT;
5221 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5222 unlock_user(argptr, arg, 0);
5224 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5225 target_ifc_len = host_ifconf->ifc_len;
5226 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5228 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5229 nb_ifreq = target_ifc_len / target_ifreq_size;
5230 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5232 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5233 if (outbufsz > MAX_STRUCT_SIZE) {
5234         /* We can't fit all the ifreq entries into the fixed-size buffer.
5235          * Allocate one that is large enough and use it instead.
5236          */
5237         host_ifconf = malloc(outbufsz);
5238         if (!host_ifconf) {
5239             return -TARGET_ENOMEM;
5240         }
5241 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5244 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5246 host_ifconf->ifc_len = host_ifc_len;
5247 host_ifconf->ifc_buf = host_ifc_buf;
5249 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5250 if (!is_error(ret)) {
5251 /* convert host ifc_len to target ifc_len */
5253 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5254 target_ifc_len = nb_ifreq * target_ifreq_size;
5255 host_ifconf->ifc_len = target_ifc_len;
5257 /* restore target ifc_buf */
5259 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5261 /* copy struct ifconf to target user */
5263 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5265 return -TARGET_EFAULT;
5266 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5267 unlock_user(argptr, arg, target_size);
5269 /* copy ifreq[] to target user */
5271 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5272 for (i = 0; i < nb_ifreq ; i++) {
5273 thunk_convert(argptr + i * target_ifreq_size,
5274 host_ifc_buf + i * sizeof(struct ifreq),
5275 ifreq_arg_type, THUNK_TARGET);
5277 unlock_user(argptr, target_ifc_buf, target_ifc_len);
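/* Size bookkeeping sketch: target and host struct ifreq may differ in
 * size (the target layout comes from STRUCT_sockaddr_ifreq), so
 * ifc_len is rescaled in both directions:
 *
 *     nb_ifreq     = target_ifc_len / target_ifreq_size;
 *     host_ifc_len = nb_ifreq * sizeof(struct ifreq);
 *
 * and the inverse on the way out, so the guest always sees a length
 * that is a multiple of its own ifreq size.
 */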
5287 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5288 int cmd, abi_long arg)
5291 struct dm_ioctl *host_dm;
5292 abi_long guest_data;
5293 uint32_t guest_data_size;
5295 const argtype *arg_type = ie->arg_type;
5297 void *big_buf = NULL;
5301 target_size = thunk_type_size(arg_type, 0);
5302 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5304 ret = -TARGET_EFAULT;
5307 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5308 unlock_user(argptr, arg, 0);
5310 /* buf_temp is too small, so fetch things into a bigger buffer */
5311 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5312     memcpy(big_buf, buf_temp, target_size);
5313
5314     host_dm = big_buf;
5315
5316 guest_data = arg + host_dm->data_start;
5317 if ((guest_data - arg) < 0) {
5318 ret = -TARGET_EINVAL;
5321 guest_data_size = host_dm->data_size - host_dm->data_start;
5322 host_data = (char*)host_dm + host_dm->data_start;
5324 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5326 ret = -TARGET_EFAULT;
5330     switch (ie->host_cmd) {
5331     case DM_REMOVE_ALL:
5332     case DM_LIST_DEVICES:
5333     case DM_DEV_CREATE:
5334     case DM_DEV_REMOVE:
5335     case DM_DEV_SUSPEND:
5336     case DM_DEV_STATUS:
5337     case DM_DEV_WAIT:
5338     case DM_TABLE_STATUS:
5339     case DM_TABLE_CLEAR:
5340     case DM_TABLE_DEPS:
5341     case DM_LIST_VERSIONS:
5342         /* no input data */
5343         break;
5344     case DM_DEV_RENAME:
5345     case DM_DEV_SET_GEOMETRY:
5346         /* data contains only strings */
5347         memcpy(host_data, argptr, guest_data_size);
5348         break;
5349     case DM_TARGET_MSG:
5350         memcpy(host_data, argptr, guest_data_size);
5351         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5352         break;
5353     case DM_TABLE_LOAD:
5354     {
5355         void *gspec = argptr;
5356 void *cur_data = host_data;
5357 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5358 int spec_size = thunk_type_size(arg_type, 0);
5361 for (i = 0; i < host_dm->target_count; i++) {
5362 struct dm_target_spec *spec = cur_data;
5366 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5367 slen = strlen((char*)gspec + spec_size) + 1;
5369 spec->next = sizeof(*spec) + slen;
5370 strcpy((char*)&spec[1], gspec + spec_size);
5372 cur_data += spec->next;
5377 ret = -TARGET_EINVAL;
5378 unlock_user(argptr, guest_data, 0);
5381 unlock_user(argptr, guest_data, 0);
5383 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5384 if (!is_error(ret)) {
5385 guest_data = arg + host_dm->data_start;
5386 guest_data_size = host_dm->data_size - host_dm->data_start;
5387 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5388         switch (ie->host_cmd) {
5389         case DM_REMOVE_ALL:
5390         case DM_DEV_CREATE:
5391         case DM_DEV_REMOVE:
5392         case DM_DEV_RENAME:
5393         case DM_DEV_SUSPEND:
5394         case DM_DEV_STATUS:
5395         case DM_TABLE_LOAD:
5396         case DM_TABLE_CLEAR:
5397         case DM_TARGET_MSG:
5398         case DM_DEV_SET_GEOMETRY:
5399             /* no return data */
5400             break;
5401         case DM_LIST_DEVICES:
5403 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5404 uint32_t remaining_data = guest_data_size;
5405 void *cur_data = argptr;
5406 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5407 int nl_size = 12; /* can't use thunk_size due to alignment */
5410 uint32_t next = nl->next;
5412 nl->next = nl_size + (strlen(nl->name) + 1);
5414 if (remaining_data < nl->next) {
5415 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5418 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5419 strcpy(cur_data + nl_size, nl->name);
5420 cur_data += nl->next;
5421 remaining_data -= nl->next;
5425 nl = (void*)nl + next;
5430 case DM_TABLE_STATUS:
5432 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5433 void *cur_data = argptr;
5434 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5435 int spec_size = thunk_type_size(arg_type, 0);
5438 for (i = 0; i < host_dm->target_count; i++) {
5439 uint32_t next = spec->next;
5440 int slen = strlen((char*)&spec[1]) + 1;
5441 spec->next = (cur_data - argptr) + spec_size + slen;
5442 if (guest_data_size < spec->next) {
5443 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5446 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5447 strcpy(cur_data + spec_size, (char*)&spec[1]);
5448 cur_data = argptr + spec->next;
5449 spec = (void*)host_dm + host_dm->data_start + next;
5455 void *hdata = (void*)host_dm + host_dm->data_start;
5456 int count = *(uint32_t*)hdata;
5457 uint64_t *hdev = hdata + 8;
5458 uint64_t *gdev = argptr + 8;
5461 *(uint32_t*)argptr = tswap32(count);
5462 for (i = 0; i < count; i++) {
5463 *gdev = tswap64(*hdev);
5469 case DM_LIST_VERSIONS:
5471 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5472 uint32_t remaining_data = guest_data_size;
5473 void *cur_data = argptr;
5474 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5475 int vers_size = thunk_type_size(arg_type, 0);
5478 uint32_t next = vers->next;
5480 vers->next = vers_size + (strlen(vers->name) + 1);
5482 if (remaining_data < vers->next) {
5483 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5486 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5487 strcpy(cur_data + vers_size, vers->name);
5488 cur_data += vers->next;
5489 remaining_data -= vers->next;
5493 vers = (void*)vers + next;
5498 unlock_user(argptr, guest_data, 0);
5499 ret = -TARGET_EINVAL;
5502 unlock_user(argptr, guest_data, guest_data_size);
5504 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5506 ret = -TARGET_EFAULT;
5509 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5510     unlock_user(argptr, arg, target_size);
5511
5512 out:
5513     g_free(big_buf);
5514     return ret;
5515 }
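/* Layout reminder for the device-mapper ioctls handled above: the
 * guest passes a struct dm_ioctl header whose data_start/data_size
 * fields describe a variable-length payload appended to the header,
 * which is why both directions convert the fixed header with the
 * thunk machinery and then hand-convert the payload per command.
 */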
5517 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5518 int cmd, abi_long arg)
5522 const argtype *arg_type = ie->arg_type;
5523 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5526 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5527 struct blkpg_partition host_part;
5529 /* Read and convert blkpg */
5531 target_size = thunk_type_size(arg_type, 0);
5532 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5534 ret = -TARGET_EFAULT;
5537 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5538 unlock_user(argptr, arg, 0);
5540 switch (host_blkpg->op) {
5541 case BLKPG_ADD_PARTITION:
5542 case BLKPG_DEL_PARTITION:
5543         /* payload is struct blkpg_partition */
5544         break;
5545     default:
5546         /* Unknown opcode */
5547         ret = -TARGET_EINVAL;
5548         goto out;
5549     }
5551 /* Read and convert blkpg->data */
5552 arg = (abi_long)(uintptr_t)host_blkpg->data;
5553 target_size = thunk_type_size(part_arg_type, 0);
5554 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5556 ret = -TARGET_EFAULT;
5559 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5560 unlock_user(argptr, arg, 0);
5562 /* Swizzle the data pointer to our local copy and call! */
5563 host_blkpg->data = &host_part;
5564     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5565
5566 out:
5567     return ret;
5568 }
5570 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5571 int fd, int cmd, abi_long arg)
5573 const argtype *arg_type = ie->arg_type;
5574 const StructEntry *se;
5575 const argtype *field_types;
5576 const int *dst_offsets, *src_offsets;
5579 abi_ulong *target_rt_dev_ptr;
5580 unsigned long *host_rt_dev_ptr;
5584 assert(ie->access == IOC_W);
5585     assert(*arg_type == TYPE_PTR);
5586     arg_type++;
5587     assert(*arg_type == TYPE_STRUCT);
5588 target_size = thunk_type_size(arg_type, 0);
5589 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5591 return -TARGET_EFAULT;
5594 assert(*arg_type == (int)STRUCT_rtentry);
5595 se = struct_entries + *arg_type++;
5596 assert(se->convert[0] == NULL);
5597 /* convert struct here to be able to catch rt_dev string */
5598 field_types = se->field_types;
5599 dst_offsets = se->field_offsets[THUNK_HOST];
5600 src_offsets = se->field_offsets[THUNK_TARGET];
5601 for (i = 0; i < se->nb_fields; i++) {
5602 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5603 assert(*field_types == TYPE_PTRVOID);
5604 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5605 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5606 if (*target_rt_dev_ptr != 0) {
5607 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5608 tswapal(*target_rt_dev_ptr));
5609 if (!*host_rt_dev_ptr) {
5610 unlock_user(argptr, arg, 0);
5611 return -TARGET_EFAULT;
5614 *host_rt_dev_ptr = 0;
5619 field_types = thunk_convert(buf_temp + dst_offsets[i],
5620 argptr + src_offsets[i],
5621 field_types, THUNK_HOST);
5623 unlock_user(argptr, arg, 0);
5625 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5626 if (*host_rt_dev_ptr != 0) {
5627 unlock_user((void *)*host_rt_dev_ptr,
5628                     *target_rt_dev_ptr, 0);
5629     }
5630     return ret;
5631 }
5633 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5634 int fd, int cmd, abi_long arg)
5636 int sig = target_to_host_signal(arg);
5637 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
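/* KDSIGACCEPT is special-cased because its argument is a signal
 * number, not a pointer: the target signal must be remapped (a
 * target's realtime signal numbering may differ from the host's), so
 * the generic thunk-based pointer conversion would do the wrong thing.
 */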
5640 static IOCTLEntry ioctl_entries[] = {
5641 #define IOCTL(cmd, access, ...) \
5642 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5643 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5644 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5645 #define IOCTL_IGNORE(cmd) \
5646 { TARGET_ ## cmd, 0, #cmd },
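/* Expansion sketch: an entry generated from ioctls.h such as
 *
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 *
 * becomes { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 * { ... } }, i.e. a table row mapping the target command number to the
 * host one plus a type description used for automatic conversion.
 */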
5651 /* ??? Implement proper locking for ioctls. */
5652 /* do_ioctl() must return target values and target errnos. */
5653 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5655 const IOCTLEntry *ie;
5656 const argtype *arg_type;
5658 uint8_t buf_temp[MAX_STRUCT_SIZE];
5664 if (ie->target_cmd == 0) {
5665 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5666 return -TARGET_ENOSYS;
5668 if (ie->target_cmd == cmd)
5672 arg_type = ie->arg_type;
5674 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
5677 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5678 } else if (!ie->host_cmd) {
5679 /* Some architectures define BSD ioctls in their headers
5680 that are not implemented in Linux. */
5681 return -TARGET_ENOSYS;
5684 switch(arg_type[0]) {
5687 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5691 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5695 target_size = thunk_type_size(arg_type, 0);
5696 switch(ie->access) {
5698 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5699 if (!is_error(ret)) {
5700 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5702 return -TARGET_EFAULT;
5703 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5704 unlock_user(argptr, arg, target_size);
5708 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5710 return -TARGET_EFAULT;
5711 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5712 unlock_user(argptr, arg, 0);
5713 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5717 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5719 return -TARGET_EFAULT;
5720 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5721 unlock_user(argptr, arg, 0);
5722 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5723 if (!is_error(ret)) {
5724 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5726 return -TARGET_EFAULT;
5727 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5728 unlock_user(argptr, arg, target_size);
5734 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5735 (long)cmd, arg_type[0]);
5736 ret = -TARGET_ENOSYS;
5742 static const bitmask_transtbl iflag_tbl[] = {
5743 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5744 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5745 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5746 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5747 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5748 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5749 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5750 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5751 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5752 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5753 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5754 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5755 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5756 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5760 static const bitmask_transtbl oflag_tbl[] = {
5761 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5762 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5763 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5764 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5765 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5766 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5767 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5768 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5769 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5770 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5771 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5772 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5773 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5774 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5775 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5776 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5777 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5778 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5779 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5780 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5781 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5782 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5783 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5784 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5788 static const bitmask_transtbl cflag_tbl[] = {
5789 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5790 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5791 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5792 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5793 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5794 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5795 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5796 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5797 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5798 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5799 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5800 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5801 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5802 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5803 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5804 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5805 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5806 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5807 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5808 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5809 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5810 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5811 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5812 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5813 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5814 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5815 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5816 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5817 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5818 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5819 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5823 static const bitmask_transtbl lflag_tbl[] = {
5824 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5825 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5826 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5827 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5828 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5829 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5830 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5831 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5832 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5833 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5834 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5835 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5836 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5837 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5838 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5842 static void target_to_host_termios (void *dst, const void *src)
5844 struct host_termios *host = dst;
5845 const struct target_termios *target = src;
5848 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5850 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5852 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5854 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5855 host->c_line = target->c_line;
5857 memset(host->c_cc, 0, sizeof(host->c_cc));
5858 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5859 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5860 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5861 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5862 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5863 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5864 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5865 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5866 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5867 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5868 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5869 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5870 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5871 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5872 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5873 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5874 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5877 static void host_to_target_termios (void *dst, const void *src)
5879 struct target_termios *target = dst;
5880 const struct host_termios *host = src;
5883 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5885 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5887 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5889 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5890 target->c_line = host->c_line;
5892 memset(target->c_cc, 0, sizeof(target->c_cc));
5893 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5894 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5895 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5896 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5897 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5898 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5899 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5900 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5901 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5902 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5903 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5904 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5905 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5906 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5907 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5908 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5909 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5912 static const StructEntry struct_termios_def = {
5913 .convert = { host_to_target_termios, target_to_host_termios },
5914 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5915 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5918 static bitmask_transtbl mmap_flags_tbl[] = {
5919 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5920 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5921 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5922 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5923 MAP_ANONYMOUS, MAP_ANONYMOUS },
5924 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5925 MAP_GROWSDOWN, MAP_GROWSDOWN },
5926 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5927 MAP_DENYWRITE, MAP_DENYWRITE },
5928 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5929 MAP_EXECUTABLE, MAP_EXECUTABLE },
5930 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5931 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5932 MAP_NORESERVE, MAP_NORESERVE },
5933 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5934     /* MAP_STACK has been ignored by the kernel for quite some time.
5935 Recognize it for the target insofar as we do not want to pass
5936 it through to the host. */
5937 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
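/* How a bitmask_transtbl row is read: { target_mask, target_bits,
 * host_mask, host_bits } means "if (value & target_mask) ==
 * target_bits, set host_bits in the result".  For the simple flags
 * above, mask and bits are identical; the MAP_STACK row deliberately
 * maps to 0 so the flag is accepted from the guest but dropped before
 * reaching the host mmap().
 */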
5941 #if defined(TARGET_I386)
5943 /* NOTE: there is really only one LDT, shared by all threads */
5944 static uint8_t *ldt_table;
5946 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5953 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5954 if (size > bytecount)
5956 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5958 return -TARGET_EFAULT;
5959     /* ??? Should this be byteswapped? */
5960 memcpy(p, ldt_table, size);
5961 unlock_user(p, ptr, size);
5965 /* XXX: add locking support */
5966 static abi_long write_ldt(CPUX86State *env,
5967 abi_ulong ptr, unsigned long bytecount, int oldmode)
5969 struct target_modify_ldt_ldt_s ldt_info;
5970 struct target_modify_ldt_ldt_s *target_ldt_info;
5971 int seg_32bit, contents, read_exec_only, limit_in_pages;
5972 int seg_not_present, useable, lm;
5973 uint32_t *lp, entry_1, entry_2;
5975 if (bytecount != sizeof(ldt_info))
5976 return -TARGET_EINVAL;
5977 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5978 return -TARGET_EFAULT;
5979 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5980 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5981 ldt_info.limit = tswap32(target_ldt_info->limit);
5982 ldt_info.flags = tswap32(target_ldt_info->flags);
5983 unlock_user_struct(target_ldt_info, ptr, 0);
5985 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5986 return -TARGET_EINVAL;
5987 seg_32bit = ldt_info.flags & 1;
5988 contents = (ldt_info.flags >> 1) & 3;
5989 read_exec_only = (ldt_info.flags >> 3) & 1;
5990 limit_in_pages = (ldt_info.flags >> 4) & 1;
5991 seg_not_present = (ldt_info.flags >> 5) & 1;
5992 useable = (ldt_info.flags >> 6) & 1;
5996 lm = (ldt_info.flags >> 7) & 1;
5998     if (contents == 3) {
5999         if (oldmode)
6000             return -TARGET_EINVAL;
6001 if (seg_not_present == 0)
6002 return -TARGET_EINVAL;
6004     /* allocate the LDT */
6005     if (!ldt_table) {
6006 env->ldt.base = target_mmap(0,
6007 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6008 PROT_READ|PROT_WRITE,
6009 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6010 if (env->ldt.base == -1)
6011 return -TARGET_ENOMEM;
6012 memset(g2h(env->ldt.base), 0,
6013 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6014 env->ldt.limit = 0xffff;
6015 ldt_table = g2h(env->ldt.base);
6018 /* NOTE: same code as Linux kernel */
6019 /* Allow LDTs to be cleared by the user. */
6020     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6021         if (oldmode ||
6022             (contents == 0          &&
6023              read_exec_only == 1    &&
6024              seg_32bit == 0         &&
6025              limit_in_pages == 0    &&
6026              seg_not_present == 1   &&
6027              useable == 0)) {
6028             entry_1 = 0;
6029             entry_2 = 0;
6030             goto install;
6031         }
6032     }
6034 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6035 (ldt_info.limit & 0x0ffff);
6036 entry_2 = (ldt_info.base_addr & 0xff000000) |
6037 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6038 (ldt_info.limit & 0xf0000) |
6039 ((read_exec_only ^ 1) << 9) |
6041 ((seg_not_present ^ 1) << 15) |
6043 (limit_in_pages << 23) |
6047 entry_2 |= (useable << 20);
6049     /* Install the new entry ... */
6050 install:
6051     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6052 lp[0] = tswap32(entry_1);
6053 lp[1] = tswap32(entry_2);
6057 /* specific and weird i386 syscalls */
6058 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6059 unsigned long bytecount)
6065 ret = read_ldt(ptr, bytecount);
6068 ret = write_ldt(env, ptr, bytecount, 1);
6071 ret = write_ldt(env, ptr, bytecount, 0);
6074 ret = -TARGET_ENOSYS;
6080 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6081 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6083 uint64_t *gdt_table = g2h(env->gdt.base);
6084 struct target_modify_ldt_ldt_s ldt_info;
6085 struct target_modify_ldt_ldt_s *target_ldt_info;
6086 int seg_32bit, contents, read_exec_only, limit_in_pages;
6087 int seg_not_present, useable, lm;
6088 uint32_t *lp, entry_1, entry_2;
6091 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6092 if (!target_ldt_info)
6093 return -TARGET_EFAULT;
6094 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6095 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6096 ldt_info.limit = tswap32(target_ldt_info->limit);
6097 ldt_info.flags = tswap32(target_ldt_info->flags);
6098 if (ldt_info.entry_number == -1) {
6099 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6100 if (gdt_table[i] == 0) {
6101 ldt_info.entry_number = i;
6102 target_ldt_info->entry_number = tswap32(i);
6107 unlock_user_struct(target_ldt_info, ptr, 1);
6109 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6110 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6111 return -TARGET_EINVAL;
6112 seg_32bit = ldt_info.flags & 1;
6113 contents = (ldt_info.flags >> 1) & 3;
6114 read_exec_only = (ldt_info.flags >> 3) & 1;
6115 limit_in_pages = (ldt_info.flags >> 4) & 1;
6116 seg_not_present = (ldt_info.flags >> 5) & 1;
6117 useable = (ldt_info.flags >> 6) & 1;
6121 lm = (ldt_info.flags >> 7) & 1;
6124 if (contents == 3) {
6125 if (seg_not_present == 0)
6126 return -TARGET_EINVAL;
6129 /* NOTE: same code as Linux kernel */
6130 /* Allow LDTs to be cleared by the user. */
6131     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6132         if ((contents == 0          &&
6133              read_exec_only == 1    &&
6134              seg_32bit == 0         &&
6135              limit_in_pages == 0    &&
6136              seg_not_present == 1   &&
6137              useable == 0)) {
6138             entry_1 = 0;
6139             entry_2 = 0;
6140             goto install;
6141         }
6142     }
6144 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6145 (ldt_info.limit & 0x0ffff);
6146 entry_2 = (ldt_info.base_addr & 0xff000000) |
6147 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6148 (ldt_info.limit & 0xf0000) |
6149 ((read_exec_only ^ 1) << 9) |
6151 ((seg_not_present ^ 1) << 15) |
6153 (limit_in_pages << 23) |
6158     /* Install the new entry ... */
6159 install:
6160     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6161 lp[0] = tswap32(entry_1);
6162 lp[1] = tswap32(entry_2);
6166 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6168 struct target_modify_ldt_ldt_s *target_ldt_info;
6169 uint64_t *gdt_table = g2h(env->gdt.base);
6170 uint32_t base_addr, limit, flags;
6171 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6172 int seg_not_present, useable, lm;
6173 uint32_t *lp, entry_1, entry_2;
6175 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6176 if (!target_ldt_info)
6177 return -TARGET_EFAULT;
6178 idx = tswap32(target_ldt_info->entry_number);
6179 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6180 idx > TARGET_GDT_ENTRY_TLS_MAX) {
6181 unlock_user_struct(target_ldt_info, ptr, 1);
6182 return -TARGET_EINVAL;
6184 lp = (uint32_t *)(gdt_table + idx);
6185 entry_1 = tswap32(lp[0]);
6186 entry_2 = tswap32(lp[1]);
6188 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6189 contents = (entry_2 >> 10) & 3;
6190 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6191 seg_32bit = (entry_2 >> 22) & 1;
6192 limit_in_pages = (entry_2 >> 23) & 1;
6193 useable = (entry_2 >> 20) & 1;
6197 lm = (entry_2 >> 21) & 1;
6199 flags = (seg_32bit << 0) | (contents << 1) |
6200 (read_exec_only << 3) | (limit_in_pages << 4) |
6201 (seg_not_present << 5) | (useable << 6) | (lm << 7);
6202 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6203 base_addr = (entry_1 >> 16) |
6204 (entry_2 & 0xff000000) |
6205 ((entry_2 & 0xff) << 16);
6206 target_ldt_info->base_addr = tswapal(base_addr);
6207 target_ldt_info->limit = tswap32(limit);
6208 target_ldt_info->flags = tswap32(flags);
6209 unlock_user_struct(target_ldt_info, ptr, 1);
6212 #endif /* TARGET_I386 && TARGET_ABI32 */
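/* Bit layout of ldt_info.flags, as packed and unpacked above (this
 * matches the kernel's struct user_desc):
 *
 *     bit 0    seg_32bit
 *     bits 1-2 contents
 *     bit 3    read_exec_only
 *     bit 4    limit_in_pages
 *     bit 5    seg_not_present
 *     bit 6    useable
 *     bit 7    lm (64-bit code segment, x86_64 only)
 */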
6214 #ifndef TARGET_ABI32
6215 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6222 case TARGET_ARCH_SET_GS:
6223 case TARGET_ARCH_SET_FS:
6224 if (code == TARGET_ARCH_SET_GS)
6228 cpu_x86_load_seg(env, idx, 0);
6229 env->segs[idx].base = addr;
6231 case TARGET_ARCH_GET_GS:
6232 case TARGET_ARCH_GET_FS:
6233 if (code == TARGET_ARCH_GET_GS)
6237 val = env->segs[idx].base;
6238 if (put_user(val, addr, abi_ulong))
6239 ret = -TARGET_EFAULT;
6242 ret = -TARGET_EINVAL;
6249 #endif /* defined(TARGET_I386) */
6251 #define NEW_STACK_SIZE 0x40000
6254 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6257 pthread_mutex_t mutex;
6258 pthread_cond_t cond;
6261 abi_ulong child_tidptr;
6262 abi_ulong parent_tidptr;
6266 static void *clone_func(void *arg)
6268 new_thread_info *info = arg;
6273 rcu_register_thread();
6274 tcg_register_thread();
6276 cpu = ENV_GET_CPU(env);
6278 ts = (TaskState *)cpu->opaque;
6279 info->tid = gettid();
6281 if (info->child_tidptr)
6282 put_user_u32(info->tid, info->child_tidptr);
6283 if (info->parent_tidptr)
6284 put_user_u32(info->tid, info->parent_tidptr);
6285 /* Enable signals. */
6286 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6287 /* Signal to the parent that we're ready. */
6288 pthread_mutex_lock(&info->mutex);
6289 pthread_cond_broadcast(&info->cond);
6290 pthread_mutex_unlock(&info->mutex);
6291 /* Wait until the parent has finished initializing the tls state. */
6292 pthread_mutex_lock(&clone_lock);
6293 pthread_mutex_unlock(&clone_lock);
6299 /* do_fork() must return host values and target errnos (unlike most
6300    do_*() functions). */
6301 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6302 abi_ulong parent_tidptr, target_ulong newtls,
6303 abi_ulong child_tidptr)
6305 CPUState *cpu = ENV_GET_CPU(env);
6309 CPUArchState *new_env;
6312 flags &= ~CLONE_IGNORED_FLAGS;
6314 /* Emulate vfork() with fork() */
6315 if (flags & CLONE_VFORK)
6316 flags &= ~(CLONE_VFORK | CLONE_VM);
6318 if (flags & CLONE_VM) {
6319 TaskState *parent_ts = (TaskState *)cpu->opaque;
6320 new_thread_info info;
6321 pthread_attr_t attr;
6323 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6324 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6325 return -TARGET_EINVAL;
6328 ts = g_new0(TaskState, 1);
6329 init_task_state(ts);
6330 /* we create a new CPU instance. */
6331 new_env = cpu_copy(env);
6332 /* Init regs that differ from the parent. */
6333 cpu_clone_regs(new_env, newsp);
6334 new_cpu = ENV_GET_CPU(new_env);
6335 new_cpu->opaque = ts;
6336 ts->bprm = parent_ts->bprm;
6337 ts->info = parent_ts->info;
6338 ts->signal_mask = parent_ts->signal_mask;
6340 if (flags & CLONE_CHILD_CLEARTID) {
6341 ts->child_tidptr = child_tidptr;
6344 if (flags & CLONE_SETTLS) {
6345 cpu_set_tls (new_env, newtls);
6348 /* Grab a mutex so that thread setup appears atomic. */
6349 pthread_mutex_lock(&clone_lock);
6351 memset(&info, 0, sizeof(info));
6352 pthread_mutex_init(&info.mutex, NULL);
6353 pthread_mutex_lock(&info.mutex);
6354 pthread_cond_init(&info.cond, NULL);
6356 if (flags & CLONE_CHILD_SETTID) {
6357 info.child_tidptr = child_tidptr;
6359 if (flags & CLONE_PARENT_SETTID) {
6360 info.parent_tidptr = parent_tidptr;
6363 ret = pthread_attr_init(&attr);
6364 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6365 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6366 /* It is not safe to deliver signals until the child has finished
6367 initializing, so temporarily block all signals. */
6368 sigfillset(&sigmask);
6369 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6371 /* If this is our first additional thread, we need to ensure we
6372 * generate code for parallel execution and flush old translations.
6374 if (!parallel_cpus) {
6375 parallel_cpus = true;
6379 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6380 /* TODO: Free new CPU state if thread creation failed. */
6382 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6383 pthread_attr_destroy(&attr);
6385 /* Wait for the child to initialize. */
6386 pthread_cond_wait(&info.cond, &info.mutex);
6391 pthread_mutex_unlock(&info.mutex);
6392 pthread_cond_destroy(&info.cond);
6393 pthread_mutex_destroy(&info.mutex);
6394 pthread_mutex_unlock(&clone_lock);
6396         /* if there is no CLONE_VM, we consider it a fork */
6397 if (flags & CLONE_INVALID_FORK_FLAGS) {
6398 return -TARGET_EINVAL;
6401 /* We can't support custom termination signals */
6402 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6403 return -TARGET_EINVAL;
6406 if (block_signals()) {
6407 return -TARGET_ERESTARTSYS;
6413 /* Child Process. */
6414 cpu_clone_regs(env, newsp);
6416 /* There is a race condition here. The parent process could
6417 theoretically read the TID in the child process before the child
6418 tid is set. This would require using either ptrace
6419 (not implemented) or having *_tidptr to point at a shared memory
6420 mapping. We can't repeat the spinlock hack used above because
6421 the child process gets its own copy of the lock. */
6422 if (flags & CLONE_CHILD_SETTID)
6423 put_user_u32(gettid(), child_tidptr);
6424 if (flags & CLONE_PARENT_SETTID)
6425 put_user_u32(gettid(), parent_tidptr);
6426 ts = (TaskState *)cpu->opaque;
6427 if (flags & CLONE_SETTLS)
6428 cpu_set_tls (env, newtls);
6429 if (flags & CLONE_CHILD_CLEARTID)
6430 ts->child_tidptr = child_tidptr;
6438 /* warning: doesn't handle Linux-specific flags... */
6439 static int target_to_host_fcntl_cmd(int cmd)
6442 case TARGET_F_DUPFD:
6443 case TARGET_F_GETFD:
6444 case TARGET_F_SETFD:
6445 case TARGET_F_GETFL:
6446 case TARGET_F_SETFL:
6448 case TARGET_F_GETLK:
6450 case TARGET_F_SETLK:
6452 case TARGET_F_SETLKW:
6454 case TARGET_F_GETOWN:
6456 case TARGET_F_SETOWN:
6458 case TARGET_F_GETSIG:
6460 case TARGET_F_SETSIG:
6462 #if TARGET_ABI_BITS == 32
6463 case TARGET_F_GETLK64:
6465 case TARGET_F_SETLK64:
6467 case TARGET_F_SETLKW64:
6470 case TARGET_F_SETLEASE:
6472 case TARGET_F_GETLEASE:
6474 #ifdef F_DUPFD_CLOEXEC
6475 case TARGET_F_DUPFD_CLOEXEC:
6476 return F_DUPFD_CLOEXEC;
6478 case TARGET_F_NOTIFY:
6481 case TARGET_F_GETOWN_EX:
6485 case TARGET_F_SETOWN_EX:
6489 case TARGET_F_SETPIPE_SZ:
6490 return F_SETPIPE_SZ;
6491 case TARGET_F_GETPIPE_SZ:
6492 return F_GETPIPE_SZ;
6495 return -TARGET_EINVAL;
6497 return -TARGET_EINVAL;
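/* Note on the TARGET_ABI_BITS == 32 block above: on 32-bit targets
 * F_GETLK64/F_SETLK64/F_SETLKW64 are translated to the host's 64-bit
 * fcntl commands so that a 32-bit guest keeps full 64-bit file-range
 * locking; on 64-bit targets the plain F_GETLK family already uses
 * 64-bit offsets and no separate translation is needed.
 */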
6500 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6501 static const bitmask_transtbl flock_tbl[] = {
6502 TRANSTBL_CONVERT(F_RDLCK),
6503 TRANSTBL_CONVERT(F_WRLCK),
6504 TRANSTBL_CONVERT(F_UNLCK),
6505 TRANSTBL_CONVERT(F_EXLCK),
6506 TRANSTBL_CONVERT(F_SHLCK),
6510 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6511 abi_ulong target_flock_addr)
6513 struct target_flock *target_fl;
6516 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6517 return -TARGET_EFAULT;
6520 __get_user(l_type, &target_fl->l_type);
6521 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6522 __get_user(fl->l_whence, &target_fl->l_whence);
6523 __get_user(fl->l_start, &target_fl->l_start);
6524 __get_user(fl->l_len, &target_fl->l_len);
6525 __get_user(fl->l_pid, &target_fl->l_pid);
6526     unlock_user_struct(target_fl, target_flock_addr, 0);
6527
6528     return 0;
6529 }
6530 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6531 const struct flock64 *fl)
6533 struct target_flock *target_fl;
6536 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6537 return -TARGET_EFAULT;
6540 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6541 __put_user(l_type, &target_fl->l_type);
6542 __put_user(fl->l_whence, &target_fl->l_whence);
6543 __put_user(fl->l_start, &target_fl->l_start);
6544 __put_user(fl->l_len, &target_fl->l_len);
6545 __put_user(fl->l_pid, &target_fl->l_pid);
6546     unlock_user_struct(target_fl, target_flock_addr, 1);
6547
6548     return 0;
6549 }
6550 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6551 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6553 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6554 static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
6555 abi_ulong target_flock_addr)
6557 struct target_eabi_flock64 *target_fl;
6560 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6561 return -TARGET_EFAULT;
6564 __get_user(l_type, &target_fl->l_type);
6565 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6566 __get_user(fl->l_whence, &target_fl->l_whence);
6567 __get_user(fl->l_start, &target_fl->l_start);
6568 __get_user(fl->l_len, &target_fl->l_len);
6569 __get_user(fl->l_pid, &target_fl->l_pid);
6570     unlock_user_struct(target_fl, target_flock_addr, 0);
6571
6572     return 0;
6573 }
6574 static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
6575 const struct flock64 *fl)
6577 struct target_eabi_flock64 *target_fl;
6580 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6581 return -TARGET_EFAULT;
6584 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6585 __put_user(l_type, &target_fl->l_type);
6586 __put_user(fl->l_whence, &target_fl->l_whence);
6587 __put_user(fl->l_start, &target_fl->l_start);
6588 __put_user(fl->l_len, &target_fl->l_len);
6589 __put_user(fl->l_pid, &target_fl->l_pid);
6590     unlock_user_struct(target_fl, target_flock_addr, 1);
6591
6592     return 0;
6593 }
6594 #endif
6595 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6596 abi_ulong target_flock_addr)
6598 struct target_flock64 *target_fl;
6601 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6602 return -TARGET_EFAULT;
6605 __get_user(l_type, &target_fl->l_type);
6606 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6607 __get_user(fl->l_whence, &target_fl->l_whence);
6608 __get_user(fl->l_start, &target_fl->l_start);
6609 __get_user(fl->l_len, &target_fl->l_len);
6610 __get_user(fl->l_pid, &target_fl->l_pid);
6611     unlock_user_struct(target_fl, target_flock_addr, 0);
6612
6613     return 0;
6614 }
6615 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6616 const struct flock64 *fl)
6618 struct target_flock64 *target_fl;
6621 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6622 return -TARGET_EFAULT;
6625 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6626 __put_user(l_type, &target_fl->l_type);
6627 __put_user(fl->l_whence, &target_fl->l_whence);
6628 __put_user(fl->l_start, &target_fl->l_start);
6629 __put_user(fl->l_len, &target_fl->l_len);
6630 __put_user(fl->l_pid, &target_fl->l_pid);
6631     unlock_user_struct(target_fl, target_flock_addr, 1);
6632
6633     return 0;
6634 }
6635 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6637 struct flock64 fl64;
6639 struct f_owner_ex fox;
6640 struct target_f_owner_ex *target_fox;
6643 int host_cmd = target_to_host_fcntl_cmd(cmd);
6645     if (host_cmd == -TARGET_EINVAL)
6646         return host_cmd;
6647
6648     switch(cmd) {
6649 case TARGET_F_GETLK:
6650         ret = copy_from_user_flock(&fl64, arg);
6651         if (ret) {
6652             return ret;
6653         }
6654         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6655         if (ret == 0) {
6656             ret = copy_to_user_flock(arg, &fl64);
6657         }
6658         break;
6660 case TARGET_F_SETLK:
6661 case TARGET_F_SETLKW:
6662 ret = copy_from_user_flock(&fl64, arg);
6666 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6669 case TARGET_F_GETLK64:
6670 ret = copy_from_user_flock64(&fl64, arg);
6674 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6676 ret = copy_to_user_flock64(arg, &fl64);
6679 case TARGET_F_SETLK64:
6680 case TARGET_F_SETLKW64:
6681 ret = copy_from_user_flock64(&fl64, arg);
6685 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6688 case TARGET_F_GETFL:
6689         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6690         if (ret >= 0) {
6691             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6692         }
6693         break;
6695 case TARGET_F_SETFL:
6696 ret = get_errno(safe_fcntl(fd, host_cmd,
6697 target_to_host_bitmask(arg,
6702 case TARGET_F_GETOWN_EX:
6703 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6705 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6706 return -TARGET_EFAULT;
6707 target_fox->type = tswap32(fox.type);
6708 target_fox->pid = tswap32(fox.pid);
6709 unlock_user_struct(target_fox, arg, 1);
6715 case TARGET_F_SETOWN_EX:
6716 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6717 return -TARGET_EFAULT;
6718 fox.type = tswap32(target_fox->type);
6719 fox.pid = tswap32(target_fox->pid);
6720 unlock_user_struct(target_fox, arg, 0);
6721 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6725 case TARGET_F_SETOWN:
6726 case TARGET_F_GETOWN:
6727 case TARGET_F_SETSIG:
6728 case TARGET_F_GETSIG:
6729 case TARGET_F_SETLEASE:
6730 case TARGET_F_GETLEASE:
6731 case TARGET_F_SETPIPE_SZ:
6732 case TARGET_F_GETPIPE_SZ:
6733 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6737 ret = get_errno(safe_fcntl(fd, cmd, arg));
6745 static inline int high2lowuid(int uid)
6746 {
6747     if (uid > 65535)
6748         return 65534;
6749     else
6750         return uid;
6751 }
6752
6753 static inline int high2lowgid(int gid)
6754 {
6755     if (gid > 65535)
6756         return 65534;
6757     else
6758         return gid;
6759 }
6761 static inline int low2highuid(int uid)
6762 {
6763     if ((int16_t)uid == -1)
6764         return -1;
6765     else
6766         return uid;
6767 }
6768
6769 static inline int low2highgid(int gid)
6770 {
6771     if ((int16_t)gid == -1)
6772         return -1;
6773     else
6774         return gid;
6775 }
6776 static inline int tswapid(int id)
6777 {
6778     return tswap16(id);
6779 }
6780
6781 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6783 #else /* !USE_UID16 */
6784 static inline int high2lowuid(int uid)
6785 {
6786     return uid;
6787 }
6788 static inline int high2lowgid(int gid)
6789 {
6790     return gid;
6791 }
6792 static inline int low2highuid(int uid)
6793 {
6794     return uid;
6795 }
6796 static inline int low2highgid(int gid)
6797 {
6798     return gid;
6799 }
6800 static inline int tswapid(int id)
6801 {
6802     return tswap32(id);
6803 }
6804
6805 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6807 #endif /* USE_UID16 */
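/* Example of the UID16 edge cases handled above: a guest chown(path,
 * 0xFFFF, 0xFFFF) means "leave the IDs unchanged", so low2highuid()
 * turns 16-bit -1 into 32-bit -1; conversely a host uid of 100000
 * cannot be represented in 16 bits and is reported to the guest as
 * 65534, mirroring the kernel's overflowuid behaviour.
 */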
6809 /* We must do direct syscalls for setting UID/GID, because we want to
6810 * implement the Linux system call semantics of "change only for this thread",
6811 * not the libc/POSIX semantics of "change for all threads in process".
6812 * (See http://ewontfix.com/17/ for more details.)
6813 * We use the 32-bit version of the syscalls if present; if it is not
6814 * then either the host architecture supports 32-bit UIDs natively with
6815 * the standard syscall, or the 16-bit UID is the best we can do.
6817 #ifdef __NR_setuid32
6818 #define __NR_sys_setuid __NR_setuid32
6820 #define __NR_sys_setuid __NR_setuid
6822 #ifdef __NR_setgid32
6823 #define __NR_sys_setgid __NR_setgid32
6825 #define __NR_sys_setgid __NR_setgid
6827 #ifdef __NR_setresuid32
6828 #define __NR_sys_setresuid __NR_setresuid32
6830 #define __NR_sys_setresuid __NR_setresuid
6832 #ifdef __NR_setresgid32
6833 #define __NR_sys_setresgid __NR_setresgid32
6835 #define __NR_sys_setresgid __NR_setresgid
6838 _syscall1(int, sys_setuid, uid_t, uid)
6839 _syscall1(int, sys_setgid, gid_t, gid)
6840 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6841 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
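/* Illustrative (hypothetical) call site for the wrappers above: a guest
 * thread issuing setuid() expects only its own credentials to change,
 * which is exactly what the raw syscall does:
 *
 *     ret = get_errno(sys_setuid(low2highuid(arg1)));
 *
 * Calling libc's setuid() here instead would broadcast the change to all
 * QEMU threads through glibc's internal setxid signalling, giving the
 * POSIX process-wide behaviour rather than the per-thread behaviour the
 * Linux syscall ABI promises the guest.
 */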
6843 void syscall_init(void)
6846 const argtype *arg_type;
6850 thunk_init(STRUCT_MAX);
6852 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6853 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6854 #include "syscall_types.h"
6856 #undef STRUCT_SPECIAL
6858 /* Build the target_to_host_errno_table[] from
6859 * host_to_target_errno_table[]. */
6860 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6861 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6864 /* We patch the ioctl size if necessary. We rely on the fact that
6865 no ioctl has all the bits set to '1' in the size field. */
6867 while (ie->target_cmd != 0) {
6868 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6869 TARGET_IOC_SIZEMASK) {
6870 arg_type = ie->arg_type;
6871 if (arg_type[0] != TYPE_PTR) {
6872 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6877 size = thunk_type_size(arg_type, 0);
6878 ie->target_cmd = (ie->target_cmd &
6879 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6880 (size << TARGET_IOC_SIZESHIFT);
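/* Sketch of the encoding being patched: a Linux ioctl number packs
 * direction, type, number and argument size into one word, roughly
 *
 *     cmd = (dir  << TARGET_IOC_DIRSHIFT)  |
 *           (type << TARGET_IOC_TYPESHIFT) |
 *           (nr   << TARGET_IOC_NRSHIFT)   |
 *           (size << TARGET_IOC_SIZESHIFT);
 *
 * An all-ones size field in the table means "fill in at runtime", and the
 * statement above replaces it with thunk_type_size() of the pointed-to
 * structure as laid out for the current target ABI.
 */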
6883 /* automatic consistency check if same arch */
6884 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6885 (defined(__x86_64__) && defined(TARGET_X86_64))
6886 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6887 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6888 ie->name, ie->target_cmd, ie->host_cmd);
6895 #if TARGET_ABI_BITS == 32
6896 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6898 #ifdef TARGET_WORDS_BIGENDIAN
6899 return ((uint64_t)word0 << 32) | word1;
6901 return ((uint64_t)word1 << 32) | word0;
6904 #else /* TARGET_ABI_BITS == 32 */
6905 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6909 #endif /* TARGET_ABI_BITS != 32 */
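/* Worked example for target_offset64(): a 32-bit guest passes a 64-bit
 * file offset as two abi_ulong halves. For offset 0x100000000 a
 * little-endian guest hands us word0 = 0x0 (low) and word1 = 0x1 (high),
 * so
 *
 *     ((uint64_t)word1 << 32) | word0  ==  0x100000000
 *
 * while a big-endian guest passes the halves the other way round, hence
 * the two byte-order branches. On 64-bit ABIs the offset arrives whole
 * and word0 is returned as-is.
 */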
6911 #ifdef TARGET_NR_truncate64
6912 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6917 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6921 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6925 #ifdef TARGET_NR_ftruncate64
6926 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6931 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6935 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
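/* Why regpairs_aligned() matters above, as a sketch: some 32-bit ABIs
 * (ARM EABI, for example) require a 64-bit syscall argument to occupy an
 * even/odd register pair, so the kernel-side signature is effectively
 *
 *     truncate64(path, pad, lo, hi)    // aligned: pair shifted up by one
 * rather than
 *     truncate64(path, lo, hi)         // unaligned: pair starts at arg2
 *
 * and these helpers pick (arg3, arg4) instead of (arg2, arg3) before
 * recombining the halves with target_offset64().
 */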
6939 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6940 abi_ulong target_addr)
6942 struct target_timespec *target_ts;
6944 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6945 return -TARGET_EFAULT;
6946 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6947 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6948 unlock_user_struct(target_ts, target_addr, 0);
6952 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6953 struct timespec *host_ts)
6955 struct target_timespec *target_ts;
6957 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6958 return -TARGET_EFAULT;
6959 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6960 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6961 unlock_user_struct(target_ts, target_addr, 1);
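/* The pair of helpers above follows the guest-memory access pattern used
 * throughout this file; a minimal sketch for any target struct T:
 *
 *     struct target_T *tp;
 *     if (!lock_user_struct(VERIFY_READ, tp, addr, 1))  // 1 = copy in
 *         return -TARGET_EFAULT;
 *     __get_user(host.field, &tp->field);               // byte-swaps too
 *     unlock_user_struct(tp, addr, 0);                  // 0 = no copy out
 *
 * For the write direction the flags invert: lock with VERIFY_WRITE and
 * copy flag 0, then unlock with 1 so the converted data is copied back
 * out to guest memory.
 */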
6965 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6966 abi_ulong target_addr)
6968 struct target_itimerspec *target_itspec;
6970 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6971 return -TARGET_EFAULT;
6974 host_itspec->it_interval.tv_sec =
6975 tswapal(target_itspec->it_interval.tv_sec);
6976 host_itspec->it_interval.tv_nsec =
6977 tswapal(target_itspec->it_interval.tv_nsec);
6978 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6979 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6981 unlock_user_struct(target_itspec, target_addr, 1);
6985 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6986 struct itimerspec *host_its)
6988 struct target_itimerspec *target_itspec;
6990 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6991 return -TARGET_EFAULT;
6994 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6995 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6997 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6998 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
7000 unlock_user_struct(target_itspec, target_addr, 0);
7004 static inline abi_long target_to_host_timex(struct timex *host_tx,
7005 abi_long target_addr)
7007 struct target_timex *target_tx;
7009 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7010 return -TARGET_EFAULT;
7013 __get_user(host_tx->modes, &target_tx->modes);
7014 __get_user(host_tx->offset, &target_tx->offset);
7015 __get_user(host_tx->freq, &target_tx->freq);
7016 __get_user(host_tx->maxerror, &target_tx->maxerror);
7017 __get_user(host_tx->esterror, &target_tx->esterror);
7018 __get_user(host_tx->status, &target_tx->status);
7019 __get_user(host_tx->constant, &target_tx->constant);
7020 __get_user(host_tx->precision, &target_tx->precision);
7021 __get_user(host_tx->tolerance, &target_tx->tolerance);
7022 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7023 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7024 __get_user(host_tx->tick, &target_tx->tick);
7025 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7026 __get_user(host_tx->jitter, &target_tx->jitter);
7027 __get_user(host_tx->shift, &target_tx->shift);
7028 __get_user(host_tx->stabil, &target_tx->stabil);
7029 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7030 __get_user(host_tx->calcnt, &target_tx->calcnt);
7031 __get_user(host_tx->errcnt, &target_tx->errcnt);
7032 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7033 __get_user(host_tx->tai, &target_tx->tai);
7035 unlock_user_struct(target_tx, target_addr, 0);
7039 static inline abi_long host_to_target_timex(abi_long target_addr,
7040 struct timex *host_tx)
7042 struct target_timex *target_tx;
7044 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7045 return -TARGET_EFAULT;
7048 __put_user(host_tx->modes, &target_tx->modes);
7049 __put_user(host_tx->offset, &target_tx->offset);
7050 __put_user(host_tx->freq, &target_tx->freq);
7051 __put_user(host_tx->maxerror, &target_tx->maxerror);
7052 __put_user(host_tx->esterror, &target_tx->esterror);
7053 __put_user(host_tx->status, &target_tx->status);
7054 __put_user(host_tx->constant, &target_tx->constant);
7055 __put_user(host_tx->precision, &target_tx->precision);
7056 __put_user(host_tx->tolerance, &target_tx->tolerance);
7057 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7058 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7059 __put_user(host_tx->tick, &target_tx->tick);
7060 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7061 __put_user(host_tx->jitter, &target_tx->jitter);
7062 __put_user(host_tx->shift, &target_tx->shift);
7063 __put_user(host_tx->stabil, &target_tx->stabil);
7064 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7065 __put_user(host_tx->calcnt, &target_tx->calcnt);
7066 __put_user(host_tx->errcnt, &target_tx->errcnt);
7067 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7068 __put_user(host_tx->tai, &target_tx->tai);
7070 unlock_user_struct(target_tx, target_addr, 1);
7075 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7076 abi_ulong target_addr)
7078 struct target_sigevent *target_sevp;
7080 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7081 return -TARGET_EFAULT;
7084 /* This union is awkward on 64 bit systems because it has a 32 bit
7085 * integer and a pointer in it; we follow the conversion approach
7086 * used for handling sigval types in signal.c so the guest should get
7087 * the correct value back even if we did a 64 bit byteswap and it's
7088 * using the 32 bit integer.
7090 host_sevp->sigev_value.sival_ptr =
7091 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7092 host_sevp->sigev_signo =
7093 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7094 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7095 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7097 unlock_user_struct(target_sevp, target_addr, 1);
7101 #if defined(TARGET_NR_mlockall)
7102 static inline int target_to_host_mlockall_arg(int arg)
7106 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
7107 result |= MCL_CURRENT;
7109 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
7110 result |= MCL_FUTURE;
7116 static inline abi_long host_to_target_stat64(void *cpu_env,
7117 abi_ulong target_addr,
7118 struct stat *host_st)
7120 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7121 if (((CPUARMState *)cpu_env)->eabi) {
7122 struct target_eabi_stat64 *target_st;
7124 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7125 return -TARGET_EFAULT;
7126 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7127 __put_user(host_st->st_dev, &target_st->st_dev);
7128 __put_user(host_st->st_ino, &target_st->st_ino);
7129 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7130 __put_user(host_st->st_ino, &target_st->__st_ino);
7132 __put_user(host_st->st_mode, &target_st->st_mode);
7133 __put_user(host_st->st_nlink, &target_st->st_nlink);
7134 __put_user(host_st->st_uid, &target_st->st_uid);
7135 __put_user(host_st->st_gid, &target_st->st_gid);
7136 __put_user(host_st->st_rdev, &target_st->st_rdev);
7137 __put_user(host_st->st_size, &target_st->st_size);
7138 __put_user(host_st->st_blksize, &target_st->st_blksize);
7139 __put_user(host_st->st_blocks, &target_st->st_blocks);
7140 __put_user(host_st->st_atime, &target_st->target_st_atime);
7141 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7142 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7143 unlock_user_struct(target_st, target_addr, 1);
7147 #if defined(TARGET_HAS_STRUCT_STAT64)
7148 struct target_stat64 *target_st;
7150 struct target_stat *target_st;
7153 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7154 return -TARGET_EFAULT;
7155 memset(target_st, 0, sizeof(*target_st));
7156 __put_user(host_st->st_dev, &target_st->st_dev);
7157 __put_user(host_st->st_ino, &target_st->st_ino);
7158 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7159 __put_user(host_st->st_ino, &target_st->__st_ino);
7161 __put_user(host_st->st_mode, &target_st->st_mode);
7162 __put_user(host_st->st_nlink, &target_st->st_nlink);
7163 __put_user(host_st->st_uid, &target_st->st_uid);
7164 __put_user(host_st->st_gid, &target_st->st_gid);
7165 __put_user(host_st->st_rdev, &target_st->st_rdev);
7166 /* XXX: better use of kernel struct */
7167 __put_user(host_st->st_size, &target_st->st_size);
7168 __put_user(host_st->st_blksize, &target_st->st_blksize);
7169 __put_user(host_st->st_blocks, &target_st->st_blocks);
7170 __put_user(host_st->st_atime, &target_st->target_st_atime);
7171 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7172 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7173 unlock_user_struct(target_st, target_addr, 1);
7179 /* ??? Using host futex calls even when target atomic operations
7180    are not really atomic probably breaks things. However, implementing
7181    futexes locally would make futexes shared between multiple processes
7182    tricky. In practice they're probably useless anyway, because guest
7183    atomic operations won't work either. */
7184 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7185 target_ulong uaddr2, int val3)
7187 struct timespec ts, *pts;
7190 /* ??? We assume FUTEX_* constants are the same on both host and target. */
7192 #ifdef FUTEX_CMD_MASK
7193 base_op = op & FUTEX_CMD_MASK;
7199 case FUTEX_WAIT_BITSET:
7202 target_to_host_timespec(pts, timeout);
7206 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7209 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7211 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7213 case FUTEX_CMP_REQUEUE:
7215 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7216 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7217 But the prototype takes a `struct timespec *'; insert casts
7218 to satisfy the compiler. We do not need to tswap TIMEOUT
7219 since it's not compared to guest memory. */
7220 pts = (struct timespec *)(uintptr_t) timeout;
7221 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7223 (base_op == FUTEX_CMP_REQUEUE
7227 return -TARGET_ENOSYS;
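/* Usage sketch for the translation above: a guest
 *
 *     futex(uaddr, FUTEX_WAIT, val, &ts)
 *
 * becomes safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, 0): the
 * guest virtual address is turned into a host pointer with g2h(), the
 * compare value is byte-swapped because the kernel compares it against
 * guest-written memory, and the timespec is converted from the target
 * layout. The FUTEX_WAKE side needs no tswap since val is a plain count.
 */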
7230 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7231 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7232 abi_long handle, abi_long mount_id,
7235 struct file_handle *target_fh;
7236 struct file_handle *fh;
7240 unsigned int size, total_size;
7242 if (get_user_s32(size, handle)) {
7243 return -TARGET_EFAULT;
7246 name = lock_user_string(pathname);
7248 return -TARGET_EFAULT;
7251 total_size = sizeof(struct file_handle) + size;
7252 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7254 unlock_user(name, pathname, 0);
7255 return -TARGET_EFAULT;
7258 fh = g_malloc0(total_size);
7259 fh->handle_bytes = size;
7261 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7262 unlock_user(name, pathname, 0);
7264 /* man name_to_handle_at(2):
7265 * Other than the use of the handle_bytes field, the caller should treat
7266 * the file_handle structure as an opaque data type
7269 memcpy(target_fh, fh, total_size);
7270 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7271 target_fh->handle_type = tswap32(fh->handle_type);
7273 unlock_user(target_fh, handle, total_size);
7275 if (put_user_s32(mid, mount_id)) {
7276 return -TARGET_EFAULT;
7284 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7285 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7288 struct file_handle *target_fh;
7289 struct file_handle *fh;
7290 unsigned int size, total_size;
7293 if (get_user_s32(size, handle)) {
7294 return -TARGET_EFAULT;
7297 total_size = sizeof(struct file_handle) + size;
7298 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7300 return -TARGET_EFAULT;
7303 fh = g_memdup(target_fh, total_size);
7304 fh->handle_bytes = size;
7305 fh->handle_type = tswap32(target_fh->handle_type);
7307 ret = get_errno(open_by_handle_at(mount_fd, fh,
7308 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7312 unlock_user(target_fh, handle, total_size);
7318 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7320 /* signalfd siginfo conversion */
7323 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
7324 const struct signalfd_siginfo *info)
7326 int sig = host_to_target_signal(info->ssi_signo);
7328 /* linux/signalfd.h defines an ssi_addr_lsb field that is
7329 * not defined in sys/signalfd.h but is used by some kernels
7332 #ifdef BUS_MCEERR_AO
7333 if (tinfo->ssi_signo == SIGBUS &&
7334 (tinfo->ssi_code == BUS_MCEERR_AR ||
7335 tinfo->ssi_code == BUS_MCEERR_AO)) {
7336 uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
7337 uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
7338 *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
7342 tinfo->ssi_signo = tswap32(sig);
7343 tinfo->ssi_errno = tswap32(info->ssi_errno);
7344 tinfo->ssi_code = tswap32(info->ssi_code);
7345 tinfo->ssi_pid = tswap32(info->ssi_pid);
7346 tinfo->ssi_uid = tswap32(info->ssi_uid);
7347 tinfo->ssi_fd = tswap32(info->ssi_fd);
7348 tinfo->ssi_tid = tswap32(info->ssi_tid);
7349 tinfo->ssi_band = tswap32(info->ssi_band);
7350 tinfo->ssi_overrun = tswap32(info->ssi_overrun);
7351 tinfo->ssi_trapno = tswap32(info->ssi_trapno);
7352 tinfo->ssi_status = tswap32(info->ssi_status);
7353 tinfo->ssi_int = tswap32(info->ssi_int);
7354 tinfo->ssi_ptr = tswap64(info->ssi_ptr);
7355 tinfo->ssi_utime = tswap64(info->ssi_utime);
7356 tinfo->ssi_stime = tswap64(info->ssi_stime);
7357 tinfo->ssi_addr = tswap64(info->ssi_addr);
7360 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7364 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7365 host_to_target_signalfd_siginfo(buf + i, buf + i);
7371 static TargetFdTrans target_signalfd_trans = {
7372 .host_to_target_data = host_to_target_data_signalfd,
7375 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7378 target_sigset_t *target_mask;
7382 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7383 return -TARGET_EINVAL;
7385 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7386 return -TARGET_EFAULT;
7389 target_to_host_sigset(&host_mask, target_mask);
7391 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7393 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7395 fd_trans_register(ret, &target_signalfd_trans);
7398 unlock_user_struct(target_mask, mask, 0);
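/* How the TargetFdTrans registration above takes effect, in sketch form:
 * the generic TARGET_NR_read path post-processes whatever the host
 * read() returned,
 *
 *     ret = get_errno(safe_read(fd, buf, len));
 *     if (ret > 0 && fd_trans_host_to_target_data(fd)) {
 *         ret = fd_trans_host_to_target_data(fd)(buf, ret);
 *     }
 *
 * so the guest sees signalfd_siginfo records in its own byte order
 * without the read path knowing anything about signalfd itself.
 */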
7404 /* Map host to target signal numbers for the wait family of syscalls.
7405 Assume all other status bits are the same. */
7406 int host_to_target_waitstatus(int status)
7408 if (WIFSIGNALED(status)) {
7409 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7411 if (WIFSTOPPED(status)) {
7412 return (host_to_target_signal(WSTOPSIG(status)) << 8)
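/* Bit layout being adjusted above (classic Linux wait status): the low
 * 7 bits hold the terminating signal, a low byte of 0x7f means "stopped"
 * with the stop signal in bits 8-15, and a normal exit code also lives
 * in bits 8-15. So, for example, a child killed by host SIGUSR1 (10 on
 * x86) must have those 7 bits rewritten to the target number (30 on an
 * Alpha guest) while the core-dump bit 0x80 and the exit-code byte pass
 * through untouched.
 */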
7418 static int open_self_cmdline(void *cpu_env, int fd)
7420 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7421 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7424 for (i = 0; i < bprm->argc; i++) {
7425 size_t len = strlen(bprm->argv[i]) + 1;
7427 if (write(fd, bprm->argv[i], len) != len) {
7435 static int open_self_maps(void *cpu_env, int fd)
7437 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7438 TaskState *ts = cpu->opaque;
7444 fp = fopen("/proc/self/maps", "r");
7449 while ((read = getline(&line, &len, fp)) != -1) {
7450 int fields, dev_maj, dev_min, inode;
7451 uint64_t min, max, offset;
7452 char flag_r, flag_w, flag_x, flag_p;
7453 char path[512] = "";
7454 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7455 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7456 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7458 if ((fields < 10) || (fields > 11)) {
7461 if (h2g_valid(min)) {
7462 int flags = page_get_flags(h2g(min));
7463 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
7464 if (page_check_range(h2g(min), max - min, flags) == -1) {
7467 if (h2g(min) == ts->info->stack_limit) {
7468 pstrcpy(path, sizeof(path), " [stack]");
7470 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7471 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7472 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7473 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7474 path[0] ? " " : "", path);
7484 static int open_self_stat(void *cpu_env, int fd)
7486 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7487 TaskState *ts = cpu->opaque;
7488 abi_ulong start_stack = ts->info->start_stack;
7491 for (i = 0; i < 44; i++) {
7499 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7500 } else if (i == 1) {
7502 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7503 } else if (i == 27) {
7506 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7508 /* for the rest, there is MasterCard */
7509 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7513 if (write(fd, buf, len) != len) {
7521 static int open_self_auxv(void *cpu_env, int fd)
7523 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7524 TaskState *ts = cpu->opaque;
7525 abi_ulong auxv = ts->info->saved_auxv;
7526 abi_ulong len = ts->info->auxv_len;
7530 * The auxiliary vector is stored on the target process stack.
7531 * Read in the whole auxv vector and copy it to the file.
7533 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7537 r = write(fd, ptr, len);
7544 lseek(fd, 0, SEEK_SET);
7545 unlock_user(ptr, auxv, len);
7551 static int is_proc_myself(const char *filename, const char *entry)
7553 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7554 filename += strlen("/proc/");
7555 if (!strncmp(filename, "self/", strlen("self/"))) {
7556 filename += strlen("self/");
7557 } else if (*filename >= '1' && *filename <= '9') {
7559 snprintf(myself, sizeof(myself), "%d/", getpid());
7560 if (!strncmp(filename, myself, strlen(myself))) {
7561 filename += strlen(myself);
7568 if (!strcmp(filename, entry)) {
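/* Examples of what the matcher above accepts, assuming our pid is 4242:
 *
 *     is_proc_myself("/proc/self/maps", "maps")    -> true
 *     is_proc_myself("/proc/4242/maps", "maps")    -> true
 *     is_proc_myself("/proc/4243/maps", "maps")    -> false (other pid)
 *     is_proc_myself("/proc/self/status", "maps")  -> false
 *
 * i.e. only the emulated process's own /proc entries are intercepted;
 * everything else falls through to the real host filesystem.
 */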
7575 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7576 static int is_proc(const char *filename, const char *entry)
7578 return strcmp(filename, entry) == 0;
7581 static int open_net_route(void *cpu_env, int fd)
7588 fp = fopen("/proc/net/route", "r");
7595 read = getline(&line, &len, fp);
7596 dprintf(fd, "%s", line);
7600 while ((read = getline(&line, &len, fp)) != -1) {
7602 uint32_t dest, gw, mask;
7603 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7604 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7605 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7606 &mask, &mtu, &window, &irtt);
7607 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7608 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7609 metric, tswap32(mask), mtu, window, irtt);
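/* Why the tswap32() calls above exist: /proc/net/route prints addresses
 * as raw hex dumps of in_addr values in *host* byte order, and this code
 * is only compiled when host and guest endianness differ. A gateway of
 * 192.168.1.1 that the host prints as 0101A8C0 must therefore be
 * re-emitted as C0A80101 so the guest parses the same address back.
 */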
7619 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7622 const char *filename;
7623 int (*fill)(void *cpu_env, int fd);
7624 int (*cmp)(const char *s1, const char *s2);
7626 const struct fake_open *fake_open;
7627 static const struct fake_open fakes[] = {
7628 { "maps", open_self_maps, is_proc_myself },
7629 { "stat", open_self_stat, is_proc_myself },
7630 { "auxv", open_self_auxv, is_proc_myself },
7631 { "cmdline", open_self_cmdline, is_proc_myself },
7632 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7633 { "/proc/net/route", open_net_route, is_proc },
7635 { NULL, NULL, NULL }
7638 if (is_proc_myself(pathname, "exe")) {
7639 int execfd = qemu_getauxval(AT_EXECFD);
7640 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7643 for (fake_open = fakes; fake_open->filename; fake_open++) {
7644 if (fake_open->cmp(pathname, fake_open->filename)) {
7649 if (fake_open->filename) {
7651 char filename[PATH_MAX];
7654 /* create temporary file to map stat to */
7655 tmpdir = getenv("TMPDIR");
7658 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7659 fd = mkstemp(filename);
7665 if ((r = fake_open->fill(cpu_env, fd))) {
7671 lseek(fd, 0, SEEK_SET);
7676 return safe_openat(dirfd, path(pathname), flags, mode);
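/* End-to-end flow for a faked entry, as a sketch: a guest open of
 * "/proc/self/maps" matches the fakes[] table, so QEMU mkstemp()s a
 * scratch file, unlinks it, runs open_self_maps() to fill it with
 * addresses translated into the guest's view (h2g), rewinds it and
 * returns that fd. The guest then read()s perfectly ordinary file
 * contents and never notices that the host's own maps were different.
 */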
7679 #define TIMER_MAGIC 0x0caf0000
7680 #define TIMER_MAGIC_MASK 0xffff0000
7682 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7683 static target_timer_t get_timer_id(abi_long arg)
7685 target_timer_t timerid = arg;
7687 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7688 return -TARGET_EINVAL;
7693 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7694 return -TARGET_EINVAL;
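/* Encode/decode sketch for the timer-id cookie checked above: on
 * creation a timer's slot index is tagged so stale or foreign ids can be
 * rejected later,
 *
 *     guest_id = TIMER_MAGIC | index;           // e.g. 0x0caf0003
 *     index    = guest_id & ~TIMER_MAGIC_MASK;  // back to slot 3
 *
 * Anything whose top half is not 0x0caf fails the mask test and returns
 * -TARGET_EINVAL before g_posix_timers[] is ever indexed.
 */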
7700 static abi_long swap_data_eventfd(void *buf, size_t len)
7702 uint64_t *counter = buf;
7705 if (len < sizeof(uint64_t)) {
7709 for (i = 0; i < len; i += sizeof(uint64_t)) {
7710 *counter = tswap64(*counter);
7717 static TargetFdTrans target_eventfd_trans = {
7718 .host_to_target_data = swap_data_eventfd,
7719 .target_to_host_data = swap_data_eventfd,
7722 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7723 (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7724 defined(__NR_inotify_init1))
7725 static abi_long host_to_target_data_inotify(void *buf, size_t len)
7727 struct inotify_event *ev;
7731 for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
7732 ev = (struct inotify_event *)((char *)buf + i);
7735 ev->wd = tswap32(ev->wd);
7736 ev->mask = tswap32(ev->mask);
7737 ev->cookie = tswap32(ev->cookie);
7738 ev->len = tswap32(name_len);
7744 static TargetFdTrans target_inotify_trans = {
7745 .host_to_target_data = host_to_target_data_inotify,
7749 static int target_to_host_cpu_mask(unsigned long *host_mask,
7751 abi_ulong target_addr,
7754 unsigned target_bits = sizeof(abi_ulong) * 8;
7755 unsigned host_bits = sizeof(*host_mask) * 8;
7756 abi_ulong *target_mask;
7759 assert(host_size >= target_size);
7761 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7763 return -TARGET_EFAULT;
7765 memset(host_mask, 0, host_size);
7767 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7768 unsigned bit = i * target_bits;
7771 __get_user(val, &target_mask[i]);
7772 for (j = 0; j < target_bits; j++, bit++) {
7773 if (val & (1UL << j)) {
7774 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7779 unlock_user(target_mask, target_addr, 0);
7783 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7785 abi_ulong target_addr,
7788 unsigned target_bits = sizeof(abi_ulong) * 8;
7789 unsigned host_bits = sizeof(*host_mask) * 8;
7790 abi_ulong *target_mask;
7793 assert(host_size >= target_size);
7795 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7797 return -TARGET_EFAULT;
7800 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7801 unsigned bit = i * target_bits;
7804 for (j = 0; j < target_bits; j++, bit++) {
7805 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7809 __put_user(val, &target_mask[i]);
7812 unlock_user(target_mask, target_addr, target_size);
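/* Worked example for the conversions above: a 32-bit guest whose single
 * abi_ulong mask word is 0x5 (CPUs 0 and 2) on a 64-bit host expands to
 * host_mask[0] == 0x5 as well, but via the general rule: bit j of target
 * word i maps to global bit (i * 32 + j), which lands in host word
 * bit / 64 at position bit % 64. The reverse direction walks the same
 * indices and repacks, so nothing is lost while host_size >= target_size.
 */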
7816 /* do_syscall() should always have a single exit point at the end so
7817 that actions, such as logging of syscall results, can be performed.
7818 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7819 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7820 abi_long arg2, abi_long arg3, abi_long arg4,
7821 abi_long arg5, abi_long arg6, abi_long arg7,
7824 CPUState *cpu = ENV_GET_CPU(cpu_env);
7830 #if defined(DEBUG_ERESTARTSYS)
7831 /* Debug-only code for exercising the syscall-restart code paths
7832 * in the per-architecture cpu main loops: restart every syscall
7833 * the guest makes once before letting it through.
7840 return -TARGET_ERESTARTSYS;
7846 gemu_log("syscall %d", num);
7848 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7850 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7853 case TARGET_NR_exit:
7854 /* In old applications this may be used to implement _exit(2).
7855 However, in threaded applications it is used for thread termination,
7856 and _exit_group is used for application termination.
7857 Do thread termination if we have more than one thread. */
7859 if (block_signals()) {
7860 ret = -TARGET_ERESTARTSYS;
7866 if (CPU_NEXT(first_cpu)) {
7869 /* Remove the CPU from the list. */
7870 QTAILQ_REMOVE(&cpus, cpu, node);
7875 if (ts->child_tidptr) {
7876 put_user_u32(0, ts->child_tidptr);
7877 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7881 object_unref(OBJECT(cpu));
7883 rcu_unregister_thread();
7891 gdb_exit(cpu_env, arg1);
7893 ret = 0; /* avoid warning */
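/* What the child_tidptr handling above emulates, in sketch form: the
 * guest's pthread library has earlier done
 *
 *     set_tid_address(&tid);   // or clone(... CLONE_CHILD_CLEARTID ...)
 *
 * and a joining thread blocks in futex(&tid, FUTEX_WAIT, ...). A real
 * kernel clears that word and futex-wakes it when the thread dies; the
 * host kernel knows nothing about the guest address here, so QEMU
 * performs the put_user_u32(0, ...) and FUTEX_WAKE by hand before
 * tearing the CPU down.
 */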
7895 case TARGET_NR_read:
7899 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7901 ret = get_errno(safe_read(arg1, p, arg3));
7903 fd_trans_host_to_target_data(arg1)) {
7904 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7906 unlock_user(p, arg2, ret);
7909 case TARGET_NR_write:
7910 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7912 if (fd_trans_target_to_host_data(arg1)) {
7913 void *copy = g_malloc(arg3);
7914 memcpy(copy, p, arg3);
7915 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7917 ret = get_errno(safe_write(arg1, copy, ret));
7921 ret = get_errno(safe_write(arg1, p, arg3));
7923 unlock_user(p, arg2, 0);
7925 #ifdef TARGET_NR_open
7926 case TARGET_NR_open:
7927 if (!(p = lock_user_string(arg1)))
7929 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7930 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7932 fd_trans_unregister(ret);
7933 unlock_user(p, arg1, 0);
7936 case TARGET_NR_openat:
7937 if (!(p = lock_user_string(arg2)))
7939 ret = get_errno(do_openat(cpu_env, arg1, p,
7940 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7942 fd_trans_unregister(ret);
7943 unlock_user(p, arg2, 0);
7945 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7946 case TARGET_NR_name_to_handle_at:
7947 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7950 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7951 case TARGET_NR_open_by_handle_at:
7952 ret = do_open_by_handle_at(arg1, arg2, arg3);
7953 fd_trans_unregister(ret);
7956 case TARGET_NR_close:
7957 fd_trans_unregister(arg1);
7958 ret = get_errno(close(arg1));
7963 #ifdef TARGET_NR_fork
7964 case TARGET_NR_fork:
7965 ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7968 #ifdef TARGET_NR_waitpid
7969 case TARGET_NR_waitpid:
7972 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7973 if (!is_error(ret) && arg2 && ret
7974 && put_user_s32(host_to_target_waitstatus(status), arg2))
7979 #ifdef TARGET_NR_waitid
7980 case TARGET_NR_waitid:
7984 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7985 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7986 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7988 host_to_target_siginfo(p, &info);
7989 unlock_user(p, arg3, sizeof(target_siginfo_t));
7994 #ifdef TARGET_NR_creat /* not on alpha */
7995 case TARGET_NR_creat:
7996 if (!(p = lock_user_string(arg1)))
7998 ret = get_errno(creat(p, arg2));
7999 fd_trans_unregister(ret);
8000 unlock_user(p, arg1, 0);
8003 #ifdef TARGET_NR_link
8004 case TARGET_NR_link:
8007 p = lock_user_string(arg1);
8008 p2 = lock_user_string(arg2);
8010 ret = -TARGET_EFAULT;
8012 ret = get_errno(link(p, p2));
8013 unlock_user(p2, arg2, 0);
8014 unlock_user(p, arg1, 0);
8018 #if defined(TARGET_NR_linkat)
8019 case TARGET_NR_linkat:
8024 p = lock_user_string(arg2);
8025 p2 = lock_user_string(arg4);
8027 ret = -TARGET_EFAULT;
8029 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8030 unlock_user(p, arg2, 0);
8031 unlock_user(p2, arg4, 0);
8035 #ifdef TARGET_NR_unlink
8036 case TARGET_NR_unlink:
8037 if (!(p = lock_user_string(arg1)))
8039 ret = get_errno(unlink(p));
8040 unlock_user(p, arg1, 0);
8043 #if defined(TARGET_NR_unlinkat)
8044 case TARGET_NR_unlinkat:
8045 if (!(p = lock_user_string(arg2)))
8047 ret = get_errno(unlinkat(arg1, p, arg3));
8048 unlock_user(p, arg2, 0);
8051 case TARGET_NR_execve:
8053 char **argp, **envp;
8056 abi_ulong guest_argp;
8057 abi_ulong guest_envp;
8064 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8065 if (get_user_ual(addr, gp))
8073 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8074 if (get_user_ual(addr, gp))
8081 argp = g_new0(char *, argc + 1);
8082 envp = g_new0(char *, envc + 1);
8084 for (gp = guest_argp, q = argp; gp;
8085 gp += sizeof(abi_ulong), q++) {
8086 if (get_user_ual(addr, gp))
8090 if (!(*q = lock_user_string(addr)))
8092 total_size += strlen(*q) + 1;
8096 for (gp = guest_envp, q = envp; gp;
8097 gp += sizeof(abi_ulong), q++) {
8098 if (get_user_ual(addr, gp))
8102 if (!(*q = lock_user_string(addr)))
8104 total_size += strlen(*q) + 1;
8108 if (!(p = lock_user_string(arg1)))
8110 /* Although execve() is not an interruptible syscall it is
8111 * a special case where we must use the safe_syscall wrapper:
8112 * if we allow a signal to happen before we make the host
8113 * syscall then we will 'lose' it, because at the point of
8114 * execve the process leaves QEMU's control. So we use the
8115 * safe syscall wrapper to ensure that we either take the
8116 * signal as a guest signal, or else it does not happen
8117 * before the execve completes and makes it the other
8118 * program's problem.
8120 ret = get_errno(safe_execve(p, argp, envp));
8121 unlock_user(p, arg1, 0);
8126 ret = -TARGET_EFAULT;
8129 for (gp = guest_argp, q = argp; *q;
8130 gp += sizeof(abi_ulong), q++) {
8131 if (get_user_ual(addr, gp)
8134 unlock_user(*q, addr, 0);
8136 for (gp = guest_envp, q = envp; *q;
8137 gp += sizeof(abi_ulong), q++) {
8138 if (get_user_ual(addr, gp)
8141 unlock_user(*q, addr, 0);
8148 case TARGET_NR_chdir:
8149 if (!(p = lock_user_string(arg1)))
8151 ret = get_errno(chdir(p));
8152 unlock_user(p, arg1, 0);
8154 #ifdef TARGET_NR_time
8155 case TARGET_NR_time:
8158 ret = get_errno(time(&host_time));
8161 && put_user_sal(host_time, arg1))
8166 #ifdef TARGET_NR_mknod
8167 case TARGET_NR_mknod:
8168 if (!(p = lock_user_string(arg1)))
8170 ret = get_errno(mknod(p, arg2, arg3));
8171 unlock_user(p, arg1, 0);
8174 #if defined(TARGET_NR_mknodat)
8175 case TARGET_NR_mknodat:
8176 if (!(p = lock_user_string(arg2)))
8178 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8179 unlock_user(p, arg2, 0);
8182 #ifdef TARGET_NR_chmod
8183 case TARGET_NR_chmod:
8184 if (!(p = lock_user_string(arg1)))
8186 ret = get_errno(chmod(p, arg2));
8187 unlock_user(p, arg1, 0);
8190 #ifdef TARGET_NR_break
8191 case TARGET_NR_break:
8194 #ifdef TARGET_NR_oldstat
8195 case TARGET_NR_oldstat:
8198 case TARGET_NR_lseek:
8199 ret = get_errno(lseek(arg1, arg2, arg3));
8201 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8202 /* Alpha specific */
8203 case TARGET_NR_getxpid:
8204 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8205 ret = get_errno(getpid());
8208 #ifdef TARGET_NR_getpid
8209 case TARGET_NR_getpid:
8210 ret = get_errno(getpid());
8213 case TARGET_NR_mount:
8215 /* need to look at the data field */
8219 p = lock_user_string(arg1);
8227 p2 = lock_user_string(arg2);
8230 unlock_user(p, arg1, 0);
8236 p3 = lock_user_string(arg3);
8239 unlock_user(p, arg1, 0);
8241 unlock_user(p2, arg2, 0);
8248 /* FIXME - arg5 should be locked, but it isn't clear how to
8249 * do that since it's not guaranteed to be a NULL-terminated string. */
8253 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8255 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8257 ret = get_errno(ret);
8260 unlock_user(p, arg1, 0);
8262 unlock_user(p2, arg2, 0);
8264 unlock_user(p3, arg3, 0);
8268 #ifdef TARGET_NR_umount
8269 case TARGET_NR_umount:
8270 if (!(p = lock_user_string(arg1)))
8272 ret = get_errno(umount(p));
8273 unlock_user(p, arg1, 0);
8276 #ifdef TARGET_NR_stime /* not on alpha */
8277 case TARGET_NR_stime:
8280 if (get_user_sal(host_time, arg1))
8282 ret = get_errno(stime(&host_time));
8286 case TARGET_NR_ptrace:
8288 #ifdef TARGET_NR_alarm /* not on alpha */
8289 case TARGET_NR_alarm:
8293 #ifdef TARGET_NR_oldfstat
8294 case TARGET_NR_oldfstat:
8297 #ifdef TARGET_NR_pause /* not on alpha */
8298 case TARGET_NR_pause:
8299 if (!block_signals()) {
8300 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8302 ret = -TARGET_EINTR;
8305 #ifdef TARGET_NR_utime
8306 case TARGET_NR_utime:
8308 struct utimbuf tbuf, *host_tbuf;
8309 struct target_utimbuf *target_tbuf;
8311 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8313 tbuf.actime = tswapal(target_tbuf->actime);
8314 tbuf.modtime = tswapal(target_tbuf->modtime);
8315 unlock_user_struct(target_tbuf, arg2, 0);
8320 if (!(p = lock_user_string(arg1)))
8322 ret = get_errno(utime(p, host_tbuf));
8323 unlock_user(p, arg1, 0);
8327 #ifdef TARGET_NR_utimes
8328 case TARGET_NR_utimes:
8330 struct timeval *tvp, tv[2];
8332 if (copy_from_user_timeval(&tv[0], arg2)
8333 || copy_from_user_timeval(&tv[1],
8334 arg2 + sizeof(struct target_timeval)))
8340 if (!(p = lock_user_string(arg1)))
8342 ret = get_errno(utimes(p, tvp));
8343 unlock_user(p, arg1, 0);
8347 #if defined(TARGET_NR_futimesat)
8348 case TARGET_NR_futimesat:
8350 struct timeval *tvp, tv[2];
8352 if (copy_from_user_timeval(&tv[0], arg3)
8353 || copy_from_user_timeval(&tv[1],
8354 arg3 + sizeof(struct target_timeval)))
8360 if (!(p = lock_user_string(arg2)))
8362 ret = get_errno(futimesat(arg1, path(p), tvp));
8363 unlock_user(p, arg2, 0);
8367 #ifdef TARGET_NR_stty
8368 case TARGET_NR_stty:
8371 #ifdef TARGET_NR_gtty
8372 case TARGET_NR_gtty:
8375 #ifdef TARGET_NR_access
8376 case TARGET_NR_access:
8377 if (!(p = lock_user_string(arg1)))
8379 ret = get_errno(access(path(p), arg2));
8380 unlock_user(p, arg1, 0);
8383 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8384 case TARGET_NR_faccessat:
8385 if (!(p = lock_user_string(arg2)))
8387 ret = get_errno(faccessat(arg1, p, arg3, 0));
8388 unlock_user(p, arg2, 0);
8391 #ifdef TARGET_NR_nice /* not on alpha */
8392 case TARGET_NR_nice:
8393 ret = get_errno(nice(arg1));
8396 #ifdef TARGET_NR_ftime
8397 case TARGET_NR_ftime:
8400 case TARGET_NR_sync:
8404 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8405 case TARGET_NR_syncfs:
8406 ret = get_errno(syncfs(arg1));
8409 case TARGET_NR_kill:
8410 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8412 #ifdef TARGET_NR_rename
8413 case TARGET_NR_rename:
8416 p = lock_user_string(arg1);
8417 p2 = lock_user_string(arg2);
8419 ret = -TARGET_EFAULT;
8421 ret = get_errno(rename(p, p2));
8422 unlock_user(p2, arg2, 0);
8423 unlock_user(p, arg1, 0);
8427 #if defined(TARGET_NR_renameat)
8428 case TARGET_NR_renameat:
8431 p = lock_user_string(arg2);
8432 p2 = lock_user_string(arg4);
8434 ret = -TARGET_EFAULT;
8436 ret = get_errno(renameat(arg1, p, arg3, p2));
8437 unlock_user(p2, arg4, 0);
8438 unlock_user(p, arg2, 0);
8442 #if defined(TARGET_NR_renameat2)
8443 case TARGET_NR_renameat2:
8446 p = lock_user_string(arg2);
8447 p2 = lock_user_string(arg4);
8449 ret = -TARGET_EFAULT;
8451 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8453 unlock_user(p2, arg4, 0);
8454 unlock_user(p, arg2, 0);
8458 #ifdef TARGET_NR_mkdir
8459 case TARGET_NR_mkdir:
8460 if (!(p = lock_user_string(arg1)))
8462 ret = get_errno(mkdir(p, arg2));
8463 unlock_user(p, arg1, 0);
8466 #if defined(TARGET_NR_mkdirat)
8467 case TARGET_NR_mkdirat:
8468 if (!(p = lock_user_string(arg2)))
8470 ret = get_errno(mkdirat(arg1, p, arg3));
8471 unlock_user(p, arg2, 0);
8474 #ifdef TARGET_NR_rmdir
8475 case TARGET_NR_rmdir:
8476 if (!(p = lock_user_string(arg1)))
8478 ret = get_errno(rmdir(p));
8479 unlock_user(p, arg1, 0);
8483 ret = get_errno(dup(arg1));
8485 fd_trans_dup(arg1, ret);
8488 #ifdef TARGET_NR_pipe
8489 case TARGET_NR_pipe:
8490 ret = do_pipe(cpu_env, arg1, 0, 0);
8493 #ifdef TARGET_NR_pipe2
8494 case TARGET_NR_pipe2:
8495 ret = do_pipe(cpu_env, arg1,
8496 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8499 case TARGET_NR_times:
8501 struct target_tms *tmsp;
8503 ret = get_errno(times(&tms));
8505 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8508 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8509 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8510 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8511 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8514 ret = host_to_target_clock_t(ret);
8517 #ifdef TARGET_NR_prof
8518 case TARGET_NR_prof:
8521 #ifdef TARGET_NR_signal
8522 case TARGET_NR_signal:
8525 case TARGET_NR_acct:
8527 ret = get_errno(acct(NULL));
8529 if (!(p = lock_user_string(arg1)))
8531 ret = get_errno(acct(path(p)));
8532 unlock_user(p, arg1, 0);
8535 #ifdef TARGET_NR_umount2
8536 case TARGET_NR_umount2:
8537 if (!(p = lock_user_string(arg1)))
8539 ret = get_errno(umount2(p, arg2));
8540 unlock_user(p, arg1, 0);
8543 #ifdef TARGET_NR_lock
8544 case TARGET_NR_lock:
8547 case TARGET_NR_ioctl:
8548 ret = do_ioctl(arg1, arg2, arg3);
8550 case TARGET_NR_fcntl:
8551 ret = do_fcntl(arg1, arg2, arg3);
8553 #ifdef TARGET_NR_mpx
8557 case TARGET_NR_setpgid:
8558 ret = get_errno(setpgid(arg1, arg2));
8560 #ifdef TARGET_NR_ulimit
8561 case TARGET_NR_ulimit:
8564 #ifdef TARGET_NR_oldolduname
8565 case TARGET_NR_oldolduname:
8568 case TARGET_NR_umask:
8569 ret = get_errno(umask(arg1));
8571 case TARGET_NR_chroot:
8572 if (!(p = lock_user_string(arg1)))
8574 ret = get_errno(chroot(p));
8575 unlock_user(p, arg1, 0);
8577 #ifdef TARGET_NR_ustat
8578 case TARGET_NR_ustat:
8581 #ifdef TARGET_NR_dup2
8582 case TARGET_NR_dup2:
8583 ret = get_errno(dup2(arg1, arg2));
8585 fd_trans_dup(arg1, arg2);
8589 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8590 case TARGET_NR_dup3:
8594 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8597 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8598 ret = get_errno(dup3(arg1, arg2, host_flags));
8600 fd_trans_dup(arg1, arg2);
8605 #ifdef TARGET_NR_getppid /* not on alpha */
8606 case TARGET_NR_getppid:
8607 ret = get_errno(getppid());
8610 #ifdef TARGET_NR_getpgrp
8611 case TARGET_NR_getpgrp:
8612 ret = get_errno(getpgrp());
8615 case TARGET_NR_setsid:
8616 ret = get_errno(setsid());
8618 #ifdef TARGET_NR_sigaction
8619 case TARGET_NR_sigaction:
8621 #if defined(TARGET_ALPHA)
8622 struct target_sigaction act, oact, *pact = 0;
8623 struct target_old_sigaction *old_act;
8625 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8627 act._sa_handler = old_act->_sa_handler;
8628 target_siginitset(&act.sa_mask, old_act->sa_mask);
8629 act.sa_flags = old_act->sa_flags;
8630 act.sa_restorer = 0;
8631 unlock_user_struct(old_act, arg2, 0);
8634 ret = get_errno(do_sigaction(arg1, pact, &oact));
8635 if (!is_error(ret) && arg3) {
8636 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8638 old_act->_sa_handler = oact._sa_handler;
8639 old_act->sa_mask = oact.sa_mask.sig[0];
8640 old_act->sa_flags = oact.sa_flags;
8641 unlock_user_struct(old_act, arg3, 1);
8643 #elif defined(TARGET_MIPS)
8644 struct target_sigaction act, oact, *pact, *old_act;
8647 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8649 act._sa_handler = old_act->_sa_handler;
8650 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8651 act.sa_flags = old_act->sa_flags;
8652 unlock_user_struct(old_act, arg2, 0);
8658 ret = get_errno(do_sigaction(arg1, pact, &oact));
8660 if (!is_error(ret) && arg3) {
8661 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8663 old_act->_sa_handler = oact._sa_handler;
8664 old_act->sa_flags = oact.sa_flags;
8665 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8666 old_act->sa_mask.sig[1] = 0;
8667 old_act->sa_mask.sig[2] = 0;
8668 old_act->sa_mask.sig[3] = 0;
8669 unlock_user_struct(old_act, arg3, 1);
8672 struct target_old_sigaction *old_act;
8673 struct target_sigaction act, oact, *pact;
8675 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8677 act._sa_handler = old_act->_sa_handler;
8678 target_siginitset(&act.sa_mask, old_act->sa_mask);
8679 act.sa_flags = old_act->sa_flags;
8680 act.sa_restorer = old_act->sa_restorer;
8681 unlock_user_struct(old_act, arg2, 0);
8686 ret = get_errno(do_sigaction(arg1, pact, &oact));
8687 if (!is_error(ret) && arg3) {
8688 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8690 old_act->_sa_handler = oact._sa_handler;
8691 old_act->sa_mask = oact.sa_mask.sig[0];
8692 old_act->sa_flags = oact.sa_flags;
8693 old_act->sa_restorer = oact.sa_restorer;
8694 unlock_user_struct(old_act, arg3, 1);
8700 case TARGET_NR_rt_sigaction:
8702 #if defined(TARGET_ALPHA)
8703 /* For Alpha and SPARC this is a 5 argument syscall, with
8704 * a 'restorer' parameter which must be copied into the
8705 * sa_restorer field of the sigaction struct.
8706 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8707 * and arg5 is the sigsetsize.
8708 * Alpha also has a separate rt_sigaction struct that it uses
8709 * here; SPARC uses the usual sigaction struct.
8711 struct target_rt_sigaction *rt_act;
8712 struct target_sigaction act, oact, *pact = 0;
8714 if (arg4 != sizeof(target_sigset_t)) {
8715 ret = -TARGET_EINVAL;
8719 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8721 act._sa_handler = rt_act->_sa_handler;
8722 act.sa_mask = rt_act->sa_mask;
8723 act.sa_flags = rt_act->sa_flags;
8724 act.sa_restorer = arg5;
8725 unlock_user_struct(rt_act, arg2, 0);
8728 ret = get_errno(do_sigaction(arg1, pact, &oact));
8729 if (!is_error(ret) && arg3) {
8730 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8732 rt_act->_sa_handler = oact._sa_handler;
8733 rt_act->sa_mask = oact.sa_mask;
8734 rt_act->sa_flags = oact.sa_flags;
8735 unlock_user_struct(rt_act, arg3, 1);
8739 target_ulong restorer = arg4;
8740 target_ulong sigsetsize = arg5;
8742 target_ulong sigsetsize = arg4;
8744 struct target_sigaction *act;
8745 struct target_sigaction *oact;
8747 if (sigsetsize != sizeof(target_sigset_t)) {
8748 ret = -TARGET_EINVAL;
8752 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8756 act->sa_restorer = restorer;
8762 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8763 ret = -TARGET_EFAULT;
8764 goto rt_sigaction_fail;
8768 ret = get_errno(do_sigaction(arg1, act, oact));
8771 unlock_user_struct(act, arg2, 0);
8773 unlock_user_struct(oact, arg3, 1);
8777 #ifdef TARGET_NR_sgetmask /* not on alpha */
8778 case TARGET_NR_sgetmask:
8781 abi_ulong target_set;
8782 ret = do_sigprocmask(0, NULL, &cur_set);
8784 host_to_target_old_sigset(&target_set, &cur_set);
8790 #ifdef TARGET_NR_ssetmask /* not on alpha */
8791 case TARGET_NR_ssetmask:
8794 abi_ulong target_set = arg1;
8795 target_to_host_old_sigset(&set, &target_set);
8796 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8798 host_to_target_old_sigset(&target_set, &oset);
8804 #ifdef TARGET_NR_sigprocmask
8805 case TARGET_NR_sigprocmask:
8807 #if defined(TARGET_ALPHA)
8808 sigset_t set, oldset;
8813 case TARGET_SIG_BLOCK:
8816 case TARGET_SIG_UNBLOCK:
8819 case TARGET_SIG_SETMASK:
8823 ret = -TARGET_EINVAL;
8827 target_to_host_old_sigset(&set, &mask);
8829 ret = do_sigprocmask(how, &set, &oldset);
8830 if (!is_error(ret)) {
8831 host_to_target_old_sigset(&mask, &oldset);
8833 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8836 sigset_t set, oldset, *set_ptr;
8841 case TARGET_SIG_BLOCK:
8844 case TARGET_SIG_UNBLOCK:
8847 case TARGET_SIG_SETMASK:
8851 ret = -TARGET_EINVAL;
8854 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8856 target_to_host_old_sigset(&set, p);
8857 unlock_user(p, arg2, 0);
8863 ret = do_sigprocmask(how, set_ptr, &oldset);
8864 if (!is_error(ret) && arg3) {
8865 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8867 host_to_target_old_sigset(p, &oldset);
8868 unlock_user(p, arg3, sizeof(target_sigset_t));
8874 case TARGET_NR_rt_sigprocmask:
8877 sigset_t set, oldset, *set_ptr;
8879 if (arg4 != sizeof(target_sigset_t)) {
8880 ret = -TARGET_EINVAL;
8886 case TARGET_SIG_BLOCK:
8889 case TARGET_SIG_UNBLOCK:
8892 case TARGET_SIG_SETMASK:
8896 ret = -TARGET_EINVAL;
8899 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8901 target_to_host_sigset(&set, p);
8902 unlock_user(p, arg2, 0);
8908 ret = do_sigprocmask(how, set_ptr, &oldset);
8909 if (!is_error(ret) && arg3) {
8910 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8912 host_to_target_sigset(p, &oldset);
8913 unlock_user(p, arg3, sizeof(target_sigset_t));
8917 #ifdef TARGET_NR_sigpending
8918 case TARGET_NR_sigpending:
8921 ret = get_errno(sigpending(&set));
8922 if (!is_error(ret)) {
8923 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8925 host_to_target_old_sigset(p, &set);
8926 unlock_user(p, arg1, sizeof(target_sigset_t));
8931 case TARGET_NR_rt_sigpending:
8935 /* Yes, this check is >, not != like most. We follow the kernel's
8936 * logic and it does it like this because it implements
8937 * NR_sigpending through the same code path, and in that case
8938 * the old_sigset_t is smaller in size.
8940 if (arg2 > sizeof(target_sigset_t)) {
8941 ret = -TARGET_EINVAL;
8945 ret = get_errno(sigpending(&set));
8946 if (!is_error(ret)) {
8947 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8949 host_to_target_sigset(p, &set);
8950 unlock_user(p, arg1, sizeof(target_sigset_t));
8954 #ifdef TARGET_NR_sigsuspend
8955 case TARGET_NR_sigsuspend:
8957 TaskState *ts = cpu->opaque;
8958 #if defined(TARGET_ALPHA)
8959 abi_ulong mask = arg1;
8960 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8962 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8964 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8965 unlock_user(p, arg1, 0);
8967 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8969 if (ret != -TARGET_ERESTARTSYS) {
8970 ts->in_sigsuspend = 1;
8975 case TARGET_NR_rt_sigsuspend:
8977 TaskState *ts = cpu->opaque;
8979 if (arg2 != sizeof(target_sigset_t)) {
8980 ret = -TARGET_EINVAL;
8983 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8985 target_to_host_sigset(&ts->sigsuspend_mask, p);
8986 unlock_user(p, arg1, 0);
8987 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8989 if (ret != -TARGET_ERESTARTSYS) {
8990 ts->in_sigsuspend = 1;
8994 case TARGET_NR_rt_sigtimedwait:
8997 struct timespec uts, *puts;
9000 if (arg4 != sizeof(target_sigset_t)) {
9001 ret = -TARGET_EINVAL;
9005 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9007 target_to_host_sigset(&set, p);
9008 unlock_user(p, arg1, 0);
9011 target_to_host_timespec(puts, arg3);
9015 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9017 if (!is_error(ret)) {
9019 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9024 host_to_target_siginfo(p, &uinfo);
9025 unlock_user(p, arg2, sizeof(target_siginfo_t));
9027 ret = host_to_target_signal(ret);
9031 case TARGET_NR_rt_sigqueueinfo:
9035 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9039 target_to_host_siginfo(&uinfo, p);
9040 unlock_user(p, arg3, 0);
9041 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9044 case TARGET_NR_rt_tgsigqueueinfo:
9048 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9052 target_to_host_siginfo(&uinfo, p);
9053 unlock_user(p, arg4, 0);
9054 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9057 #ifdef TARGET_NR_sigreturn
9058 case TARGET_NR_sigreturn:
9059 if (block_signals()) {
9060 ret = -TARGET_ERESTARTSYS;
9062 ret = do_sigreturn(cpu_env);
9066 case TARGET_NR_rt_sigreturn:
9067 if (block_signals()) {
9068 ret = -TARGET_ERESTARTSYS;
9070 ret = do_rt_sigreturn(cpu_env);
9073 case TARGET_NR_sethostname:
9074 if (!(p = lock_user_string(arg1)))
9076 ret = get_errno(sethostname(p, arg2));
9077 unlock_user(p, arg1, 0);
9079 case TARGET_NR_setrlimit:
9081 int resource = target_to_host_resource(arg1);
9082 struct target_rlimit *target_rlim;
9084 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9086 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9087 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9088 unlock_user_struct(target_rlim, arg2, 0);
9089 ret = get_errno(setrlimit(resource, &rlim));
9092 case TARGET_NR_getrlimit:
9094 int resource = target_to_host_resource(arg1);
9095 struct target_rlimit *target_rlim;
9098 ret = get_errno(getrlimit(resource, &rlim));
9099 if (!is_error(ret)) {
9100 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9102 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9103 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9104 unlock_user_struct(target_rlim, arg2, 1);
9108 case TARGET_NR_getrusage:
9110 struct rusage rusage;
9111 ret = get_errno(getrusage(arg1, &rusage));
9112 if (!is_error(ret)) {
9113 ret = host_to_target_rusage(arg2, &rusage);
9117 case TARGET_NR_gettimeofday:
9120 ret = get_errno(gettimeofday(&tv, NULL));
9121 if (!is_error(ret)) {
9122 if (copy_to_user_timeval(arg1, &tv))
9127 case TARGET_NR_settimeofday:
9129 struct timeval tv, *ptv = NULL;
9130 struct timezone tz, *ptz = NULL;
9133 if (copy_from_user_timeval(&tv, arg1)) {
9140 if (copy_from_user_timezone(&tz, arg2)) {
9146 ret = get_errno(settimeofday(ptv, ptz));
9149 #if defined(TARGET_NR_select)
9150 case TARGET_NR_select:
9151 #if defined(TARGET_WANT_NI_OLD_SELECT)
9152 /* some architectures used to have old_select here
9153 * but now return ENOSYS for it.
9155 ret = -TARGET_ENOSYS;
9156 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9157 ret = do_old_select(arg1);
9159 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9163 #ifdef TARGET_NR_pselect6
9164 case TARGET_NR_pselect6:
9166 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9167 fd_set rfds, wfds, efds;
9168 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9169 struct timespec ts, *ts_ptr;
9172 * The 6th arg is actually two args smashed together,
9173 * so we cannot use the C library.
9181 abi_ulong arg_sigset, arg_sigsize, *arg7;
9182 target_sigset_t *target_sigset;
9190 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9194 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9198 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9204 * This takes a timespec, and not a timeval, so we cannot
9205 * use the do_select() helper ...
9208 if (target_to_host_timespec(&ts, ts_addr)) {
9216 /* Extract the two packed args for the sigset */
9219 sig.size = SIGSET_T_SIZE;
9221 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9225 arg_sigset = tswapal(arg7[0]);
9226 arg_sigsize = tswapal(arg7[1]);
9227 unlock_user(arg7, arg6, 0);
9231 if (arg_sigsize != sizeof(*target_sigset)) {
9232 /* Like the kernel, we enforce correct size sigsets */
9233 ret = -TARGET_EINVAL;
9236 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9237 sizeof(*target_sigset), 1);
9238 if (!target_sigset) {
9241 target_to_host_sigset(&set, target_sigset);
9242 unlock_user(target_sigset, arg_sigset, 0);
9250 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9253 if (!is_error(ret)) {
9254 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9256 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9258 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9261 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
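/* Shape of the "smashed together" sixth argument handled above, as the
 * kernel defines it: arg6 points at a two-word block in guest memory,
 *
 *     struct { abi_ulong sigset_addr; abi_ulong sigset_size; };
 *
 * so QEMU does a two-step fetch -- read the pair, then lock and convert
 * the target_sigset_t it points at -- which is also why the C library's
 * pselect() cannot be called directly for this syscall.
 */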
9267 #ifdef TARGET_NR_symlink
9268 case TARGET_NR_symlink:
9271 p = lock_user_string(arg1);
9272 p2 = lock_user_string(arg2);
9274 ret = -TARGET_EFAULT;
9276 ret = get_errno(symlink(p, p2));
9277 unlock_user(p2, arg2, 0);
9278 unlock_user(p, arg1, 0);
9282 #if defined(TARGET_NR_symlinkat)
9283 case TARGET_NR_symlinkat:
9286 p = lock_user_string(arg1);
9287 p2 = lock_user_string(arg3);
9289 ret = -TARGET_EFAULT;
9291 ret = get_errno(symlinkat(p, arg2, p2));
9292 unlock_user(p2, arg3, 0);
9293 unlock_user(p, arg1, 0);
9297 #ifdef TARGET_NR_oldlstat
9298 case TARGET_NR_oldlstat:
9301 #ifdef TARGET_NR_readlink
9302 case TARGET_NR_readlink:
9305 p = lock_user_string(arg1);
9306 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9308 ret = -TARGET_EFAULT;
9310 /* Short circuit this for the magic exe check. */
9311 ret = -TARGET_EINVAL;
9312 } else if (is_proc_myself((const char *)p, "exe")) {
9313 char real[PATH_MAX], *temp;
9314 temp = realpath(exec_path, real);
9315 /* Return value is # of bytes that we wrote to the buffer. */
9317 ret = get_errno(-1);
9319 /* Don't worry about sign mismatch as earlier mapping
9320 * logic would have thrown a bad address error. */
9321 ret = MIN(strlen(real), arg3);
9322 /* We cannot NUL terminate the string. */
9323 memcpy(p2, real, ret);
9326 ret = get_errno(readlink(path(p), p2, arg3));
9328 unlock_user(p2, arg2, ret);
9329 unlock_user(p, arg1, 0);
9333 #if defined(TARGET_NR_readlinkat)
9334 case TARGET_NR_readlinkat:
9337 p = lock_user_string(arg2);
9338 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9340 ret = -TARGET_EFAULT;
9341 } else if (is_proc_myself((const char *)p, "exe")) {
9342 char real[PATH_MAX], *temp;
9343 temp = realpath(exec_path, real);
9344 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
9345 snprintf((char *)p2, arg4, "%s", real);
9347 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9349 unlock_user(p2, arg3, ret);
9350 unlock_user(p, arg2, 0);
9354 #ifdef TARGET_NR_uselib
9355 case TARGET_NR_uselib:
9358 #ifdef TARGET_NR_swapon
9359 case TARGET_NR_swapon:
9360 if (!(p = lock_user_string(arg1)))
9362 ret = get_errno(swapon(p, arg2));
9363 unlock_user(p, arg1, 0);
9366 case TARGET_NR_reboot:
9367 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9368 /* arg4 must be ignored in all other cases */
9369 p = lock_user_string(arg4);
9373 ret = get_errno(reboot(arg1, arg2, arg3, p));
9374 unlock_user(p, arg4, 0);
9376 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9379 #ifdef TARGET_NR_readdir
9380 case TARGET_NR_readdir:
9383 #ifdef TARGET_NR_mmap
9384 case TARGET_NR_mmap:
9385 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9386 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9387 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9388 || defined(TARGET_S390X)
9391 abi_ulong v1, v2, v3, v4, v5, v6;
9392 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9400 unlock_user(v, arg1, 0);
9401 ret = get_errno(target_mmap(v1, v2, v3,
9402 target_to_host_bitmask(v4, mmap_flags_tbl),
9406 ret = get_errno(target_mmap(arg1, arg2, arg3,
9407 target_to_host_bitmask(arg4, mmap_flags_tbl),
9413 #ifdef TARGET_NR_mmap2
9414 case TARGET_NR_mmap2:
9416 #define MMAP_SHIFT 12
9418 ret = get_errno(target_mmap(arg1, arg2, arg3,
9419 target_to_host_bitmask(arg4, mmap_flags_tbl),
9421 arg6 << MMAP_SHIFT));
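/* Arithmetic sketch for mmap2: the guest passes the file offset in
 * 4096-byte units (MMAP_SHIFT above) so 32-bit targets can address file
 * offsets beyond their word size, e.g.
 *
 *     arg6 = 0x10  ->  byte offset 0x10 << 12 == 0x10000
 *
 * hence the shift before handing the value to target_mmap(). The unit is
 * the shift constant above, independent of the actual target page size.
 */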
9424 case TARGET_NR_munmap:
9425 ret = get_errno(target_munmap(arg1, arg2));
9427 case TARGET_NR_mprotect:
9429 TaskState *ts = cpu->opaque;
9430 /* Special hack to detect libc making the stack executable. */
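/* PROT_GROWSDOWN asks for the change to be applied all the way
 * down to the bottom of the stack; target_mprotect() has no such
 * flag, so widen the range by hand to start at the stack limit
 * and drop the flag before making the call.
 */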
9431 if ((arg3 & PROT_GROWSDOWN)
9432 && arg1 >= ts->info->stack_limit
9433 && arg1 <= ts->info->start_stack) {
9434 arg3 &= ~PROT_GROWSDOWN;
9435 arg2 = arg2 + arg1 - ts->info->stack_limit;
9436 arg1 = ts->info->stack_limit;
9439 ret = get_errno(target_mprotect(arg1, arg2, arg3));
9441 #ifdef TARGET_NR_mremap
9442 case TARGET_NR_mremap:
9443 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9446 /* ??? msync/mlock/munlock are broken for softmmu. */
9447 #ifdef TARGET_NR_msync
9448 case TARGET_NR_msync:
9449 ret = get_errno(msync(g2h(arg1), arg2, arg3));
9452 #ifdef TARGET_NR_mlock
9453 case TARGET_NR_mlock:
9454 ret = get_errno(mlock(g2h(arg1), arg2));
9457 #ifdef TARGET_NR_munlock
9458 case TARGET_NR_munlock:
9459 ret = get_errno(munlock(g2h(arg1), arg2));
9462 #ifdef TARGET_NR_mlockall
9463 case TARGET_NR_mlockall:
9464 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9467 #ifdef TARGET_NR_munlockall
9468 case TARGET_NR_munlockall:
9469 ret = get_errno(munlockall());
9472 case TARGET_NR_truncate:
9473 if (!(p = lock_user_string(arg1)))
9475 ret = get_errno(truncate(p, arg2));
9476 unlock_user(p, arg1, 0);
9478 case TARGET_NR_ftruncate:
9479 ret = get_errno(ftruncate(arg1, arg2));
9481 case TARGET_NR_fchmod:
9482 ret = get_errno(fchmod(arg1, arg2));
9484 #if defined(TARGET_NR_fchmodat)
9485 case TARGET_NR_fchmodat:
9486 if (!(p = lock_user_string(arg2)))
9488 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9489 unlock_user(p, arg2, 0);
9492 case TARGET_NR_getpriority:
9493 /* Note that negative values are valid for getpriority, so we must
9494 differentiate based on errno settings. */
9495 errno = 0;
9496 ret = getpriority(arg1, arg2);
9497 if (ret == -1 && errno != 0) {
9498 ret = -host_to_target_errno(errno);
9502 /* Return value is the unbiased priority. Signal no error. */
9503 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9505 /* Return value is a biased priority to avoid negative numbers. */
9509 case TARGET_NR_setpriority:
9510 ret = get_errno(setpriority(arg1, arg2, arg3));
9512 #ifdef TARGET_NR_profil
9513 case TARGET_NR_profil:
9516 case TARGET_NR_statfs:
9517 if (!(p = lock_user_string(arg1)))
9519 ret = get_errno(statfs(path(p), &stfs));
9520 unlock_user(p, arg1, 0);
9522 if (!is_error(ret)) {
9523 struct target_statfs *target_stfs;
9525 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9527 __put_user(stfs.f_type, &target_stfs->f_type);
9528 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9529 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9530 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9531 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9532 __put_user(stfs.f_files, &target_stfs->f_files);
9533 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9534 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9535 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9536 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9537 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9538 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9539 unlock_user_struct(target_stfs, arg2, 1);
9542 case TARGET_NR_fstatfs:
9543 ret = get_errno(fstatfs(arg1, &stfs));
9544 goto convert_statfs;
9545 #ifdef TARGET_NR_statfs64
9546 case TARGET_NR_statfs64:
9547 if (!(p = lock_user_string(arg1)))
9549 ret = get_errno(statfs(path(p), &stfs));
9550 unlock_user(p, arg1, 0);
9552 if (!is_error(ret)) {
9553 struct target_statfs64 *target_stfs;
9555 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9557 __put_user(stfs.f_type, &target_stfs->f_type);
9558 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9559 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9560 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9561 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9562 __put_user(stfs.f_files, &target_stfs->f_files);
9563 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9564 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9565 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9566 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9567 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9568 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9569 unlock_user_struct(target_stfs, arg3, 1);
9572 case TARGET_NR_fstatfs64:
9573 ret = get_errno(fstatfs(arg1, &stfs));
9574 goto convert_statfs64;
9576 #ifdef TARGET_NR_ioperm
9577 case TARGET_NR_ioperm:
9580 #ifdef TARGET_NR_socketcall
9581 case TARGET_NR_socketcall:
9582 ret = do_socketcall(arg1, arg2);
9585 #ifdef TARGET_NR_accept
9586 case TARGET_NR_accept:
9587 ret = do_accept4(arg1, arg2, arg3, 0);
9590 #ifdef TARGET_NR_accept4
9591 case TARGET_NR_accept4:
9592 ret = do_accept4(arg1, arg2, arg3, arg4);
9595 #ifdef TARGET_NR_bind
9596 case TARGET_NR_bind:
9597 ret = do_bind(arg1, arg2, arg3);
9600 #ifdef TARGET_NR_connect
9601 case TARGET_NR_connect:
9602 ret = do_connect(arg1, arg2, arg3);
9605 #ifdef TARGET_NR_getpeername
9606 case TARGET_NR_getpeername:
9607 ret = do_getpeername(arg1, arg2, arg3);
9610 #ifdef TARGET_NR_getsockname
9611 case TARGET_NR_getsockname:
9612 ret = do_getsockname(arg1, arg2, arg3);
9615 #ifdef TARGET_NR_getsockopt
9616 case TARGET_NR_getsockopt:
9617 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9620 #ifdef TARGET_NR_listen
9621 case TARGET_NR_listen:
9622 ret = get_errno(listen(arg1, arg2));
9625 #ifdef TARGET_NR_recv
9626 case TARGET_NR_recv:
9627 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9630 #ifdef TARGET_NR_recvfrom
9631 case TARGET_NR_recvfrom:
9632 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9635 #ifdef TARGET_NR_recvmsg
9636 case TARGET_NR_recvmsg:
9637 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9640 #ifdef TARGET_NR_send
9641 case TARGET_NR_send:
9642 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9645 #ifdef TARGET_NR_sendmsg
9646 case TARGET_NR_sendmsg:
9647 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9650 #ifdef TARGET_NR_sendmmsg
9651 case TARGET_NR_sendmmsg:
9652 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9654 case TARGET_NR_recvmmsg:
9655 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9658 #ifdef TARGET_NR_sendto
9659 case TARGET_NR_sendto:
9660 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9663 #ifdef TARGET_NR_shutdown
9664 case TARGET_NR_shutdown:
9665 ret = get_errno(shutdown(arg1, arg2));
9668 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9669 case TARGET_NR_getrandom:
9670 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9674 ret = get_errno(getrandom(p, arg2, arg3));
9675 unlock_user(p, arg1, ret);
9678 #ifdef TARGET_NR_socket
9679 case TARGET_NR_socket:
9680 ret = do_socket(arg1, arg2, arg3);
9683 #ifdef TARGET_NR_socketpair
9684 case TARGET_NR_socketpair:
9685 ret = do_socketpair(arg1, arg2, arg3, arg4);
9688 #ifdef TARGET_NR_setsockopt
9689 case TARGET_NR_setsockopt:
9690 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9693 #if defined(TARGET_NR_syslog)
9694 case TARGET_NR_syslog:
9699 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9700 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9701 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9702 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9703 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9704 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9705 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9706 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9708 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9711 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9712 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9713 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9715 ret = -TARGET_EINVAL;
9723 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9725 ret = -TARGET_EFAULT;
9728 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9729 unlock_user(p, arg2, arg3);
9739 case TARGET_NR_setitimer:
9741 struct itimerval value, ovalue, *pvalue;
9745 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9746 || copy_from_user_timeval(&pvalue->it_value,
9747 arg2 + sizeof(struct target_timeval)))
9752 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9753 if (!is_error(ret) && arg3) {
9754 if (copy_to_user_timeval(arg3,
9755 &ovalue.it_interval)
9756 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9762 case TARGET_NR_getitimer:
9764 struct itimerval value;
9766 ret = get_errno(getitimer(arg1, &value));
9767 if (!is_error(ret) && arg2) {
9768 if (copy_to_user_timeval(arg2,
9770 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9776 #ifdef TARGET_NR_stat
9777 case TARGET_NR_stat:
9778 if (!(p = lock_user_string(arg1)))
9780 ret = get_errno(stat(path(p), &st));
9781 unlock_user(p, arg1, 0);
9784 #ifdef TARGET_NR_lstat
9785 case TARGET_NR_lstat:
9786 if (!(p = lock_user_string(arg1)))
9788 ret = get_errno(lstat(path(p), &st));
9789 unlock_user(p, arg1, 0);
9792 case TARGET_NR_fstat:
9794 ret = get_errno(fstat(arg1, &st));
9795 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9798 if (!is_error(ret)) {
9799 struct target_stat *target_st;
9801 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9803 memset(target_st, 0, sizeof(*target_st));
9804 __put_user(st.st_dev, &target_st->st_dev);
9805 __put_user(st.st_ino, &target_st->st_ino);
9806 __put_user(st.st_mode, &target_st->st_mode);
9807 __put_user(st.st_uid, &target_st->st_uid);
9808 __put_user(st.st_gid, &target_st->st_gid);
9809 __put_user(st.st_nlink, &target_st->st_nlink);
9810 __put_user(st.st_rdev, &target_st->st_rdev);
9811 __put_user(st.st_size, &target_st->st_size);
9812 __put_user(st.st_blksize, &target_st->st_blksize);
9813 __put_user(st.st_blocks, &target_st->st_blocks);
9814 __put_user(st.st_atime, &target_st->target_st_atime);
9815 __put_user(st.st_mtime, &target_st->target_st_mtime);
9816 __put_user(st.st_ctime, &target_st->target_st_ctime);
9817 unlock_user_struct(target_st, arg2, 1);
9821 #ifdef TARGET_NR_olduname
9822 case TARGET_NR_olduname:
9825 #ifdef TARGET_NR_iopl
9826 case TARGET_NR_iopl:
9829 case TARGET_NR_vhangup:
9830 ret = get_errno(vhangup());
9832 #ifdef TARGET_NR_idle
9833 case TARGET_NR_idle:
9836 #ifdef TARGET_NR_syscall
9837 case TARGET_NR_syscall:
9838 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9839 arg6, arg7, arg8, 0);
9842 case TARGET_NR_wait4:
9845 abi_long status_ptr = arg2;
9846 struct rusage rusage, *rusage_ptr;
9847 abi_ulong target_rusage = arg4;
9848 abi_long rusage_err;
9850 rusage_ptr = &rusage;
9853 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9854 if (!is_error(ret)) {
9855 if (status_ptr && ret) {
9856 status = host_to_target_waitstatus(status);
9857 if (put_user_s32(status, status_ptr))
9860 if (target_rusage) {
9861 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9869 #ifdef TARGET_NR_swapoff
9870 case TARGET_NR_swapoff:
9871 if (!(p = lock_user_string(arg1)))
9873 ret = get_errno(swapoff(p));
9874 unlock_user(p, arg1, 0);
9877 case TARGET_NR_sysinfo:
9879 struct target_sysinfo *target_value;
9880 struct sysinfo value;
9881 ret = get_errno(sysinfo(&value));
9882 if (!is_error(ret) && arg1)
9884 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9886 __put_user(value.uptime, &target_value->uptime);
9887 __put_user(value.loads[0], &target_value->loads[0]);
9888 __put_user(value.loads[1], &target_value->loads[1]);
9889 __put_user(value.loads[2], &target_value->loads[2]);
9890 __put_user(value.totalram, &target_value->totalram);
9891 __put_user(value.freeram, &target_value->freeram);
9892 __put_user(value.sharedram, &target_value->sharedram);
9893 __put_user(value.bufferram, &target_value->bufferram);
9894 __put_user(value.totalswap, &target_value->totalswap);
9895 __put_user(value.freeswap, &target_value->freeswap);
9896 __put_user(value.procs, &target_value->procs);
9897 __put_user(value.totalhigh, &target_value->totalhigh);
9898 __put_user(value.freehigh, &target_value->freehigh);
9899 __put_user(value.mem_unit, &target_value->mem_unit);
9900 unlock_user_struct(target_value, arg1, 1);
9904 #ifdef TARGET_NR_ipc
9906 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9909 #ifdef TARGET_NR_semget
9910 case TARGET_NR_semget:
9911 ret = get_errno(semget(arg1, arg2, arg3));
9914 #ifdef TARGET_NR_semop
9915 case TARGET_NR_semop:
9916 ret = do_semop(arg1, arg2, arg3);
9919 #ifdef TARGET_NR_semctl
9920 case TARGET_NR_semctl:
9921 ret = do_semctl(arg1, arg2, arg3, arg4);
9924 #ifdef TARGET_NR_msgctl
9925 case TARGET_NR_msgctl:
9926 ret = do_msgctl(arg1, arg2, arg3);
9929 #ifdef TARGET_NR_msgget
9930 case TARGET_NR_msgget:
9931 ret = get_errno(msgget(arg1, arg2));
9934 #ifdef TARGET_NR_msgrcv
9935 case TARGET_NR_msgrcv:
9936 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9939 #ifdef TARGET_NR_msgsnd
9940 case TARGET_NR_msgsnd:
9941 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9944 #ifdef TARGET_NR_shmget
9945 case TARGET_NR_shmget:
9946 ret = get_errno(shmget(arg1, arg2, arg3));
9949 #ifdef TARGET_NR_shmctl
9950 case TARGET_NR_shmctl:
9951 ret = do_shmctl(arg1, arg2, arg3);
9954 #ifdef TARGET_NR_shmat
9955 case TARGET_NR_shmat:
9956 ret = do_shmat(cpu_env, arg1, arg2, arg3);
9959 #ifdef TARGET_NR_shmdt
9960 case TARGET_NR_shmdt:
9961 ret = do_shmdt(arg1);
9964 case TARGET_NR_fsync:
9965 ret = get_errno(fsync(arg1));
9967 case TARGET_NR_clone:
9968 /* Linux manages to have three different orderings for its
9969 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9970 * match the kernel's CONFIG_CLONE_* settings.
9971 * Microblaze is further special in that it uses a sixth
9972 * implicit argument to clone for the TLS pointer. */
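/* Concretely, given do_fork(env, flags, newsp, parent_tidptr,
 * newtls, child_tidptr), the guest register orders are:
 *   default:    clone(flags, sp, parent_tidptr, child_tidptr, tls)
 *   BACKWARDS:  clone(flags, sp, parent_tidptr, tls, child_tidptr)
 *   BACKWARDS2: clone(sp, flags, parent_tidptr, child_tidptr, tls)
 * which is what the argument shuffling below implements.
 */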
9974 #if defined(TARGET_MICROBLAZE)
9975 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9976 #elif defined(TARGET_CLONE_BACKWARDS)
9977 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9978 #elif defined(TARGET_CLONE_BACKWARDS2)
9979 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9981 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9984 #ifdef __NR_exit_group
9985 /* new thread calls */
9986 case TARGET_NR_exit_group:
9990 gdb_exit(cpu_env, arg1);
9991 ret = get_errno(exit_group(arg1));
9994 case TARGET_NR_setdomainname:
9995 if (!(p = lock_user_string(arg1)))
9997 ret = get_errno(setdomainname(p, arg2));
9998 unlock_user(p, arg1, 0);
10000 case TARGET_NR_uname:
10001 /* no need to transcode because we use the linux syscall */
10003 struct new_utsname * buf;
10005 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10007 ret = get_errno(sys_uname(buf));
10008 if (!is_error(ret)) {
10009 /* Overwrite the native machine name with whatever is being emulated. */
10011 strcpy(buf->machine, cpu_to_uname_machine(cpu_env));
10012 /* Allow the user to override the reported release. */
10013 if (qemu_uname_release && *qemu_uname_release) {
10014 g_strlcpy(buf->release, qemu_uname_release,
10015 sizeof(buf->release));
10018 unlock_user_struct(buf, arg1, 1);
10022 case TARGET_NR_modify_ldt:
10023 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
10025 #if !defined(TARGET_X86_64)
10026 case TARGET_NR_vm86old:
10027 goto unimplemented;
10028 case TARGET_NR_vm86:
10029 ret = do_vm86(cpu_env, arg1, arg2);
10033 case TARGET_NR_adjtimex:
10035 struct timex host_buf;
10037 if (target_to_host_timex(&host_buf, arg1) != 0) {
10040 ret = get_errno(adjtimex(&host_buf));
10041 if (!is_error(ret)) {
10042 if (host_to_target_timex(arg1, &host_buf) != 0) {
10048 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10049 case TARGET_NR_clock_adjtime:
10051 struct timex htx, *phtx = &htx;
10053 if (target_to_host_timex(phtx, arg2) != 0) {
10056 ret = get_errno(clock_adjtime(arg1, phtx));
10057 if (!is_error(ret) && phtx) {
10058 if (host_to_target_timex(arg2, phtx) != 0) {
10065 #ifdef TARGET_NR_create_module
10066 case TARGET_NR_create_module:
10068 case TARGET_NR_init_module:
10069 case TARGET_NR_delete_module:
10070 #ifdef TARGET_NR_get_kernel_syms
10071 case TARGET_NR_get_kernel_syms:
10073 goto unimplemented;
10074 case TARGET_NR_quotactl:
10075 goto unimplemented;
10076 case TARGET_NR_getpgid:
10077 ret = get_errno(getpgid(arg1));
10079 case TARGET_NR_fchdir:
10080 ret = get_errno(fchdir(arg1));
10082 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10083 case TARGET_NR_bdflush:
10084 goto unimplemented;
10086 #ifdef TARGET_NR_sysfs
10087 case TARGET_NR_sysfs:
10088 goto unimplemented;
10090 case TARGET_NR_personality:
10091 ret = get_errno(personality(arg1));
10093 #ifdef TARGET_NR_afs_syscall
10094 case TARGET_NR_afs_syscall:
10095 goto unimplemented;
10097 #ifdef TARGET_NR__llseek /* Not on alpha */
10098 case TARGET_NR__llseek:
10101 #if !defined(__NR_llseek)
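/* The 64-bit offset arrives split across two registers, e.g.
 * offset_high == 1 with offset_low == 0 gives 1ULL << 32 (4 GiB).
 */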
10102 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10104 ret = get_errno(res);
10109 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10111 if ((ret == 0) && put_user_s64(res, arg4)) {
10117 #ifdef TARGET_NR_getdents
10118 case TARGET_NR_getdents:
10119 #ifdef __NR_getdents
10120 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
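/* With a 32-bit guest on a 64-bit host, host and target
 * struct linux_dirent layouts differ (d_ino/d_off are long-sized),
 * so each record is repacked via a bounce buffer rather than
 * byte-swapped in place.
 */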
10122 struct target_dirent *target_dirp;
10123 struct linux_dirent *dirp;
10124 abi_long count = arg3;
10126 dirp = g_try_malloc(count);
10128 ret = -TARGET_ENOMEM;
10132 ret = get_errno(sys_getdents(arg1, dirp, count));
10133 if (!is_error(ret)) {
10134 struct linux_dirent *de;
10135 struct target_dirent *tde;
10137 int reclen, treclen;
10138 int count1, tnamelen;
10142 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10146 reclen = de->d_reclen;
10147 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10148 assert(tnamelen >= 0);
10149 treclen = tnamelen + offsetof(struct target_dirent, d_name);
10150 assert(count1 + treclen <= count);
10151 tde->d_reclen = tswap16(treclen);
10152 tde->d_ino = tswapal(de->d_ino);
10153 tde->d_off = tswapal(de->d_off);
10154 memcpy(tde->d_name, de->d_name, tnamelen);
10155 de = (struct linux_dirent *)((char *)de + reclen);
10157 tde = (struct target_dirent *)((char *)tde + treclen);
10161 unlock_user(target_dirp, arg2, ret);
10167 struct linux_dirent *dirp;
10168 abi_long count = arg3;
10170 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10172 ret = get_errno(sys_getdents(arg1, dirp, count));
10173 if (!is_error(ret)) {
10174 struct linux_dirent *de;
10179 reclen = de->d_reclen;
10182 de->d_reclen = tswap16(reclen);
10183 tswapls(&de->d_ino);
10184 tswapls(&de->d_off);
10185 de = (struct linux_dirent *)((char *)de + reclen);
10189 unlock_user(dirp, arg2, ret);
10193 /* Implement getdents in terms of getdents64 */
10195 struct linux_dirent64 *dirp;
10196 abi_long count = arg3;
10198 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10202 ret = get_errno(sys_getdents64(arg1, dirp, count));
10203 if (!is_error(ret)) {
10204 /* Convert the dirent64 structs to target dirent. We do this
10205 * in-place, since we can guarantee that a target_dirent is no
10206 * larger than a dirent64; however this means we have to be
10207 * careful to read everything before writing in the new format. */
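/* Each converted record is no longer than the original, so the
 * write pointer tde can never overtake the read pointer de; that
 * is what makes the in-place conversion safe.
 */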
10209 struct linux_dirent64 *de;
10210 struct target_dirent *tde;
10215 tde = (struct target_dirent *)dirp;
10217 int namelen, treclen;
10218 int reclen = de->d_reclen;
10219 uint64_t ino = de->d_ino;
10220 int64_t off = de->d_off;
10221 uint8_t type = de->d_type;
10223 namelen = strlen(de->d_name);
10224 treclen = offsetof(struct target_dirent, d_name)
10226 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10228 memmove(tde->d_name, de->d_name, namelen + 1);
10229 tde->d_ino = tswapal(ino);
10230 tde->d_off = tswapal(off);
10231 tde->d_reclen = tswap16(treclen);
10232 /* The target_dirent type is in what was formerly a padding
10233 * byte at the end of the structure: */
10235 *(((char *)tde) + treclen - 1) = type;
10237 de = (struct linux_dirent64 *)((char *)de + reclen);
10238 tde = (struct target_dirent *)((char *)tde + treclen);
10244 unlock_user(dirp, arg2, ret);
10248 #endif /* TARGET_NR_getdents */
10249 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10250 case TARGET_NR_getdents64:
10252 struct linux_dirent64 *dirp;
10253 abi_long count = arg3;
10254 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10256 ret = get_errno(sys_getdents64(arg1, dirp, count));
10257 if (!is_error(ret)) {
10258 struct linux_dirent64 *de;
10263 reclen = de->d_reclen;
10266 de->d_reclen = tswap16(reclen);
10267 tswap64s((uint64_t *)&de->d_ino);
10268 tswap64s((uint64_t *)&de->d_off);
10269 de = (struct linux_dirent64 *)((char *)de + reclen);
10273 unlock_user(dirp, arg2, ret);
10276 #endif /* TARGET_NR_getdents64 */
10277 #if defined(TARGET_NR__newselect)
10278 case TARGET_NR__newselect:
10279 ret = do_select(arg1, arg2, arg3, arg4, arg5);
10282 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10283 # ifdef TARGET_NR_poll
10284 case TARGET_NR_poll:
10286 # ifdef TARGET_NR_ppoll
10287 case TARGET_NR_ppoll:
10290 struct target_pollfd *target_pfd;
10291 unsigned int nfds = arg2;
10292 struct pollfd *pfd;
10298 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10299 ret = -TARGET_EINVAL;
10303 target_pfd = lock_user(VERIFY_WRITE, arg1,
10304 sizeof(struct target_pollfd) * nfds, 1);
10309 pfd = alloca(sizeof(struct pollfd) * nfds);
10310 for (i = 0; i < nfds; i++) {
10311 pfd[i].fd = tswap32(target_pfd[i].fd);
10312 pfd[i].events = tswap16(target_pfd[i].events);
10317 # ifdef TARGET_NR_ppoll
10318 case TARGET_NR_ppoll:
10320 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10321 target_sigset_t *target_set;
10322 sigset_t _set, *set = &_set;
10325 if (target_to_host_timespec(timeout_ts, arg3)) {
10326 unlock_user(target_pfd, arg1, 0);
10334 if (arg5 != sizeof(target_sigset_t)) {
10335 unlock_user(target_pfd, arg1, 0);
10336 ret = -TARGET_EINVAL;
10340 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10342 unlock_user(target_pfd, arg1, 0);
10345 target_to_host_sigset(set, target_set);
10350 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10351 set, SIGSET_T_SIZE));
10353 if (!is_error(ret) && arg3) {
10354 host_to_target_timespec(arg3, timeout_ts);
10357 unlock_user(target_set, arg4, 0);
10362 # ifdef TARGET_NR_poll
10363 case TARGET_NR_poll:
10365 struct timespec ts, *pts;
10368 /* Convert ms to secs, ns */
10369 ts.tv_sec = arg3 / 1000;
10370 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
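/* e.g. arg3 == 1500 ms becomes { .tv_sec = 1, .tv_nsec = 500000000 } */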
10373 /* -ve poll() timeout means "infinite" */
10376 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10381 g_assert_not_reached();
10384 if (!is_error(ret)) {
10385 for (i = 0; i < nfds; i++) {
10386 target_pfd[i].revents = tswap16(pfd[i].revents);
10389 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10393 case TARGET_NR_flock:
10394 /* NOTE: the flock constant seems to be the same for every Linux platform. */
10396 ret = get_errno(safe_flock(arg1, arg2));
10398 case TARGET_NR_readv:
10400 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10402 ret = get_errno(safe_readv(arg1, vec, arg3));
10403 unlock_iovec(vec, arg2, arg3, 1);
10405 ret = -host_to_target_errno(errno);
10409 case TARGET_NR_writev:
10411 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10413 ret = get_errno(safe_writev(arg1, vec, arg3));
10414 unlock_iovec(vec, arg2, arg3, 0);
10416 ret = -host_to_target_errno(errno);
10420 #if defined(TARGET_NR_preadv)
10421 case TARGET_NR_preadv:
10423 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10425 ret = get_errno(safe_preadv(arg1, vec, arg3, arg4, arg5));
10426 unlock_iovec(vec, arg2, arg3, 1);
10428 ret = -host_to_target_errno(errno);
10433 #if defined(TARGET_NR_pwritev)
10434 case TARGET_NR_pwritev:
10436 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10438 ret = get_errno(safe_pwritev(arg1, vec, arg3, arg4, arg5));
10439 unlock_iovec(vec, arg2, arg3, 0);
10441 ret = -host_to_target_errno(errno);
10446 case TARGET_NR_getsid:
10447 ret = get_errno(getsid(arg1));
10449 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10450 case TARGET_NR_fdatasync:
10451 ret = get_errno(fdatasync(arg1));
10454 #ifdef TARGET_NR__sysctl
10455 case TARGET_NR__sysctl:
10456 /* We don't implement this, but ENOTDIR is always a safe return value. */
10458 ret = -TARGET_ENOTDIR;
10461 case TARGET_NR_sched_getaffinity:
10463 unsigned int mask_size;
10464 unsigned long *mask;
10467 /* sched_getaffinity needs multiples of ulong, so we must take
10468 * care of mismatches between target ulong and host ulong sizes. */
10470 if (arg2 & (sizeof(abi_ulong) - 1)) {
10471 ret = -TARGET_EINVAL;
10474 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
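/* e.g. a 32-bit guest passing arg2 == 4 on a 64-bit host has
 * mask_size rounded up to 8 so the kernel sees whole ulongs.
 */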
10476 mask = alloca(mask_size);
10477 memset(mask, 0, mask_size);
10478 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10480 if (!is_error(ret)) {
10482 /* More data returned than the caller's buffer will fit.
10483 * This only happens if sizeof(abi_long) < sizeof(long)
10484 * and the caller passed us a buffer holding an odd number
10485 * of abi_longs. If the host kernel is actually using the
10486 * extra 4 bytes then fail EINVAL; otherwise we can just
10487 * ignore them and only copy the interesting part. */
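/* arg2 * 8 is the number of CPU bits the guest buffer can hold,
 * e.g. arg2 == 12 holds 96 CPUs even though mask_size was rounded
 * up to 16 bytes.
 */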
10489 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10490 if (numcpus > arg2 * 8) {
10491 ret = -TARGET_EINVAL;
10497 ret = host_to_target_cpu_mask(mask, mask_size, arg3, arg2);
10501 case TARGET_NR_sched_setaffinity:
10503 unsigned int mask_size;
10504 unsigned long *mask;
10507 /* sched_setaffinity needs multiples of ulong, so we must take
10508 * care of mismatches between target ulong and host ulong sizes. */
10510 if (arg2 & (sizeof(abi_ulong) - 1)) {
10511 ret = -TARGET_EINVAL;
10514 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10515 mask = alloca(mask_size);
10517 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10522 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10525 case TARGET_NR_getcpu:
10527 unsigned cpu, node;
10528 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10529 arg2 ? &node : NULL,
10531 if (is_error(ret)) {
10534 if (arg1 && put_user_u32(cpu, arg1)) {
10537 if (arg2 && put_user_u32(node, arg2)) {
10542 case TARGET_NR_sched_setparam:
10544 struct sched_param *target_schp;
10545 struct sched_param schp;
10548 return -TARGET_EINVAL;
10550 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10552 schp.sched_priority = tswap32(target_schp->sched_priority);
10553 unlock_user_struct(target_schp, arg2, 0);
10554 ret = get_errno(sched_setparam(arg1, &schp));
10557 case TARGET_NR_sched_getparam:
10559 struct sched_param *target_schp;
10560 struct sched_param schp;
10563 return -TARGET_EINVAL;
10565 ret = get_errno(sched_getparam(arg1, &schp));
10566 if (!is_error(ret)) {
10567 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10569 target_schp->sched_priority = tswap32(schp.sched_priority);
10570 unlock_user_struct(target_schp, arg2, 1);
10574 case TARGET_NR_sched_setscheduler:
10576 struct sched_param *target_schp;
10577 struct sched_param schp;
10579 return -TARGET_EINVAL;
10581 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10583 schp.sched_priority = tswap32(target_schp->sched_priority);
10584 unlock_user_struct(target_schp, arg3, 0);
10585 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10588 case TARGET_NR_sched_getscheduler:
10589 ret = get_errno(sched_getscheduler(arg1));
10591 case TARGET_NR_sched_yield:
10592 ret = get_errno(sched_yield());
10594 case TARGET_NR_sched_get_priority_max:
10595 ret = get_errno(sched_get_priority_max(arg1));
10597 case TARGET_NR_sched_get_priority_min:
10598 ret = get_errno(sched_get_priority_min(arg1));
10600 case TARGET_NR_sched_rr_get_interval:
10602 struct timespec ts;
10603 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10604 if (!is_error(ret)) {
10605 ret = host_to_target_timespec(arg2, &ts);
10609 case TARGET_NR_nanosleep:
10611 struct timespec req, rem;
10612 target_to_host_timespec(&req, arg1);
10613 ret = get_errno(safe_nanosleep(&req, &rem));
10614 if (is_error(ret) && arg2) {
10615 host_to_target_timespec(arg2, &rem);
10619 #ifdef TARGET_NR_query_module
10620 case TARGET_NR_query_module:
10621 goto unimplemented;
10623 #ifdef TARGET_NR_nfsservctl
10624 case TARGET_NR_nfsservctl:
10625 goto unimplemented;
10627 case TARGET_NR_prctl:
10629 case PR_GET_PDEATHSIG:
10632 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10633 if (!is_error(ret) && arg2
10634 && put_user_ual(deathsig, arg2)) {
10642 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10646 ret = get_errno(prctl(arg1, (unsigned long)name,
10647 arg3, arg4, arg5));
10648 unlock_user(name, arg2, 16);
10653 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10657 ret = get_errno(prctl(arg1, (unsigned long)name,
10658 arg3, arg4, arg5));
10659 unlock_user(name, arg2, 0);
10663 case PR_GET_SECCOMP:
10664 case PR_SET_SECCOMP:
10665 /* Disable seccomp to prevent the target disabling syscalls we need. */
10667 ret = -TARGET_EINVAL;
10670 /* Most prctl options have no pointer arguments */
10671 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10675 #ifdef TARGET_NR_arch_prctl
10676 case TARGET_NR_arch_prctl:
10677 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10678 ret = do_arch_prctl(cpu_env, arg1, arg2);
10681 goto unimplemented;
10684 #ifdef TARGET_NR_pread64
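/* Some 32-bit ABIs require a 64-bit argument to start in an
 * even/odd register pair, which inserts a pad register before it;
 * regpairs_aligned() detects this so the offset halves are taken
 * from the correct registers.
 */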
10685 case TARGET_NR_pread64:
10686 if (regpairs_aligned(cpu_env, num)) {
10690 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10692 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10693 unlock_user(p, arg2, ret);
10695 case TARGET_NR_pwrite64:
10696 if (regpairs_aligned(cpu_env, num)) {
10700 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10702 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10703 unlock_user(p, arg2, 0);
10706 case TARGET_NR_getcwd:
10707 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10709 ret = get_errno(sys_getcwd1(p, arg2));
10710 unlock_user(p, arg1, ret);
10712 case TARGET_NR_capget:
10713 case TARGET_NR_capset:
10715 struct target_user_cap_header *target_header;
10716 struct target_user_cap_data *target_data = NULL;
10717 struct __user_cap_header_struct header;
10718 struct __user_cap_data_struct data[2];
10719 struct __user_cap_data_struct *dataptr = NULL;
10720 int i, target_datalen;
10721 int data_items = 1;
10723 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10726 header.version = tswap32(target_header->version);
10727 header.pid = tswap32(target_header->pid);
10729 if (header.version != _LINUX_CAPABILITY_VERSION) {
10730 /* Version 2 and up takes a pointer to two user_data structs */
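/* Capability sets outgrew 32 bits; version 2 and later headers
 * therefore carry two 32-bit user_cap_data structs (64 capability
 * bits), so two data items are copied for any non-v1 header.
 */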
10734 target_datalen = sizeof(*target_data) * data_items;
10737 if (num == TARGET_NR_capget) {
10738 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10740 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10742 if (!target_data) {
10743 unlock_user_struct(target_header, arg1, 0);
10747 if (num == TARGET_NR_capset) {
10748 for (i = 0; i < data_items; i++) {
10749 data[i].effective = tswap32(target_data[i].effective);
10750 data[i].permitted = tswap32(target_data[i].permitted);
10751 data[i].inheritable = tswap32(target_data[i].inheritable);
10758 if (num == TARGET_NR_capget) {
10759 ret = get_errno(capget(&header, dataptr));
10761 ret = get_errno(capset(&header, dataptr));
10764 /* The kernel always updates version for both capget and capset */
10765 target_header->version = tswap32(header.version);
10766 unlock_user_struct(target_header, arg1, 1);
10769 if (num == TARGET_NR_capget) {
10770 for (i = 0; i < data_items; i++) {
10771 target_data[i].effective = tswap32(data[i].effective);
10772 target_data[i].permitted = tswap32(data[i].permitted);
10773 target_data[i].inheritable = tswap32(data[i].inheritable);
10775 unlock_user(target_data, arg2, target_datalen);
10777 unlock_user(target_data, arg2, 0);
10782 case TARGET_NR_sigaltstack:
10783 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10786 #ifdef CONFIG_SENDFILE
10787 case TARGET_NR_sendfile:
10789 off_t *offp = NULL;
10792 ret = get_user_sal(off, arg3);
10793 if (is_error(ret)) {
10798 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10799 if (!is_error(ret) && arg3) {
10800 abi_long ret2 = put_user_sal(off, arg3);
10801 if (is_error(ret2)) {
10807 #ifdef TARGET_NR_sendfile64
10808 case TARGET_NR_sendfile64:
10810 off_t *offp = NULL;
10813 ret = get_user_s64(off, arg3);
10814 if (is_error(ret)) {
10819 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10820 if (!is_error(ret) && arg3) {
10821 abi_long ret2 = put_user_s64(off, arg3);
10822 if (is_error(ret2)) {
10830 case TARGET_NR_sendfile:
10831 #ifdef TARGET_NR_sendfile64
10832 case TARGET_NR_sendfile64:
10834 goto unimplemented;
10837 #ifdef TARGET_NR_getpmsg
10838 case TARGET_NR_getpmsg:
10839 goto unimplemented;
10841 #ifdef TARGET_NR_putpmsg
10842 case TARGET_NR_putpmsg:
10843 goto unimplemented;
10845 #ifdef TARGET_NR_vfork
10846 case TARGET_NR_vfork:
10847 ret = get_errno(do_fork(cpu_env,
10848 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10852 #ifdef TARGET_NR_ugetrlimit
10853 case TARGET_NR_ugetrlimit:
10855 struct rlimit rlim;
10856 int resource = target_to_host_resource(arg1);
10857 ret = get_errno(getrlimit(resource, &rlim));
10858 if (!is_error(ret)) {
10859 struct target_rlimit *target_rlim;
10860 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10862 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10863 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10864 unlock_user_struct(target_rlim, arg2, 1);
10869 #ifdef TARGET_NR_truncate64
10870 case TARGET_NR_truncate64:
10871 if (!(p = lock_user_string(arg1)))
10873 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10874 unlock_user(p, arg1, 0);
10877 #ifdef TARGET_NR_ftruncate64
10878 case TARGET_NR_ftruncate64:
10879 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10882 #ifdef TARGET_NR_stat64
10883 case TARGET_NR_stat64:
10884 if (!(p = lock_user_string(arg1)))
10886 ret = get_errno(stat(path(p), &st));
10887 unlock_user(p, arg1, 0);
10888 if (!is_error(ret))
10889 ret = host_to_target_stat64(cpu_env, arg2, &st);
10892 #ifdef TARGET_NR_lstat64
10893 case TARGET_NR_lstat64:
10894 if (!(p = lock_user_string(arg1)))
10896 ret = get_errno(lstat(path(p), &st));
10897 unlock_user(p, arg1, 0);
10898 if (!is_error(ret))
10899 ret = host_to_target_stat64(cpu_env, arg2, &st);
10902 #ifdef TARGET_NR_fstat64
10903 case TARGET_NR_fstat64:
10904 ret = get_errno(fstat(arg1, &st));
10905 if (!is_error(ret))
10906 ret = host_to_target_stat64(cpu_env, arg2, &st);
10909 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10910 #ifdef TARGET_NR_fstatat64
10911 case TARGET_NR_fstatat64:
10913 #ifdef TARGET_NR_newfstatat
10914 case TARGET_NR_newfstatat:
10916 if (!(p = lock_user_string(arg2)))
10918 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10919 if (!is_error(ret))
10920 ret = host_to_target_stat64(cpu_env, arg3, &st);
10923 #ifdef TARGET_NR_lchown
10924 case TARGET_NR_lchown:
10925 if (!(p = lock_user_string(arg1)))
10927 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10928 unlock_user(p, arg1, 0);
10931 #ifdef TARGET_NR_getuid
10932 case TARGET_NR_getuid:
10933 ret = get_errno(high2lowuid(getuid()));
10936 #ifdef TARGET_NR_getgid
10937 case TARGET_NR_getgid:
10938 ret = get_errno(high2lowgid(getgid()));
10941 #ifdef TARGET_NR_geteuid
10942 case TARGET_NR_geteuid:
10943 ret = get_errno(high2lowuid(geteuid()));
10946 #ifdef TARGET_NR_getegid
10947 case TARGET_NR_getegid:
10948 ret = get_errno(high2lowgid(getegid()));
10951 case TARGET_NR_setreuid:
10952 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10954 case TARGET_NR_setregid:
10955 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10957 case TARGET_NR_getgroups:
10959 int gidsetsize = arg1;
10960 target_id *target_grouplist;
10964 grouplist = alloca(gidsetsize * sizeof(gid_t));
10965 ret = get_errno(getgroups(gidsetsize, grouplist));
10966 if (gidsetsize == 0)
10968 if (!is_error(ret)) {
10969 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10970 if (!target_grouplist)
10972 for (i = 0; i < ret; i++)
10973 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10974 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10978 case TARGET_NR_setgroups:
10980 int gidsetsize = arg1;
10981 target_id *target_grouplist;
10982 gid_t *grouplist = NULL;
10985 grouplist = alloca(gidsetsize * sizeof(gid_t));
10986 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10987 if (!target_grouplist) {
10988 ret = -TARGET_EFAULT;
10991 for (i = 0; i < gidsetsize; i++) {
10992 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10994 unlock_user(target_grouplist, arg2, 0);
10996 ret = get_errno(setgroups(gidsetsize, grouplist));
10999 case TARGET_NR_fchown:
11000 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11002 #if defined(TARGET_NR_fchownat)
11003 case TARGET_NR_fchownat:
11004 if (!(p = lock_user_string(arg2)))
11006 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11007 low2highgid(arg4), arg5));
11008 unlock_user(p, arg2, 0);
11011 #ifdef TARGET_NR_setresuid
11012 case TARGET_NR_setresuid:
11013 ret = get_errno(sys_setresuid(low2highuid(arg1),
11015 low2highuid(arg3)));
11018 #ifdef TARGET_NR_getresuid
11019 case TARGET_NR_getresuid:
11021 uid_t ruid, euid, suid;
11022 ret = get_errno(getresuid(&ruid, &euid, &suid));
11023 if (!is_error(ret)) {
11024 if (put_user_id(high2lowuid(ruid), arg1)
11025 || put_user_id(high2lowuid(euid), arg2)
11026 || put_user_id(high2lowuid(suid), arg3))
11032 #ifdef TARGET_NR_setresgid
11033 case TARGET_NR_setresgid:
11034 ret = get_errno(sys_setresgid(low2highgid(arg1),
11036 low2highgid(arg3)));
11039 #ifdef TARGET_NR_getresgid
11040 case TARGET_NR_getresgid:
11042 gid_t rgid, egid, sgid;
11043 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11044 if (!is_error(ret)) {
11045 if (put_user_id(high2lowgid(rgid), arg1)
11046 || put_user_id(high2lowgid(egid), arg2)
11047 || put_user_id(high2lowgid(sgid), arg3))
11053 #ifdef TARGET_NR_chown
11054 case TARGET_NR_chown:
11055 if (!(p = lock_user_string(arg1)))
11057 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11058 unlock_user(p, arg1, 0);
11061 case TARGET_NR_setuid:
11062 ret = get_errno(sys_setuid(low2highuid(arg1)));
11064 case TARGET_NR_setgid:
11065 ret = get_errno(sys_setgid(low2highgid(arg1)));
11067 case TARGET_NR_setfsuid:
11068 ret = get_errno(setfsuid(arg1));
11070 case TARGET_NR_setfsgid:
11071 ret = get_errno(setfsgid(arg1));
11074 #ifdef TARGET_NR_lchown32
11075 case TARGET_NR_lchown32:
11076 if (!(p = lock_user_string(arg1)))
11078 ret = get_errno(lchown(p, arg2, arg3));
11079 unlock_user(p, arg1, 0);
11082 #ifdef TARGET_NR_getuid32
11083 case TARGET_NR_getuid32:
11084 ret = get_errno(getuid());
11088 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11089 /* Alpha specific */
11090 case TARGET_NR_getxuid:
11094 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11096 ret = get_errno(getuid());
11099 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11100 /* Alpha specific */
11101 case TARGET_NR_getxgid:
11105 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11107 ret = get_errno(getgid());
11110 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11111 /* Alpha specific */
11112 case TARGET_NR_osf_getsysinfo:
11113 ret = -TARGET_EOPNOTSUPP;
11115 case TARGET_GSI_IEEE_FP_CONTROL:
11117 uint64_t swcr, fpcr = cpu_alpha_load_fpcr(cpu_env);
11119 /* Copied from linux ieee_fpcr_to_swcr. */
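/* The SWCR is the OSF/1 software floating-point control word; it
 * carries roughly the same status and trap-enable bits as the
 * hardware FPCR, just at different positions, hence the shifts.
 */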
11120 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
11121 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
11122 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
11123 | SWCR_TRAP_ENABLE_DZE
11124 | SWCR_TRAP_ENABLE_OVF);
11125 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
11126 | SWCR_TRAP_ENABLE_INE);
11127 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
11128 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
11130 if (put_user_u64(swcr, arg2))
11136 /* case GSI_IEEE_STATE_AT_SIGNAL:
11137 -- Not implemented in linux kernel.
11138 case GSI_UACPROC:
11139 -- Retrieves current unaligned access state; not much used.
11140 case GSI_PROC_TYPE:
11141 -- Retrieves implver information; surely not used.
11142 case GSI_GET_HWRPB:
11143 -- Grabs a copy of the HWRPB; surely not used. */
11148 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11149 /* Alpha specific */
11150 case TARGET_NR_osf_setsysinfo:
11151 ret = -TARGET_EOPNOTSUPP;
11153 case TARGET_SSI_IEEE_FP_CONTROL:
11155 uint64_t swcr, fpcr, orig_fpcr;
11157 if (get_user_u64(swcr, arg2)) {
11160 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11161 fpcr = orig_fpcr & FPCR_DYN_MASK;
11163 /* Copied from linux ieee_swcr_to_fpcr. */
11164 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
11165 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
11166 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
11167 | SWCR_TRAP_ENABLE_DZE
11168 | SWCR_TRAP_ENABLE_OVF)) << 48;
11169 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
11170 | SWCR_TRAP_ENABLE_INE)) << 57;
11171 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
11172 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
11174 cpu_alpha_store_fpcr(cpu_env, fpcr);
11179 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11181 uint64_t exc, fpcr, orig_fpcr;
11184 if (get_user_u64(exc, arg2)) {
11188 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11190 /* We only add to the exception status here. */
11191 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
11193 cpu_alpha_store_fpcr(cpu_env, fpcr);
11196 /* Old exceptions are not signaled. */
11197 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
11199 /* If any exceptions set by this call
11200 are unmasked, send a signal. */
11202 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
11203 si_code = TARGET_FPE_FLTRES;
11205 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
11206 si_code = TARGET_FPE_FLTUND;
11208 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
11209 si_code = TARGET_FPE_FLTOVF;
11211 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
11212 si_code = TARGET_FPE_FLTDIV;
11214 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
11215 si_code = TARGET_FPE_FLTINV;
11217 if (si_code != 0) {
11218 target_siginfo_t info;
11219 info.si_signo = SIGFPE;
11221 info.si_code = si_code;
11222 info._sifields._sigfault._addr
11223 = ((CPUArchState *)cpu_env)->pc;
11224 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11225 QEMU_SI_FAULT, &info);
11230 /* case SSI_NVPAIRS:
11231 -- Used with SSIN_UACPROC to enable unaligned accesses.
11232 case SSI_IEEE_STATE_AT_SIGNAL:
11233 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11234 -- Not implemented in linux kernel. */
11239 #ifdef TARGET_NR_osf_sigprocmask
11240 /* Alpha specific. */
11241 case TARGET_NR_osf_sigprocmask:
11245 sigset_t set, oldset;
11248 case TARGET_SIG_BLOCK:
11251 case TARGET_SIG_UNBLOCK:
11254 case TARGET_SIG_SETMASK:
11258 ret = -TARGET_EINVAL;
11262 target_to_host_old_sigset(&set, &mask);
11263 ret = do_sigprocmask(how, &set, &oldset);
11265 host_to_target_old_sigset(&mask, &oldset);
11272 #ifdef TARGET_NR_getgid32
11273 case TARGET_NR_getgid32:
11274 ret = get_errno(getgid());
11277 #ifdef TARGET_NR_geteuid32
11278 case TARGET_NR_geteuid32:
11279 ret = get_errno(geteuid());
11282 #ifdef TARGET_NR_getegid32
11283 case TARGET_NR_getegid32:
11284 ret = get_errno(getegid());
11287 #ifdef TARGET_NR_setreuid32
11288 case TARGET_NR_setreuid32:
11289 ret = get_errno(setreuid(arg1, arg2));
11292 #ifdef TARGET_NR_setregid32
11293 case TARGET_NR_setregid32:
11294 ret = get_errno(setregid(arg1, arg2));
11297 #ifdef TARGET_NR_getgroups32
11298 case TARGET_NR_getgroups32:
11300 int gidsetsize = arg1;
11301 uint32_t *target_grouplist;
11305 grouplist = alloca(gidsetsize * sizeof(gid_t));
11306 ret = get_errno(getgroups(gidsetsize, grouplist));
11307 if (gidsetsize == 0)
11309 if (!is_error(ret)) {
11310 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11311 if (!target_grouplist) {
11312 ret = -TARGET_EFAULT;
11315 for (i = 0; i < ret; i++)
11316 target_grouplist[i] = tswap32(grouplist[i]);
11317 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11322 #ifdef TARGET_NR_setgroups32
11323 case TARGET_NR_setgroups32:
11325 int gidsetsize = arg1;
11326 uint32_t *target_grouplist;
11330 grouplist = alloca(gidsetsize * sizeof(gid_t));
11331 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11332 if (!target_grouplist) {
11333 ret = -TARGET_EFAULT;
11336 for (i = 0; i < gidsetsize; i++)
11337 grouplist[i] = tswap32(target_grouplist[i]);
11338 unlock_user(target_grouplist, arg2, 0);
11339 ret = get_errno(setgroups(gidsetsize, grouplist));
11343 #ifdef TARGET_NR_fchown32
11344 case TARGET_NR_fchown32:
11345 ret = get_errno(fchown(arg1, arg2, arg3));
11348 #ifdef TARGET_NR_setresuid32
11349 case TARGET_NR_setresuid32:
11350 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
11353 #ifdef TARGET_NR_getresuid32
11354 case TARGET_NR_getresuid32:
11356 uid_t ruid, euid, suid;
11357 ret = get_errno(getresuid(&ruid, &euid, &suid));
11358 if (!is_error(ret)) {
11359 if (put_user_u32(ruid, arg1)
11360 || put_user_u32(euid, arg2)
11361 || put_user_u32(suid, arg3))
11367 #ifdef TARGET_NR_setresgid32
11368 case TARGET_NR_setresgid32:
11369 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
11372 #ifdef TARGET_NR_getresgid32
11373 case TARGET_NR_getresgid32:
11375 gid_t rgid, egid, sgid;
11376 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11377 if (!is_error(ret)) {
11378 if (put_user_u32(rgid, arg1)
11379 || put_user_u32(egid, arg2)
11380 || put_user_u32(sgid, arg3))
11386 #ifdef TARGET_NR_chown32
11387 case TARGET_NR_chown32:
11388 if (!(p = lock_user_string(arg1)))
11390 ret = get_errno(chown(p, arg2, arg3));
11391 unlock_user(p, arg1, 0);
11394 #ifdef TARGET_NR_setuid32
11395 case TARGET_NR_setuid32:
11396 ret = get_errno(sys_setuid(arg1));
11399 #ifdef TARGET_NR_setgid32
11400 case TARGET_NR_setgid32:
11401 ret = get_errno(sys_setgid(arg1));
11404 #ifdef TARGET_NR_setfsuid32
11405 case TARGET_NR_setfsuid32:
11406 ret = get_errno(setfsuid(arg1));
11409 #ifdef TARGET_NR_setfsgid32
11410 case TARGET_NR_setfsgid32:
11411 ret = get_errno(setfsgid(arg1));
11415 case TARGET_NR_pivot_root:
11416 goto unimplemented;
11417 #ifdef TARGET_NR_mincore
11418 case TARGET_NR_mincore:
11421 ret = -TARGET_ENOMEM;
11422 a = lock_user(VERIFY_READ, arg1, arg2, 0);
11426 ret = -TARGET_EFAULT;
11427 p = lock_user_string(arg3);
11431 ret = get_errno(mincore(a, arg2, p));
11432 unlock_user(p, arg3, ret);
11434 unlock_user(a, arg1, 0);
11438 #ifdef TARGET_NR_arm_fadvise64_64
11439 case TARGET_NR_arm_fadvise64_64:
11440 /* arm_fadvise64_64 looks like fadvise64_64 but
11441 * with different argument order: fd, advice, offset, len
11442 * rather than the usual fd, offset, len, advice.
11443 * Note that offset and len are both 64-bit so appear as
11444 * pairs of 32-bit registers. */
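/* Note that posix_fadvise() returns the error number directly
 * rather than setting errno, so the result is negated by hand
 * below instead of going through get_errno().
 */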
11446 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11447 target_offset64(arg5, arg6), arg2);
11448 ret = -host_to_target_errno(ret);
11452 #if TARGET_ABI_BITS == 32
11454 #ifdef TARGET_NR_fadvise64_64
11455 case TARGET_NR_fadvise64_64:
11456 #if defined(TARGET_PPC)
11457 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11465 /* 6 args: fd, offset (high, low), len (high, low), advice */
11466 if (regpairs_aligned(cpu_env, num)) {
11467 /* offset is in (3,4), len in (5,6) and advice in 7 */
11475 ret = -host_to_target_errno(posix_fadvise(arg1,
11476 target_offset64(arg2, arg3),
11477 target_offset64(arg4, arg5),
11482 #ifdef TARGET_NR_fadvise64
11483 case TARGET_NR_fadvise64:
11484 /* 5 args: fd, offset (high, low), len, advice */
11485 if (regpairs_aligned(cpu_env, num)) {
11486 /* offset is in (3,4), len in 5 and advice in 6 */
11492 ret = -host_to_target_errno(posix_fadvise(arg1,
11493 target_offset64(arg2, arg3),
11498 #else /* not a 32-bit ABI */
11499 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11500 #ifdef TARGET_NR_fadvise64_64
11501 case TARGET_NR_fadvise64_64:
11503 #ifdef TARGET_NR_fadvise64
11504 case TARGET_NR_fadvise64:
11506 #ifdef TARGET_S390X
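/* The s390 ABI numbers POSIX_FADV_DONTNEED and POSIX_FADV_NOREUSE
 * as 6 and 7 rather than the generic 4 and 5, so remap those and
 * reject the generic values.
 */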
11508 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11509 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11510 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11511 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11515 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11518 #endif /* end of 64-bit ABI fadvise handling */
11520 #ifdef TARGET_NR_madvise
11521 case TARGET_NR_madvise:
11522 /* A straight passthrough may not be safe because qemu sometimes
11523 turns private file-backed mappings into anonymous mappings.
11524 This will break MADV_DONTNEED.
11525 This is a hint, so ignoring and returning success is ok. */
11526 ret = get_errno(0);
11529 #if TARGET_ABI_BITS == 32
11530 case TARGET_NR_fcntl64:
11534 from_flock64_fn *copyfrom = copy_from_user_flock64;
11535 to_flock64_fn *copyto = copy_to_user_flock64;
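/* ARM EABI aligns the 64-bit fields of struct flock64 differently
 * from the old ABI, so EABI guests need their own copy helpers.
 */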
11538 if (((CPUARMState *)cpu_env)->eabi) {
11539 copyfrom = copy_from_user_eabi_flock64;
11540 copyto = copy_to_user_eabi_flock64;
11544 cmd = target_to_host_fcntl_cmd(arg2);
11545 if (cmd == -TARGET_EINVAL) {
11551 case TARGET_F_GETLK64:
11552 ret = copyfrom(&fl, arg3);
11556 ret = get_errno(fcntl(arg1, cmd, &fl));
11558 ret = copyto(arg3, &fl);
11562 case TARGET_F_SETLK64:
11563 case TARGET_F_SETLKW64:
11564 ret = copyfrom(&fl, arg3);
11568 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11571 ret = do_fcntl(arg1, arg2, arg3);
11577 #ifdef TARGET_NR_cacheflush
11578 case TARGET_NR_cacheflush:
11579 /* self-modifying code is handled automatically, so nothing needed */
11583 #ifdef TARGET_NR_security
11584 case TARGET_NR_security:
11585 goto unimplemented;
11587 #ifdef TARGET_NR_getpagesize
11588 case TARGET_NR_getpagesize:
11589 ret = TARGET_PAGE_SIZE;
11592 case TARGET_NR_gettid:
11593 ret = get_errno(gettid());
11595 #ifdef TARGET_NR_readahead
11596 case TARGET_NR_readahead:
11597 #if TARGET_ABI_BITS == 32
11598 if (regpairs_aligned(cpu_env, num)) {
11603 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11605 ret = get_errno(readahead(arg1, arg2, arg3));
11610 #ifdef TARGET_NR_setxattr
11611 case TARGET_NR_listxattr:
11612 case TARGET_NR_llistxattr:
11616 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11618 ret = -TARGET_EFAULT;
11622 p = lock_user_string(arg1);
11624 if (num == TARGET_NR_listxattr) {
11625 ret = get_errno(listxattr(p, b, arg3));
11627 ret = get_errno(llistxattr(p, b, arg3));
11630 ret = -TARGET_EFAULT;
11632 unlock_user(p, arg1, 0);
11633 unlock_user(b, arg2, arg3);
11636 case TARGET_NR_flistxattr:
11640 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11642 ret = -TARGET_EFAULT;
11646 ret = get_errno(flistxattr(arg1, b, arg3));
11647 unlock_user(b, arg2, arg3);
11650 case TARGET_NR_setxattr:
11651 case TARGET_NR_lsetxattr:
11653 void *p, *n, *v = 0;
11655 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11657 ret = -TARGET_EFAULT;
11661 p = lock_user_string(arg1);
11662 n = lock_user_string(arg2);
11664 if (num == TARGET_NR_setxattr) {
11665 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11667 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11670 ret = -TARGET_EFAULT;
11672 unlock_user(p, arg1, 0);
11673 unlock_user(n, arg2, 0);
11674 unlock_user(v, arg3, 0);
11677 case TARGET_NR_fsetxattr:
11681 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11683 ret = -TARGET_EFAULT;
11687 n = lock_user_string(arg2);
11689 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11691 ret = -TARGET_EFAULT;
11693 unlock_user(n, arg2, 0);
11694 unlock_user(v, arg3, 0);
11697 case TARGET_NR_getxattr:
11698 case TARGET_NR_lgetxattr:
11700 void *p, *n, *v = 0;
11702 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11704 ret = -TARGET_EFAULT;
11708 p = lock_user_string(arg1);
11709 n = lock_user_string(arg2);
11711 if (num == TARGET_NR_getxattr) {
11712 ret = get_errno(getxattr(p, n, v, arg4));
11714 ret = get_errno(lgetxattr(p, n, v, arg4));
11717 ret = -TARGET_EFAULT;
11719 unlock_user(p, arg1, 0);
11720 unlock_user(n, arg2, 0);
11721 unlock_user(v, arg3, arg4);
11724 case TARGET_NR_fgetxattr:
11728 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11730 ret = -TARGET_EFAULT;
11734 n = lock_user_string(arg2);
11736 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11738 ret = -TARGET_EFAULT;
11740 unlock_user(n, arg2, 0);
11741 unlock_user(v, arg3, arg4);
11744 case TARGET_NR_removexattr:
11745 case TARGET_NR_lremovexattr:
11748 p = lock_user_string(arg1);
11749 n = lock_user_string(arg2);
11751 if (num == TARGET_NR_removexattr) {
11752 ret = get_errno(removexattr(p, n));
11754 ret = get_errno(lremovexattr(p, n));
11757 ret = -TARGET_EFAULT;
11759 unlock_user(p, arg1, 0);
11760 unlock_user(n, arg2, 0);
11763 case TARGET_NR_fremovexattr:
11766 n = lock_user_string(arg2);
11768 ret = get_errno(fremovexattr(arg1, n));
11770 ret = -TARGET_EFAULT;
11772 unlock_user(n, arg2, 0);
11776 #endif /* CONFIG_ATTR */
11777 #ifdef TARGET_NR_set_thread_area
11778 case TARGET_NR_set_thread_area:
11779 #if defined(TARGET_MIPS)
11780 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11783 #elif defined(TARGET_CRIS)
11785 ret = -TARGET_EINVAL;
11787 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11791 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11792 ret = do_set_thread_area(cpu_env, arg1);
11794 #elif defined(TARGET_M68K)
11796 TaskState *ts = cpu->opaque;
11797 ts->tp_value = arg1;
11802 goto unimplemented_nowarn;
11805 #ifdef TARGET_NR_get_thread_area
11806 case TARGET_NR_get_thread_area:
11807 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11808 ret = do_get_thread_area(cpu_env, arg1);
11810 #elif defined(TARGET_M68K)
11812 TaskState *ts = cpu->opaque;
11813 ret = ts->tp_value;
11817 goto unimplemented_nowarn;
11820 #ifdef TARGET_NR_getdomainname
11821 case TARGET_NR_getdomainname:
11822 goto unimplemented_nowarn;
11825 #ifdef TARGET_NR_clock_gettime
11826 case TARGET_NR_clock_gettime:
11828 struct timespec ts;
11829 ret = get_errno(clock_gettime(arg1, &ts));
11830 if (!is_error(ret)) {
11831 host_to_target_timespec(arg2, &ts);
11836 #ifdef TARGET_NR_clock_getres
11837 case TARGET_NR_clock_getres:
11839 struct timespec ts;
11840 ret = get_errno(clock_getres(arg1, &ts));
11841 if (!is_error(ret)) {
11842 host_to_target_timespec(arg2, &ts);
11847 #ifdef TARGET_NR_clock_nanosleep
11848 case TARGET_NR_clock_nanosleep:
11850 struct timespec ts;
11851 target_to_host_timespec(&ts, arg3);
11852 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11853 &ts, arg4 ? &ts : NULL));
11855 host_to_target_timespec(arg4, &ts);
11857 #if defined(TARGET_PPC)
11858 /* clock_nanosleep is odd in that it returns positive errno values.
11859 * On PPC, CR0 bit 3 should be set in such a situation. */
11860 if (ret && ret != -TARGET_ERESTARTSYS) {
11861 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11868 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11869 case TARGET_NR_set_tid_address:
11870 ret = get_errno(set_tid_address((int *)g2h(arg1)));
11874 case TARGET_NR_tkill:
11875 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11878 case TARGET_NR_tgkill:
11879 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11880 target_to_host_signal(arg3)));
11883 #ifdef TARGET_NR_set_robust_list
11884 case TARGET_NR_set_robust_list:
11885 case TARGET_NR_get_robust_list:
11886 /* The ABI for supporting robust futexes has userspace pass
11887 * the kernel a pointer to a linked list which is updated by
11888 * userspace after the syscall; the list is walked by the kernel
11889 * when the thread exits. Since the linked list in QEMU guest
11890 * memory isn't a valid linked list for the host and we have
11891 * no way to reliably intercept the thread-death event, we can't
11892 * support these. Silently return ENOSYS so that guest userspace
11893 * falls back to a non-robust futex implementation (which should
11894 * be OK except in the corner case of the guest crashing while
11895 * holding a mutex that is shared with another process via shared memory. */
11898 goto unimplemented_nowarn;
11901 #if defined(TARGET_NR_utimensat)
11902 case TARGET_NR_utimensat:
11904 struct timespec *tsp, ts[2];
11908 target_to_host_timespec(ts, arg3);
11909 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
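/* A NULL tsp asks the kernel to set both timestamps to the
 * current time, per utimensat(2).
 */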
11913 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11915 if (!(p = lock_user_string(arg2))) {
11916 ret = -TARGET_EFAULT;
11919 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11920 unlock_user(p, arg2, 0);
11925 case TARGET_NR_futex:
11926 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        break;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        break;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
    {
        struct mq_attr posix_mq_attr;
        struct mq_attr *pposix_mq_attr;
        int host_flags;

        host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
        pposix_mq_attr = NULL;
        if (arg4) {
            if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                goto efault;
            }
            pposix_mq_attr = &posix_mq_attr;
        }
        p = lock_user_string(arg1 - 1);
        if (!p) {
            goto efault;
        }
        ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
        unlock_user(p, arg1, 0);
        break;
    }
    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            ret = -TARGET_EFAULT;
            break;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_mq_timedsend:
    {
        struct timespec ts;

        p = lock_user(VERIFY_READ, arg2, arg3, 1);
        if (arg5 != 0) {
            target_to_host_timespec(&ts, arg5);
            ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
            host_to_target_timespec(arg5, &ts);
        } else {
            ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
        }
        unlock_user(p, arg2, arg3);
        break;
    }
    case TARGET_NR_mq_timedreceive:
    {
        struct timespec ts;
        unsigned int prio;

        p = lock_user(VERIFY_READ, arg2, arg3, 1);
        if (arg5 != 0) {
            target_to_host_timespec(&ts, arg5);
            ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                 &prio, &ts));
            host_to_target_timespec(arg5, &ts);
        } else {
            ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                 &prio, NULL));
        }
        unlock_user(p, arg2, arg3);
        if (arg4 != 0) {
            put_user_u32(prio, arg4);
        }
        break;
    }
    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */

    case TARGET_NR_mq_getsetattr:
    {
        struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
        ret = 0;
        if (arg3 != 0) {
            ret = mq_getattr(arg1, &posix_mq_attr_out);
            copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
        }
        if (arg2 != 0) {
            copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
            ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
        }
        break;
    }
#endif
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
    {
        ret = get_errno(tee(arg1, arg2, arg3, arg4));
        break;
    }
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
    {
        loff_t loff_in, loff_out;
        loff_t *ploff_in = NULL, *ploff_out = NULL;
        if (arg2) {
            if (get_user_u64(loff_in, arg2)) {
                goto efault;
            }
            ploff_in = &loff_in;
        }
        if (arg4) {
            if (get_user_u64(loff_out, arg4)) {
                goto efault;
            }
            ploff_out = &loff_out;
        }
        ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        if (arg2) {
            if (put_user_u64(loff_in, arg2)) {
                goto efault;
            }
        }
        if (arg4) {
            if (put_user_u64(loff_out, arg4)) {
                goto efault;
            }
        }
        break;
    }
#endif
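    /* The NULL-vs-pointer handling above mirrors the host splice(2)
     * contract: a NULL offset pointer means "use and advance the fd's own
     * file offset", while a non-NULL loff_t is read from guest memory
     * before the call and written back after it, hence the paired
     * get_user_u64/put_user_u64. */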
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
    {
        struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
        if (vec != NULL) {
            ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
            unlock_iovec(vec, arg2, arg3, 0);
        } else {
            ret = -host_to_target_errno(errno);
        }
        break;
    }
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        break;
    }
#endif
#endif /* CONFIG_EVENTFD */
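    /* Only the eventfd2 flag bits whose values differ between targets
     * (EFD_NONBLOCK/EFD_CLOEXEC, which share their numeric values with
     * O_NONBLOCK/O_CLOEXEC) are translated above; remaining bits such as
     * EFD_SEMAPHORE have the same value everywhere and are passed through
     * for the host kernel to validate. */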
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
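    /* On 32-bit ABIs a 64-bit file offset is split across two consecutive
     * syscall argument registers, and target_offset64() re-joins the
     * halves. A rough sketch of the assumed helper (the actual word order
     * depends on the target's endianness and register-pair conventions):
     *
     *     static inline uint64_t join_offset64(abi_ulong w1, abi_ulong w2)
     *     {
     *         return ((uint64_t)w1 << 32) | w2;   // hypothetical order
     *     }
     */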
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
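    /* Background on the reordering: sync_file_range2 exists for targets
     * such as ARM where 64-bit syscall arguments must sit in aligned
     * register pairs; moving the flags word up to arg2 avoids wasting a
     * register on padding. The host only provides sync_file_range(), so
     * the arguments are shuffled back into fd/offset/nbytes/flags order
     * here. */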
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        ret = do_signalfd4(arg1, arg2, arg4);
        break;
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        ret = do_signalfd4(arg1, arg2, 0);
        break;
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            ret = -TARGET_EINVAL;
            break;
        }
        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }
        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            ret = -TARGET_ENOMEM;
            break;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }
                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        break;
    }
#endif
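    /* Both cases funnel into the host's epoll_pwait(): plain epoll_wait is
     * simply epoll_pwait with a NULL sigmask, so a single safe_ wrapper
     * (which provides restart-on-signal handling) covers both flavours,
     * and only the returned events need byte-swapping back into guest
     * memory. */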
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }
        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
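    /* struct rlimit64 has the same layout on every target, so the
     * conversion above is pure byte-swapping of the two u64 fields; only
     * the resource number needs a table lookup via
     * target_to_host_resource(). */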
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        break;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
            /* don't touch guest memory once the fault has been queued */
            break;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        ret = mem_value;
        break;
    }
#endif
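    /* As the FIXME-style comment above notes, the read/compare/write
     * sequence is not atomic with respect to other guest threads; a
     * faithful implementation would wrap it in start_exclusive()/
     * end_exclusive() to park all other vCPU threads for the duration,
     * as the ARM commpage-helper emulation does. */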
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
    {
        /* Like the kernel implementation and the qemu arm barrier, no-op this? */
        ret = 0;
        break;
    }
#endif
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    break;
                }
            }
            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                goto efault;
            }
        }
        break;
    }
#endif
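    /* Host timer_t handles are never exposed to the guest. The guest
     * instead receives an index into g_posix_timers[] tagged with
     * TIMER_MAGIC, which get_timer_id() (used by the timer_* cases below)
     * validates and strips. Sketch of the assumed round-trip:
     *
     *     guest_id = TIMER_MAGIC | timer_index;   // handed out here
     *     timerid  = get_timer_id(guest_id);      // recovers timer_index,
     *                                             // or < 0 on a bad magic
     */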
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                goto efault;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                goto efault;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        fd_trans_unregister(ret);
        break;
    }
#endif
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[timerid] = 0;
        }
        break;
    }
#endif
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        break;
#endif
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
    {
        struct itimerspec its_curr;

        ret = get_errno(timerfd_gettime(arg1, &its_curr));

        if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
            goto efault;
        }
        break;
    }
#endif
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
    {
        struct itimerspec its_new, its_old, *p_new;

        if (arg3) {
            if (target_to_host_itimerspec(&its_new, arg3)) {
                goto efault;
            }
            p_new = &its_new;
        } else {
            p_new = NULL;
        }

        ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

        if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
            goto efault;
        }
        break;
    }
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        ret = get_errno(ioprio_get(arg1, arg2));
        break;
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        ret = get_errno(ioprio_set(arg1, arg2, arg3));
        break;
#endif
#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        ret = get_errno(setns(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        ret = get_errno(unshare(arg1));
        break;
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
        break;
#endif
    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) \
    || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if (do_strace) {
        print_syscall_ret(num, ret);
    }
    trace_guest_user_syscall_ret(cpu, num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;
    goto fail;
}