4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
66 #include <sys/timerfd.h>
69 #include <sys/eventfd.h>
72 #include <sys/epoll.h>
75 #include "qemu/xattr.h"
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
80 #ifdef HAVE_SYS_KCOV_H
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
97 #include <linux/mtio.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
120 #include <linux/btrfs.h>
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
126 #include "linux_loop.h"
131 #include "signal-common.h"
133 #include "user-mmap.h"
134 #include "safe-syscall.h"
135 #include "qemu/guest-random.h"
136 #include "qemu/selfmap.h"
137 #include "user/syscall-trace.h"
138 #include "qapi/error.h"
139 #include "fd-trans.h"
143 #define CLONE_IO 0x80000000 /* Clone io context */
146 /* We can't directly call the host clone syscall, because this will
147 * badly confuse libc (breaking mutexes, for example). So we must
148 * divide clone flags into:
149 * * flag combinations that look like pthread_create()
150 * * flag combinations that look like fork()
151 * * flags we can implement within QEMU itself
152 * * flags we can't support and will return an error for
154 /* For thread creation, all these flags must be present; for
155 * fork, none must be present.
157 #define CLONE_THREAD_FLAGS \
158 (CLONE_VM | CLONE_FS | CLONE_FILES | \
159 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
161 /* These flags are ignored:
162 * CLONE_DETACHED is now ignored by the kernel;
163 * CLONE_IO is just an optimisation hint to the I/O scheduler
165 #define CLONE_IGNORED_FLAGS \
166 (CLONE_DETACHED | CLONE_IO)
168 /* Flags for fork which we can implement within QEMU itself */
169 #define CLONE_OPTIONAL_FORK_FLAGS \
170 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
171 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
173 /* Flags for thread creation which we can implement within QEMU itself */
174 #define CLONE_OPTIONAL_THREAD_FLAGS \
175 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
176 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
178 #define CLONE_INVALID_FORK_FLAGS \
179 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
181 #define CLONE_INVALID_THREAD_FLAGS \
182 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
183 CLONE_IGNORED_FLAGS))
185 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
186 * have almost all been allocated. We cannot support any of
187 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
188 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
189 * The checks against the invalid thread masks above will catch these.
190 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
193 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
194 * once. This exercises the codepaths for restart.
196 //#define DEBUG_ERESTARTSYS
198 //#include <linux/msdos_fs.h>
199 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
200 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
210 #define _syscall0(type,name) \
211 static type name (void) \
213 return syscall(__NR_##name); \
216 #define _syscall1(type,name,type1,arg1) \
217 static type name (type1 arg1) \
219 return syscall(__NR_##name, arg1); \
222 #define _syscall2(type,name,type1,arg1,type2,arg2) \
223 static type name (type1 arg1,type2 arg2) \
225 return syscall(__NR_##name, arg1, arg2); \
228 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
229 static type name (type1 arg1,type2 arg2,type3 arg3) \
231 return syscall(__NR_##name, arg1, arg2, arg3); \
234 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
235 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
237 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
240 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
242 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
244 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
248 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
249 type5,arg5,type6,arg6) \
250 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
253 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
257 #define __NR_sys_uname __NR_uname
258 #define __NR_sys_getcwd1 __NR_getcwd
259 #define __NR_sys_getdents __NR_getdents
260 #define __NR_sys_getdents64 __NR_getdents64
261 #define __NR_sys_getpriority __NR_getpriority
262 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
263 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
264 #define __NR_sys_syslog __NR_syslog
265 #if defined(__NR_futex)
266 # define __NR_sys_futex __NR_futex
268 #if defined(__NR_futex_time64)
269 # define __NR_sys_futex_time64 __NR_futex_time64
271 #define __NR_sys_inotify_init __NR_inotify_init
272 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
273 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
274 #define __NR_sys_statx __NR_statx
276 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
277 #define __NR__llseek __NR_lseek
280 /* Newer kernel ports have llseek() instead of _llseek() */
281 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
282 #define TARGET_NR__llseek TARGET_NR_llseek
285 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
286 #ifndef TARGET_O_NONBLOCK_MASK
287 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
290 #define __NR_sys_gettid __NR_gettid
291 _syscall0(int, sys_gettid)
293 /* For the 64-bit guest on 32-bit host case we must emulate
294 * getdents using getdents64, because otherwise the host
295 * might hand us back more dirent records than we can fit
296 * into the guest buffer after structure format conversion.
297 * Otherwise we emulate getdents with getdents if the host has it.
299 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
300 #define EMULATE_GETDENTS_WITH_GETDENTS
303 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
304 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
306 #if (defined(TARGET_NR_getdents) && \
307 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
308 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
309 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
311 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
312 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
313 loff_t *, res, uint, wh);
315 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
316 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
318 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
319 #ifdef __NR_exit_group
320 _syscall1(int,exit_group,int,error_code)
322 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
323 _syscall1(int,set_tid_address,int *,tidptr)
325 #if defined(__NR_futex)
326 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
327 const struct timespec *,timeout,int *,uaddr2,int,val3)
329 #if defined(__NR_futex_time64)
330 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
331 const struct timespec *,timeout,int *,uaddr2,int,val3)
333 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
334 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
335 unsigned long *, user_mask_ptr);
336 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
337 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
338 unsigned long *, user_mask_ptr);
339 #define __NR_sys_getcpu __NR_getcpu
340 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
341 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
343 _syscall2(int, capget, struct __user_cap_header_struct *, header,
344 struct __user_cap_data_struct *, data);
345 _syscall2(int, capset, struct __user_cap_header_struct *, header,
346 struct __user_cap_data_struct *, data);
347 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
348 _syscall2(int, ioprio_get, int, which, int, who)
350 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
351 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
353 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
354 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
357 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
358 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
359 unsigned long, idx1, unsigned long, idx2)
363 * It is assumed that struct statx is architecture independent.
365 #if defined(TARGET_NR_statx) && defined(__NR_statx)
366 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
367 unsigned int, mask, struct target_statx *, statxbuf)
369 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
370 _syscall2(int, membarrier, int, cmd, int, flags)
/* Translation table for open()/fcntl() flag bits between guest and host.
 * Each row is { target_mask, target_bits, host_mask, host_bits }: a guest
 * flag matches when (flags & target_mask) == target_bits, and the host
 * bits are then OR'd in (and vice versa for the reverse direction).
 * Optional host flags are guarded with #if so the table still builds on
 * hosts lacking them; note some guard/#endif lines are elided in this
 * chunk. */
374 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
375 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
376 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
377 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
378 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
379 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
380 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
381 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
382 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
383 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
384 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
385 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
386 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
387 #if defined(O_DIRECT)
388 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
390 #if defined(O_NOATIME)
391 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
393 #if defined(O_CLOEXEC)
394 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
397 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
399 #if defined(O_TMPFILE)
400 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
402 /* Don't terminate the list prematurely on 64-bit host+guest. */
403 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
404 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
409 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
411 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
412 #if defined(__NR_utimensat)
413 #define __NR_sys_utimensat __NR_utimensat
414 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
415 const struct timespec *,tsp,int,flags)
/* Fallback stub used when the host lacks __NR_utimensat.  The body is
 * not visible in this chunk -- presumably it fails with ENOSYS; TODO
 * confirm against the upstream source. */
417 static int sys_utimensat(int dirfd, const char *pathname,
418 const struct timespec times[2], int flags)
424 #endif /* TARGET_NR_utimensat */
426 #ifdef TARGET_NR_renameat2
427 #if defined(__NR_renameat2)
428 #define __NR_sys_renameat2 __NR_renameat2
429 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
430 const char *, new, unsigned int, flags)
/*
 * Fallback for hosts without the renameat2 syscall: emulate via plain
 * renameat() when no flags are requested.  If the caller passed any
 * renameat2 flags (RENAME_NOREPLACE, RENAME_EXCHANGE, RENAME_WHITEOUT)
 * we must NOT silently drop them -- doing so would e.g. let a
 * RENAME_NOREPLACE rename clobber an existing file.  Fail with ENOSYS
 * instead so the guest sees an honest error.
 *
 * Returns 0 on success, -1 with errno set on failure (host convention;
 * the caller converts to target errnos).
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
442 #endif /* TARGET_NR_renameat2 */
444 #ifdef CONFIG_INOTIFY
445 #include <sys/inotify.h>
447 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin host wrapper: create an inotify instance, returning its fd
 * (or -1 with errno set). */
static int sys_inotify_init(void)
{
    return inotify_init();
}
453 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin host wrapper: add a watch for @pathname with event @mask on the
 * inotify instance @fd; returns the watch descriptor or -1/errno. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
459 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin host wrapper: remove watch descriptor @wd from inotify instance
 * @fd; returns 0 on success, -1/errno on failure. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
465 #ifdef CONFIG_INOTIFY1
466 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin host wrapper: create an inotify instance with @flags
 * (IN_NONBLOCK/IN_CLOEXEC); returns the fd or -1/errno. */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
474 /* Userspace can usually survive runtime without inotify */
475 #undef TARGET_NR_inotify_init
476 #undef TARGET_NR_inotify_init1
477 #undef TARGET_NR_inotify_add_watch
478 #undef TARGET_NR_inotify_rm_watch
479 #endif /* CONFIG_INOTIFY */
481 #if defined(TARGET_NR_prlimit64)
482 #ifndef __NR_prlimit64
483 # define __NR_prlimit64 -1
485 #define __NR_sys_prlimit64 __NR_prlimit64
486 /* The glibc rlimit structure may not be that used by the underlying syscall */
487 struct host_rlimit64 {
491 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
492 const struct host_rlimit64 *, new_limit,
493 struct host_rlimit64 *, old_limit)
497 #if defined(TARGET_NR_timer_create)
498 /* Maximum of 32 active POSIX timers allowed at any one time. */
499 static timer_t g_posix_timers[32] = { 0, } ;
/* Find and claim the first unused slot in g_posix_timers by storing the
 * sentinel (timer_t)1 into it; the return paths are elided in this
 * chunk (presumably the index on success, an error otherwise -- TODO
 * confirm).  NOTE(review): as the FIXME below says, the scan-and-claim
 * is not atomic, so two threads could race for the same slot. */
501 static inline int next_free_host_timer(void)
504 /* FIXME: Does finding the next free slot require a lock? */
505 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
506 if (g_posix_timers[k] == 0) {
507 g_posix_timers[k] = (timer_t) 1;
/* Map a host errno value to the guest's errno numbering.  The E() macro
 * together with errnos.c.inc expands to one "case X: return TARGET_X;"
 * per known errno; the #undef/default path is elided in this chunk. */
515 static inline int host_to_target_errno(int host_errno)
517 switch (host_errno) {
518 #define E(X) case X: return TARGET_##X;
519 #include "errnos.c.inc"
/* Inverse of host_to_target_errno(): map a guest errno value back to
 * the host numbering, via the same generated case list. */
526 static inline int target_to_host_errno(int target_errno)
528 switch (target_errno) {
529 #define E(X) case TARGET_##X: return X;
530 #include "errnos.c.inc"
/* Convert a raw host syscall result to target convention: on failure
 * return the negated guest errno (the success path, returning ret
 * unchanged, is elided in this chunk). */
537 static inline abi_long get_errno(abi_long ret)
540 return -host_to_target_errno(errno);
/* strerror() for guest errno values.  The two QEMU-internal pseudo
 * errnos (ERESTARTSYS restart marker and the sigreturn magic value)
 * get fixed strings; everything else is translated to a host errno and
 * handed to the host strerror(). */
545 const char *target_strerror(int err)
547 if (err == TARGET_ERESTARTSYS) {
548 return "To be restarted";
550 if (err == TARGET_QEMU_ESIGRETURN) {
551 return "Successful exit from sigreturn";
554 return strerror(target_to_host_errno(err));
557 #define safe_syscall0(type, name) \
558 static type safe_##name(void) \
560 return safe_syscall(__NR_##name); \
563 #define safe_syscall1(type, name, type1, arg1) \
564 static type safe_##name(type1 arg1) \
566 return safe_syscall(__NR_##name, arg1); \
569 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
570 static type safe_##name(type1 arg1, type2 arg2) \
572 return safe_syscall(__NR_##name, arg1, arg2); \
575 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
576 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
578 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
581 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
583 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
585 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
588 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
589 type4, arg4, type5, arg5) \
590 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
593 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
596 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
597 type4, arg4, type5, arg5, type6, arg6) \
598 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
599 type5 arg5, type6 arg6) \
601 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
604 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
605 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
606 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
607 int, flags, mode_t, mode)
608 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
609 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
610 struct rusage *, rusage)
612 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
613 int, options, struct rusage *, rusage)
614 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
615 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
616 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
617 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
618 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
620 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
621 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
622 struct timespec *, tsp, const sigset_t *, sigmask,
625 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
626 int, maxevents, int, timeout, const sigset_t *, sigmask,
628 #if defined(__NR_futex)
629 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
630 const struct timespec *,timeout,int *,uaddr2,int,val3)
632 #if defined(__NR_futex_time64)
633 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
634 const struct timespec *,timeout,int *,uaddr2,int,val3)
636 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
637 safe_syscall2(int, kill, pid_t, pid, int, sig)
638 safe_syscall2(int, tkill, int, tid, int, sig)
639 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
640 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
641 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
642 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
643 unsigned long, pos_l, unsigned long, pos_h)
644 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
645 unsigned long, pos_l, unsigned long, pos_h)
646 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
648 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
649 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
650 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
651 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
652 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
653 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
654 safe_syscall2(int, flock, int, fd, int, operation)
655 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
656 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
657 const struct timespec *, uts, size_t, sigsetsize)
659 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
661 #if defined(TARGET_NR_nanosleep)
662 safe_syscall2(int, nanosleep, const struct timespec *, req,
663 struct timespec *, rem)
665 #if defined(TARGET_NR_clock_nanosleep) || \
666 defined(TARGET_NR_clock_nanosleep_time64)
667 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
668 const struct timespec *, req, struct timespec *, rem)
672 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
675 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
676 void *, ptr, long, fifth)
680 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
684 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
685 long, msgtype, int, flags)
687 #ifdef __NR_semtimedop
688 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
689 unsigned, nsops, const struct timespec *, timeout)
691 #if defined(TARGET_NR_mq_timedsend) || \
692 defined(TARGET_NR_mq_timedsend_time64)
693 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
694 size_t, len, unsigned, prio, const struct timespec *, timeout)
696 #if defined(TARGET_NR_mq_timedreceive) || \
697 defined(TARGET_NR_mq_timedreceive_time64)
698 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
699 size_t, len, unsigned *, prio, const struct timespec *, timeout)
701 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
702 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
703 int, outfd, loff_t *, poutoff, size_t, length,
707 /* We do ioctl like this rather than via safe_syscall3 to preserve the
708 * "third argument might be integer or pointer or not present" behaviour of
711 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
712 /* Similarly for fcntl. Note that callers must always:
713 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
714 * use the flock64 struct rather than unsuffixed flock
715 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
718 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
720 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
/* Convert a host socket type (SOCK_STREAM/SOCK_DGRAM plus the
 * SOCK_CLOEXEC/SOCK_NONBLOCK modifier bits) to the guest encoding.
 * The low nibble is the base type; unknown base types pass through
 * unchanged.  Several case labels and the return are elided in this
 * chunk. */
723 static inline int host_to_target_sock_type(int host_type)
727 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
729 target_type = TARGET_SOCK_DGRAM;
732 target_type = TARGET_SOCK_STREAM;
735 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
739 #if defined(SOCK_CLOEXEC)
740 if (host_type & SOCK_CLOEXEC) {
741 target_type |= TARGET_SOCK_CLOEXEC;
745 #if defined(SOCK_NONBLOCK)
746 if (host_type & SOCK_NONBLOCK) {
747 target_type |= TARGET_SOCK_NONBLOCK;
754 static abi_ulong target_brk;
755 static abi_ulong target_original_brk;
756 static abi_ulong brk_page;
/* Record the initial guest program break (set by the loader).  Both the
 * current and the original brk start host-page aligned; brk_page tracks
 * the top of the last host page reserved for the heap. */
758 void target_set_brk(abi_ulong new_brk)
760 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
761 brk_page = HOST_PAGE_ALIGN(target_brk);
764 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
765 #define DEBUGF_BRK(message, args...)
767 /* do_brk() must return target values and target errnos. */
/* Emulate the guest brk() syscall; returns target values/errnos.
 * Strategy: queries and shrink-below-original requests return the
 * current break; growth within the already-reserved page just zeroes
 * the new region; otherwise new pages are mapped after brk_page.
 * Note several early-return lines are elided in this chunk. */
768 abi_long do_brk(abi_ulong new_brk)
770 abi_long mapped_addr;
771 abi_ulong new_alloc_size;
773 /* brk pointers are always untagged */
775 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
778 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
781 if (new_brk < target_original_brk) {
782 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
787 /* If the new brk is less than the highest page reserved to the
788 * target heap allocation, set it and we're almost done... */
789 if (new_brk <= brk_page) {
790 /* Heap contents are initialized to zero, as for anonymous
792 if (new_brk > target_brk) {
793 memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
795 target_brk = new_brk;
796 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
800 /* We need to allocate more memory after the brk... Note that
801 * we don't use MAP_FIXED because that will map over the top of
802 * any existing mapping (like the one with the host libc or qemu
803 * itself); instead we treat "mapped but at wrong address" as
804 * a failure and unmap again.
806 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
807 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
808 PROT_READ|PROT_WRITE,
809 MAP_ANON|MAP_PRIVATE, 0, 0));
811 if (mapped_addr == brk_page) {
812 /* Heap contents are initialized to zero, as for anonymous
813 * mapped pages. Technically the new pages are already
814 * initialized to zero since they *are* anonymous mapped
815 * pages, however we have to take care with the contents that
816 * come from the remaining part of the previous page: it may
817 * contains garbage data due to a previous heap usage (grown
819 memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
821 target_brk = new_brk;
822 brk_page = HOST_PAGE_ALIGN(target_brk);
823 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
826 } else if (mapped_addr != -1) {
827 /* Mapped but at wrong address, meaning there wasn't actually
828 * enough space for this brk.
830 target_munmap(mapped_addr, new_alloc_size);
832 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
835 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
838 #if defined(TARGET_ALPHA)
839 /* We (partially) emulate OSF/1 on Alpha, which requires we
840 return a proper errno, not an unchanged brk value. */
841 return -TARGET_ENOMEM;
843 /* For everything else, return the previous break. */
847 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
848 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/* Unmarshal a guest fd_set (packed abi_ulong words, target byte order)
 * at target_fds_addr into the host fd_set *fds, for descriptors
 * [0, n).  Returns 0 on success, -TARGET_EFAULT if the guest memory
 * cannot be locked.  The FD_ZERO/FD_SET lines inside the bit loop are
 * elided in this chunk. */
849 static inline abi_long copy_from_user_fdset(fd_set *fds,
850 abi_ulong target_fds_addr,
854 abi_ulong b, *target_fds;
856 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
857 if (!(target_fds = lock_user(VERIFY_READ,
859 sizeof(abi_ulong) * nw,
861 return -TARGET_EFAULT;
865 for (i = 0; i < nw; i++) {
866 /* grab the abi_ulong */
867 __get_user(b, &target_fds[i]);
868 for (j = 0; j < TARGET_ABI_BITS; j++) {
869 /* check the bit inside the abi_ulong */
876 unlock_user(target_fds, target_fds_addr, 0);
/* Like copy_from_user_fdset(), but treats a NULL (zero) guest address
 * as "no fd_set": *fds_ptr is then presumably set to NULL rather than
 * to fds (the else branch is elided in this chunk -- TODO confirm). */
881 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
882 abi_ulong target_fds_addr,
885 if (target_fds_addr) {
886 if (copy_from_user_fdset(fds, target_fds_addr, n))
887 return -TARGET_EFAULT;
/* Marshal a host fd_set back into the guest representation at
 * target_fds_addr: each abi_ulong word is rebuilt bit-by-bit from
 * FD_ISSET results and stored with __put_user (which handles byte
 * swapping).  Returns -TARGET_EFAULT if guest memory cannot be locked
 * for writing; success return is elided in this chunk. */
895 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
901 abi_ulong *target_fds;
903 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
904 if (!(target_fds = lock_user(VERIFY_WRITE,
906 sizeof(abi_ulong) * nw,
908 return -TARGET_EFAULT;
911 for (i = 0; i < nw; i++) {
913 for (j = 0; j < TARGET_ABI_BITS; j++) {
914 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
917 __put_user(v, &target_fds[i]);
920 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
926 #if defined(__alpha__)
/* Rescale a clock_t tick count from the host's HZ to the guest's HZ;
 * when the two rates match the value passes through (that branch's
 * return is elided in this chunk).  The int64_t cast avoids overflow
 * in the intermediate multiply. */
932 static inline abi_long host_to_target_clock_t(long ticks)
934 #if HOST_HZ == TARGET_HZ
937 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
/* Copy a host struct rusage out to guest memory at target_addr,
 * byte-swapping every field with tswapal().  Returns -TARGET_EFAULT if
 * the guest struct cannot be locked; the success return is elided in
 * this chunk. */
941 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
942 const struct rusage *rusage)
944 struct target_rusage *target_rusage;
946 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
947 return -TARGET_EFAULT;
948 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
949 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
950 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
951 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
952 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
953 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
954 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
955 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
956 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
957 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
958 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
959 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
960 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
961 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
962 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
963 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
964 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
965 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
966 unlock_user_struct(target_rusage, target_addr, 1);
971 #ifdef TARGET_NR_setrlimit
/* Convert a guest rlimit value (target byte order) to a host rlim_t.
 * The guest's RLIM_INFINITY maps to the host's, and any value that
 * does not survive the round-trip through rlim_t (i.e. overflows the
 * host type) is also treated as infinity; the final plain return is
 * elided in this chunk. */
972 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
974 abi_ulong target_rlim_swap;
977 target_rlim_swap = tswapal(target_rlim);
978 if (target_rlim_swap == TARGET_RLIM_INFINITY)
979 return RLIM_INFINITY;
981 result = target_rlim_swap;
982 if (target_rlim_swap != (rlim_t)result)
983 return RLIM_INFINITY;
989 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
/* Inverse of target_to_host_rlim(): host infinity, or any value that
 * cannot be represented in the guest's abi_long, becomes the guest's
 * RLIM_INFINITY; the result is byte-swapped for the guest.  The final
 * return of `result` is elided in this chunk. */
990 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
992 abi_ulong target_rlim_swap;
995 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
996 target_rlim_swap = TARGET_RLIM_INFINITY;
998 target_rlim_swap = rlim;
999 result = tswapal(target_rlim_swap);
/* Map a guest RLIMIT_* resource code to the host constant; several
 * return lines and the default case are elided in this chunk
 * (presumably unknown codes pass through unchanged -- TODO confirm). */
1005 static inline int target_to_host_resource(int code)
1008 case TARGET_RLIMIT_AS:
1010 case TARGET_RLIMIT_CORE:
1012 case TARGET_RLIMIT_CPU:
1014 case TARGET_RLIMIT_DATA:
1016 case TARGET_RLIMIT_FSIZE:
1017 return RLIMIT_FSIZE;
1018 case TARGET_RLIMIT_LOCKS:
1019 return RLIMIT_LOCKS;
1020 case TARGET_RLIMIT_MEMLOCK:
1021 return RLIMIT_MEMLOCK;
1022 case TARGET_RLIMIT_MSGQUEUE:
1023 return RLIMIT_MSGQUEUE;
1024 case TARGET_RLIMIT_NICE:
1026 case TARGET_RLIMIT_NOFILE:
1027 return RLIMIT_NOFILE;
1028 case TARGET_RLIMIT_NPROC:
1029 return RLIMIT_NPROC;
1030 case TARGET_RLIMIT_RSS:
1032 case TARGET_RLIMIT_RTPRIO:
1033 return RLIMIT_RTPRIO;
1034 case TARGET_RLIMIT_SIGPENDING:
1035 return RLIMIT_SIGPENDING;
1036 case TARGET_RLIMIT_STACK:
1037 return RLIMIT_STACK;
/* Read a guest struct timeval at target_tv_addr into the host *tv.
 * __get_user handles the byte swap.  Returns -TARGET_EFAULT if guest
 * memory cannot be locked; the success return is elided here. */
1043 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1044 abi_ulong target_tv_addr)
1046 struct target_timeval *target_tv;
1048 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1049 return -TARGET_EFAULT;
1052 __get_user(tv->tv_sec, &target_tv->tv_sec);
1053 __get_user(tv->tv_usec, &target_tv->tv_usec);
1055 unlock_user_struct(target_tv, target_tv_addr, 0);
/* Write the host *tv out as a guest struct timeval at target_tv_addr.
 * __put_user handles the byte swap.  Returns -TARGET_EFAULT on lock
 * failure; the success return is elided here. */
1060 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1061 const struct timeval *tv)
1063 struct target_timeval *target_tv;
1065 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1066 return -TARGET_EFAULT;
1069 __put_user(tv->tv_sec, &target_tv->tv_sec);
1070 __put_user(tv->tv_usec, &target_tv->tv_usec);
1072 unlock_user_struct(target_tv, target_tv_addr, 1);
1077 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/* As copy_from_user_timeval(), but the guest-side layout is the
 * 64-bit-time __kernel_sock_timeval used by *_time64 syscalls. */
1078 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1079 abi_ulong target_tv_addr)
1081 struct target__kernel_sock_timeval *target_tv;
1083 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1084 return -TARGET_EFAULT;
1087 __get_user(tv->tv_sec, &target_tv->tv_sec);
1088 __get_user(tv->tv_usec, &target_tv->tv_usec);
1090 unlock_user_struct(target_tv, target_tv_addr, 0);
/* As copy_to_user_timeval(), but writing the 64-bit-time
 * __kernel_sock_timeval guest layout used by *_time64 syscalls. */
1096 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1097 const struct timeval *tv)
1099 struct target__kernel_sock_timeval *target_tv;
1101 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1102 return -TARGET_EFAULT;
1105 __put_user(tv->tv_sec, &target_tv->tv_sec);
1106 __put_user(tv->tv_usec, &target_tv->tv_usec);
1108 unlock_user_struct(target_tv, target_tv_addr, 1);
/*
 * Guard fix: the original condition tested defined(TARGET_NR_pselect6)
 * twice on the same line; the duplicate is removed (no semantic change).
 */
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Convert a guest struct timespec at target_addr into the host *host_ts.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is unreadable.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1137 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1138 defined(TARGET_NR_timer_settime64) || \
1139 defined(TARGET_NR_mq_timedsend_time64) || \
1140 defined(TARGET_NR_mq_timedreceive_time64) || \
1141 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1142 defined(TARGET_NR_clock_nanosleep_time64) || \
1143 defined(TARGET_NR_rt_sigtimedwait_time64) || \
1144 defined(TARGET_NR_utimensat) || \
1145 defined(TARGET_NR_utimensat_time64) || \
1146 defined(TARGET_NR_semtimedop_time64) || \
1147 defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1148 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1149 abi_ulong target_addr)
1151 struct target__kernel_timespec *target_ts;
1153 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1154 return -TARGET_EFAULT;
1156 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1157 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1158 /* in 32bit mode, this drops the padding */
1159 host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1160 unlock_user_struct(target_ts, target_addr, 0);
1165 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1166 struct timespec *host_ts)
1168 struct target_timespec *target_ts;
1170 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1171 return -TARGET_EFAULT;
1173 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1174 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1175 unlock_user_struct(target_ts, target_addr, 1);
1179 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1180 struct timespec *host_ts)
1182 struct target__kernel_timespec *target_ts;
1184 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1185 return -TARGET_EFAULT;
1187 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1188 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1189 unlock_user_struct(target_ts, target_addr, 1);
1193 #if defined(TARGET_NR_gettimeofday)
1194 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1195 struct timezone *tz)
1197 struct target_timezone *target_tz;
1199 if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1200 return -TARGET_EFAULT;
1203 __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1204 __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1206 unlock_user_struct(target_tz, target_tz_addr, 1);
1212 #if defined(TARGET_NR_settimeofday)
1213 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1214 abi_ulong target_tz_addr)
1216 struct target_timezone *target_tz;
1218 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1219 return -TARGET_EFAULT;
1222 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1223 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1225 unlock_user_struct(target_tz, target_tz_addr, 0);
1231 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1234 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1235 abi_ulong target_mq_attr_addr)
1237 struct target_mq_attr *target_mq_attr;
1239 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1240 target_mq_attr_addr, 1))
1241 return -TARGET_EFAULT;
1243 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1244 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1245 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1246 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1248 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1253 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1254 const struct mq_attr *attr)
1256 struct target_mq_attr *target_mq_attr;
1258 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1259 target_mq_attr_addr, 0))
1260 return -TARGET_EFAULT;
1262 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1263 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1264 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1265 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1267 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1273 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1274 /* do_select() must return target values and target errnos. */
1275 static abi_long do_select(int n,
1276 abi_ulong rfd_addr, abi_ulong wfd_addr,
1277 abi_ulong efd_addr, abi_ulong target_tv_addr)
1279 fd_set rfds, wfds, efds;
1280 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1282 struct timespec ts, *ts_ptr;
1285 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1289 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1293 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1298 if (target_tv_addr) {
1299 if (copy_from_user_timeval(&tv, target_tv_addr))
1300 return -TARGET_EFAULT;
1301 ts.tv_sec = tv.tv_sec;
1302 ts.tv_nsec = tv.tv_usec * 1000;
1308 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1311 if (!is_error(ret)) {
1312 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1313 return -TARGET_EFAULT;
1314 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1315 return -TARGET_EFAULT;
1316 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1317 return -TARGET_EFAULT;
1319 if (target_tv_addr) {
1320 tv.tv_sec = ts.tv_sec;
1321 tv.tv_usec = ts.tv_nsec / 1000;
1322 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1323 return -TARGET_EFAULT;
1331 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1332 static abi_long do_old_select(abi_ulong arg1)
1334 struct target_sel_arg_struct *sel;
1335 abi_ulong inp, outp, exp, tvp;
1338 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1339 return -TARGET_EFAULT;
1342 nsel = tswapal(sel->n);
1343 inp = tswapal(sel->inp);
1344 outp = tswapal(sel->outp);
1345 exp = tswapal(sel->exp);
1346 tvp = tswapal(sel->tvp);
1348 unlock_user_struct(sel, arg1, 0);
1350 return do_select(nsel, inp, outp, exp, tvp);
1355 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/*
 * Emulate pselect6/pselect6_time64 for the guest: converts the fd sets,
 * the timespec (32- or 64-bit layout) and the packed sigset argument,
 * then calls safe_pselect6 and copies results back.
 * NOTE(review): this excerpt is missing interleaved source lines (the
 * original file numbering is preserved below); comments describe only
 * the visible logic.
 */
1356 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1357 abi_long arg4, abi_long arg5, abi_long arg6,
1360 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1361 fd_set rfds, wfds, efds;
1362 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1363 struct timespec ts, *ts_ptr;
1367 * The 6th arg is actually two args smashed together,
1368 * so we cannot use the C library.
1376 abi_ulong arg_sigset, arg_sigsize, *arg7;
1377 target_sigset_t *target_sigset;
/* Convert each guest fd set; a non-zero return is already a target errno. */
1385 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1389 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1393 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1399 * This takes a timespec, and not a timeval, so we cannot
1400 * use the do_select() helper ...
/* time64 variant uses the 64-bit __kernel_timespec guest layout. */
1404 if (target_to_host_timespec64(&ts, ts_addr)) {
1405 return -TARGET_EFAULT;
1408 if (target_to_host_timespec(&ts, ts_addr)) {
1409 return -TARGET_EFAULT;
1417 /* Extract the two packed args for the sigset */
1420 sig.size = SIGSET_T_SIZE;
/* arg6 points at {sigset pointer, sigset size} in guest memory. */
1422 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1424 return -TARGET_EFAULT;
1426 arg_sigset = tswapal(arg7[0]);
1427 arg_sigsize = tswapal(arg7[1]);
1428 unlock_user(arg7, arg6, 0);
1432 if (arg_sigsize != sizeof(*target_sigset)) {
1433 /* Like the kernel, we enforce correct size sigsets */
1434 return -TARGET_EINVAL;
1436 target_sigset = lock_user(VERIFY_READ, arg_sigset,
1437 sizeof(*target_sigset), 1);
1438 if (!target_sigset) {
1439 return -TARGET_EFAULT;
1441 target_to_host_sigset(&set, target_sigset);
1442 unlock_user(target_sigset, arg_sigset, 0);
1450 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
/* On success, write back fd sets and the remaining timeout. */
1453 if (!is_error(ret)) {
1454 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1455 return -TARGET_EFAULT;
1457 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1458 return -TARGET_EFAULT;
1460 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1461 return -TARGET_EFAULT;
1464 if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1465 return -TARGET_EFAULT;
1468 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1469 return -TARGET_EFAULT;
1477 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1478 defined(TARGET_NR_ppoll_time64)
/*
 * Emulate poll/ppoll/ppoll_time64: converts the guest pollfd array,
 * the optional timeout (ms for poll, timespec for ppoll) and optional
 * sigset, calls safe_ppoll, and writes revents back to the guest.
 * NOTE(review): this excerpt is missing interleaved source lines (the
 * original file numbering is preserved below); comments describe only
 * the visible logic.
 */
1479 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1480 abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1482 struct target_pollfd *target_pfd;
1483 unsigned int nfds = arg2;
/* Guard the multiplication below against overflow. */
1491 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1492 return -TARGET_EINVAL;
1494 target_pfd = lock_user(VERIFY_WRITE, arg1,
1495 sizeof(struct target_pollfd) * nfds, 1);
1497 return -TARGET_EFAULT;
/* Build a host pollfd array from the guest one. */
1500 pfd = alloca(sizeof(struct pollfd) * nfds);
1501 for (i = 0; i < nfds; i++) {
1502 pfd[i].fd = tswap32(target_pfd[i].fd);
1503 pfd[i].events = tswap16(target_pfd[i].events);
/* ppoll path: timespec timeout plus optional sigset. */
1507 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1508 target_sigset_t *target_set;
1509 sigset_t _set, *set = &_set;
1513 if (target_to_host_timespec64(timeout_ts, arg3)) {
1514 unlock_user(target_pfd, arg1, 0);
1515 return -TARGET_EFAULT;
1518 if (target_to_host_timespec(timeout_ts, arg3)) {
1519 unlock_user(target_pfd, arg1, 0);
1520 return -TARGET_EFAULT;
/* Like the kernel, reject wrongly-sized sigsets. */
1528 if (arg5 != sizeof(target_sigset_t)) {
1529 unlock_user(target_pfd, arg1, 0);
1530 return -TARGET_EINVAL;
1533 target_set = lock_user(VERIFY_READ, arg4,
1534 sizeof(target_sigset_t), 1);
1536 unlock_user(target_pfd, arg1, 0);
1537 return -TARGET_EFAULT;
1539 target_to_host_sigset(set, target_set);
1544 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1545 set, SIGSET_T_SIZE));
/* Write back the remaining timeout on success. */
1547 if (!is_error(ret) && arg3) {
1549 if (host_to_target_timespec64(arg3, timeout_ts)) {
1550 return -TARGET_EFAULT;
1553 if (host_to_target_timespec(arg3, timeout_ts)) {
1554 return -TARGET_EFAULT;
1559 unlock_user(target_set, arg4, 0);
/* plain poll() path: millisecond timeout converted to a timespec. */
1562 struct timespec ts, *pts;
1565 /* Convert ms to secs, ns */
1566 ts.tv_sec = arg3 / 1000;
1567 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1570 /* -ve poll() timeout means "infinite" */
1573 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
/* Copy revents back to the guest pollfd array. */
1576 if (!is_error(ret)) {
1577 for (i = 0; i < nfds; i++) {
1578 target_pfd[i].revents = tswap16(pfd[i].revents);
1581 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1586 static abi_long do_pipe2(int host_pipe[], int flags)
1589 return pipe2(host_pipe, flags);
1595 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1596 int flags, int is_pipe2)
1600 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1603 return get_errno(ret);
1605 /* Several targets have special calling conventions for the original
1606 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1608 #if defined(TARGET_ALPHA)
1609 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1610 return host_pipe[0];
1611 #elif defined(TARGET_MIPS)
1612 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1613 return host_pipe[0];
1614 #elif defined(TARGET_SH4)
1615 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1616 return host_pipe[0];
1617 #elif defined(TARGET_SPARC)
1618 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1619 return host_pipe[0];
1623 if (put_user_s32(host_pipe[0], pipedes)
1624 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1625 return -TARGET_EFAULT;
1626 return get_errno(ret);
1629 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1630 abi_ulong target_addr,
1633 struct target_ip_mreqn *target_smreqn;
1635 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1637 return -TARGET_EFAULT;
1638 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1639 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1640 if (len == sizeof(struct target_ip_mreqn))
1641 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1642 unlock_user(target_smreqn, target_addr, 0);
1647 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1648 abi_ulong target_addr,
1651 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1652 sa_family_t sa_family;
1653 struct target_sockaddr *target_saddr;
1655 if (fd_trans_target_to_host_addr(fd)) {
1656 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1659 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1661 return -TARGET_EFAULT;
1663 sa_family = tswap16(target_saddr->sa_family);
1665 /* Oops. The caller might send a incomplete sun_path; sun_path
1666 * must be terminated by \0 (see the manual page), but
1667 * unfortunately it is quite common to specify sockaddr_un
1668 * length as "strlen(x->sun_path)" while it should be
1669 * "strlen(...) + 1". We'll fix that here if needed.
1670 * Linux kernel has a similar feature.
1673 if (sa_family == AF_UNIX) {
1674 if (len < unix_maxlen && len > 0) {
1675 char *cp = (char*)target_saddr;
1677 if ( cp[len-1] && !cp[len] )
1680 if (len > unix_maxlen)
1684 memcpy(addr, target_saddr, len);
1685 addr->sa_family = sa_family;
1686 if (sa_family == AF_NETLINK) {
1687 struct sockaddr_nl *nladdr;
1689 nladdr = (struct sockaddr_nl *)addr;
1690 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1691 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1692 } else if (sa_family == AF_PACKET) {
1693 struct target_sockaddr_ll *lladdr;
1695 lladdr = (struct target_sockaddr_ll *)addr;
1696 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1697 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1699 unlock_user(target_saddr, target_addr, 0);
1704 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1705 struct sockaddr *addr,
1708 struct target_sockaddr *target_saddr;
1715 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1717 return -TARGET_EFAULT;
1718 memcpy(target_saddr, addr, len);
1719 if (len >= offsetof(struct target_sockaddr, sa_family) +
1720 sizeof(target_saddr->sa_family)) {
1721 target_saddr->sa_family = tswap16(addr->sa_family);
1723 if (addr->sa_family == AF_NETLINK &&
1724 len >= sizeof(struct target_sockaddr_nl)) {
1725 struct target_sockaddr_nl *target_nl =
1726 (struct target_sockaddr_nl *)target_saddr;
1727 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1728 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1729 } else if (addr->sa_family == AF_PACKET) {
1730 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1731 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1732 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1733 } else if (addr->sa_family == AF_INET6 &&
1734 len >= sizeof(struct target_sockaddr_in6)) {
1735 struct target_sockaddr_in6 *target_in6 =
1736 (struct target_sockaddr_in6 *)target_saddr;
1737 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1739 unlock_user(target_saddr, target_addr, len);
/*
 * Convert the ancillary-data (cmsg) chain of a guest msghdr into the
 * pre-allocated host msghdr, walking both chains in lockstep.
 * NOTE(review): this excerpt is missing interleaved source lines (the
 * original file numbering is preserved below); comments describe only
 * the visible logic.
 */
1744 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1745 struct target_msghdr *target_msgh)
1747 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1748 abi_long msg_controllen;
1749 abi_ulong target_cmsg_addr;
1750 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1751 socklen_t space = 0;
/* Nothing to do if the guest control area can't hold one header. */
1753 msg_controllen = tswapal(target_msgh->msg_controllen);
1754 if (msg_controllen < sizeof (struct target_cmsghdr))
1756 target_cmsg_addr = tswapal(target_msgh->msg_control);
1757 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1758 target_cmsg_start = target_cmsg;
1760 return -TARGET_EFAULT;
/* Walk guest and host cmsg chains together. */
1762 while (cmsg && target_cmsg) {
1763 void *data = CMSG_DATA(cmsg);
1764 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1766 int len = tswapal(target_cmsg->cmsg_len)
1767 - sizeof(struct target_cmsghdr);
/* Check this message still fits in the host control buffer. */
1769 space += CMSG_SPACE(len);
1770 if (space > msgh->msg_controllen) {
1771 space -= CMSG_SPACE(len);
1772 /* This is a QEMU bug, since we allocated the payload
1773 * area ourselves (unlike overflow in host-to-target
1774 * conversion, which is just the guest giving us a buffer
1775 * that's too small). It can't happen for the payload types
1776 * we currently support; if it becomes an issue in future
1777 * we would need to improve our allocation strategy to
1778 * something more intelligent than "twice the size of the
1779 * target buffer we're reading from".
1781 qemu_log_mask(LOG_UNIMP,
1782 ("Unsupported ancillary data %d/%d: "
1783 "unhandled msg size\n"),
1784 tswap32(target_cmsg->cmsg_level),
1785 tswap32(target_cmsg->cmsg_type));
/* Translate the cmsg level/type into host values. */
1789 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1790 cmsg->cmsg_level = SOL_SOCKET;
1792 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1794 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1795 cmsg->cmsg_len = CMSG_LEN(len);
/* SCM_RIGHTS: an array of ints (file descriptors). */
1797 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1798 int *fd = (int *)data;
1799 int *target_fd = (int *)target_data;
1800 int i, numfds = len / sizeof(int);
1802 for (i = 0; i < numfds; i++) {
1803 __get_user(fd[i], target_fd + i);
/* SCM_CREDENTIALS: pid/uid/gid converted field by field. */
1805 } else if (cmsg->cmsg_level == SOL_SOCKET
1806 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1807 struct ucred *cred = (struct ucred *)data;
1808 struct target_ucred *target_cred =
1809 (struct target_ucred *)target_data;
1811 __get_user(cred->pid, &target_cred->pid);
1812 __get_user(cred->uid, &target_cred->uid);
1813 __get_user(cred->gid, &target_cred->gid);
/* Anything else is passed through verbatim with a log warning. */
1815 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1816 cmsg->cmsg_level, cmsg->cmsg_type);
1817 memcpy(data, target_data, len);
1820 cmsg = CMSG_NXTHDR(msgh, cmsg);
1821 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1824 unlock_user(target_cmsg, target_cmsg_addr, 0);
1826 msgh->msg_controllen = space;
/*
 * Convert the ancillary-data (cmsg) chain of a host msghdr back into
 * the guest msghdr, handling payload types whose size or endianness
 * differs between host and guest, and reporting guest-buffer
 * truncation via MSG_CTRUNC.
 * NOTE(review): this excerpt is missing interleaved source lines (the
 * original file numbering is preserved below); comments describe only
 * the visible logic.
 */
1830 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1831 struct msghdr *msgh)
1833 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1834 abi_long msg_controllen;
1835 abi_ulong target_cmsg_addr;
1836 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1837 socklen_t space = 0;
1839 msg_controllen = tswapal(target_msgh->msg_controllen);
1840 if (msg_controllen < sizeof (struct target_cmsghdr))
1842 target_cmsg_addr = tswapal(target_msgh->msg_control);
1843 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1844 target_cmsg_start = target_cmsg;
1846 return -TARGET_EFAULT;
1848 while (cmsg && target_cmsg) {
1849 void *data = CMSG_DATA(cmsg);
1850 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1852 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1853 int tgt_len, tgt_space;
1855 /* We never copy a half-header but may copy half-data;
1856 * this is Linux's behaviour in put_cmsg(). Note that
1857 * truncation here is a guest problem (which we report
1858 * to the guest via the CTRUNC bit), unlike truncation
1859 * in target_to_host_cmsg, which is a QEMU bug.
1861 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1862 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
/* Translate level/type into guest values. */
1866 if (cmsg->cmsg_level == SOL_SOCKET) {
1867 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1869 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1871 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1873 /* Payload types which need a different size of payload on
1874 * the target must adjust tgt_len here.
1877 switch (cmsg->cmsg_level) {
1879 switch (cmsg->cmsg_type) {
1881 tgt_len = sizeof(struct target_timeval);
/* Clamp tgt_len and flag truncation if the guest buffer is short. */
1891 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1892 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1893 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1896 /* We must now copy-and-convert len bytes of payload
1897 * into tgt_len bytes of destination space. Bear in mind
1898 * that in both source and destination we may be dealing
1899 * with a truncated value!
1901 switch (cmsg->cmsg_level) {
1903 switch (cmsg->cmsg_type) {
/* SCM_RIGHTS: copy as many whole fds as fit in tgt_len. */
1906 int *fd = (int *)data;
1907 int *target_fd = (int *)target_data;
1908 int i, numfds = tgt_len / sizeof(int);
1910 for (i = 0; i < numfds; i++) {
1911 __put_user(fd[i], target_fd + i);
/* SO_TIMESTAMP: host timeval converted to guest layout. */
1917 struct timeval *tv = (struct timeval *)data;
1918 struct target_timeval *target_tv =
1919 (struct target_timeval *)target_data;
1921 if (len != sizeof(struct timeval) ||
1922 tgt_len != sizeof(struct target_timeval)) {
1926 /* copy struct timeval to target */
1927 __put_user(tv->tv_sec, &target_tv->tv_sec);
1928 __put_user(tv->tv_usec, &target_tv->tv_usec);
1931 case SCM_CREDENTIALS:
1933 struct ucred *cred = (struct ucred *)data;
1934 struct target_ucred *target_cred =
1935 (struct target_ucred *)target_data;
1937 __put_user(cred->pid, &target_cred->pid);
1938 __put_user(cred->uid, &target_cred->uid);
1939 __put_user(cred->gid, &target_cred->gid);
/* IPv4 level: u32 payloads and extended error reports. */
1948 switch (cmsg->cmsg_type) {
1951 uint32_t *v = (uint32_t *)data;
1952 uint32_t *t_int = (uint32_t *)target_data;
1954 if (len != sizeof(uint32_t) ||
1955 tgt_len != sizeof(uint32_t)) {
1958 __put_user(*v, t_int);
1964 struct sock_extended_err ee;
1965 struct sockaddr_in offender;
1967 struct errhdr_t *errh = (struct errhdr_t *)data;
1968 struct errhdr_t *target_errh =
1969 (struct errhdr_t *)target_data;
1971 if (len != sizeof(struct errhdr_t) ||
1972 tgt_len != sizeof(struct errhdr_t)) {
1975 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1976 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1977 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1978 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1979 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1980 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1981 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1982 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1983 (void *) &errh->offender, sizeof(errh->offender));
/* IPv6 level: same shapes with sockaddr_in6 offender. */
1992 switch (cmsg->cmsg_type) {
1995 uint32_t *v = (uint32_t *)data;
1996 uint32_t *t_int = (uint32_t *)target_data;
1998 if (len != sizeof(uint32_t) ||
1999 tgt_len != sizeof(uint32_t)) {
2002 __put_user(*v, t_int);
2008 struct sock_extended_err ee;
2009 struct sockaddr_in6 offender;
2011 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2012 struct errhdr6_t *target_errh =
2013 (struct errhdr6_t *)target_data;
2015 if (len != sizeof(struct errhdr6_t) ||
2016 tgt_len != sizeof(struct errhdr6_t)) {
2019 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2020 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2021 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
2022 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2023 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2024 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2025 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2026 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2027 (void *) &errh->offender, sizeof(errh->offender));
/* Fallback: raw copy, zero-padding if the guest needs more bytes. */
2037 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2038 cmsg->cmsg_level, cmsg->cmsg_type);
2039 memcpy(target_data, data, MIN(len, tgt_len));
2040 if (tgt_len > len) {
2041 memset(target_data + len, 0, tgt_len - len);
/* Account for the space consumed in the guest control buffer. */
2045 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2046 tgt_space = TARGET_CMSG_SPACE(tgt_len);
2047 if (msg_controllen < tgt_space) {
2048 tgt_space = msg_controllen;
2050 msg_controllen -= tgt_space;
2052 cmsg = CMSG_NXTHDR(msgh, cmsg);
2053 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2056 unlock_user(target_cmsg, target_cmsg_addr, space);
2058 target_msgh->msg_controllen = tswapal(space);
2062 /* do_setsockopt() Must return target values and target errnos. */
/*
 * Translate a guest setsockopt() call: convert the option payload from
 * the guest layout/endianness for each supported level (TCP/UDP, IP,
 * IPV6, ICMPV6, ALG, SOL_SOCKET, NETLINK) and forward it to the host.
 * NOTE(review): this excerpt is missing interleaved source lines (the
 * original file numbering is preserved below); comments describe only
 * the visible logic.
 */
2063 static abi_long do_setsockopt(int sockfd, int level, int optname,
2064 abi_ulong optval_addr, socklen_t optlen)
2068 struct ip_mreqn *ip_mreq;
2069 struct ip_mreq_source *ip_mreq_source;
2074 /* TCP and UDP options all take an 'int' value. */
2075 if (optlen < sizeof(uint32_t))
2076 return -TARGET_EINVAL;
2078 if (get_user_u32(val, optval_addr))
2079 return -TARGET_EFAULT;
2080 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
/* SOL_IP options. */
2087 case IP_ROUTER_ALERT:
2091 case IP_MTU_DISCOVER:
2098 case IP_MULTICAST_TTL:
2099 case IP_MULTICAST_LOOP:
/* These accept either a full int or a single byte from the guest. */
2101 if (optlen >= sizeof(uint32_t)) {
2102 if (get_user_u32(val, optval_addr))
2103 return -TARGET_EFAULT;
2104 } else if (optlen >= 1) {
2105 if (get_user_u8(val, optval_addr))
2106 return -TARGET_EFAULT;
2108 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2110 case IP_ADD_MEMBERSHIP:
2111 case IP_DROP_MEMBERSHIP:
/* Guest may pass either ip_mreq or the larger ip_mreqn. */
2112 if (optlen < sizeof (struct target_ip_mreq) ||
2113 optlen > sizeof (struct target_ip_mreqn))
2114 return -TARGET_EINVAL;
2116 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2117 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2118 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2121 case IP_BLOCK_SOURCE:
2122 case IP_UNBLOCK_SOURCE:
2123 case IP_ADD_SOURCE_MEMBERSHIP:
2124 case IP_DROP_SOURCE_MEMBERSHIP:
/* ip_mreq_source is all network-order addresses: pass through. */
2125 if (optlen != sizeof (struct target_ip_mreq_source))
2126 return -TARGET_EINVAL;
2128 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2129 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2130 unlock_user (ip_mreq_source, optval_addr, 0);
/* SOL_IPV6 options taking a u32 value. */
2139 case IPV6_MTU_DISCOVER:
2142 case IPV6_RECVPKTINFO:
2143 case IPV6_UNICAST_HOPS:
2144 case IPV6_MULTICAST_HOPS:
2145 case IPV6_MULTICAST_LOOP:
2147 case IPV6_RECVHOPLIMIT:
2148 case IPV6_2292HOPLIMIT:
2151 case IPV6_2292PKTINFO:
2152 case IPV6_RECVTCLASS:
2153 case IPV6_RECVRTHDR:
2154 case IPV6_2292RTHDR:
2155 case IPV6_RECVHOPOPTS:
2156 case IPV6_2292HOPOPTS:
2157 case IPV6_RECVDSTOPTS:
2158 case IPV6_2292DSTOPTS:
2160 case IPV6_ADDR_PREFERENCES:
2161 #ifdef IPV6_RECVPATHMTU
2162 case IPV6_RECVPATHMTU:
2164 #ifdef IPV6_TRANSPARENT
2165 case IPV6_TRANSPARENT:
2167 #ifdef IPV6_FREEBIND
2170 #ifdef IPV6_RECVORIGDSTADDR
2171 case IPV6_RECVORIGDSTADDR:
2174 if (optlen < sizeof(uint32_t)) {
2175 return -TARGET_EINVAL;
2177 if (get_user_u32(val, optval_addr)) {
2178 return -TARGET_EFAULT;
2180 ret = get_errno(setsockopt(sockfd, level, optname,
2181 &val, sizeof(val)));
/* IPV6_PKTINFO: only the interface index needs byte-swapping. */
2185 struct in6_pktinfo pki;
2187 if (optlen < sizeof(pki)) {
2188 return -TARGET_EINVAL;
2191 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2192 return -TARGET_EFAULT;
2195 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2197 ret = get_errno(setsockopt(sockfd, level, optname,
2198 &pki, sizeof(pki)));
2201 case IPV6_ADD_MEMBERSHIP:
2202 case IPV6_DROP_MEMBERSHIP:
2204 struct ipv6_mreq ipv6mreq;
2206 if (optlen < sizeof(ipv6mreq)) {
2207 return -TARGET_EINVAL;
2210 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2211 return -TARGET_EFAULT;
2214 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2216 ret = get_errno(setsockopt(sockfd, level, optname,
2217 &ipv6mreq, sizeof(ipv6mreq)));
/* ICMPV6 filter: 8 x u32 bitmap, each word byte-swapped. */
2228 struct icmp6_filter icmp6f;
2230 if (optlen > sizeof(icmp6f)) {
2231 optlen = sizeof(icmp6f);
2234 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2235 return -TARGET_EFAULT;
2238 for (val = 0; val < 8; val++) {
2239 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2242 ret = get_errno(setsockopt(sockfd, level, optname,
2254 /* those take an u32 value */
2255 if (optlen < sizeof(uint32_t)) {
2256 return -TARGET_EINVAL;
2259 if (get_user_u32(val, optval_addr)) {
2260 return -TARGET_EFAULT;
2262 ret = get_errno(setsockopt(sockfd, level, optname,
2263 &val, sizeof(val)));
/* AF_ALG crypto-socket options. */
2270 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2275 char *alg_key = g_malloc(optlen);
2278 return -TARGET_ENOMEM;
2280 if (copy_from_user(alg_key, optval_addr, optlen)) {
2282 return -TARGET_EFAULT;
2284 ret = get_errno(setsockopt(sockfd, level, optname,
2289 case ALG_SET_AEAD_AUTHSIZE:
2291 ret = get_errno(setsockopt(sockfd, level, optname,
2300 case TARGET_SOL_SOCKET:
2302 case TARGET_SO_RCVTIMEO:
2306 optname = SO_RCVTIMEO;
/* Timeout options carry a guest struct timeval. */
2309 if (optlen != sizeof(struct target_timeval)) {
2310 return -TARGET_EINVAL;
2313 if (copy_from_user_timeval(&tv, optval_addr)) {
2314 return -TARGET_EFAULT;
2317 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2321 case TARGET_SO_SNDTIMEO:
2322 optname = SO_SNDTIMEO;
2324 case TARGET_SO_ATTACH_FILTER:
/* BPF filter program: convert each sock_filter insn. */
2326 struct target_sock_fprog *tfprog;
2327 struct target_sock_filter *tfilter;
2328 struct sock_fprog fprog;
2329 struct sock_filter *filter;
2332 if (optlen != sizeof(*tfprog)) {
2333 return -TARGET_EINVAL;
2335 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2336 return -TARGET_EFAULT;
2338 if (!lock_user_struct(VERIFY_READ, tfilter,
2339 tswapal(tfprog->filter), 0)) {
2340 unlock_user_struct(tfprog, optval_addr, 1);
2341 return -TARGET_EFAULT;
2344 fprog.len = tswap16(tfprog->len);
2345 filter = g_try_new(struct sock_filter, fprog.len);
2346 if (filter == NULL) {
2347 unlock_user_struct(tfilter, tfprog->filter, 1);
2348 unlock_user_struct(tfprog, optval_addr, 1);
2349 return -TARGET_ENOMEM;
2351 for (i = 0; i < fprog.len; i++) {
2352 filter[i].code = tswap16(tfilter[i].code);
2353 filter[i].jt = tfilter[i].jt;
2354 filter[i].jf = tfilter[i].jf;
2355 filter[i].k = tswap32(tfilter[i].k);
2357 fprog.filter = filter;
2359 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2360 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2363 unlock_user_struct(tfilter, tfprog->filter, 1);
2364 unlock_user_struct(tfprog, optval_addr, 1);
2367 case TARGET_SO_BINDTODEVICE:
/* Interface name: bounded copy with forced NUL termination. */
2369 char *dev_ifname, *addr_ifname;
2371 if (optlen > IFNAMSIZ - 1) {
2372 optlen = IFNAMSIZ - 1;
2374 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2376 return -TARGET_EFAULT;
2378 optname = SO_BINDTODEVICE;
2379 addr_ifname = alloca(IFNAMSIZ);
2380 memcpy(addr_ifname, dev_ifname, optlen);
2381 addr_ifname[optlen] = 0;
2382 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2383 addr_ifname, optlen));
2384 unlock_user (dev_ifname, optval_addr, 0);
2387 case TARGET_SO_LINGER:
2390 struct target_linger *tlg;
2392 if (optlen != sizeof(struct target_linger)) {
2393 return -TARGET_EINVAL;
2395 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2396 return -TARGET_EFAULT;
2398 __get_user(lg.l_onoff, &tlg->l_onoff);
2399 __get_user(lg.l_linger, &tlg->l_linger);
2400 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2402 unlock_user_struct(tlg, optval_addr, 0);
2405 /* Options with 'int' argument. */
2406 case TARGET_SO_DEBUG:
2409 case TARGET_SO_REUSEADDR:
2410 optname = SO_REUSEADDR;
2413 case TARGET_SO_REUSEPORT:
2414 optname = SO_REUSEPORT;
2417 case TARGET_SO_TYPE:
2420 case TARGET_SO_ERROR:
2423 case TARGET_SO_DONTROUTE:
2424 optname = SO_DONTROUTE;
2426 case TARGET_SO_BROADCAST:
2427 optname = SO_BROADCAST;
2429 case TARGET_SO_SNDBUF:
2430 optname = SO_SNDBUF;
2432 case TARGET_SO_SNDBUFFORCE:
2433 optname = SO_SNDBUFFORCE;
2435 case TARGET_SO_RCVBUF:
2436 optname = SO_RCVBUF;
2438 case TARGET_SO_RCVBUFFORCE:
2439 optname = SO_RCVBUFFORCE;
2441 case TARGET_SO_KEEPALIVE:
2442 optname = SO_KEEPALIVE;
2444 case TARGET_SO_OOBINLINE:
2445 optname = SO_OOBINLINE;
2447 case TARGET_SO_NO_CHECK:
2448 optname = SO_NO_CHECK;
2450 case TARGET_SO_PRIORITY:
2451 optname = SO_PRIORITY;
2454 case TARGET_SO_BSDCOMPAT:
2455 optname = SO_BSDCOMPAT;
2458 case TARGET_SO_PASSCRED:
2459 optname = SO_PASSCRED;
2461 case TARGET_SO_PASSSEC:
2462 optname = SO_PASSSEC;
2464 case TARGET_SO_TIMESTAMP:
2465 optname = SO_TIMESTAMP;
2467 case TARGET_SO_RCVLOWAT:
2468 optname = SO_RCVLOWAT;
/* Shared tail for all 'int' SOL_SOCKET options. */
2473 if (optlen < sizeof(uint32_t))
2474 return -TARGET_EINVAL;
2476 if (get_user_u32(val, optval_addr))
2477 return -TARGET_EFAULT;
2478 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
/* SOL_NETLINK options (u32 value), gated on host kernel headers. */
2483 case NETLINK_PKTINFO:
2484 case NETLINK_ADD_MEMBERSHIP:
2485 case NETLINK_DROP_MEMBERSHIP:
2486 case NETLINK_BROADCAST_ERROR:
2487 case NETLINK_NO_ENOBUFS:
2488 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2489 case NETLINK_LISTEN_ALL_NSID:
2490 case NETLINK_CAP_ACK:
2491 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2492 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2493 case NETLINK_EXT_ACK:
2494 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2495 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2496 case NETLINK_GET_STRICT_CHK:
2497 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2503 if (optlen < sizeof(uint32_t)) {
2504 return -TARGET_EINVAL;
2506 if (get_user_u32(val, optval_addr)) {
2507 return -TARGET_EFAULT;
2509 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2512 #endif /* SOL_NETLINK */
2515 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2517 ret = -TARGET_ENOPROTOOPT;
2522 /* do_getsockopt() Must return target values and target errnos. */
2523 static abi_long do_getsockopt(int sockfd, int level, int optname,
2524 abi_ulong optval_addr, abi_ulong optlen)
2531 case TARGET_SOL_SOCKET:
2534 /* These don't just return a single integer */
2535 case TARGET_SO_PEERNAME:
2537 case TARGET_SO_RCVTIMEO: {
2541 optname = SO_RCVTIMEO;
2544 if (get_user_u32(len, optlen)) {
2545 return -TARGET_EFAULT;
2548 return -TARGET_EINVAL;
2552 ret = get_errno(getsockopt(sockfd, level, optname,
2557 if (len > sizeof(struct target_timeval)) {
2558 len = sizeof(struct target_timeval);
2560 if (copy_to_user_timeval(optval_addr, &tv)) {
2561 return -TARGET_EFAULT;
2563 if (put_user_u32(len, optlen)) {
2564 return -TARGET_EFAULT;
2568 case TARGET_SO_SNDTIMEO:
2569 optname = SO_SNDTIMEO;
2571 case TARGET_SO_PEERCRED: {
2574 struct target_ucred *tcr;
2576 if (get_user_u32(len, optlen)) {
2577 return -TARGET_EFAULT;
2580 return -TARGET_EINVAL;
2584 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2592 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2593 return -TARGET_EFAULT;
2595 __put_user(cr.pid, &tcr->pid);
2596 __put_user(cr.uid, &tcr->uid);
2597 __put_user(cr.gid, &tcr->gid);
2598 unlock_user_struct(tcr, optval_addr, 1);
2599 if (put_user_u32(len, optlen)) {
2600 return -TARGET_EFAULT;
2604 case TARGET_SO_PEERSEC: {
2607 if (get_user_u32(len, optlen)) {
2608 return -TARGET_EFAULT;
2611 return -TARGET_EINVAL;
2613 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2615 return -TARGET_EFAULT;
2618 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2620 if (put_user_u32(lv, optlen)) {
2621 ret = -TARGET_EFAULT;
2623 unlock_user(name, optval_addr, lv);
2626 case TARGET_SO_LINGER:
2630 struct target_linger *tlg;
2632 if (get_user_u32(len, optlen)) {
2633 return -TARGET_EFAULT;
2636 return -TARGET_EINVAL;
2640 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2648 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2649 return -TARGET_EFAULT;
2651 __put_user(lg.l_onoff, &tlg->l_onoff);
2652 __put_user(lg.l_linger, &tlg->l_linger);
2653 unlock_user_struct(tlg, optval_addr, 1);
2654 if (put_user_u32(len, optlen)) {
2655 return -TARGET_EFAULT;
2659 /* Options with 'int' argument. */
2660 case TARGET_SO_DEBUG:
2663 case TARGET_SO_REUSEADDR:
2664 optname = SO_REUSEADDR;
2667 case TARGET_SO_REUSEPORT:
2668 optname = SO_REUSEPORT;
2671 case TARGET_SO_TYPE:
2674 case TARGET_SO_ERROR:
2677 case TARGET_SO_DONTROUTE:
2678 optname = SO_DONTROUTE;
2680 case TARGET_SO_BROADCAST:
2681 optname = SO_BROADCAST;
2683 case TARGET_SO_SNDBUF:
2684 optname = SO_SNDBUF;
2686 case TARGET_SO_RCVBUF:
2687 optname = SO_RCVBUF;
2689 case TARGET_SO_KEEPALIVE:
2690 optname = SO_KEEPALIVE;
2692 case TARGET_SO_OOBINLINE:
2693 optname = SO_OOBINLINE;
2695 case TARGET_SO_NO_CHECK:
2696 optname = SO_NO_CHECK;
2698 case TARGET_SO_PRIORITY:
2699 optname = SO_PRIORITY;
2702 case TARGET_SO_BSDCOMPAT:
2703 optname = SO_BSDCOMPAT;
2706 case TARGET_SO_PASSCRED:
2707 optname = SO_PASSCRED;
2709 case TARGET_SO_TIMESTAMP:
2710 optname = SO_TIMESTAMP;
2712 case TARGET_SO_RCVLOWAT:
2713 optname = SO_RCVLOWAT;
2715 case TARGET_SO_ACCEPTCONN:
2716 optname = SO_ACCEPTCONN;
2718 case TARGET_SO_PROTOCOL:
2719 optname = SO_PROTOCOL;
2721 case TARGET_SO_DOMAIN:
2722 optname = SO_DOMAIN;
2730 /* TCP and UDP options all take an 'int' value. */
2732 if (get_user_u32(len, optlen))
2733 return -TARGET_EFAULT;
2735 return -TARGET_EINVAL;
2737 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2740 if (optname == SO_TYPE) {
2741 val = host_to_target_sock_type(val);
2746 if (put_user_u32(val, optval_addr))
2747 return -TARGET_EFAULT;
2749 if (put_user_u8(val, optval_addr))
2750 return -TARGET_EFAULT;
2752 if (put_user_u32(len, optlen))
2753 return -TARGET_EFAULT;
2760 case IP_ROUTER_ALERT:
2764 case IP_MTU_DISCOVER:
2770 case IP_MULTICAST_TTL:
2771 case IP_MULTICAST_LOOP:
2772 if (get_user_u32(len, optlen))
2773 return -TARGET_EFAULT;
2775 return -TARGET_EINVAL;
2777 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2780 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2782 if (put_user_u32(len, optlen)
2783 || put_user_u8(val, optval_addr))
2784 return -TARGET_EFAULT;
2786 if (len > sizeof(int))
2788 if (put_user_u32(len, optlen)
2789 || put_user_u32(val, optval_addr))
2790 return -TARGET_EFAULT;
2794 ret = -TARGET_ENOPROTOOPT;
2800 case IPV6_MTU_DISCOVER:
2803 case IPV6_RECVPKTINFO:
2804 case IPV6_UNICAST_HOPS:
2805 case IPV6_MULTICAST_HOPS:
2806 case IPV6_MULTICAST_LOOP:
2808 case IPV6_RECVHOPLIMIT:
2809 case IPV6_2292HOPLIMIT:
2812 case IPV6_2292PKTINFO:
2813 case IPV6_RECVTCLASS:
2814 case IPV6_RECVRTHDR:
2815 case IPV6_2292RTHDR:
2816 case IPV6_RECVHOPOPTS:
2817 case IPV6_2292HOPOPTS:
2818 case IPV6_RECVDSTOPTS:
2819 case IPV6_2292DSTOPTS:
2821 case IPV6_ADDR_PREFERENCES:
2822 #ifdef IPV6_RECVPATHMTU
2823 case IPV6_RECVPATHMTU:
2825 #ifdef IPV6_TRANSPARENT
2826 case IPV6_TRANSPARENT:
2828 #ifdef IPV6_FREEBIND
2831 #ifdef IPV6_RECVORIGDSTADDR
2832 case IPV6_RECVORIGDSTADDR:
2834 if (get_user_u32(len, optlen))
2835 return -TARGET_EFAULT;
2837 return -TARGET_EINVAL;
2839 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2842 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2844 if (put_user_u32(len, optlen)
2845 || put_user_u8(val, optval_addr))
2846 return -TARGET_EFAULT;
2848 if (len > sizeof(int))
2850 if (put_user_u32(len, optlen)
2851 || put_user_u32(val, optval_addr))
2852 return -TARGET_EFAULT;
2856 ret = -TARGET_ENOPROTOOPT;
2863 case NETLINK_PKTINFO:
2864 case NETLINK_BROADCAST_ERROR:
2865 case NETLINK_NO_ENOBUFS:
2866 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2867 case NETLINK_LISTEN_ALL_NSID:
2868 case NETLINK_CAP_ACK:
2869 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2870 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2871 case NETLINK_EXT_ACK:
2872 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2873 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2874 case NETLINK_GET_STRICT_CHK:
2875 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2876 if (get_user_u32(len, optlen)) {
2877 return -TARGET_EFAULT;
2879 if (len != sizeof(val)) {
2880 return -TARGET_EINVAL;
2883 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2887 if (put_user_u32(lv, optlen)
2888 || put_user_u32(val, optval_addr)) {
2889 return -TARGET_EFAULT;
2892 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2893 case NETLINK_LIST_MEMBERSHIPS:
2897 if (get_user_u32(len, optlen)) {
2898 return -TARGET_EFAULT;
2901 return -TARGET_EINVAL;
2903 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2904 if (!results && len > 0) {
2905 return -TARGET_EFAULT;
2908 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2910 unlock_user(results, optval_addr, 0);
2913 /* swap host endianess to target endianess. */
2914 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2915 results[i] = tswap32(results[i]);
2917 if (put_user_u32(lv, optlen)) {
2918 return -TARGET_EFAULT;
2920 unlock_user(results, optval_addr, 0);
2923 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2928 #endif /* SOL_NETLINK */
2931 qemu_log_mask(LOG_UNIMP,
2932 "getsockopt level=%d optname=%d not yet supported\n",
2934 ret = -TARGET_EOPNOTSUPP;
2940 /* Convert target low/high pair representing file offset into the host
2941 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2942 * as the kernel doesn't handle them either.
2944 static void target_to_host_low_high(abi_ulong tlow,
2946 unsigned long *hlow,
2947 unsigned long *hhigh)
2949 uint64_t off = tlow |
2950 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2951 TARGET_LONG_BITS / 2;
2954 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2957 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2958 abi_ulong count, int copy)
2960 struct target_iovec *target_vec;
2962 abi_ulong total_len, max_len;
2965 bool bad_address = false;
2971 if (count > IOV_MAX) {
2976 vec = g_try_new0(struct iovec, count);
2982 target_vec = lock_user(VERIFY_READ, target_addr,
2983 count * sizeof(struct target_iovec), 1);
2984 if (target_vec == NULL) {
2989 /* ??? If host page size > target page size, this will result in a
2990 value larger than what we can actually support. */
2991 max_len = 0x7fffffff & TARGET_PAGE_MASK;
2994 for (i = 0; i < count; i++) {
2995 abi_ulong base = tswapal(target_vec[i].iov_base);
2996 abi_long len = tswapal(target_vec[i].iov_len);
3001 } else if (len == 0) {
3002 /* Zero length pointer is ignored. */
3003 vec[i].iov_base = 0;
3005 vec[i].iov_base = lock_user(type, base, len, copy);
3006 /* If the first buffer pointer is bad, this is a fault. But
3007 * subsequent bad buffers will result in a partial write; this
3008 * is realized by filling the vector with null pointers and
3010 if (!vec[i].iov_base) {
3021 if (len > max_len - total_len) {
3022 len = max_len - total_len;
3025 vec[i].iov_len = len;
3029 unlock_user(target_vec, target_addr, 0);
3034 if (tswapal(target_vec[i].iov_len) > 0) {
3035 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3038 unlock_user(target_vec, target_addr, 0);
3045 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3046 abi_ulong count, int copy)
3048 struct target_iovec *target_vec;
3051 target_vec = lock_user(VERIFY_READ, target_addr,
3052 count * sizeof(struct target_iovec), 1);
3054 for (i = 0; i < count; i++) {
3055 abi_ulong base = tswapal(target_vec[i].iov_base);
3056 abi_long len = tswapal(target_vec[i].iov_len);
3060 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3062 unlock_user(target_vec, target_addr, 0);
3068 static inline int target_to_host_sock_type(int *type)
3071 int target_type = *type;
3073 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3074 case TARGET_SOCK_DGRAM:
3075 host_type = SOCK_DGRAM;
3077 case TARGET_SOCK_STREAM:
3078 host_type = SOCK_STREAM;
3081 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3084 if (target_type & TARGET_SOCK_CLOEXEC) {
3085 #if defined(SOCK_CLOEXEC)
3086 host_type |= SOCK_CLOEXEC;
3088 return -TARGET_EINVAL;
3091 if (target_type & TARGET_SOCK_NONBLOCK) {
3092 #if defined(SOCK_NONBLOCK)
3093 host_type |= SOCK_NONBLOCK;
3094 #elif !defined(O_NONBLOCK)
3095 return -TARGET_EINVAL;
3102 /* Try to emulate socket type flags after socket creation. */
3103 static int sock_flags_fixup(int fd, int target_type)
3105 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3106 if (target_type & TARGET_SOCK_NONBLOCK) {
3107 int flags = fcntl(fd, F_GETFL);
3108 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3110 return -TARGET_EINVAL;
3117 /* do_socket() Must return target values and target errnos. */
3118 static abi_long do_socket(int domain, int type, int protocol)
3120 int target_type = type;
3123 ret = target_to_host_sock_type(&type);
3128 if (domain == PF_NETLINK && !(
3129 #ifdef CONFIG_RTNETLINK
3130 protocol == NETLINK_ROUTE ||
3132 protocol == NETLINK_KOBJECT_UEVENT ||
3133 protocol == NETLINK_AUDIT)) {
3134 return -TARGET_EPROTONOSUPPORT;
3137 if (domain == AF_PACKET ||
3138 (domain == AF_INET && type == SOCK_PACKET)) {
3139 protocol = tswap16(protocol);
3142 ret = get_errno(socket(domain, type, protocol));
3144 ret = sock_flags_fixup(ret, target_type);
3145 if (type == SOCK_PACKET) {
3146 /* Manage an obsolete case :
3147 * if socket type is SOCK_PACKET, bind by name
3149 fd_trans_register(ret, &target_packet_trans);
3150 } else if (domain == PF_NETLINK) {
3152 #ifdef CONFIG_RTNETLINK
3154 fd_trans_register(ret, &target_netlink_route_trans);
3157 case NETLINK_KOBJECT_UEVENT:
3158 /* nothing to do: messages are strings */
3161 fd_trans_register(ret, &target_netlink_audit_trans);
3164 g_assert_not_reached();
3171 /* do_bind() Must return target values and target errnos. */
3172 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3178 if ((int)addrlen < 0) {
3179 return -TARGET_EINVAL;
3182 addr = alloca(addrlen+1);
3184 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3188 return get_errno(bind(sockfd, addr, addrlen));
3191 /* do_connect() Must return target values and target errnos. */
3192 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3198 if ((int)addrlen < 0) {
3199 return -TARGET_EINVAL;
3202 addr = alloca(addrlen+1);
3204 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3208 return get_errno(safe_connect(sockfd, addr, addrlen));
3211 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3212 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3213 int flags, int send)
3219 abi_ulong target_vec;
3221 if (msgp->msg_name) {
3222 msg.msg_namelen = tswap32(msgp->msg_namelen);
3223 msg.msg_name = alloca(msg.msg_namelen+1);
3224 ret = target_to_host_sockaddr(fd, msg.msg_name,
3225 tswapal(msgp->msg_name),
3227 if (ret == -TARGET_EFAULT) {
3228 /* For connected sockets msg_name and msg_namelen must
3229 * be ignored, so returning EFAULT immediately is wrong.
3230 * Instead, pass a bad msg_name to the host kernel, and
3231 * let it decide whether to return EFAULT or not.
3233 msg.msg_name = (void *)-1;
3238 msg.msg_name = NULL;
3239 msg.msg_namelen = 0;
3241 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3242 msg.msg_control = alloca(msg.msg_controllen);
3243 memset(msg.msg_control, 0, msg.msg_controllen);
3245 msg.msg_flags = tswap32(msgp->msg_flags);
3247 count = tswapal(msgp->msg_iovlen);
3248 target_vec = tswapal(msgp->msg_iov);
3250 if (count > IOV_MAX) {
3251 /* sendrcvmsg returns a different errno for this condition than
3252 * readv/writev, so we must catch it here before lock_iovec() does.
3254 ret = -TARGET_EMSGSIZE;
3258 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3259 target_vec, count, send);
3261 ret = -host_to_target_errno(errno);
3264 msg.msg_iovlen = count;
3268 if (fd_trans_target_to_host_data(fd)) {
3271 host_msg = g_malloc(msg.msg_iov->iov_len);
3272 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3273 ret = fd_trans_target_to_host_data(fd)(host_msg,
3274 msg.msg_iov->iov_len);
3276 msg.msg_iov->iov_base = host_msg;
3277 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3281 ret = target_to_host_cmsg(&msg, msgp);
3283 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3287 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3288 if (!is_error(ret)) {
3290 if (fd_trans_host_to_target_data(fd)) {
3291 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3292 MIN(msg.msg_iov->iov_len, len));
3294 ret = host_to_target_cmsg(msgp, &msg);
3296 if (!is_error(ret)) {
3297 msgp->msg_namelen = tswap32(msg.msg_namelen);
3298 msgp->msg_flags = tswap32(msg.msg_flags);
3299 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3300 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3301 msg.msg_name, msg.msg_namelen);
3313 unlock_iovec(vec, target_vec, count, !send);
3318 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3319 int flags, int send)
3322 struct target_msghdr *msgp;
3324 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3328 return -TARGET_EFAULT;
3330 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3331 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3335 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3336 * so it might not have this *mmsg-specific flag either.
3338 #ifndef MSG_WAITFORONE
3339 #define MSG_WAITFORONE 0x10000
3342 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3343 unsigned int vlen, unsigned int flags,
3346 struct target_mmsghdr *mmsgp;
3350 if (vlen > UIO_MAXIOV) {
3354 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3356 return -TARGET_EFAULT;
3359 for (i = 0; i < vlen; i++) {
3360 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3361 if (is_error(ret)) {
3364 mmsgp[i].msg_len = tswap32(ret);
3365 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3366 if (flags & MSG_WAITFORONE) {
3367 flags |= MSG_DONTWAIT;
3371 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3373 /* Return number of datagrams sent if we sent any at all;
3374 * otherwise return the error.
3382 /* do_accept4() Must return target values and target errnos. */
3383 static abi_long do_accept4(int fd, abi_ulong target_addr,
3384 abi_ulong target_addrlen_addr, int flags)
3386 socklen_t addrlen, ret_addrlen;
3391 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3393 if (target_addr == 0) {
3394 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3397 /* linux returns EFAULT if addrlen pointer is invalid */
3398 if (get_user_u32(addrlen, target_addrlen_addr))
3399 return -TARGET_EFAULT;
3401 if ((int)addrlen < 0) {
3402 return -TARGET_EINVAL;
3405 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3406 return -TARGET_EFAULT;
3409 addr = alloca(addrlen);
3411 ret_addrlen = addrlen;
3412 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3413 if (!is_error(ret)) {
3414 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3415 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3416 ret = -TARGET_EFAULT;
3422 /* do_getpeername() Must return target values and target errnos. */
3423 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3424 abi_ulong target_addrlen_addr)
3426 socklen_t addrlen, ret_addrlen;
3430 if (get_user_u32(addrlen, target_addrlen_addr))
3431 return -TARGET_EFAULT;
3433 if ((int)addrlen < 0) {
3434 return -TARGET_EINVAL;
3437 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3438 return -TARGET_EFAULT;
3441 addr = alloca(addrlen);
3443 ret_addrlen = addrlen;
3444 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3445 if (!is_error(ret)) {
3446 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3447 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3448 ret = -TARGET_EFAULT;
3454 /* do_getsockname() Must return target values and target errnos. */
3455 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3456 abi_ulong target_addrlen_addr)
3458 socklen_t addrlen, ret_addrlen;
3462 if (get_user_u32(addrlen, target_addrlen_addr))
3463 return -TARGET_EFAULT;
3465 if ((int)addrlen < 0) {
3466 return -TARGET_EINVAL;
3469 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3470 return -TARGET_EFAULT;
3473 addr = alloca(addrlen);
3475 ret_addrlen = addrlen;
3476 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3477 if (!is_error(ret)) {
3478 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3479 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3480 ret = -TARGET_EFAULT;
3486 /* do_socketpair() Must return target values and target errnos. */
3487 static abi_long do_socketpair(int domain, int type, int protocol,
3488 abi_ulong target_tab_addr)
3493 target_to_host_sock_type(&type);
3495 ret = get_errno(socketpair(domain, type, protocol, tab));
3496 if (!is_error(ret)) {
3497 if (put_user_s32(tab[0], target_tab_addr)
3498 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3499 ret = -TARGET_EFAULT;
3504 /* do_sendto() Must return target values and target errnos. */
3505 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3506 abi_ulong target_addr, socklen_t addrlen)
3510 void *copy_msg = NULL;
3513 if ((int)addrlen < 0) {
3514 return -TARGET_EINVAL;
3517 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3519 return -TARGET_EFAULT;
3520 if (fd_trans_target_to_host_data(fd)) {
3521 copy_msg = host_msg;
3522 host_msg = g_malloc(len);
3523 memcpy(host_msg, copy_msg, len);
3524 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3530 addr = alloca(addrlen+1);
3531 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3535 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3537 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3542 host_msg = copy_msg;
3544 unlock_user(host_msg, msg, 0);
3548 /* do_recvfrom() Must return target values and target errnos. */
3549 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3550 abi_ulong target_addr,
3551 abi_ulong target_addrlen)
3553 socklen_t addrlen, ret_addrlen;
3561 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3563 return -TARGET_EFAULT;
3567 if (get_user_u32(addrlen, target_addrlen)) {
3568 ret = -TARGET_EFAULT;
3571 if ((int)addrlen < 0) {
3572 ret = -TARGET_EINVAL;
3575 addr = alloca(addrlen);
3576 ret_addrlen = addrlen;
3577 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3578 addr, &ret_addrlen));
3580 addr = NULL; /* To keep compiler quiet. */
3581 addrlen = 0; /* To keep compiler quiet. */
3582 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3584 if (!is_error(ret)) {
3585 if (fd_trans_host_to_target_data(fd)) {
3587 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3588 if (is_error(trans)) {
3594 host_to_target_sockaddr(target_addr, addr,
3595 MIN(addrlen, ret_addrlen));
3596 if (put_user_u32(ret_addrlen, target_addrlen)) {
3597 ret = -TARGET_EFAULT;
3601 unlock_user(host_msg, msg, len);
3604 unlock_user(host_msg, msg, 0);
3609 #ifdef TARGET_NR_socketcall
3610 /* do_socketcall() must return target values and target errnos. */
3611 static abi_long do_socketcall(int num, abi_ulong vptr)
3613 static const unsigned nargs[] = { /* number of arguments per operation */
3614 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3615 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3616 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3617 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3618 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3619 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3620 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3621 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3622 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3623 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3624 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3625 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3626 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3627 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3628 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3629 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3630 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3631 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3632 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3633 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3635 abi_long a[6]; /* max 6 args */
3638 /* check the range of the first argument num */
3639 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3640 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3641 return -TARGET_EINVAL;
3643 /* ensure we have space for args */
3644 if (nargs[num] > ARRAY_SIZE(a)) {
3645 return -TARGET_EINVAL;
3647 /* collect the arguments in a[] according to nargs[] */
3648 for (i = 0; i < nargs[num]; ++i) {
3649 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3650 return -TARGET_EFAULT;
3653 /* now when we have the args, invoke the appropriate underlying function */
3655 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3656 return do_socket(a[0], a[1], a[2]);
3657 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3658 return do_bind(a[0], a[1], a[2]);
3659 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3660 return do_connect(a[0], a[1], a[2]);
3661 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3662 return get_errno(listen(a[0], a[1]));
3663 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3664 return do_accept4(a[0], a[1], a[2], 0);
3665 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3666 return do_getsockname(a[0], a[1], a[2]);
3667 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3668 return do_getpeername(a[0], a[1], a[2]);
3669 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3670 return do_socketpair(a[0], a[1], a[2], a[3]);
3671 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3672 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3673 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3674 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3675 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3676 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3677 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3678 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3679 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3680 return get_errno(shutdown(a[0], a[1]));
3681 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3682 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3683 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3684 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3685 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3686 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3687 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3688 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3689 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3690 return do_accept4(a[0], a[1], a[2], a[3]);
3691 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3692 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3693 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3694 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3696 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3697 return -TARGET_EINVAL;
3702 #define N_SHM_REGIONS 32
3704 static struct shm_region {
3708 } shm_regions[N_SHM_REGIONS];
3710 #ifndef TARGET_SEMID64_DS
3711 /* asm-generic version of this struct */
3712 struct target_semid64_ds
3714 struct target_ipc_perm sem_perm;
3715 abi_ulong sem_otime;
3716 #if TARGET_ABI_BITS == 32
3717 abi_ulong __unused1;
3719 abi_ulong sem_ctime;
3720 #if TARGET_ABI_BITS == 32
3721 abi_ulong __unused2;
3723 abi_ulong sem_nsems;
3724 abi_ulong __unused3;
3725 abi_ulong __unused4;
3729 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3730 abi_ulong target_addr)
3732 struct target_ipc_perm *target_ip;
3733 struct target_semid64_ds *target_sd;
3735 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3736 return -TARGET_EFAULT;
3737 target_ip = &(target_sd->sem_perm);
3738 host_ip->__key = tswap32(target_ip->__key);
3739 host_ip->uid = tswap32(target_ip->uid);
3740 host_ip->gid = tswap32(target_ip->gid);
3741 host_ip->cuid = tswap32(target_ip->cuid);
3742 host_ip->cgid = tswap32(target_ip->cgid);
3743 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3744 host_ip->mode = tswap32(target_ip->mode);
3746 host_ip->mode = tswap16(target_ip->mode);
3748 #if defined(TARGET_PPC)
3749 host_ip->__seq = tswap32(target_ip->__seq);
3751 host_ip->__seq = tswap16(target_ip->__seq);
3753 unlock_user_struct(target_sd, target_addr, 0);
3757 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3758 struct ipc_perm *host_ip)
3760 struct target_ipc_perm *target_ip;
3761 struct target_semid64_ds *target_sd;
3763 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3764 return -TARGET_EFAULT;
3765 target_ip = &(target_sd->sem_perm);
3766 target_ip->__key = tswap32(host_ip->__key);
3767 target_ip->uid = tswap32(host_ip->uid);
3768 target_ip->gid = tswap32(host_ip->gid);
3769 target_ip->cuid = tswap32(host_ip->cuid);
3770 target_ip->cgid = tswap32(host_ip->cgid);
3771 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3772 target_ip->mode = tswap32(host_ip->mode);
3774 target_ip->mode = tswap16(host_ip->mode);
3776 #if defined(TARGET_PPC)
3777 target_ip->__seq = tswap32(host_ip->__seq);
3779 target_ip->__seq = tswap16(host_ip->__seq);
3781 unlock_user_struct(target_sd, target_addr, 1);
3785 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3786 abi_ulong target_addr)
3788 struct target_semid64_ds *target_sd;
3790 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3791 return -TARGET_EFAULT;
3792 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3793 return -TARGET_EFAULT;
3794 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3795 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3796 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3797 unlock_user_struct(target_sd, target_addr, 0);
3801 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3802 struct semid_ds *host_sd)
3804 struct target_semid64_ds *target_sd;
3806 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3807 return -TARGET_EFAULT;
3808 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3809 return -TARGET_EFAULT;
3810 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3811 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3812 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3813 unlock_user_struct(target_sd, target_addr, 1);
3817 struct target_seminfo {
3830 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3831 struct seminfo *host_seminfo)
3833 struct target_seminfo *target_seminfo;
3834 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3835 return -TARGET_EFAULT;
3836 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3837 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3838 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3839 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3840 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3841 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3842 __put_user(host_seminfo->semume, &target_seminfo->semume);
3843 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3844 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3845 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3846 unlock_user_struct(target_seminfo, target_addr, 1);
/* Host-side union semun members (glibc does not always declare it). */
3852         struct semid_ds *buf;
3853         unsigned short *array;
3854         struct seminfo *__buf;

/* Guest-ABI counterpart of union semun. */
3857 union target_semun {

/*
 * Read the guest's GETALL/SETALL semaphore value array into a freshly
 * allocated host array (*host_array, owned by the caller on success).
 * The array length is discovered via IPC_STAT on the semaphore set.
 * Returns 0 on success, -TARGET_ENOMEM on allocation failure,
 * -TARGET_EFAULT on a bad guest address, or a host semctl errno.
 */
3864 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3865                                                abi_ulong target_addr)
3868     unsigned short *array;
3870     struct semid_ds semid_ds;
3873     semun.buf = &semid_ds;
     /* ask the kernel how many semaphores are in the set */
3875     ret = semctl(semid, 0, IPC_STAT, semun);
3877         return get_errno(ret);
3879     nsems = semid_ds.sem_nsems;
3881     *host_array = g_try_new(unsigned short, nsems);
3883         return -TARGET_ENOMEM;
3885     array = lock_user(VERIFY_READ, target_addr,
3886                       nsems*sizeof(unsigned short), 1);
     /* free the half-built result before failing */
3888         g_free(*host_array);
3889         return -TARGET_EFAULT;
3892     for(i=0; i<nsems; i++) {
3893         __get_user((*host_array)[i], &array[i]);
3895     unlock_user(array, target_addr, 0);
/*
 * Write a host semaphore value array (from target_to_host_semarray())
 * back to the guest buffer at target_addr and free the host copy.
 * The array length is re-discovered via IPC_STAT on the set.
 */
3900 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3901                                                unsigned short **host_array)
3904     unsigned short *array;
3906     struct semid_ds semid_ds;
3909     semun.buf = &semid_ds;
3911     ret = semctl(semid, 0, IPC_STAT, semun);
3913         return get_errno(ret);
3915     nsems = semid_ds.sem_nsems;
3917     array = lock_user(VERIFY_WRITE, target_addr,
3918                       nsems*sizeof(unsigned short), 0);
3920         return -TARGET_EFAULT;
3922     for(i=0; i<nsems; i++) {
3923         __put_user((*host_array)[i], &array[i]);
     /* ownership of *host_array ends here */
3925     g_free(*host_array);
3926     unlock_user(array, target_addr, 1);
/*
 * Emulate semctl(2): convert the guest's union semun argument to the
 * host representation appropriate for each command, run the host
 * semctl, and convert any results back to guest memory.
 */
3931 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3932                                  abi_ulong target_arg)
3934     union target_semun target_su = { .buf = target_arg };
3936     struct semid_ds dsarg;
3937     unsigned short *array = NULL;
3938     struct seminfo seminfo;
3939     abi_long ret = -TARGET_EINVAL;
     /* SETVAL: the value is passed inside the union, not via a pointer */
3946         /* In 64 bit cross-endian situations, we will erroneously pick up
3947          * the wrong half of the union for the "val" element. To rectify
3948          * this, the entire 8-byte structure is byteswapped, followed by
3949          * a swap of the 4 byte val field. In other cases, the data is
3950          * already in proper host byte order. */
3951         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3952             target_su.buf = tswapal(target_su.buf);
3953             arg.val = tswap32(target_su.val);
3955             arg.val = target_su.val;
3957         ret = get_errno(semctl(semid, semnum, cmd, arg));
     /* GETALL/SETALL: marshal the whole value array in and back out */
3961         err = target_to_host_semarray(semid, &array, target_su.array);
3965         ret = get_errno(semctl(semid, semnum, cmd, arg));
3966         err = host_to_target_semarray(semid, target_su.array, &array);
     /* IPC_STAT/IPC_SET style commands: convert struct semid_ds */
3973         err = target_to_host_semid_ds(&dsarg, target_su.buf);
3977         ret = get_errno(semctl(semid, semnum, cmd, arg));
3978         err = host_to_target_semid_ds(target_su.buf, &dsarg);
     /* IPC_INFO/SEM_INFO: kernel fills a struct seminfo */
3984         arg.__buf = &seminfo;
3985         ret = get_errno(semctl(semid, semnum, cmd, arg));
3986         err = host_to_target_seminfo(target_su.__buf, &seminfo);
     /* remaining commands take no argument */
3994         ret = get_errno(semctl(semid, semnum, cmd, NULL));
/* Guest-ABI view of struct sembuf (one semaphore operation). */
4001 struct target_sembuf {
4002     unsigned short sem_num;

/*
 * Copy an array of nsops guest sembuf operations into the
 * caller-provided host array.  Fails with -TARGET_EFAULT on a bad
 * guest address.
 */
4007 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4008                                              abi_ulong target_addr,
4011     struct target_sembuf *target_sembuf;
4014     target_sembuf = lock_user(VERIFY_READ, target_addr,
4015                               nsops*sizeof(struct target_sembuf), 1);
4017         return -TARGET_EFAULT;
4019     for(i=0; i<nsops; i++) {
4020         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4021         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4022         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4025     unlock_user(target_sembuf, target_addr, 0);
4030 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4031     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

4034  * This macro is required to handle the s390 variants, which passes the
4035  * arguments in a different order than default.
/* s390-style sys_ipc: (nsops, timeout, sops) */
4038 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4039     (__nsops), (__timeout), (__sops)
/* default sys_ipc: (nsops, 0, sops, timeout) */
4041 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4042     (__nsops), 0, (__sops), (__timeout)
/*
 * Emulate semop(2)/semtimedop(2): convert the guest sembuf array and
 * optional timeout, then issue the host syscall, falling back to the
 * multiplexed sys_ipc entry point where __NR_semtimedop is missing.
 * 'time64' selects 64-bit vs 32-bit guest timespec conversion.
 */
4045 static inline abi_long do_semtimedop(int semid,
4048                                      abi_long timeout, bool time64)
4050     struct sembuf *sops;
4051     struct timespec ts, *pts = NULL;
4057         if (target_to_host_timespec64(pts, timeout)) {
4058             return -TARGET_EFAULT;
4061         if (target_to_host_timespec(pts, timeout)) {
4062             return -TARGET_EFAULT;
     /* mirror the kernel's SEMOPM limit before allocating */
4067     if (nsops > TARGET_SEMOPM) {
4068         return -TARGET_E2BIG;
4071     sops = g_new(struct sembuf, nsops);
4073     if (target_to_host_sembuf(sops, ptr, nsops)) {
4075         return -TARGET_EFAULT;
4078     ret = -TARGET_ENOSYS;
4079 #ifdef __NR_semtimedop
4080     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
     /* fall back to the multiplexed sys_ipc interface */
4083     if (ret == -TARGET_ENOSYS) {
4084         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4085                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
/*
 * Guest-ABI view of struct msqid_ds.  On 32-bit targets each time
 * field is followed by an explicit padding word to match the kernel's
 * 64-bit-time-ready layout.
 */
4093 struct target_msqid_ds
4095     struct target_ipc_perm msg_perm;
4096     abi_ulong msg_stime;
4097 #if TARGET_ABI_BITS == 32
4098     abi_ulong __unused1;
4100     abi_ulong msg_rtime;
4101 #if TARGET_ABI_BITS == 32
4102     abi_ulong __unused2;
4104     abi_ulong msg_ctime;
4105 #if TARGET_ABI_BITS == 32
4106     abi_ulong __unused3;
4108     abi_ulong __msg_cbytes;
4110     abi_ulong msg_qbytes;
4111     abi_ulong msg_lspid;
4112     abi_ulong msg_lrpid;
4113     abi_ulong __unused4;
4114     abi_ulong __unused5;
/*
 * Read a guest struct msqid_ds at target_addr into the host structure.
 * Fails with -TARGET_EFAULT on a bad guest address or a failed
 * ipc_perm conversion.
 */
4117 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4118                                                abi_ulong target_addr)
4120     struct target_msqid_ds *target_md;
4122     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4123         return -TARGET_EFAULT;
4124     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4125         return -TARGET_EFAULT;
     /* tswapal() converts each field from target to host endianness */
4126     host_md->msg_stime = tswapal(target_md->msg_stime);
4127     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4128     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4129     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4130     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4131     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4132     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4133     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4134     unlock_user_struct(target_md, target_addr, 0);
/*
 * Write a host struct msqid_ds back to the guest at target_addr.
 * Mirror image of target_to_host_msqid_ds().
 */
4138 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4139                                                struct msqid_ds *host_md)
4141     struct target_msqid_ds *target_md;
4143     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4144         return -TARGET_EFAULT;
4145     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4146         return -TARGET_EFAULT;
4147     target_md->msg_stime = tswapal(host_md->msg_stime);
4148     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4149     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4150     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4151     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4152     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4153     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4154     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4155     unlock_user_struct(target_md, target_addr, 1);
/* Guest-ABI view of struct msginfo (IPC_INFO/MSG_INFO result). */
4159 struct target_msginfo {
4167     unsigned short int msgseg;

/*
 * Copy a host struct msginfo out to guest memory at target_addr.
 * Fails with -TARGET_EFAULT on a bad guest address.
 */
4170 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4171                                               struct msginfo *host_msginfo)
4173     struct target_msginfo *target_msginfo;
4174     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4175         return -TARGET_EFAULT;
4176     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4177     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4178     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4179     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4180     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4181     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4182     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4183     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4184     unlock_user_struct(target_msginfo, target_addr, 1);
/*
 * Emulate msgctl(2): convert guest arguments per command, run the host
 * msgctl, and convert results back.  'ptr' is the guest address of the
 * command-specific buffer.
 */
4188 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4190     struct msqid_ds dsarg;
4191     struct msginfo msginfo;
4192     abi_long ret = -TARGET_EINVAL;
     /* IPC_STAT/IPC_SET style: round-trip a struct msqid_ds */
4200         if (target_to_host_msqid_ds(&dsarg,ptr))
4201             return -TARGET_EFAULT;
4202         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4203         if (host_to_target_msqid_ds(ptr,&dsarg))
4204             return -TARGET_EFAULT;
     /* commands with no data argument */
4207         ret = get_errno(msgctl(msgid, cmd, NULL));
     /* IPC_INFO/MSG_INFO: kernel fills a struct msginfo */
4211         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4212         if (host_to_target_msginfo(ptr, &msginfo))
4213             return -TARGET_EFAULT;
/* Guest-ABI view of struct msgbuf (mtype + variable-length mtext). */
4220 struct target_msgbuf {

/*
 * Emulate msgsnd(2): copy the guest message into a host-allocated
 * msgbuf (mtype byteswapped, mtext copied verbatim) and send it,
 * falling back to the multiplexed sys_ipc interface when __NR_msgsnd
 * is unavailable.
 */
4225 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4226                                  ssize_t msgsz, int msgflg)
4228     struct target_msgbuf *target_mb;
4229     struct msgbuf *host_mb;
4233         return -TARGET_EINVAL;
4236     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4237         return -TARGET_EFAULT;
     /* sizeof(long) covers the host mtype preceding the text */
4238     host_mb = g_try_malloc(msgsz + sizeof(long));
4240         unlock_user_struct(target_mb, msgp, 0);
4241         return -TARGET_ENOMEM;
4243     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4244     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4245     ret = -TARGET_ENOSYS;
4247     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4250     if (ret == -TARGET_ENOSYS) {
4252         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4255         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4261     unlock_user_struct(target_mb, msgp, 0);
/*
 * Per-host-architecture argument packing for the sys_ipc msgrcv
 * fallback: sparc passes msgp/msgtyp directly, s390x and the default
 * variant pass them via a two-element "kludge" array (the default
 * variant appends a sixth zero argument).
 */
4267 #if defined(__sparc__)
4268 /* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
4269 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4270 #elif defined(__s390x__)
4271 /* The s390 sys_ipc variant has only five parameters.  */
4272 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4273     ((long int[]){(long int)__msgp, __msgtyp})
4275 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4276     ((long int[]){(long int)__msgp, __msgtyp}), 0
/*
 * Emulate msgrcv(2): receive into a host-allocated msgbuf, then copy
 * the received mtext and byteswapped mtype back into the guest's
 * struct msgbuf at msgp.  Falls back to the multiplexed sys_ipc
 * interface when __NR_msgrcv is unavailable.
 */
4280 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4281                                  ssize_t msgsz, abi_long msgtyp,
4284     struct target_msgbuf *target_mb;
4286     struct msgbuf *host_mb;
4290         return -TARGET_EINVAL;
4293     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4294         return -TARGET_EFAULT;
4296     host_mb = g_try_malloc(msgsz + sizeof(long));
4298         ret = -TARGET_ENOMEM;
4301     ret = -TARGET_ENOSYS;
4303     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4306     if (ret == -TARGET_ENOSYS) {
4307         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4308                                  msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
     /* on success, ret is the number of mtext bytes received */
4313         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4314         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4315         if (!target_mtext) {
4316             ret = -TARGET_EFAULT;
4319         memcpy(target_mb->mtext, host_mb->mtext, ret);
4320         unlock_user(target_mtext, target_mtext_addr, ret);
4323     target_mb->mtype = tswapal(host_mb->mtype);
4327     unlock_user_struct(target_mb, msgp, 1);
/*
 * Read a guest struct shmid_ds at target_addr into the host structure.
 * Fails with -TARGET_EFAULT on a bad guest address or a failed
 * ipc_perm conversion.
 */
4332 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4333                                                abi_ulong target_addr)
4335     struct target_shmid_ds *target_sd;
4337     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4338         return -TARGET_EFAULT;
4339     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4340         return -TARGET_EFAULT;
4341     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4342     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4343     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4344     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4345     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4346     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4347     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4348     unlock_user_struct(target_sd, target_addr, 0);
/*
 * Write a host struct shmid_ds back to the guest at target_addr.
 * Mirror image of target_to_host_shmid_ds().
 */
4352 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4353                                                struct shmid_ds *host_sd)
4355     struct target_shmid_ds *target_sd;
4357     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4358         return -TARGET_EFAULT;
4359     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4360         return -TARGET_EFAULT;
4361     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4362     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4363     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4364     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4365     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4366     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4367     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4368     unlock_user_struct(target_sd, target_addr, 1);
/* Guest-ABI view of struct shminfo (IPC_INFO result). */
4372 struct target_shminfo {

/*
 * Copy a host struct shminfo out to guest memory at target_addr.
 * Fails with -TARGET_EFAULT on a bad guest address.
 */
4380 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4381                                               struct shminfo *host_shminfo)
4383     struct target_shminfo *target_shminfo;
4384     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4385         return -TARGET_EFAULT;
4386     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4387     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4388     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4389     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4390     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4391     unlock_user_struct(target_shminfo, target_addr, 1);
/* Guest-ABI view of struct shm_info (SHM_INFO result). */
4395 struct target_shm_info {
4400     abi_ulong swap_attempts;
4401     abi_ulong swap_successes;

/*
 * Copy a host struct shm_info out to guest memory at target_addr.
 * Fails with -TARGET_EFAULT on a bad guest address.
 */
4404 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4405                                                struct shm_info *host_shm_info)
4407     struct target_shm_info *target_shm_info;
4408     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4409         return -TARGET_EFAULT;
4410     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4411     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4412     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4413     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4414     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4415     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4416     unlock_user_struct(target_shm_info, target_addr, 1);
/*
 * Emulate shmctl(2): convert guest arguments per command, run the
 * host shmctl, and convert results back.  'buf' is the guest address
 * of the command-specific structure.
 */
4420 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4422     struct shmid_ds dsarg;
4423     struct shminfo shminfo;
4424     struct shm_info shm_info;
4425     abi_long ret = -TARGET_EINVAL;
     /* IPC_STAT/IPC_SET style: round-trip a struct shmid_ds */
4433         if (target_to_host_shmid_ds(&dsarg, buf))
4434             return -TARGET_EFAULT;
4435         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4436         if (host_to_target_shmid_ds(buf, &dsarg))
4437             return -TARGET_EFAULT;
     /* IPC_INFO: kernel fills a struct shminfo */
4440         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4441         if (host_to_target_shminfo(buf, &shminfo))
4442             return -TARGET_EFAULT;
     /* SHM_INFO: kernel fills a struct shm_info */
4445         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4446         if (host_to_target_shm_info(buf, &shm_info))
4447             return -TARGET_EFAULT;
     /* remaining commands take no data argument */
4452         ret = get_errno(shmctl(shmid, cmd, NULL));
4459 #ifndef TARGET_FORCE_SHMLBA
4460 /* For most architectures, SHMLBA is the same as the page size;
4461  * some architectures have larger values, in which case they should
4462  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4463  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4464  * and defining its own value for SHMLBA.
4466  * The kernel also permits SHMLBA to be set by the architecture to a
4467  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4468  * this means that addresses are rounded to the large size if
4469  * SHM_RND is set but addresses not aligned to that size are not rejected
4470  * as long as they are at least page-aligned. Since the only architecture
4471  * which uses this is ia64 this code doesn't provide for that oddity.
/* Default: the guest's shmat alignment requirement is one target page. */
4473 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4475     return TARGET_PAGE_SIZE;
/*
 * Emulate shmat(2): attach a SysV shared memory segment into the
 * guest address space, honouring both target and host SHMLBA
 * alignment, update QEMU's page flags, and record the mapping in
 * shm_regions[] so do_shmdt() can later tear it down.
 * Returns the guest attach address or a negative target errno.
 */
4479 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4480                                  int shmid, abi_ulong shmaddr, int shmflg)
4482     CPUState *cpu = env_cpu(cpu_env);
4485     struct shmid_ds shm_info;
4489     /* shmat pointers are always untagged */
4491     /* find out the length of the shared memory segment */
4492     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4493     if (is_error(ret)) {
4494         /* can't get length, bail out */
4498     shmlba = target_shmlba(cpu_env);
     /* enforce target alignment: round down with SHM_RND, else reject */
4500     if (shmaddr & (shmlba - 1)) {
4501         if (shmflg & SHM_RND) {
4502             shmaddr &= ~(shmlba - 1);
4504             return -TARGET_EINVAL;
4507     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4508         return -TARGET_EINVAL;
4514      * We're mapping shared memory, so ensure we generate code for parallel
4515      * execution and flush old translations.  This will work up to the level
4516      * supported by the host -- anything that requires EXCP_ATOMIC will not
4517      * be atomic with respect to an external process.
4519     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4520         cpu->tcg_cflags |= CF_PARALLEL;
     /* explicit address given by the guest: attach directly */
4525         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4527         abi_ulong mmap_start;
4529         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4530         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4532         if (mmap_start == -1) {
4534             host_raddr = (void *)-1;
     /* SHM_REMAP lets us attach over the placeholder found above */
4536             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4537                                shmflg | SHM_REMAP);
4540     if (host_raddr == (void *)-1) {
4542         return get_errno((long)host_raddr);
4544     raddr=h2g((unsigned long)host_raddr);
     /* make the new range visible to the softmmu page tracking */
4546     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4547                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4548                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
     /* remember the attachment in the first free slot */
4550     for (i = 0; i < N_SHM_REGIONS; i++) {
4551         if (!shm_regions[i].in_use) {
4552             shm_regions[i].in_use = true;
4553             shm_regions[i].start = raddr;
4554             shm_regions[i].size = shm_info.shm_segsz;
/*
 * Emulate shmdt(2): find the recorded attachment matching shmaddr,
 * clear its page flags and shm_regions[] slot, then detach on the
 * host.
 */
4564 static inline abi_long do_shmdt(abi_ulong shmaddr)
4569     /* shmdt pointers are always untagged */
4573     for (i = 0; i < N_SHM_REGIONS; ++i) {
4574         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4575             shm_regions[i].in_use = false;
4576             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4580     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4587 #ifdef TARGET_NR_ipc
4588 /* ??? This only works with linear mappings.  */
4589 /* do_ipc() must return target values and target errnos.  */
/*
 * Dispatcher for the multiplexed ipc(2) syscall used by some guest
 * ABIs: decodes the sub-call (sem*/msg*/shm*) from 'call' and routes
 * to the dedicated do_* helpers above.
 */
4590 static abi_long do_ipc(CPUArchState *cpu_env,
4591                        unsigned int call, abi_long first,
4592                        abi_long second, abi_long third,
4593                        abi_long ptr, abi_long fifth)
     /* high 16 bits of 'call' carry the interface version */
4598     version = call >> 16;
4603         ret = do_semtimedop(first, ptr, second, 0, false);
4605     case IPCOP_semtimedop:
4607      * The s390 sys_ipc variant has only five parameters instead of six
4608      * (as for default variant) and the only difference is the handling of
4609      * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4610      * to a struct timespec where the generic variant uses fifth parameter.
4612 #if defined(TARGET_S390X)
4613         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4615         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4620         ret = get_errno(semget(first, second, third));
4623     case IPCOP_semctl: {
4624         /* The semun argument to semctl is passed by value, so dereference the
4627         get_user_ual(atptr, ptr);
4628         ret = do_semctl(first, second, third, atptr);
4633         ret = get_errno(msgget(first, second));
4637         ret = do_msgsnd(first, ptr, second, third);
4641         ret = do_msgctl(first, second, ptr);
     /* old-style msgrcv passes msgp/msgtyp through a kludge struct */
4648             struct target_ipc_kludge {
4653             if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4654                 ret = -TARGET_EFAULT;
4658             ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4660             unlock_user_struct(tmp, ptr, 0);
4664             ret = do_msgrcv(first, ptr, second, fifth, third);
     /* shmat: result address is stored through the 'third' pointer */
4673             raddr = do_shmat(cpu_env, first, ptr, second);
4674             if (is_error(raddr))
4675                 return get_errno(raddr);
4676             if (put_user_ual(raddr, third))
4677                 return -TARGET_EFAULT;
4681             ret = -TARGET_EINVAL;
4686         ret = do_shmdt(ptr);
4690         /* IPC_* flag values are the same on all linux platforms */
4691         ret = get_errno(shmget(first, second, third));
4694         /* IPC_* and SHM_* command values are the same on all linux platforms */
4696         ret = do_shmctl(first, second, ptr);
4699         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4701         ret = -TARGET_ENOSYS;
4708 /* kernel structure types definitions */
/* First pass: build the STRUCT_<name> enum from syscall_types.h. */
4710 #define STRUCT(name, ...) STRUCT_ ## name,
4711 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4713 #include "syscall_types.h"
4717 #undef STRUCT_SPECIAL

/* Second pass: emit a thunk argtype descriptor array per structure. */
4719 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4720 #define STRUCT_SPECIAL(name)
4721 #include "syscall_types.h"
4723 #undef STRUCT_SPECIAL

/* Size of the shared scratch buffer used by the ioctl handlers. */
4725 #define MAX_STRUCT_SIZE 4096

4727 #ifdef CONFIG_FIEMAP
4728 /* So fiemap access checks don't overflow on 32 bit systems.
4729  * This is very slightly smaller than the limit imposed by
4730  * the underlying kernel.
4732 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4733                             / sizeof(struct fiemap_extent))
/*
 * Handler for the FS_IOC_FIEMAP ioctl: converts the guest's struct
 * fiemap (plus its trailing fiemap_extent array) to host format,
 * issues the ioctl, and converts the results back.  Allocates a
 * larger buffer when the extent array exceeds MAX_STRUCT_SIZE.
 */
4735 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4736                                        int fd, int cmd, abi_long arg)
4738     /* The parameter for this ioctl is a struct fiemap followed
4739      * by an array of struct fiemap_extent whose size is set
4740      * in fiemap->fm_extent_count. The array is filled in by the
4743     int target_size_in, target_size_out;
4745     const argtype *arg_type = ie->arg_type;
4746     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4749     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4753     assert(arg_type[0] == TYPE_PTR);
4754     assert(ie->access == IOC_RW);
     /* convert the fixed-size header from guest to host */
4756     target_size_in = thunk_type_size(arg_type, 0);
4757     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4759         return -TARGET_EFAULT;
4761     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4762     unlock_user(argptr, arg, 0);
4763     fm = (struct fiemap *)buf_temp;
     /* reject counts that would overflow the size computation below */
4764     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4765         return -TARGET_EINVAL;
4768     outbufsz = sizeof (*fm) +
4769         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4771     if (outbufsz > MAX_STRUCT_SIZE) {
4772         /* We can't fit all the extents into the fixed size buffer.
4773          * Allocate one that is large enough and use it instead.
4775         fm = g_try_malloc(outbufsz);
4777             return -TARGET_ENOMEM;
4779         memcpy(fm, buf_temp, sizeof(struct fiemap));
4782     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4783     if (!is_error(ret)) {
4784         target_size_out = target_size_in;
4785         /* An extent_count of 0 means we were only counting the extents
4786          * so there are no structs to copy
4788         if (fm->fm_extent_count != 0) {
4789             target_size_out += fm->fm_mapped_extents * extent_size;
4791         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4793             ret = -TARGET_EFAULT;
4795             /* Convert the struct fiemap */
4796             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4797             if (fm->fm_extent_count != 0) {
4798                 p = argptr + target_size_in;
4799                 /* ...and then all the struct fiemap_extents */
4800                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4801                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4806             unlock_user(argptr, arg, target_size_out);
/*
 * Handler for the SIOCGIFCONF ioctl: converts the guest's struct
 * ifconf and its ifreq array to host layout, runs the ioctl, then
 * converts the returned interface list back into the guest buffer.
 * A NULL guest ifc_buf means the caller only wants the length.
 */
4816 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4817                                 int fd, int cmd, abi_long arg)
4819     const argtype *arg_type = ie->arg_type;
4823     struct ifconf *host_ifconf;
4825     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4826     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4827     int target_ifreq_size;
4832     abi_long target_ifc_buf;
4836     assert(arg_type[0] == TYPE_PTR);
4837     assert(ie->access == IOC_RW);
4840     target_size = thunk_type_size(arg_type, 0);
4842     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4844         return -TARGET_EFAULT;
4845     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4846     unlock_user(argptr, arg, 0);
4848     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4849     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
     /* size an ifreq by its largest union arm to stay conservative */
4850     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4852     if (target_ifc_buf != 0) {
4853         target_ifc_len = host_ifconf->ifc_len;
4854         nb_ifreq = target_ifc_len / target_ifreq_size;
4855         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4857         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4858         if (outbufsz > MAX_STRUCT_SIZE) {
4860              * We can't fit all the extents into the fixed size buffer.
4861              * Allocate one that is large enough and use it instead.
4863             host_ifconf = malloc(outbufsz);
4865                 return -TARGET_ENOMEM;
4867             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
     /* the host ifreq array lives immediately after the ifconf header */
4870         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4872         host_ifconf->ifc_len = host_ifc_len;
     /* NULL buffer: kernel reports the needed length only */
4874         host_ifc_buf = NULL;
4876     host_ifconf->ifc_buf = host_ifc_buf;
4878     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4879     if (!is_error(ret)) {
4880 	/* convert host ifc_len to target ifc_len */
4882         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4883         target_ifc_len = nb_ifreq * target_ifreq_size;
4884         host_ifconf->ifc_len = target_ifc_len;
4886 	/* restore target ifc_buf */
4888         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4890 	/* copy struct ifconf to target user */
4892         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4894             return -TARGET_EFAULT;
4895         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4896         unlock_user(argptr, arg, target_size);
4898         if (target_ifc_buf != 0) {
4899             /* copy ifreq[] to target user */
4900             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4901             for (i = 0; i < nb_ifreq ; i++) {
4902                 thunk_convert(argptr + i * target_ifreq_size,
4903                               host_ifc_buf + i * sizeof(struct ifreq),
4904                               ifreq_arg_type, THUNK_TARGET);
4906             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4917 #if defined(CONFIG_USBFS)
4918 #if HOST_LONG_BITS > 64
4919 #error USBDEVFS thunks do not support >64 bit hosts yet.
/*
 * Book-keeping for an in-flight USBDEVFS URB: the guest addresses of
 * the urb and its data buffer, the locked host pointer for that
 * buffer, and the host copy of the urb itself.
 */
4922     uint64_t target_urb_adr;
4923     uint64_t target_buf_adr;
4924     char *target_buf_ptr;
4925     struct usbdevfs_urb host_urb;

/* Lazily created table mapping guest urb addresses to live_urb. */
4928 static GHashTable *usbdevfs_urb_hashtable(void)
4930     static GHashTable *urb_hashtable;
4932     if (!urb_hashtable) {
4933         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4935     return urb_hashtable;

/* Register an in-flight URB (keyed by its leading target_urb_adr). */
4938 static void urb_hashtable_insert(struct live_urb *urb)
4940     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4941     g_hash_table_insert(urb_hashtable, urb, urb);

/* Look up a live URB by the guest address of its usbdevfs_urb. */
4944 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4946     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4947     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);

/* Remove a completed/cancelled URB from the table. */
4950 static void urb_hashtable_remove(struct live_urb *urb)
4952     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4953     g_hash_table_remove(urb_hashtable, urb);
/*
 * Handler for USBDEVFS_REAPURB: reap a completed URB from the kernel,
 * recover its live_urb bookkeeping via container-of arithmetic on the
 * returned host_urb pointer, write the updated urb back to the guest,
 * and store the guest urb handle through 'arg'.
 */
4957 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4958                           int fd, int cmd, abi_long arg)
4960     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4961     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4962     struct live_urb *lurb;
4966     uintptr_t target_urb_adr;
4969     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4971     memset(buf_temp, 0, sizeof(uint64_t));
4972     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4973     if (is_error(ret)) {
     /* the kernel returned a pointer to our embedded host_urb */
4977     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4978     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4979     if (!lurb->target_urb_adr) {
4980         return -TARGET_EFAULT;
4982     urb_hashtable_remove(lurb);
     /* release the guest data buffer locked at submit time */
4983     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4984                 lurb->host_urb.buffer_length);
4985     lurb->target_buf_ptr = NULL;
4987     /* restore the guest buffer pointer */
4988     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4990     /* update the guest urb struct */
4991     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4994         return -TARGET_EFAULT;
4996     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4997     unlock_user(argptr, lurb->target_urb_adr, target_size);
4999     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5000     /* write back the urb handle */
5001     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5004         return -TARGET_EFAULT;
5007     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5008     target_urb_adr = lurb->target_urb_adr;
5009     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5010     unlock_user(argptr, arg, target_size);
/*
 * Handler for USBDEVFS_DISCARDURB: translate the guest urb address
 * back to its live host URB and forward the cancel to the kernel.
 */
5017 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5018                              uint8_t *buf_temp __attribute__((unused)),
5019                              int fd, int cmd, abi_long arg)
5021     struct live_urb *lurb;
5023     /* map target address back to host URB with metadata. */
5024     lurb = urb_hashtable_lookup(arg);
     /* unknown handle: the guest never submitted this urb */
5026         return -TARGET_EFAULT;
5028     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
/*
 * Handler for USBDEVFS_SUBMITURB: build a host copy of the guest urb,
 * lock the guest data buffer for the duration of the transfer, submit
 * it, and register the live_urb so reap/discard can find it later.
 */
5032 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5033                             int fd, int cmd, abi_long arg)
5035     const argtype *arg_type = ie->arg_type;
5040     struct live_urb *lurb;
5043      * each submitted URB needs to map to a unique ID for the
5044      * kernel, and that unique ID needs to be a pointer to
5045      * host memory.  hence, we need to malloc for each URB.
5046      * isochronous transfers have a variable length struct.
5049     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5051     /* construct host copy of urb and metadata */
5052     lurb = g_try_malloc0(sizeof(struct live_urb));
5054         return -TARGET_ENOMEM;
5057     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5060         return -TARGET_EFAULT;
5062     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5063     unlock_user(argptr, arg, 0);
     /* remember the guest addresses for reap time */
5065     lurb->target_urb_adr = arg;
5066     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5068     /* buffer space used depends on endpoint type so lock the entire buffer */
5069     /* control type urbs should check the buffer contents for true direction */
5070     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5071     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5072                                      lurb->host_urb.buffer_length, 1);
5073     if (lurb->target_buf_ptr == NULL) {
5075         return -TARGET_EFAULT;
5078     /* update buffer pointer in host copy */
5079     lurb->host_urb.buffer = lurb->target_buf_ptr;
5081     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5082     if (is_error(ret)) {
5083         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5086         urb_hashtable_insert(lurb);
5091 #endif /* CONFIG_USBFS */
5093 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5094 int cmd, abi_long arg)
5097 struct dm_ioctl *host_dm;
5098 abi_long guest_data;
5099 uint32_t guest_data_size;
5101 const argtype *arg_type = ie->arg_type;
5103 void *big_buf = NULL;
5107 target_size = thunk_type_size(arg_type, 0);
5108 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5110 ret = -TARGET_EFAULT;
5113 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5114 unlock_user(argptr, arg, 0);
5116 /* buf_temp is too small, so fetch things into a bigger buffer */
5117 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5118 memcpy(big_buf, buf_temp, target_size);
5122 guest_data = arg + host_dm->data_start;
5123 if ((guest_data - arg) < 0) {
5124 ret = -TARGET_EINVAL;
5127 guest_data_size = host_dm->data_size - host_dm->data_start;
5128 host_data = (char*)host_dm + host_dm->data_start;
5130 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5132 ret = -TARGET_EFAULT;
5136 switch (ie->host_cmd) {
5138 case DM_LIST_DEVICES:
5141 case DM_DEV_SUSPEND:
5144 case DM_TABLE_STATUS:
5145 case DM_TABLE_CLEAR:
5147 case DM_LIST_VERSIONS:
5151 case DM_DEV_SET_GEOMETRY:
5152 /* data contains only strings */
5153 memcpy(host_data, argptr, guest_data_size);
5156 memcpy(host_data, argptr, guest_data_size);
5157 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5161 void *gspec = argptr;
5162 void *cur_data = host_data;
5163 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5164 int spec_size = thunk_type_size(arg_type, 0);
5167 for (i = 0; i < host_dm->target_count; i++) {
5168 struct dm_target_spec *spec = cur_data;
5172 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5173 slen = strlen((char*)gspec + spec_size) + 1;
5175 spec->next = sizeof(*spec) + slen;
5176 strcpy((char*)&spec[1], gspec + spec_size);
5178 cur_data += spec->next;
5183 ret = -TARGET_EINVAL;
5184 unlock_user(argptr, guest_data, 0);
5187 unlock_user(argptr, guest_data, 0);
5189 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5190 if (!is_error(ret)) {
5191 guest_data = arg + host_dm->data_start;
5192 guest_data_size = host_dm->data_size - host_dm->data_start;
5193 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5194 switch (ie->host_cmd) {
5199 case DM_DEV_SUSPEND:
5202 case DM_TABLE_CLEAR:
5204 case DM_DEV_SET_GEOMETRY:
5205 /* no return data */
5207 case DM_LIST_DEVICES:
5209 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5210 uint32_t remaining_data = guest_data_size;
5211 void *cur_data = argptr;
5212 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5213 int nl_size = 12; /* can't use thunk_size due to alignment */
5216 uint32_t next = nl->next;
5218 nl->next = nl_size + (strlen(nl->name) + 1);
5220 if (remaining_data < nl->next) {
5221 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5224 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5225 strcpy(cur_data + nl_size, nl->name);
5226 cur_data += nl->next;
5227 remaining_data -= nl->next;
5231 nl = (void*)nl + next;
5236 case DM_TABLE_STATUS:
5238 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5239 void *cur_data = argptr;
5240 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5241 int spec_size = thunk_type_size(arg_type, 0);
5244 for (i = 0; i < host_dm->target_count; i++) {
5245 uint32_t next = spec->next;
5246 int slen = strlen((char*)&spec[1]) + 1;
5247 spec->next = (cur_data - argptr) + spec_size + slen;
5248 if (guest_data_size < spec->next) {
5249 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5252 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5253 strcpy(cur_data + spec_size, (char*)&spec[1]);
5254 cur_data = argptr + spec->next;
5255 spec = (void*)host_dm + host_dm->data_start + next;
5261 void *hdata = (void*)host_dm + host_dm->data_start;
5262 int count = *(uint32_t*)hdata;
5263 uint64_t *hdev = hdata + 8;
5264 uint64_t *gdev = argptr + 8;
5267 *(uint32_t*)argptr = tswap32(count);
5268 for (i = 0; i < count; i++) {
5269 *gdev = tswap64(*hdev);
5275 case DM_LIST_VERSIONS:
5277 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5278 uint32_t remaining_data = guest_data_size;
5279 void *cur_data = argptr;
5280 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5281 int vers_size = thunk_type_size(arg_type, 0);
5284 uint32_t next = vers->next;
5286 vers->next = vers_size + (strlen(vers->name) + 1);
5288 if (remaining_data < vers->next) {
5289 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5292 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5293 strcpy(cur_data + vers_size, vers->name);
5294 cur_data += vers->next;
5295 remaining_data -= vers->next;
5299 vers = (void*)vers + next;
5304 unlock_user(argptr, guest_data, 0);
5305 ret = -TARGET_EINVAL;
5308 unlock_user(argptr, guest_data, guest_data_size);
5310 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5312 ret = -TARGET_EFAULT;
5315 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5316 unlock_user(argptr, arg, target_size);
/*
 * BLKPG ioctl: the guest passes a struct blkpg_ioctl_arg whose 'data'
 * field points at a struct blkpg_partition.  Convert the outer struct,
 * check the opcode, convert the pointed-to payload into a host-local
 * copy, repoint 'data' at that copy, then issue the host ioctl.
 * NOTE(review): this dump elides lines (embedded numbering skips), so
 * some statements/braces between the visible lines are missing here.
 */
5323 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5324 int cmd, abi_long arg)
5328 const argtype *arg_type = ie->arg_type;
5329 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
/* buf_temp doubles as the host-side blkpg_ioctl_arg */
5332 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5333 struct blkpg_partition host_part;
5335 /* Read and convert blkpg */
5337 target_size = thunk_type_size(arg_type, 0);
5338 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5340 ret = -TARGET_EFAULT;
5343 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5344 unlock_user(argptr, arg, 0);
5346 switch (host_blkpg->op) {
5347 case BLKPG_ADD_PARTITION:
5348 case BLKPG_DEL_PARTITION:
5349 /* payload is struct blkpg_partition */
5352 /* Unknown opcode */
5353 ret = -TARGET_EINVAL;
5357 /* Read and convert blkpg->data */
5358 arg = (abi_long)(uintptr_t)host_blkpg->data;
5359 target_size = thunk_type_size(part_arg_type, 0);
5360 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5362 ret = -TARGET_EFAULT;
5365 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5366 unlock_user(argptr, arg, 0);
5368 /* Swizzle the data pointer to our local copy and call! */
5369 host_blkpg->data = &host_part;
5370 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
/*
 * Routing-table ioctls (struct rtentry argument, IOC_W only): convert
 * the target struct field by field so the rt_dev string pointer can be
 * intercepted — it is replaced with a host pointer obtained from
 * lock_user_string() and unlocked again after the host ioctl returns.
 * NOTE(review): this dump elides lines; some braces/statements between
 * the visible lines are missing here.
 */
5376 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5377 int fd, int cmd, abi_long arg)
5379 const argtype *arg_type = ie->arg_type;
5380 const StructEntry *se;
5381 const argtype *field_types;
5382 const int *dst_offsets, *src_offsets;
5385 abi_ulong *target_rt_dev_ptr = NULL;
5386 unsigned long *host_rt_dev_ptr = NULL;
/* This helper only supports write-only pointer-to-struct arguments. */
5390 assert(ie->access == IOC_W);
5391 assert(*arg_type == TYPE_PTR);
5393 assert(*arg_type == TYPE_STRUCT);
5394 target_size = thunk_type_size(arg_type, 0);
5395 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5397 return -TARGET_EFAULT;
5400 assert(*arg_type == (int)STRUCT_rtentry);
5401 se = struct_entries + *arg_type++;
5402 assert(se->convert[0] == NULL);
5403 /* convert struct here to be able to catch rt_dev string */
5404 field_types = se->field_types;
5405 dst_offsets = se->field_offsets[THUNK_HOST];
5406 src_offsets = se->field_offsets[THUNK_TARGET];
5407 for (i = 0; i < se->nb_fields; i++) {
5408 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5409 assert(*field_types == TYPE_PTRVOID);
5410 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5411 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5412 if (*target_rt_dev_ptr != 0) {
/* Lock the guest device-name string and store the host pointer. */
5413 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5414 tswapal(*target_rt_dev_ptr));
5415 if (!*host_rt_dev_ptr) {
5416 unlock_user(argptr, arg, 0);
5417 return -TARGET_EFAULT;
5420 *host_rt_dev_ptr = 0;
/* Ordinary field: thunk-convert and advance the field type cursor. */
5425 field_types = thunk_convert(buf_temp + dst_offsets[i],
5426 argptr + src_offsets[i],
5427 field_types, THUNK_HOST);
5429 unlock_user(argptr, arg, 0);
5431 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
/* Release the locked rt_dev string (if any) regardless of ret. */
5433 assert(host_rt_dev_ptr != NULL);
5434 assert(target_rt_dev_ptr != NULL);
5435 if (*host_rt_dev_ptr != 0) {
5436 unlock_user((void *)*host_rt_dev_ptr,
5437 *target_rt_dev_ptr, 0);
/*
 * KDSIGACCEPT: the ioctl argument is a signal number passed by value,
 * so translate it from target to host numbering before the host call.
 */
5442 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5443 int fd, int cmd, abi_long arg)
5445 int sig = target_to_host_signal(arg);
5446 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
/*
 * SIOCGSTAMP: fetch the last-packet timestamp with the host ioctl,
 * then copy it out in the layout the guest asked for — 'old' commands
 * get a target struct timeval, otherwise a 64-bit timeval.
 * NOTE(review): dump elides lines; the tv declaration and some braces
 * are on missing lines.
 */
5449 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5450 int fd, int cmd, abi_long arg)
5455 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5456 if (is_error(ret)) {
5460 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5461 if (copy_to_user_timeval(arg, &tv)) {
5462 return -TARGET_EFAULT;
5465 if (copy_to_user_timeval64(arg, &tv)) {
5466 return -TARGET_EFAULT;
/*
 * SIOCGSTAMPNS: nanosecond variant of SIOCGSTAMP — same pattern but
 * with struct timespec, copied out as old (32-bit) or 64-bit layout
 * depending on the target command.
 * NOTE(review): dump elides lines; the ts declaration and some braces
 * are on missing lines.
 */
5473 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5474 int fd, int cmd, abi_long arg)
5479 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5480 if (is_error(ret)) {
5484 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5485 if (host_to_target_timespec(arg, &ts)) {
5486 return -TARGET_EFAULT;
5489 if (host_to_target_timespec64(arg, &ts)) {
5490 return -TARGET_EFAULT;
/*
 * TIOCGPTPEER: the argument is an open(2)-style flags word passed by
 * value; translate the flag bits with the fcntl flags table first.
 */
5498 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5499 int fd, int cmd, abi_long arg)
5501 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5502 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
/*
 * Release the three guest string buffers (name/date/desc) locked for a
 * DRM_IOCTL_VERSION call.  When 'copy' is true the *_len bytes written
 * by the host are copied back to guest memory; when false nothing is
 * written back (error/cleanup path).
 */
5508 static void unlock_drm_version(struct drm_version *host_ver,
5509 struct target_drm_version *target_ver,
5512 unlock_user(host_ver->name, target_ver->name,
5513 copy ? host_ver->name_len : 0);
5514 unlock_user(host_ver->date, target_ver->date,
5515 copy ? host_ver->date_len : 0);
5516 unlock_user(host_ver->desc, target_ver->desc,
5517 copy ? host_ver->desc_len : 0);
/*
 * Prepare a host struct drm_version from the guest's: read the three
 * buffer lengths and lock the corresponding guest buffers for writing
 * so the host ioctl can fill them in directly.  On a lock failure the
 * already-locked buffers are released via unlock_drm_version(false).
 * NOTE(review): dump elides lines; the error-path control flow between
 * the visible lines is missing here.
 */
5520 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5521 struct target_drm_version *target_ver)
5523 memset(host_ver, 0, sizeof(*host_ver));
5525 __get_user(host_ver->name_len, &target_ver->name_len);
5526 if (host_ver->name_len) {
5527 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5528 target_ver->name_len, 0);
5529 if (!host_ver->name) {
5534 __get_user(host_ver->date_len, &target_ver->date_len);
5535 if (host_ver->date_len) {
5536 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5537 target_ver->date_len, 0);
5538 if (!host_ver->date) {
5543 __get_user(host_ver->desc_len, &target_ver->desc_len);
5544 if (host_ver->desc_len) {
5545 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5546 target_ver->desc_len, 0);
5547 if (!host_ver->desc) {
/* Error path: release whatever was locked, copying nothing back. */
5554 unlock_drm_version(host_ver, target_ver, false);
/*
 * Copy the scalar results of a successful DRM_IOCTL_VERSION back to the
 * guest struct, then unlock the string buffers with copy=true so the
 * host-written name/date/desc bytes reach guest memory.
 */
5558 static inline void host_to_target_drmversion(
5559 struct target_drm_version *target_ver,
5560 struct drm_version *host_ver)
5562 __put_user(host_ver->version_major, &target_ver->version_major);
5563 __put_user(host_ver->version_minor, &target_ver->version_minor);
5564 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5565 __put_user(host_ver->name_len, &target_ver->name_len);
5566 __put_user(host_ver->date_len, &target_ver->date_len);
5567 __put_user(host_ver->desc_len, &target_ver->desc_len);
5568 unlock_drm_version(host_ver, target_ver, true);
/*
 * Generic DRM ioctl handler: currently only DRM_IOCTL_VERSION is
 * translated (its struct contains three guest pointers); anything else
 * returns -TARGET_ENOSYS.
 * NOTE(review): dump elides lines; some braces between the visible
 * lines are missing here.
 */
5571 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5572 int fd, int cmd, abi_long arg)
5574 struct drm_version *ver;
5575 struct target_drm_version *target_ver;
5578 switch (ie->host_cmd) {
5579 case DRM_IOCTL_VERSION:
5580 if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5581 return -TARGET_EFAULT;
5583 ver = (struct drm_version *)buf_temp;
5584 ret = target_to_host_drmversion(ver, target_ver);
5585 if (!is_error(ret)) {
5586 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5587 if (is_error(ret)) {
/* Failure: release locked buffers without copying data back. */
5588 unlock_drm_version(ver, target_ver, false);
5590 host_to_target_drmversion(target_ver, ver);
5593 unlock_user_struct(target_ver, arg, 0);
5596 return -TARGET_ENOSYS;
/*
 * DRM_IOCTL_I915_GETPARAM: the struct contains a pointer to an int
 * result.  Point the host struct's value field at a local int, run the
 * ioctl, then write the result to the guest's value pointer.
 * NOTE(review): dump elides lines; the 'value' declaration is on a
 * missing line.
 */
5599 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5600 struct drm_i915_getparam *gparam,
5601 int fd, abi_long arg)
5605 struct target_drm_i915_getparam *target_gparam;
5607 if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5608 return -TARGET_EFAULT;
5611 __get_user(gparam->param, &target_gparam->param);
5612 gparam->value = &value;
5613 ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5614 put_user_s32(value, target_gparam->value);
5616 unlock_user_struct(target_gparam, arg, 0);
/*
 * i915-specific DRM ioctls: dispatch DRM_IOCTL_I915_GETPARAM to its
 * helper; everything else is unimplemented (-TARGET_ENOSYS).
 */
5620 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5621 int fd, int cmd, abi_long arg)
5623 switch (ie->host_cmd) {
5624 case DRM_IOCTL_I915_GETPARAM:
5625 return do_ioctl_drm_i915_getparam(ie,
5626 (struct drm_i915_getparam *)buf_temp,
5629 return -TARGET_ENOSYS;
/*
 * TUNSETTXFILTER: struct tun_filter has a flexible array of MAC
 * addresses after the fixed header.  Byte-swap the header fields, then
 * (after a bounds check against the translation buffer) copy
 * count * ETH_ALEN address bytes from guest memory.
 * NOTE(review): dump elides lines; the size-check comparison operand
 * and some braces are on missing lines.
 */
5635 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5636 int fd, int cmd, abi_long arg)
5638 struct tun_filter *filter = (struct tun_filter *)buf_temp;
5639 struct tun_filter *target_filter;
5642 assert(ie->access == IOC_W);
5644 target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5645 if (!target_filter) {
5646 return -TARGET_EFAULT;
5648 filter->flags = tswap16(target_filter->flags);
5649 filter->count = tswap16(target_filter->count);
5650 unlock_user(target_filter, arg, 0);
5652 if (filter->count) {
/* Guard against the variable-length tail overflowing buf_temp. */
5653 if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5655 return -TARGET_EFAULT;
5658 target_addr = lock_user(VERIFY_READ,
5659 arg + offsetof(struct tun_filter, addr),
5660 filter->count * ETH_ALEN, 1);
5662 return -TARGET_EFAULT;
5664 memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5665 unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5668 return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
/*
 * Table of supported ioctls, filled in from an include list (elided in
 * this dump).  IOCTL maps a target command to a host command with thunk
 * argument types; IOCTL_SPECIAL additionally names a custom do_* helper;
 * IOCTL_IGNORE registers a target command with host_cmd 0 so do_ioctl()
 * reports it as an unimplemented BSD-style ioctl.
 */
5671 IOCTLEntry ioctl_entries[] = {
5672 #define IOCTL(cmd, access, ...) \
5673 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5674 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5675 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5676 #define IOCTL_IGNORE(cmd) \
5677 { TARGET_ ## cmd, 0, #cmd },
5682 /* ??? Implement proper locking for ioctls. */
5683 /* do_ioctl() Must return target values and target errnos. */
/*
 * Central ioctl dispatcher: look the target command up in
 * ioctl_entries, delegate to a custom handler if one is registered,
 * otherwise convert the argument generically by its thunk type —
 * passing it through untouched, or converting a pointed-to struct
 * according to the entry's IOC_R / IOC_W / IOC_RW access mode.
 * NOTE(review): dump elides lines; loop headers, case labels and some
 * braces between the visible lines are missing here.
 */
5684 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5686 const IOCTLEntry *ie;
5687 const argtype *arg_type;
5689 uint8_t buf_temp[MAX_STRUCT_SIZE];
/* target_cmd == 0 terminates the table: command not supported. */
5695 if (ie->target_cmd == 0) {
5697 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5698 return -TARGET_ENOSYS;
5700 if (ie->target_cmd == cmd)
5704 arg_type = ie->arg_type;
/* Registered custom handler takes over completely. */
5706 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5707 } else if (!ie->host_cmd) {
5708 /* Some architectures define BSD ioctls in their headers
5709 that are not implemented in Linux. */
5710 return -TARGET_ENOSYS;
5713 switch(arg_type[0]) {
/* No argument at all. */
5716 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
/* Scalar argument passed through unchanged. */
5722 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5726 target_size = thunk_type_size(arg_type, 0);
5727 switch(ie->access) {
/* IOC_R: host fills buf_temp, convert back out to the guest. */
5729 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5730 if (!is_error(ret)) {
5731 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5733 return -TARGET_EFAULT;
5734 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5735 unlock_user(argptr, arg, target_size);
/* IOC_W: convert guest struct in, host only reads it. */
5739 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5741 return -TARGET_EFAULT;
5742 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5743 unlock_user(argptr, arg, 0);
5744 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
/* IOC_RW: convert in, call, convert back out on success. */
5748 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5750 return -TARGET_EFAULT;
5751 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5752 unlock_user(argptr, arg, 0);
5753 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5754 if (!is_error(ret)) {
5755 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5757 return -TARGET_EFAULT;
5758 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5759 unlock_user(argptr, arg, target_size);
5765 qemu_log_mask(LOG_UNIMP,
5766 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5767 (long)cmd, arg_type[0]);
5768 ret = -TARGET_ENOSYS;
/* termios c_iflag: target<->host bit translation (1:1 flag mapping). */
5774 static const bitmask_transtbl iflag_tbl[] = {
5775 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5776 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5777 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5778 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5779 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5780 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5781 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5782 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5783 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5784 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5785 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5786 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5787 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5788 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5789 { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
/*
 * termios c_oflag translation.  Multi-bit delay fields (NLDLY, CRDLY,
 * TABDLY, ...) list one entry per possible value, with the field mask
 * in the first/third columns and the value in the second/fourth.
 */
5793 static const bitmask_transtbl oflag_tbl[] = {
5794 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5795 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5796 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5797 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5798 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5799 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5800 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5801 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5802 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5803 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5804 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5805 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5806 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5807 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5808 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5809 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5810 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5811 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5812 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5813 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5814 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5815 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5816 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5817 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/*
 * termios c_cflag translation.  Baud rates share the CBAUD field mask
 * (one entry per rate); CSIZE likewise has one entry per word size.
 */
5821 static const bitmask_transtbl cflag_tbl[] = {
5822 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5823 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5824 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5825 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5826 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5827 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5828 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5829 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5830 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5831 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5832 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5833 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5834 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5835 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5836 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5837 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5838 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5839 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5840 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5841 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5842 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5843 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5844 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5845 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5846 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5847 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5848 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5849 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5850 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5851 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5852 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/* termios c_lflag: target<->host bit translation (1:1 flag mapping). */
5856 static const bitmask_transtbl lflag_tbl[] = {
5857 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5858 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5859 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5860 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5861 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5862 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5863 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5864 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5865 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5866 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5867 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5868 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5869 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5870 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5871 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5872 { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
/*
 * Convert a guest struct target_termios into a host struct termios:
 * translate the four flag words through the bitmask tables and remap
 * the control-character array index by index.
 * NOTE(review): dump elides lines; the 'host->c_xflag =' left-hand
 * sides of the four flag conversions are on missing lines.
 */
5876 static void target_to_host_termios (void *dst, const void *src)
5878 struct host_termios *host = dst;
5879 const struct target_termios *target = src;
5882 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5884 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5886 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5888 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5889 host->c_line = target->c_line;
/* c_cc index layouts differ between target and host; remap each slot. */
5891 memset(host->c_cc, 0, sizeof(host->c_cc));
5892 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5893 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5894 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5895 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5896 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5897 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5898 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5899 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5900 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5901 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5902 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5903 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5904 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5905 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5906 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5907 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5908 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/*
 * Inverse of target_to_host_termios: translate host termios flags and
 * control characters back into the guest layout.
 * NOTE(review): dump elides lines; the 'target->c_xflag =' left-hand
 * sides of the four flag conversions are on missing lines.
 */
5911 static void host_to_target_termios (void *dst, const void *src)
5913 struct target_termios *target = dst;
5914 const struct host_termios *host = src;
5917 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5919 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5921 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5923 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5924 target->c_line = host->c_line;
/* Remap control characters back to target slot indices. */
5926 memset(target->c_cc, 0, sizeof(target->c_cc));
5927 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5928 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5929 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5930 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5931 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5932 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5933 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5934 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5935 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5936 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5937 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5938 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5939 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5940 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5941 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5942 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5943 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/*
 * Thunk descriptor for termios: uses the custom converters above
 * instead of the generic field-by-field conversion.
 */
5946 static const StructEntry struct_termios_def = {
5947 .convert = { host_to_target_termios, target_to_host_termios },
5948 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5949 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5950 .print = print_termios,
/* mmap(2) flags: target<->host bit translation for do_mmap paths. */
5953 static const bitmask_transtbl mmap_flags_tbl[] = {
5954 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5955 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5956 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5957 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5958 MAP_ANONYMOUS, MAP_ANONYMOUS },
5959 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5960 MAP_GROWSDOWN, MAP_GROWSDOWN },
5961 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5962 MAP_DENYWRITE, MAP_DENYWRITE },
5963 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5964 MAP_EXECUTABLE, MAP_EXECUTABLE },
5965 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5966 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5967 MAP_NORESERVE, MAP_NORESERVE },
5968 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5969 /* MAP_STACK had been ignored by the kernel for quite some time.
5970 Recognize it for the target insofar as we do not want to pass
5971 it through to the host. */
5972 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5977 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5978 * TARGET_I386 is defined if TARGET_X86_64 is defined
5980 #if defined(TARGET_I386)
5982 /* NOTE: there is really one LDT for all the threads */
5983 static uint8_t *ldt_table;
/*
 * modify_ldt read path: copy (up to bytecount bytes of) the emulated
 * LDT table out to guest memory.
 * NOTE(review): dump elides lines; local declarations and some braces
 * are on missing lines.
 */
5985 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5992 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5993 if (size > bytecount)
5995 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5997 return -TARGET_EFAULT;
5998 /* ??? Should this be byteswapped? */
5999 memcpy(p, ldt_table, size);
6000 unlock_user(p, ptr, size);
6004 /* XXX: add locking support */
/*
 * modify_ldt write path: read a target_modify_ldt_ldt_s descriptor
 * request from the guest, validate it, lazily allocate the emulated
 * LDT backing store, then encode the two 32-bit descriptor words the
 * same way the Linux kernel does and install them in ldt_table.
 * 'oldmode' selects the legacy modify_ldt(1) semantics.
 * NOTE(review): dump elides lines; several conditions/braces between
 * the visible lines are missing here.
 */
6005 static abi_long write_ldt(CPUX86State *env,
6006 abi_ulong ptr, unsigned long bytecount, int oldmode)
6008 struct target_modify_ldt_ldt_s ldt_info;
6009 struct target_modify_ldt_ldt_s *target_ldt_info;
6010 int seg_32bit, contents, read_exec_only, limit_in_pages;
6011 int seg_not_present, useable, lm;
6012 uint32_t *lp, entry_1, entry_2;
6014 if (bytecount != sizeof(ldt_info))
6015 return -TARGET_EINVAL;
6016 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6017 return -TARGET_EFAULT;
6018 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6019 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6020 ldt_info.limit = tswap32(target_ldt_info->limit);
6021 ldt_info.flags = tswap32(target_ldt_info->flags);
6022 unlock_user_struct(target_ldt_info, ptr, 0);
6024 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6025 return -TARGET_EINVAL;
/* Unpack the packed flags word (same layout as user_desc.flags). */
6026 seg_32bit = ldt_info.flags & 1;
6027 contents = (ldt_info.flags >> 1) & 3;
6028 read_exec_only = (ldt_info.flags >> 3) & 1;
6029 limit_in_pages = (ldt_info.flags >> 4) & 1;
6030 seg_not_present = (ldt_info.flags >> 5) & 1;
6031 useable = (ldt_info.flags >> 6) & 1;
6035 lm = (ldt_info.flags >> 7) & 1;
6037 if (contents == 3) {
6039 return -TARGET_EINVAL;
6040 if (seg_not_present == 0)
6041 return -TARGET_EINVAL;
6043 /* allocate the LDT */
6045 env->ldt.base = target_mmap(0,
6046 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6047 PROT_READ|PROT_WRITE,
6048 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6049 if (env->ldt.base == -1)
6050 return -TARGET_ENOMEM;
6051 memset(g2h_untagged(env->ldt.base), 0,
6052 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6053 env->ldt.limit = 0xffff;
6054 ldt_table = g2h_untagged(env->ldt.base);
6057 /* NOTE: same code as Linux kernel */
6058 /* Allow LDTs to be cleared by the user. */
6059 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6062 read_exec_only == 1 &&
6064 limit_in_pages == 0 &&
6065 seg_not_present == 1 &&
/* Build the two descriptor words from base/limit/flags. */
6073 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6074 (ldt_info.limit & 0x0ffff);
6075 entry_2 = (ldt_info.base_addr & 0xff000000) |
6076 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6077 (ldt_info.limit & 0xf0000) |
6078 ((read_exec_only ^ 1) << 9) |
6080 ((seg_not_present ^ 1) << 15) |
6082 (limit_in_pages << 23) |
6086 entry_2 |= (useable << 20);
6088 /* Install the new entry ... */
6090 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6091 lp[0] = tswap32(entry_1);
6092 lp[1] = tswap32(entry_2);
6096 /* specific and weird i386 syscalls */
/*
 * modify_ldt(2) dispatcher: visible arms read the LDT, write it in old
 * mode, and write it in new mode; unknown func values get -ENOSYS.
 * NOTE(review): dump elides lines; the switch header and case labels
 * are on missing lines.
 */
6097 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6098 unsigned long bytecount)
6104 ret = read_ldt(ptr, bytecount);
6107 ret = write_ldt(env, ptr, bytecount, 1);
6110 ret = write_ldt(env, ptr, bytecount, 0);
6113 ret = -TARGET_ENOSYS;
6119 #if defined(TARGET_ABI32)
/*
 * set_thread_area(2) for 32-bit x86 guests: install a TLS descriptor
 * into the emulated GDT.  entry_number == -1 asks us to pick the first
 * free TLS slot (and write the chosen index back to the guest); the
 * descriptor words are then encoded exactly as in write_ldt().
 * NOTE(review): dump elides lines; several conditions/braces between
 * the visible lines are missing here.
 */
6120 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6122 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6123 struct target_modify_ldt_ldt_s ldt_info;
6124 struct target_modify_ldt_ldt_s *target_ldt_info;
6125 int seg_32bit, contents, read_exec_only, limit_in_pages;
6126 int seg_not_present, useable, lm;
6127 uint32_t *lp, entry_1, entry_2;
6130 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6131 if (!target_ldt_info)
6132 return -TARGET_EFAULT;
6133 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6134 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6135 ldt_info.limit = tswap32(target_ldt_info->limit);
6136 ldt_info.flags = tswap32(target_ldt_info->flags);
/* -1 means "allocate a free TLS GDT slot for me". */
6137 if (ldt_info.entry_number == -1) {
6138 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6139 if (gdt_table[i] == 0) {
6140 ldt_info.entry_number = i;
6141 target_ldt_info->entry_number = tswap32(i);
6146 unlock_user_struct(target_ldt_info, ptr, 1);
6148 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6149 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6150 return -TARGET_EINVAL;
/* Unpack the packed flags word (same layout as user_desc.flags). */
6151 seg_32bit = ldt_info.flags & 1;
6152 contents = (ldt_info.flags >> 1) & 3;
6153 read_exec_only = (ldt_info.flags >> 3) & 1;
6154 limit_in_pages = (ldt_info.flags >> 4) & 1;
6155 seg_not_present = (ldt_info.flags >> 5) & 1;
6156 useable = (ldt_info.flags >> 6) & 1;
6160 lm = (ldt_info.flags >> 7) & 1;
6163 if (contents == 3) {
6164 if (seg_not_present == 0)
6165 return -TARGET_EINVAL;
6168 /* NOTE: same code as Linux kernel */
6169 /* Allow LDTs to be cleared by the user. */
6170 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6171 if ((contents == 0 &&
6172 read_exec_only == 1 &&
6174 limit_in_pages == 0 &&
6175 seg_not_present == 1 &&
/* Build the two descriptor words from base/limit/flags. */
6183 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6184 (ldt_info.limit & 0x0ffff);
6185 entry_2 = (ldt_info.base_addr & 0xff000000) |
6186 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6187 (ldt_info.limit & 0xf0000) |
6188 ((read_exec_only ^ 1) << 9) |
6190 ((seg_not_present ^ 1) << 15) |
6192 (limit_in_pages << 23) |
6197 /* Install the new entry ... */
6199 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6200 lp[0] = tswap32(entry_1);
6201 lp[1] = tswap32(entry_2);
/*
 * get_thread_area(2): read back a TLS descriptor from the emulated
 * GDT, decode the two descriptor words into base/limit/flags fields
 * and write them to the guest's user_desc-style struct.
 * NOTE(review): dump elides lines; some braces between the visible
 * lines are missing here.
 */
6205 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6207 struct target_modify_ldt_ldt_s *target_ldt_info;
6208 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6209 uint32_t base_addr, limit, flags;
6210 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6211 int seg_not_present, useable, lm;
6212 uint32_t *lp, entry_1, entry_2;
6214 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6215 if (!target_ldt_info)
6216 return -TARGET_EFAULT;
6217 idx = tswap32(target_ldt_info->entry_number);
6218 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6219 idx > TARGET_GDT_ENTRY_TLS_MAX) {
6220 unlock_user_struct(target_ldt_info, ptr, 1);
6221 return -TARGET_EINVAL;
6223 lp = (uint32_t *)(gdt_table + idx);
6224 entry_1 = tswap32(lp[0]);
6225 entry_2 = tswap32(lp[1]);
/* Decode descriptor word 2 back into individual flag fields. */
6227 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6228 contents = (entry_2 >> 10) & 3;
6229 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6230 seg_32bit = (entry_2 >> 22) & 1;
6231 limit_in_pages = (entry_2 >> 23) & 1;
6232 useable = (entry_2 >> 20) & 1;
6236 lm = (entry_2 >> 21) & 1;
6238 flags = (seg_32bit << 0) | (contents << 1) |
6239 (read_exec_only << 3) | (limit_in_pages << 4) |
6240 (seg_not_present << 5) | (useable << 6) | (lm << 7);
6241 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6242 base_addr = (entry_1 >> 16) |
6243 (entry_2 & 0xff000000) |
6244 ((entry_2 & 0xff) << 16);
6245 target_ldt_info->base_addr = tswapal(base_addr);
6246 target_ldt_info->limit = tswap32(limit);
6247 target_ldt_info->flags = tswap32(flags);
6248 unlock_user_struct(target_ldt_info, ptr, 1);
/*
 * arch_prctl(2): the first definition is the stub variant (returns
 * -ENOSYS); the second (selected by an elided preprocessor branch)
 * implements SET/GET of the FS and GS segment bases.
 * NOTE(review): dump elides lines; the #else, switch header, idx
 * assignments and break statements are on missing lines.
 */
6252 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6254 return -TARGET_ENOSYS;
6257 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6264 case TARGET_ARCH_SET_GS:
6265 case TARGET_ARCH_SET_FS:
6266 if (code == TARGET_ARCH_SET_GS)
/* Load a null selector, then set the segment base explicitly. */
6270 cpu_x86_load_seg(env, idx, 0);
6271 env->segs[idx].base = addr;
6273 case TARGET_ARCH_GET_GS:
6274 case TARGET_ARCH_GET_FS:
6275 if (code == TARGET_ARCH_GET_GS)
6279 val = env->segs[idx].base;
6280 if (put_user(val, addr, abi_ulong))
6281 ret = -TARGET_EFAULT;
6284 ret = -TARGET_EINVAL;
6289 #endif /* defined(TARGET_ABI32 */
6291 #endif /* defined(TARGET_I386) */
/* Stack size for threads created via clone(). */
6293 #define NEW_STACK_SIZE 0x40000
/* Serializes thread creation against the child's startup (clone_func). */
6296 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * Fields of the parent->child handshake struct (new_thread_info); the
 * struct's opening lines are elided in this dump.
 */
6299 pthread_mutex_t mutex;
6300 pthread_cond_t cond;
6303 abi_ulong child_tidptr;
6304 abi_ulong parent_tidptr;
/*
 * pthread trampoline for CLONE_VM threads: register with RCU/TCG,
 * publish the new tid to the requested guest addresses, restore the
 * signal mask, signal the parent via the info condvar, then wait on
 * clone_lock until the parent finishes TLS setup before running.
 * NOTE(review): dump elides lines; env/cpu setup and the cpu_loop call
 * between the visible lines are missing here.
 */
6308 static void *clone_func(void *arg)
6310 new_thread_info *info = arg;
6315 rcu_register_thread();
6316 tcg_register_thread();
6320 ts = (TaskState *)cpu->opaque;
6321 info->tid = sys_gettid();
6323 if (info->child_tidptr)
6324 put_user_u32(info->tid, info->child_tidptr);
6325 if (info->parent_tidptr)
6326 put_user_u32(info->tid, info->parent_tidptr);
6327 qemu_guest_random_seed_thread_part2(cpu->random_seed);
6328 /* Enable signals. */
6329 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6330 /* Signal to the parent that we're ready. */
6331 pthread_mutex_lock(&info->mutex);
6332 pthread_cond_broadcast(&info->cond);
6333 pthread_mutex_unlock(&info->mutex);
6334 /* Wait until the parent has finished initializing the tls state. */
6335 pthread_mutex_lock(&clone_lock);
6336 pthread_mutex_unlock(&clone_lock);
/*
 * Emulate the guest's clone()/fork()/vfork().
 * - CLONE_VM becomes a new host pthread sharing the address space.
 * - vfork() is demoted to fork() (CLONE_VFORK|CLONE_VM stripped).
 * - Everything else falls through to a host fork().
 * NOTE(review): this excerpt omits several upstream lines (the fork() call
 * itself, fork_start/fork_end bookkeeping, ret handling) -- confirm the
 * control flow against the complete file.
 */
6342 /* do_fork() Must return host values and target errnos (unlike most
6343 do_*() functions). */
6344 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6345 abi_ulong parent_tidptr, target_ulong newtls,
6346 abi_ulong child_tidptr)
6348 CPUState *cpu = env_cpu(env);
6352 CPUArchState *new_env;
/* Drop flags we deliberately ignore before validating the remainder. */
6355 flags &= ~CLONE_IGNORED_FLAGS;
6357 /* Emulate vfork() with fork() */
6358 if (flags & CLONE_VFORK)
6359 flags &= ~(CLONE_VFORK | CLONE_VM);
6361 if (flags & CLONE_VM) {
6362 TaskState *parent_ts = (TaskState *)cpu->opaque;
6363 new_thread_info info;
6364 pthread_attr_t attr;
/* Threads must carry the canonical flag combination; anything else is EINVAL. */
6366 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6367 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6368 return -TARGET_EINVAL;
6371 ts = g_new0(TaskState, 1);
6372 init_task_state(ts);
6374 /* Grab a mutex so that thread setup appears atomic. */
6375 pthread_mutex_lock(&clone_lock);
6378 * If this is our first additional thread, we need to ensure we
6379 * generate code for parallel execution and flush old translations.
6380 * Do this now so that the copy gets CF_PARALLEL too.
6382 if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6383 cpu->tcg_cflags |= CF_PARALLEL;
6387 /* we create a new CPU instance. */
6388 new_env = cpu_copy(env);
6389 /* Init regs that differ from the parent. */
6390 cpu_clone_regs_child(new_env, newsp, flags);
6391 cpu_clone_regs_parent(env, flags);
6392 new_cpu = env_cpu(new_env);
6393 new_cpu->opaque = ts;
/* The child thread shares the parent's image/signal state. */
6394 ts->bprm = parent_ts->bprm;
6395 ts->info = parent_ts->info;
6396 ts->signal_mask = parent_ts->signal_mask;
6398 if (flags & CLONE_CHILD_CLEARTID) {
6399 ts->child_tidptr = child_tidptr;
6402 if (flags & CLONE_SETTLS) {
6403 cpu_set_tls (new_env, newtls);
6406 memset(&info, 0, sizeof(info));
6407 pthread_mutex_init(&info.mutex, NULL);
6408 pthread_mutex_lock(&info.mutex);
6409 pthread_cond_init(&info.cond, NULL);
6411 if (flags & CLONE_CHILD_SETTID) {
6412 info.child_tidptr = child_tidptr;
6414 if (flags & CLONE_PARENT_SETTID) {
6415 info.parent_tidptr = parent_tidptr;
6418 ret = pthread_attr_init(&attr);
6419 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6420 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6421 /* It is not safe to deliver signals until the child has finished
6422 initializing, so temporarily block all signals. */
6423 sigfillset(&sigmask);
6424 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6425 cpu->random_seed = qemu_guest_random_seed_thread_part1();
6427 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6428 /* TODO: Free new CPU state if thread creation failed. */
/* Restore this thread's signal mask now that the child owns its own copy. */
6430 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6431 pthread_attr_destroy(&attr);
6433 /* Wait for the child to initialize. */
6434 pthread_cond_wait(&info.cond, &info.mutex);
6439 pthread_mutex_unlock(&info.mutex);
6440 pthread_cond_destroy(&info.cond);
6441 pthread_mutex_destroy(&info.mutex);
6442 pthread_mutex_unlock(&clone_lock);
6444 /* if no CLONE_VM, we consider it is a fork */
6445 if (flags & CLONE_INVALID_FORK_FLAGS) {
6446 return -TARGET_EINVAL;
6449 /* We can't support custom termination signals */
6450 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6451 return -TARGET_EINVAL;
6454 if (block_signals()) {
6455 return -TARGET_ERESTARTSYS;
/* NOTE(review): the host fork() call is among the lines missing here. */
6461 /* Child Process. */
6462 cpu_clone_regs_child(env, newsp, flags);
6464 /* There is a race condition here. The parent process could
6465 theoretically read the TID in the child process before the child
6466 tid is set. This would require using either ptrace
6467 (not implemented) or having *_tidptr to point at a shared memory
6468 mapping. We can't repeat the spinlock hack used above because
6469 the child process gets its own copy of the lock. */
6470 if (flags & CLONE_CHILD_SETTID)
6471 put_user_u32(sys_gettid(), child_tidptr)
6472 if (flags & CLONE_PARENT_SETTID)
6473 put_user_u32(sys_gettid(), parent_tidptr);
6474 ts = (TaskState *)cpu->opaque;
6475 if (flags & CLONE_SETTLS)
6476 cpu_set_tls (env, newtls);
6477 if (flags & CLONE_CHILD_CLEARTID)
6478 ts->child_tidptr = child_tidptr;
6480 cpu_clone_regs_parent(env, flags);
/*
 * Translate a target F_* fcntl command to the host's numeric value.
 * Returns -TARGET_EINVAL for commands we do not support.
 * NOTE(review): the per-case 'ret = ...;' / 'break;' lines are not visible
 * in this excerpt; only the case labels survive.  Confirm against upstream.
 */
6487 /* warning : doesn't handle linux specific flags... */
6488 static int target_to_host_fcntl_cmd(int cmd)
6493 case TARGET_F_DUPFD:
6494 case TARGET_F_GETFD:
6495 case TARGET_F_SETFD:
6496 case TARGET_F_GETFL:
6497 case TARGET_F_SETFL:
6498 case TARGET_F_OFD_GETLK:
6499 case TARGET_F_OFD_SETLK:
6500 case TARGET_F_OFD_SETLKW:
6503 case TARGET_F_GETLK:
6506 case TARGET_F_SETLK:
6509 case TARGET_F_SETLKW:
6512 case TARGET_F_GETOWN:
6515 case TARGET_F_SETOWN:
6518 case TARGET_F_GETSIG:
6521 case TARGET_F_SETSIG:
6524 #if TARGET_ABI_BITS == 32
6525 case TARGET_F_GETLK64:
6528 case TARGET_F_SETLK64:
6531 case TARGET_F_SETLKW64:
6535 case TARGET_F_SETLEASE:
6538 case TARGET_F_GETLEASE:
6541 #ifdef F_DUPFD_CLOEXEC
6542 case TARGET_F_DUPFD_CLOEXEC:
6543 ret = F_DUPFD_CLOEXEC;
6546 case TARGET_F_NOTIFY:
6550 case TARGET_F_GETOWN_EX:
6555 case TARGET_F_SETOWN_EX:
6560 case TARGET_F_SETPIPE_SZ:
6563 case TARGET_F_GETPIPE_SZ:
6568 case TARGET_F_ADD_SEALS:
6571 case TARGET_F_GET_SEALS:
6576 ret = -TARGET_EINVAL;
6580 #if defined(__powerpc64__)
6581 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6582 * is not supported by kernel. The glibc fcntl call actually adjusts
6583 * them to 5, 6 and 7 before making the syscall(). Since we make the
6584 * syscall directly, adjust to what is supported by the kernel.
/* Shift the glibc-only 64-bit lock commands down to the kernel's 5/6/7. */
6586 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6587 ret -= F_GETLK64 - 5;
/* X-macro listing the lock types shared by the target<->host flock
 * converters below; each expands via a locally-defined TRANSTBL_CONVERT. */
6594 #define FLOCK_TRANSTBL \
6596 TRANSTBL_CONVERT(F_RDLCK); \
6597 TRANSTBL_CONVERT(F_WRLCK); \
6598 TRANSTBL_CONVERT(F_UNLCK); \
/* Map a target l_type (F_RDLCK/F_WRLCK/F_UNLCK) to the host value;
 * unknown types yield -TARGET_EINVAL. */
6601 static int target_to_host_flock(int type)
6603 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6605 #undef TRANSTBL_CONVERT
6606 return -TARGET_EINVAL;
/* Map a host l_type back to the target value; unknown host values are
 * passed through unchanged (see comment below). */
6609 static int host_to_target_flock(int type)
6611 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6613 #undef TRANSTBL_CONVERT
6614 /* if we don't know how to convert the value coming
6615 * from the host we copy to the target field as-is
/*
 * Read a target 'struct flock' from guest memory into a host flock64.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address; the
 * translated l_type error (if any) propagates from target_to_host_flock().
 */
6620 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6621 abi_ulong target_flock_addr)
6623 struct target_flock *target_fl;
6626 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6627 return -TARGET_EFAULT;
6630 __get_user(l_type, &target_fl->l_type);
6631 l_type = target_to_host_flock(l_type);
6635 fl->l_type = l_type;
6636 __get_user(fl->l_whence, &target_fl->l_whence);
6637 __get_user(fl->l_start, &target_fl->l_start);
6638 __get_user(fl->l_len, &target_fl->l_len);
6639 __get_user(fl->l_pid, &target_fl->l_pid);
6640 unlock_user_struct(target_fl, target_flock_addr, 0);
/*
 * Write a host flock64 back to a target 'struct flock' in guest memory.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
6644 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6645 const struct flock64 *fl)
6647 struct target_flock *target_fl;
6650 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6651 return -TARGET_EFAULT;
6654 l_type = host_to_target_flock(fl->l_type);
6655 __put_user(l_type, &target_fl->l_type);
6656 __put_user(fl->l_whence, &target_fl->l_whence);
6657 __put_user(fl->l_start, &target_fl->l_start);
6658 __put_user(fl->l_len, &target_fl->l_len);
6659 __put_user(fl->l_pid, &target_fl->l_pid);
6660 unlock_user_struct(target_fl, target_flock_addr, 1);
/* Function-pointer types used to select the right flock64 copier at
 * runtime (e.g. EABI vs OABI layouts on 32-bit Arm). */
6664 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6665 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6667 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/*
 * OABI variant: read a target_oabi_flock64 (old-ABI Arm layout) from guest
 * memory into a host flock64.  Returns 0 or -TARGET_EFAULT.
 */
6668 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6669 abi_ulong target_flock_addr)
6671 struct target_oabi_flock64 *target_fl;
6674 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6675 return -TARGET_EFAULT;
6678 __get_user(l_type, &target_fl->l_type);
6679 l_type = target_to_host_flock(l_type);
6683 fl->l_type = l_type;
6684 __get_user(fl->l_whence, &target_fl->l_whence);
6685 __get_user(fl->l_start, &target_fl->l_start);
6686 __get_user(fl->l_len, &target_fl->l_len);
6687 __get_user(fl->l_pid, &target_fl->l_pid);
6688 unlock_user_struct(target_fl, target_flock_addr, 0);
/*
 * OABI variant: write a host flock64 back to a target_oabi_flock64 in
 * guest memory.  Returns 0 or -TARGET_EFAULT.
 */
6692 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6693 const struct flock64 *fl)
6695 struct target_oabi_flock64 *target_fl;
6698 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6699 return -TARGET_EFAULT;
6702 l_type = host_to_target_flock(fl->l_type);
6703 __put_user(l_type, &target_fl->l_type);
6704 __put_user(fl->l_whence, &target_fl->l_whence);
6705 __put_user(fl->l_start, &target_fl->l_start);
6706 __put_user(fl->l_len, &target_fl->l_len);
6707 __put_user(fl->l_pid, &target_fl->l_pid);
6708 unlock_user_struct(target_fl, target_flock_addr, 1);
/*
 * Read a target 'struct flock64' from guest memory into a host flock64.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
6713 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6714 abi_ulong target_flock_addr)
6716 struct target_flock64 *target_fl;
6719 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6720 return -TARGET_EFAULT;
6723 __get_user(l_type, &target_fl->l_type);
6724 l_type = target_to_host_flock(l_type);
6728 fl->l_type = l_type;
6729 __get_user(fl->l_whence, &target_fl->l_whence);
6730 __get_user(fl->l_start, &target_fl->l_start);
6731 __get_user(fl->l_len, &target_fl->l_len);
6732 __get_user(fl->l_pid, &target_fl->l_pid);
6733 unlock_user_struct(target_fl, target_flock_addr, 0);
/*
 * Write a host flock64 back to a target 'struct flock64' in guest memory.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
6737 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6738 const struct flock64 *fl)
6740 struct target_flock64 *target_fl;
6743 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6744 return -TARGET_EFAULT;
6747 l_type = host_to_target_flock(fl->l_type);
6748 __put_user(l_type, &target_fl->l_type);
6749 __put_user(fl->l_whence, &target_fl->l_whence);
6750 __put_user(fl->l_start, &target_fl->l_start);
6751 __put_user(fl->l_len, &target_fl->l_len);
6752 __put_user(fl->l_pid, &target_fl->l_pid);
6753 unlock_user_struct(target_fl, target_flock_addr, 1);
/*
 * Implement the guest fcntl(2): translate the command, marshal any
 * struct arguments (flock, f_owner_ex, flag bitmasks, signals) between
 * target and host layouts, and invoke the host fcntl.
 * NOTE(review): 'break;' lines and some error checks between cases are
 * missing from this excerpt; confirm case boundaries against upstream.
 */
6757 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6759 struct flock64 fl64;
6761 struct f_owner_ex fox;
6762 struct target_f_owner_ex *target_fox;
6765 int host_cmd = target_to_host_fcntl_cmd(cmd);
6767 if (host_cmd == -TARGET_EINVAL)
6771 case TARGET_F_GETLK:
6772 ret = copy_from_user_flock(&fl64, arg);
6776 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
/* GETLK writes the result back to the guest on success. */
6778 ret = copy_to_user_flock(arg, &fl64);
6782 case TARGET_F_SETLK:
6783 case TARGET_F_SETLKW:
6784 ret = copy_from_user_flock(&fl64, arg);
6788 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6791 case TARGET_F_GETLK64:
6792 case TARGET_F_OFD_GETLK:
6793 ret = copy_from_user_flock64(&fl64, arg);
6797 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6799 ret = copy_to_user_flock64(arg, &fl64);
6802 case TARGET_F_SETLK64:
6803 case TARGET_F_SETLKW64:
6804 case TARGET_F_OFD_SETLK:
6805 case TARGET_F_OFD_SETLKW:
6806 ret = copy_from_user_flock64(&fl64, arg);
6810 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6813 case TARGET_F_GETFL:
6814 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
/* Convert the host O_* bits into the target's encoding. */
6816 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6820 case TARGET_F_SETFL:
6821 ret = get_errno(safe_fcntl(fd, host_cmd,
6822 target_to_host_bitmask(arg,
6827 case TARGET_F_GETOWN_EX:
6828 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6830 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6831 return -TARGET_EFAULT;
6832 target_fox->type = tswap32(fox.type);
6833 target_fox->pid = tswap32(fox.pid);
6834 unlock_user_struct(target_fox, arg, 1);
6840 case TARGET_F_SETOWN_EX:
6841 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6842 return -TARGET_EFAULT;
6843 fox.type = tswap32(target_fox->type);
6844 fox.pid = tswap32(target_fox->pid);
6845 unlock_user_struct(target_fox, arg, 0);
6846 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6850 case TARGET_F_SETSIG:
6851 ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6854 case TARGET_F_GETSIG:
6855 ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
/* These take plain integer arguments that need no translation. */
6858 case TARGET_F_SETOWN:
6859 case TARGET_F_GETOWN:
6860 case TARGET_F_SETLEASE:
6861 case TARGET_F_GETLEASE:
6862 case TARGET_F_SETPIPE_SZ:
6863 case TARGET_F_GETPIPE_SZ:
6864 case TARGET_F_ADD_SEALS:
6865 case TARGET_F_GET_SEALS:
6866 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6870 ret = get_errno(safe_fcntl(fd, cmd, arg));
/*
 * Conversions between the legacy 16-bit uid/gid ABI (USE_UID16) and the
 * 32-bit host values, plus byte-swap and put_user helpers sized to match.
 * In the !USE_UID16 branch these are identity/32-bit operations.
 * NOTE(review): function bodies are partially elided in this excerpt.
 */
6878 static inline int high2lowuid(int uid)
6886 static inline int high2lowgid(int gid)
6894 static inline int low2highuid(int uid)
/* -1 (the "no change" sentinel) must survive the 16->32 bit widening. */
6896 if ((int16_t)uid == -1)
6902 static inline int low2highgid(int gid)
6904 if ((int16_t)gid == -1)
6909 static inline int tswapid(int id)
6914 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6916 #else /* !USE_UID16 */
6917 static inline int high2lowuid(int uid)
6921 static inline int high2lowgid(int gid)
6925 static inline int low2highuid(int uid)
6929 static inline int low2highgid(int gid)
6933 static inline int tswapid(int id)
6938 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6940 #endif /* USE_UID16 */
6942 /* We must do direct syscalls for setting UID/GID, because we want to
6943 * implement the Linux system call semantics of "change only for this thread",
6944 * not the libc/POSIX semantics of "change for all threads in process".
6945 * (See http://ewontfix.com/17/ for more details.)
6946 * We use the 32-bit version of the syscalls if present; if it is not
6947 * then either the host architecture supports 32-bit UIDs natively with
6948 * the standard syscall, or the 16-bit UID is the best we can do.
/* Prefer the *32 syscall numbers where the host kernel provides them. */
6950 #ifdef __NR_setuid32
6951 #define __NR_sys_setuid __NR_setuid32
6953 #define __NR_sys_setuid __NR_setuid
6955 #ifdef __NR_setgid32
6956 #define __NR_sys_setgid __NR_setgid32
6958 #define __NR_sys_setgid __NR_setgid
6960 #ifdef __NR_setresuid32
6961 #define __NR_sys_setresuid __NR_setresuid32
6963 #define __NR_sys_setresuid __NR_setresuid
6965 #ifdef __NR_setresgid32
6966 #define __NR_sys_setresgid __NR_setresgid32
6968 #define __NR_sys_setresgid __NR_setresgid
/* Raw syscall stubs bypassing glibc's process-wide set*id() emulation. */
6971 _syscall1(int, sys_setuid, uid_t, uid)
6972 _syscall1(int, sys_setgid, gid_t, gid)
6973 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6974 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/*
 * One-time setup: register all struct marshalling descriptors with the
 * thunk layer, then walk the ioctl table patching each entry whose size
 * field is the TARGET_IOC_SIZEMASK placeholder with the real thunk size.
 * On same-arch builds, also sanity-check target vs host ioctl numbers.
 */
6976 void syscall_init(void)
6979 const argtype *arg_type;
6982 thunk_init(STRUCT_MAX);
6984 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6985 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6986 #include "syscall_types.h"
6988 #undef STRUCT_SPECIAL
6990 /* we patch the ioctl size if necessary. We rely on the fact that
6991 no ioctl has all the bits at '1' in the size field */
6993 while (ie->target_cmd != 0) {
6994 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6995 TARGET_IOC_SIZEMASK) {
6996 arg_type = ie->arg_type;
6997 if (arg_type[0] != TYPE_PTR) {
6998 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7003 size = thunk_type_size(arg_type, 0);
7004 ie->target_cmd = (ie->target_cmd &
7005 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7006 (size << TARGET_IOC_SIZESHIFT);
7009 /* automatic consistency check if same arch */
7010 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7011 (defined(__x86_64__) && defined(TARGET_X86_64))
7012 if (unlikely(ie->target_cmd != ie->host_cmd)) {
7013 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7014 ie->name, ie->target_cmd, ie->host_cmd);
7021 #ifdef TARGET_NR_truncate64
/* truncate64: reassemble the 64-bit length from two guest registers,
 * skipping the padding register on ABIs that align register pairs. */
7022 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7027 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7031 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7035 #ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair reassembly as target_truncate64,
 * operating on a file descriptor instead of a path. */
7036 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7041 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7045 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7049 #if defined(TARGET_NR_timer_settime) || \
7050 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/* Copy a guest target_itimerspec (32/ABI-sized timespecs) into a host
 * struct itimerspec.  Returns 0 or -TARGET_EFAULT. */
7051 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7052 abi_ulong target_addr)
7054 if (target_to_host_timespec(&host_its->it_interval, target_addr +
7055 offsetof(struct target_itimerspec,
7057 target_to_host_timespec(&host_its->it_value, target_addr +
7058 offsetof(struct target_itimerspec,
7060 return -TARGET_EFAULT;
7067 #if defined(TARGET_NR_timer_settime64) || \
7068 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
/* 64-bit time_t variant: copy a guest __kernel_itimerspec into a host
 * struct itimerspec.  Returns 0 or -TARGET_EFAULT. */
7069 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7070 abi_ulong target_addr)
7072 if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7073 offsetof(struct target__kernel_itimerspec,
7075 target_to_host_timespec64(&host_its->it_value, target_addr +
7076 offsetof(struct target__kernel_itimerspec,
7078 return -TARGET_EFAULT;
7085 #if ((defined(TARGET_NR_timerfd_gettime) || \
7086 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7087 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
/* Copy a host struct itimerspec out to a guest target_itimerspec.
 * Returns 0 or -TARGET_EFAULT. */
7088 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7089 struct itimerspec *host_its)
7091 if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7093 &host_its->it_interval) ||
7094 host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7096 &host_its->it_value)) {
7097 return -TARGET_EFAULT;
7103 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7104 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7105 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
/* 64-bit time_t variant: copy a host struct itimerspec out to a guest
 * __kernel_itimerspec.  Returns 0 or -TARGET_EFAULT. */
7106 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7107 struct itimerspec *host_its)
7109 if (host_to_target_timespec64(target_addr +
7110 offsetof(struct target__kernel_itimerspec,
7112 &host_its->it_interval) ||
7113 host_to_target_timespec64(target_addr +
7114 offsetof(struct target__kernel_itimerspec,
7116 &host_its->it_value)) {
7117 return -TARGET_EFAULT;
7123 #if defined(TARGET_NR_adjtimex) || \
7124 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/*
 * Copy a guest target_timex into a host struct timex, field by field
 * (adjtimex/clock_adjtime input).  Returns 0 or -TARGET_EFAULT.
 */
7125 static inline abi_long target_to_host_timex(struct timex *host_tx,
7126 abi_long target_addr)
7128 struct target_timex *target_tx;
7130 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7131 return -TARGET_EFAULT;
7134 __get_user(host_tx->modes, &target_tx->modes);
7135 __get_user(host_tx->offset, &target_tx->offset);
7136 __get_user(host_tx->freq, &target_tx->freq);
7137 __get_user(host_tx->maxerror, &target_tx->maxerror);
7138 __get_user(host_tx->esterror, &target_tx->esterror);
7139 __get_user(host_tx->status, &target_tx->status);
7140 __get_user(host_tx->constant, &target_tx->constant);
7141 __get_user(host_tx->precision, &target_tx->precision);
7142 __get_user(host_tx->tolerance, &target_tx->tolerance);
7143 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7144 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7145 __get_user(host_tx->tick, &target_tx->tick);
7146 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7147 __get_user(host_tx->jitter, &target_tx->jitter);
7148 __get_user(host_tx->shift, &target_tx->shift);
7149 __get_user(host_tx->stabil, &target_tx->stabil);
7150 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7151 __get_user(host_tx->calcnt, &target_tx->calcnt);
7152 __get_user(host_tx->errcnt, &target_tx->errcnt);
7153 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7154 __get_user(host_tx->tai, &target_tx->tai);
7156 unlock_user_struct(target_tx, target_addr, 0);
/*
 * Copy a host struct timex back out to a guest target_timex
 * (adjtimex/clock_adjtime output).  Returns 0 or -TARGET_EFAULT.
 */
7160 static inline abi_long host_to_target_timex(abi_long target_addr,
7161 struct timex *host_tx)
7163 struct target_timex *target_tx;
7165 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7166 return -TARGET_EFAULT;
7169 __put_user(host_tx->modes, &target_tx->modes);
7170 __put_user(host_tx->offset, &target_tx->offset);
7171 __put_user(host_tx->freq, &target_tx->freq);
7172 __put_user(host_tx->maxerror, &target_tx->maxerror);
7173 __put_user(host_tx->esterror, &target_tx->esterror);
7174 __put_user(host_tx->status, &target_tx->status);
7175 __put_user(host_tx->constant, &target_tx->constant);
7176 __put_user(host_tx->precision, &target_tx->precision);
7177 __put_user(host_tx->tolerance, &target_tx->tolerance);
7178 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7179 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7180 __put_user(host_tx->tick, &target_tx->tick);
7181 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7182 __put_user(host_tx->jitter, &target_tx->jitter);
7183 __put_user(host_tx->shift, &target_tx->shift);
7184 __put_user(host_tx->stabil, &target_tx->stabil);
7185 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7186 __put_user(host_tx->calcnt, &target_tx->calcnt);
7187 __put_user(host_tx->errcnt, &target_tx->errcnt);
7188 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7189 __put_user(host_tx->tai, &target_tx->tai);
7191 unlock_user_struct(target_tx, target_addr, 1);
7197 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * 64-bit time_t variant: copy a guest __kernel_timex into a host struct
 * timex.  The embedded timeval is converted separately first because its
 * layout differs.  Returns 0 or -TARGET_EFAULT.
 */
7198 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7199 abi_long target_addr)
7201 struct target__kernel_timex *target_tx;
7203 if (copy_from_user_timeval64(&host_tx->time, target_addr +
7204 offsetof(struct target__kernel_timex,
7206 return -TARGET_EFAULT;
7209 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7210 return -TARGET_EFAULT;
7213 __get_user(host_tx->modes, &target_tx->modes);
7214 __get_user(host_tx->offset, &target_tx->offset);
7215 __get_user(host_tx->freq, &target_tx->freq);
7216 __get_user(host_tx->maxerror, &target_tx->maxerror);
7217 __get_user(host_tx->esterror, &target_tx->esterror);
7218 __get_user(host_tx->status, &target_tx->status);
7219 __get_user(host_tx->constant, &target_tx->constant);
7220 __get_user(host_tx->precision, &target_tx->precision);
7221 __get_user(host_tx->tolerance, &target_tx->tolerance);
7222 __get_user(host_tx->tick, &target_tx->tick);
7223 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7224 __get_user(host_tx->jitter, &target_tx->jitter);
7225 __get_user(host_tx->shift, &target_tx->shift);
7226 __get_user(host_tx->stabil, &target_tx->stabil);
7227 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7228 __get_user(host_tx->calcnt, &target_tx->calcnt);
7229 __get_user(host_tx->errcnt, &target_tx->errcnt);
7230 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7231 __get_user(host_tx->tai, &target_tx->tai);
7233 unlock_user_struct(target_tx, target_addr, 0);
/*
 * 64-bit time_t variant: copy a host struct timex back out to a guest
 * __kernel_timex, again converting the embedded timeval separately.
 * Returns 0 or -TARGET_EFAULT.
 */
7237 static inline abi_long host_to_target_timex64(abi_long target_addr,
7238 struct timex *host_tx)
7240 struct target__kernel_timex *target_tx;
7242 if (copy_to_user_timeval64(target_addr +
7243 offsetof(struct target__kernel_timex, time),
7245 return -TARGET_EFAULT;
7248 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7249 return -TARGET_EFAULT;
7252 __put_user(host_tx->modes, &target_tx->modes);
7253 __put_user(host_tx->offset, &target_tx->offset);
7254 __put_user(host_tx->freq, &target_tx->freq);
7255 __put_user(host_tx->maxerror, &target_tx->maxerror);
7256 __put_user(host_tx->esterror, &target_tx->esterror);
7257 __put_user(host_tx->status, &target_tx->status);
7258 __put_user(host_tx->constant, &target_tx->constant);
7259 __put_user(host_tx->precision, &target_tx->precision);
7260 __put_user(host_tx->tolerance, &target_tx->tolerance);
7261 __put_user(host_tx->tick, &target_tx->tick);
7262 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7263 __put_user(host_tx->jitter, &target_tx->jitter);
7264 __put_user(host_tx->shift, &target_tx->shift);
7265 __put_user(host_tx->stabil, &target_tx->stabil);
7266 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7267 __put_user(host_tx->calcnt, &target_tx->calcnt);
7268 __put_user(host_tx->errcnt, &target_tx->errcnt);
7269 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7270 __put_user(host_tx->tai, &target_tx->tai);
7272 unlock_user_struct(target_tx, target_addr, 1);
7277 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
/* Older libcs expose the tid only through the private union member. */
7278 #define sigev_notify_thread_id _sigev_un._tid
/*
 * Copy a guest target_sigevent into a host struct sigevent, converting
 * the sival union, the signal number, notify mode and thread id.
 * Returns 0 or -TARGET_EFAULT.
 */
7281 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7282 abi_ulong target_addr)
7284 struct target_sigevent *target_sevp;
7286 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7287 return -TARGET_EFAULT;
7290 /* This union is awkward on 64 bit systems because it has a 32 bit
7291 * integer and a pointer in it; we follow the conversion approach
7292 * used for handling sigval types in signal.c so the guest should get
7293 * the correct value back even if we did a 64 bit byteswap and it's
7294 * using the 32 bit integer.
7296 host_sevp->sigev_value.sival_ptr =
7297 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7298 host_sevp->sigev_signo =
7299 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7300 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7301 host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7303 unlock_user_struct(target_sevp, target_addr, 1);
7307 #if defined(TARGET_NR_mlockall)
/* Translate the guest's MCL_* mlockall flag bits to the host's values,
 * bit by bit; unknown bits are dropped. */
7308 static inline int target_to_host_mlockall_arg(int arg)
7312 if (arg & TARGET_MCL_CURRENT) {
7313 result |= MCL_CURRENT;
7315 if (arg & TARGET_MCL_FUTURE) {
7316 result |= MCL_FUTURE;
7319 if (arg & TARGET_MCL_ONFAULT) {
7320 result |= MCL_ONFAULT;
7328 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7329 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7330 defined(TARGET_NR_newfstatat))
/*
 * Copy a host struct stat out to the guest's stat64 layout, choosing the
 * Arm EABI layout when appropriate, otherwise target_stat64/target_stat.
 * Returns 0 or -TARGET_EFAULT.
 */
7331 static inline abi_long host_to_target_stat64(void *cpu_env,
7332 abi_ulong target_addr,
7333 struct stat *host_st)
7335 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7336 if (((CPUARMState *)cpu_env)->eabi) {
7337 struct target_eabi_stat64 *target_st;
7339 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7340 return -TARGET_EFAULT;
7341 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7342 __put_user(host_st->st_dev, &target_st->st_dev);
7343 __put_user(host_st->st_ino, &target_st->st_ino);
7344 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7345 __put_user(host_st->st_ino, &target_st->__st_ino);
7347 __put_user(host_st->st_mode, &target_st->st_mode);
7348 __put_user(host_st->st_nlink, &target_st->st_nlink);
7349 __put_user(host_st->st_uid, &target_st->st_uid);
7350 __put_user(host_st->st_gid, &target_st->st_gid);
7351 __put_user(host_st->st_rdev, &target_st->st_rdev);
7352 __put_user(host_st->st_size, &target_st->st_size);
7353 __put_user(host_st->st_blksize, &target_st->st_blksize);
7354 __put_user(host_st->st_blocks, &target_st->st_blocks);
7355 __put_user(host_st->st_atime, &target_st->target_st_atime);
7356 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7357 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7358 #ifdef HAVE_STRUCT_STAT_ST_ATIM
/* Nanosecond timestamps only when the host libc exposes st_*tim. */
7359 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7360 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7361 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7363 unlock_user_struct(target_st, target_addr, 1);
7367 #if defined(TARGET_HAS_STRUCT_STAT64)
7368 struct target_stat64 *target_st;
7370 struct target_stat *target_st;
7373 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7374 return -TARGET_EFAULT;
7375 memset(target_st, 0, sizeof(*target_st));
7376 __put_user(host_st->st_dev, &target_st->st_dev);
7377 __put_user(host_st->st_ino, &target_st->st_ino);
7378 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7379 __put_user(host_st->st_ino, &target_st->__st_ino);
7381 __put_user(host_st->st_mode, &target_st->st_mode);
7382 __put_user(host_st->st_nlink, &target_st->st_nlink);
7383 __put_user(host_st->st_uid, &target_st->st_uid);
7384 __put_user(host_st->st_gid, &target_st->st_gid);
7385 __put_user(host_st->st_rdev, &target_st->st_rdev);
7386 /* XXX: better use of kernel struct */
7387 __put_user(host_st->st_size, &target_st->st_size);
7388 __put_user(host_st->st_blksize, &target_st->st_blksize);
7389 __put_user(host_st->st_blocks, &target_st->st_blocks);
7390 __put_user(host_st->st_atime, &target_st->target_st_atime);
7391 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7392 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7393 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7394 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7395 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7396 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7398 unlock_user_struct(target_st, target_addr, 1);
7405 #if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Copy a statx result to guest memory, byte-swapping every field.
 * Note the source is already a struct target_statx filled by the host
 * syscall (the layouts match; only endianness conversion is needed).
 * Returns 0 or -TARGET_EFAULT.
 */
7406 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7407 abi_ulong target_addr)
7409 struct target_statx *target_stx;
7411 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7412 return -TARGET_EFAULT;
7414 memset(target_stx, 0, sizeof(*target_stx));
7416 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7417 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7418 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7419 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7420 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7421 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7422 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7423 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7424 __put_user(host_stx->stx_size, &target_stx->stx_size);
7425 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7426 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7427 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7428 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7429 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7430 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7431 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7432 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7433 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7434 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7435 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7436 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7437 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7438 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7440 unlock_user_struct(target_stx, target_addr, 1);
/*
 * Dispatch to the host futex syscall, picking __NR_futex_time64 on 32-bit
 * hosts with 64-bit time_t, otherwise the classic __NR_futex.
 * Unreachable if the host supports neither (g_assert_not_reached).
 */
7446 static int do_sys_futex(int *uaddr, int op, int val,
7447 const struct timespec *timeout, int *uaddr2,
7450 #if HOST_LONG_BITS == 64
7451 #if defined(__NR_futex)
7452 /* always a 64-bit time_t, it doesn't define _time64 version */
7453 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7456 #else /* HOST_LONG_BITS == 64 */
7457 #if defined(__NR_futex_time64)
7458 if (sizeof(timeout->tv_sec) == 8) {
7459 /* _time64 function on 32bit arch */
7460 return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7463 #if defined(__NR_futex)
7464 /* old function on 32bit arch */
7465 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7467 #endif /* HOST_LONG_BITS == 64 */
7468 g_assert_not_reached();
/*
 * Same dispatch as do_sys_futex, but via the signal-race-safe safe_futex*
 * wrappers and with errno converted to a target errno.  Falls back to
 * -TARGET_ENOSYS when no host futex syscall exists.
 */
7471 static int do_safe_futex(int *uaddr, int op, int val,
7472 const struct timespec *timeout, int *uaddr2,
7475 #if HOST_LONG_BITS == 64
7476 #if defined(__NR_futex)
7477 /* always a 64-bit time_t, it doesn't define _time64 version */
7478 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7480 #else /* HOST_LONG_BITS == 64 */
7481 #if defined(__NR_futex_time64)
7482 if (sizeof(timeout->tv_sec) == 8) {
7483 /* _time64 function on 32bit arch */
7484 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7488 #if defined(__NR_futex)
7489 /* old function on 32bit arch */
7490 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7492 #endif /* HOST_LONG_BITS == 64 */
7493 return -TARGET_ENOSYS;
7496 /* ??? Using host futex calls even when target atomic operations
7497 are not really atomic probably breaks things. However implementing
7498 futexes locally would make futexes shared between multiple processes
7499 tricky. However they're probably useless because guest atomic
7500 operations won't work either. */
7501 #if defined(TARGET_NR_futex)
/*
 * Emulate the guest futex(2) with 32-bit timespec: convert the timeout
 * and guest addresses per base operation, then call do_safe_futex.
 * NOTE(review): some case labels/lines (e.g. FUTEX_WAIT, FUTEX_WAKE) are
 * missing from this excerpt; confirm branch structure against upstream.
 */
7502 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7503 target_ulong timeout, target_ulong uaddr2, int val3)
7505 struct timespec ts, *pts;
7508 /* ??? We assume FUTEX_* constants are the same on both host
7510 #ifdef FUTEX_CMD_MASK
7511 base_op = op & FUTEX_CMD_MASK;
7517 case FUTEX_WAIT_BITSET:
7520 target_to_host_timespec(pts, timeout);
/* FUTEX_WAIT compares *uaddr against 'val' in guest byte order. */
7524 return do_safe_futex(g2h(cpu, uaddr),
7525 op, tswap32(val), pts, NULL, val3);
7527 return do_safe_futex(g2h(cpu, uaddr),
7528 op, val, NULL, NULL, 0);
7530 return do_safe_futex(g2h(cpu, uaddr),
7531 op, val, NULL, NULL, 0);
7533 case FUTEX_CMP_REQUEUE:
7535 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7536 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7537 But the prototype takes a `struct timespec *'; insert casts
7538 to satisfy the compiler. We do not need to tswap TIMEOUT
7539 since it's not compared to guest memory. */
7540 pts = (struct timespec *)(uintptr_t) timeout;
7541 return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7542 (base_op == FUTEX_CMP_REQUEUE
7543 ? tswap32(val3) : val3));
7545 return -TARGET_ENOSYS;
7550 #if defined(TARGET_NR_futex_time64)
/*
 * Emulate the guest futex_time64 syscall: identical structure to
 * do_futex() above, but the guest timeout is a 64-bit timespec and is
 * converted with target_to_host_timespec64(), whose failure is reported
 * as -TARGET_EFAULT.
 */
7551 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7552 int val, target_ulong timeout,
7553 target_ulong uaddr2, int val3)
7555 struct timespec ts, *pts;
7558 /* ??? We assume FUTEX_* constants are the same on both host
7560 #ifdef FUTEX_CMD_MASK
7561 base_op = op & FUTEX_CMD_MASK;
7567 case FUTEX_WAIT_BITSET:
7570 if (target_to_host_timespec64(pts, timeout)) {
7571 return -TARGET_EFAULT;
/* val is tswapped here because the kernel compares it to guest memory. */
7576 return do_safe_futex(g2h(cpu, uaddr), op,
7577 tswap32(val), pts, NULL, val3);
7579 return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7581 return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7583 case FUTEX_CMP_REQUEUE:
7585 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7586 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7587 But the prototype takes a `struct timespec *'; insert casts
7588 to satisfy the compiler. We do not need to tswap TIMEOUT
7589 since it's not compared to guest memory. */
7590 pts = (struct timespec *)(uintptr_t) timeout;
7591 return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7592 (base_op == FUTEX_CMP_REQUEUE
7593 ? tswap32(val3) : val3));
7595 return -TARGET_ENOSYS;
7600 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2): read handle_bytes from the guest
 * file_handle, run the host syscall into a host-side copy, then write
 * the handle back with handle_bytes/handle_type byteswapped for the
 * guest and store the mount id at mount_id.
 */
7601 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7602 abi_long handle, abi_long mount_id,
7605 struct file_handle *target_fh;
7606 struct file_handle *fh;
7610 unsigned int size, total_size;
/* First field of the guest struct is handle_bytes. */
7612 if (get_user_s32(size, handle)) {
7613 return -TARGET_EFAULT;
7616 name = lock_user_string(pathname);
7618 return -TARGET_EFAULT;
7621 total_size = sizeof(struct file_handle) + size;
7622 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7624 unlock_user(name, pathname, 0);
7625 return -TARGET_EFAULT;
/* Build the handle in host memory; kernel fills the opaque payload. */
7628 fh = g_malloc0(total_size);
7629 fh->handle_bytes = size;
7631 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7632 unlock_user(name, pathname, 0);
7634 /* man name_to_handle_at(2):
7635 * Other than the use of the handle_bytes field, the caller should treat
7636 * the file_handle structure as an opaque data type
7639 memcpy(target_fh, fh, total_size);
7640 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7641 target_fh->handle_type = tswap32(fh->handle_type);
7643 unlock_user(target_fh, handle, total_size);
7645 if (put_user_s32(mid, mount_id)) {
7646 return -TARGET_EFAULT;
7654 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): copy the guest file_handle into host
 * memory (byteswapping handle_type; handle_bytes comes from the
 * separately-read size), translate the open flags, and run the host
 * syscall.
 */
7655 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7658 struct file_handle *target_fh;
7659 struct file_handle *fh;
7660 unsigned int size, total_size;
/* First field of the guest struct is handle_bytes. */
7663 if (get_user_s32(size, handle)) {
7664 return -TARGET_EFAULT;
7667 total_size = sizeof(struct file_handle) + size;
7668 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7670 return -TARGET_EFAULT;
7673 fh = g_memdup(target_fh, total_size);
7674 fh->handle_bytes = size;
7675 fh->handle_type = tswap32(target_fh->handle_type);
7677 ret = get_errno(open_by_handle_at(mount_fd, fh,
7678 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7682 unlock_user(target_fh, handle, total_size);
7688 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/*
 * Common helper for signalfd/signalfd4: validate the guest flags,
 * convert the guest sigset and flag bits to host form, create the
 * signalfd, and register the fd translator so reads of the resulting
 * fd get their signalfd_siginfo byteswapped for the guest.
 */
7690 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7693 target_sigset_t *target_mask;
/* Only NONBLOCK and CLOEXEC are valid signalfd4 flags. */
7697 if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7698 return -TARGET_EINVAL;
7700 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7701 return -TARGET_EFAULT;
7704 target_to_host_sigset(&host_mask, target_mask);
7706 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7708 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7710 fd_trans_register(ret, &target_signalfd_trans);
7713 unlock_user_struct(target_mask, mask, 0);
7719 /* Map host to target signal numbers for the wait family of syscalls.
7720 Assume all other status bits are the same. */
7721 int host_to_target_waitstatus(int status)
/* Termination-by-signal: signal number lives in the low 7 bits. */
7723 if (WIFSIGNALED(status)) {
7724 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
/* Stopped: stop signal lives in bits 8-15. */
7726 if (WIFSTOPPED(status)) {
7727 return (host_to_target_signal(WSTOPSIG(status)) << 8)
/*
 * Synthesize /proc/self/cmdline for the guest: write each saved argv[]
 * string, including its trailing NUL separator, to fd.
 */
7733 static int open_self_cmdline(void *cpu_env, int fd)
7735 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7736 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7739 for (i = 0; i < bprm->argc; i++) {
/* +1 so the argument's terminating NUL is written too. */
7740 size_t len = strlen(bprm->argv[i]) + 1;
7742 if (write(fd, bprm->argv[i], len) != len) {
/*
 * Synthesize /proc/self/maps for the guest: walk the host's own
 * mappings (read_self_maps), keep only ranges that translate to valid
 * guest addresses, and print them in /proc/maps format using guest
 * (h2g) addresses and guest page-table permissions.
 */
7750 static int open_self_maps(void *cpu_env, int fd)
7752 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7753 TaskState *ts = cpu->opaque;
7754 GSList *map_info = read_self_maps();
7758 for (s = map_info; s; s = g_slist_next(s)) {
7759 MapInfo *e = (MapInfo *) s->data;
7761 if (h2g_valid(e->start)) {
7762 unsigned long min = e->start;
7763 unsigned long max = e->end;
7764 int flags = page_get_flags(h2g(min));
/* Clamp the end of the range to the last valid guest address. */
7767 max = h2g_valid(max - 1) ?
7768 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
7770 if (page_check_range(h2g(min), max - min, flags) == -1) {
/* Mapping ending at the stack limit is the guest stack -- presumably
 * labelled "[stack]" in the elided lines; confirm against full source. */
7774 if (h2g(min) == ts->info->stack_limit) {
7780 count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7781 " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7782 h2g(min), h2g(max - 1) + 1,
7783 (flags & PAGE_READ) ? 'r' : '-',
7784 (flags & PAGE_WRITE_ORG) ? 'w' : '-',
7785 (flags & PAGE_EXEC) ? 'x' : '-',
7786 e->is_priv ? 'p' : '-',
7787 (uint64_t) e->offset, e->dev, e->inode);
/* Pad the pathname column to offset 73, matching the kernel's layout. */
7789 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7796 free_self_maps(map_info);
7798 #ifdef TARGET_VSYSCALL_PAGE
7800 * We only support execution from the vsyscall page.
7801 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7803 count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7804 " --xp 00000000 00:00 0",
7805 TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7806 dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
/*
 * Synthesize /proc/self/stat for the guest: emit the 44 space-separated
 * fields, filling in only pid (0), comm (1), ppid (3) and startstack
 * (27) with real values; every other field is reported as 0.
 */
7812 static int open_self_stat(void *cpu_env, int fd)
7814 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7815 TaskState *ts = cpu->opaque;
7816 g_autoptr(GString) buf = g_string_new(NULL);
7819 for (i = 0; i < 44; i++) {
7822 g_string_printf(buf, FMT_pid " ", getpid());
7823 } else if (i == 1) {
/* comm: basename of argv[0], truncated to 15 chars like the kernel. */
7825 gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7826 bin = bin ? bin + 1 : ts->bprm->argv[0];
7827 g_string_printf(buf, "(%.15s) ", bin);
7828 } else if (i == 3) {
7830 g_string_printf(buf, FMT_pid " ", getppid());
7831 } else if (i == 27) {
7833 g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7835 /* for the rest, there is MasterCard */
7836 g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7839 if (write(fd, buf->str, buf->len) != buf->len) {
/*
 * Synthesize /proc/self/auxv for the guest: copy the auxiliary vector
 * saved on the guest stack at exec time straight into fd, then rewind
 * the fd so the caller reads from the beginning.
 */
7847 static int open_self_auxv(void *cpu_env, int fd)
7849 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7850 TaskState *ts = cpu->opaque;
7851 abi_ulong auxv = ts->info->saved_auxv;
7852 abi_ulong len = ts->info->auxv_len;
7856 * Auxiliary vector is stored in target process stack.
7857 * read in whole auxv vector and copy it to file
7859 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7863 r = write(fd, ptr, len);
7870 lseek(fd, 0, SEEK_SET);
7871 unlock_user(ptr, auxv, len);
/*
 * Return nonzero if filename names this process's own proc entry, i.e.
 * "/proc/self/<entry>" or "/proc/<our-pid>/<entry>".
 */
7877 static int is_proc_myself(const char *filename, const char *entry)
7879 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7880 filename += strlen("/proc/");
7881 if (!strncmp(filename, "self/", strlen("self/"))) {
7882 filename += strlen("self/");
7883 } else if (*filename >= '1' && *filename <= '9') {
/* Numeric path: match only if it is exactly our own pid. */
7885 snprintf(myself, sizeof(myself), "%d/", getpid());
7886 if (!strncmp(filename, myself, strlen(myself))) {
7887 filename += strlen(myself);
7894 if (!strcmp(filename, entry)) {
7901 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7902 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact-match comparator used by the fake_open table for absolute paths. */
7903 static int is_proc(const char *filename, const char *entry)
7905 return strcmp(filename, entry) == 0;
7909 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7910 static int open_net_route(void *cpu_env, int fd)
7917 fp = fopen("/proc/net/route", "r");
7924 read = getline(&line, &len, fp);
7925 dprintf(fd, "%s", line);
7929 while ((read = getline(&line, &len, fp)) != -1) {
7931 uint32_t dest, gw, mask;
7932 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7935 fields = sscanf(line,
7936 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7937 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7938 &mask, &mtu, &window, &irtt);
7942 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7943 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7944 metric, tswap32(mask), mtu, window, irtt);
7954 #if defined(TARGET_SPARC)
/* Synthesize a minimal /proc/cpuinfo for SPARC guests. */
7955 static int open_cpuinfo(void *cpu_env, int fd)
7957 dprintf(fd, "type\t\t: sun4u\n");
7962 #if defined(TARGET_HPPA)
/* Synthesize a minimal /proc/cpuinfo for HPPA guests (PA7300LC model). */
7963 static int open_cpuinfo(void *cpu_env, int fd)
7965 dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7966 dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7967 dprintf(fd, "capabilities\t: os32\n");
7968 dprintf(fd, "model\t\t: 9000/778/B160L\n");
7969 dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7974 #if defined(TARGET_M68K)
/* Synthesize /proc/hardware for m68k guests. */
7975 static int open_hardware(void *cpu_env, int fd)
7977 dprintf(fd, "Model:\t\tqemu-m68k\n");
/*
 * openat(2) with /proc interception: "/proc/self/exe" is redirected to
 * the exec fd or path, and entries in the fakes[] table are synthesized
 * into a temporary file whose contents come from the matching fill()
 * callback; everything else goes to the real safe_openat().
 */
7982 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7985 const char *filename;
7986 int (*fill)(void *cpu_env, int fd);
7987 int (*cmp)(const char *s1, const char *s2);
7989 const struct fake_open *fake_open;
7990 static const struct fake_open fakes[] = {
7991 { "maps", open_self_maps, is_proc_myself },
7992 { "stat", open_self_stat, is_proc_myself },
7993 { "auxv", open_self_auxv, is_proc_myself },
7994 { "cmdline", open_self_cmdline, is_proc_myself },
7995 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7996 { "/proc/net/route", open_net_route, is_proc },
7998 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7999 { "/proc/cpuinfo", open_cpuinfo, is_proc },
8001 #if defined(TARGET_M68K)
8002 { "/proc/hardware", open_hardware, is_proc },
8004 { NULL, NULL, NULL }
8007 if (is_proc_myself(pathname, "exe")) {
8008 int execfd = qemu_getauxval(AT_EXECFD);
8009 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8012 for (fake_open = fakes; fake_open->filename; fake_open++) {
8013 if (fake_open->cmp(pathname, fake_open->filename)) {
8018 if (fake_open->filename) {
8020 char filename[PATH_MAX];
8023 /* create temporary file to map stat to */
/* TMPDIR selects the temp dir -- fallback for unset TMPDIR is in the
 * elided lines (presumably "/tmp"); confirm against full source. */
8024 tmpdir = getenv("TMPDIR");
8027 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8028 fd = mkstemp(filename);
/* Populate the temp file with the synthesized /proc contents. */
8034 if ((r = fake_open->fill(cpu_env, fd))) {
/* Rewind so the guest reads the file from the start. */
8040 lseek(fd, 0, SEEK_SET);
8045 return safe_openat(dirfd, path(pathname), flags, mode);
/* Guest-visible timer IDs carry a magic value in their upper 16 bits. */
8048 #define TIMER_MAGIC 0x0caf0000
8049 #define TIMER_MAGIC_MASK 0xffff0000
8051 /* Convert QEMU provided timer ID back to internal 16bit index format */
8052 static target_timer_t get_timer_id(abi_long arg)
8054 target_timer_t timerid = arg;
/* Reject IDs that were not minted by QEMU (wrong magic). */
8056 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8057 return -TARGET_EINVAL;
/* The low 16 bits index g_posix_timers -- masking of the magic occurs
 * in the elided lines; confirm against full source. */
8062 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8063 return -TARGET_EINVAL;
/*
 * Convert a guest CPU-affinity bitmap (array of abi_ulong in guest
 * memory) into a host unsigned-long bitmap, bit by bit, so the layout
 * is correct even when guest and host word sizes differ.
 */
8069 static int target_to_host_cpu_mask(unsigned long *host_mask,
8071 abi_ulong target_addr,
8074 unsigned target_bits = sizeof(abi_ulong) * 8;
8075 unsigned host_bits = sizeof(*host_mask) * 8;
8076 abi_ulong *target_mask;
8079 assert(host_size >= target_size);
8081 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8083 return -TARGET_EFAULT;
8085 memset(host_mask, 0, host_size);
8087 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8088 unsigned bit = i * target_bits;
/* __get_user performs the guest-endian byteswap on each word. */
8091 __get_user(val, &target_mask[i]);
8092 for (j = 0; j < target_bits; j++, bit++) {
8093 if (val & (1UL << j)) {
8094 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8099 unlock_user(target_mask, target_addr, 0);
/*
 * Inverse of target_to_host_cpu_mask(): copy a host CPU-affinity bitmap
 * back into a guest abi_ulong array, bit by bit, handling differing
 * word sizes and byte order.
 */
8103 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8105 abi_ulong target_addr,
8108 unsigned target_bits = sizeof(abi_ulong) * 8;
8109 unsigned host_bits = sizeof(*host_mask) * 8;
8110 abi_ulong *target_mask;
8113 assert(host_size >= target_size);
8115 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8117 return -TARGET_EFAULT;
8120 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8121 unsigned bit = i * target_bits;
8124 for (j = 0; j < target_bits; j++, bit++) {
8125 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
/* __put_user performs the guest-endian byteswap on each word. */
8129 __put_user(val, &target_mask[i]);
8132 unlock_user(target_mask, target_addr, target_size);
8136 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
/* Direct syscall wrapper: glibc provides no pivot_root() function. */
8137 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8140 /* This is an internal helper for do_syscall so that it is easier
8141 * to have a single return point, so that actions, such as logging
8142 * of syscall results, can be performed.
8143 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8145 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8146 abi_long arg2, abi_long arg3, abi_long arg4,
8147 abi_long arg5, abi_long arg6, abi_long arg7,
8150 CPUState *cpu = env_cpu(cpu_env);
8152 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8153 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8154 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8155 || defined(TARGET_NR_statx)
8158 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8159 || defined(TARGET_NR_fstatfs)
8165 case TARGET_NR_exit:
8166 /* In old applications this may be used to implement _exit(2).
8167 However in threaded applications it is used for thread termination,
8168 and _exit_group is used for application termination.
8169 Do thread termination if we have more then one thread. */
8171 if (block_signals()) {
8172 return -TARGET_ERESTARTSYS;
8175 pthread_mutex_lock(&clone_lock);
8177 if (CPU_NEXT(first_cpu)) {
8178 TaskState *ts = cpu->opaque;
8180 object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8181 object_unref(OBJECT(cpu));
8183 * At this point the CPU should be unrealized and removed
8184 * from cpu lists. We can clean-up the rest of the thread
8185 * data without the lock held.
8188 pthread_mutex_unlock(&clone_lock);
8190 if (ts->child_tidptr) {
8191 put_user_u32(0, ts->child_tidptr);
8192 do_sys_futex(g2h(cpu, ts->child_tidptr),
8193 FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8197 rcu_unregister_thread();
8201 pthread_mutex_unlock(&clone_lock);
8202 preexit_cleanup(cpu_env, arg1);
8204 return 0; /* avoid warning */
8205 case TARGET_NR_read:
8206 if (arg2 == 0 && arg3 == 0) {
8207 return get_errno(safe_read(arg1, 0, 0));
8209 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8210 return -TARGET_EFAULT;
8211 ret = get_errno(safe_read(arg1, p, arg3));
8213 fd_trans_host_to_target_data(arg1)) {
8214 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8216 unlock_user(p, arg2, ret);
8219 case TARGET_NR_write:
8220 if (arg2 == 0 && arg3 == 0) {
8221 return get_errno(safe_write(arg1, 0, 0));
8223 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8224 return -TARGET_EFAULT;
8225 if (fd_trans_target_to_host_data(arg1)) {
8226 void *copy = g_malloc(arg3);
8227 memcpy(copy, p, arg3);
8228 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8230 ret = get_errno(safe_write(arg1, copy, ret));
8234 ret = get_errno(safe_write(arg1, p, arg3));
8236 unlock_user(p, arg2, 0);
8239 #ifdef TARGET_NR_open
8240 case TARGET_NR_open:
8241 if (!(p = lock_user_string(arg1)))
8242 return -TARGET_EFAULT;
8243 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8244 target_to_host_bitmask(arg2, fcntl_flags_tbl),
8246 fd_trans_unregister(ret);
8247 unlock_user(p, arg1, 0);
8250 case TARGET_NR_openat:
8251 if (!(p = lock_user_string(arg2)))
8252 return -TARGET_EFAULT;
8253 ret = get_errno(do_openat(cpu_env, arg1, p,
8254 target_to_host_bitmask(arg3, fcntl_flags_tbl),
8256 fd_trans_unregister(ret);
8257 unlock_user(p, arg2, 0);
8259 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8260 case TARGET_NR_name_to_handle_at:
8261 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8264 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8265 case TARGET_NR_open_by_handle_at:
8266 ret = do_open_by_handle_at(arg1, arg2, arg3);
8267 fd_trans_unregister(ret);
8270 case TARGET_NR_close:
8271 fd_trans_unregister(arg1);
8272 return get_errno(close(arg1));
8275 return do_brk(arg1);
8276 #ifdef TARGET_NR_fork
8277 case TARGET_NR_fork:
8278 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8280 #ifdef TARGET_NR_waitpid
8281 case TARGET_NR_waitpid:
8284 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8285 if (!is_error(ret) && arg2 && ret
8286 && put_user_s32(host_to_target_waitstatus(status), arg2))
8287 return -TARGET_EFAULT;
8291 #ifdef TARGET_NR_waitid
8292 case TARGET_NR_waitid:
8296 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8297 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8298 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8299 return -TARGET_EFAULT;
8300 host_to_target_siginfo(p, &info);
8301 unlock_user(p, arg3, sizeof(target_siginfo_t));
8306 #ifdef TARGET_NR_creat /* not on alpha */
8307 case TARGET_NR_creat:
8308 if (!(p = lock_user_string(arg1)))
8309 return -TARGET_EFAULT;
8310 ret = get_errno(creat(p, arg2));
8311 fd_trans_unregister(ret);
8312 unlock_user(p, arg1, 0);
8315 #ifdef TARGET_NR_link
8316 case TARGET_NR_link:
8319 p = lock_user_string(arg1);
8320 p2 = lock_user_string(arg2);
8322 ret = -TARGET_EFAULT;
8324 ret = get_errno(link(p, p2));
8325 unlock_user(p2, arg2, 0);
8326 unlock_user(p, arg1, 0);
8330 #if defined(TARGET_NR_linkat)
8331 case TARGET_NR_linkat:
8335 return -TARGET_EFAULT;
8336 p = lock_user_string(arg2);
8337 p2 = lock_user_string(arg4);
8339 ret = -TARGET_EFAULT;
8341 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8342 unlock_user(p, arg2, 0);
8343 unlock_user(p2, arg4, 0);
8347 #ifdef TARGET_NR_unlink
8348 case TARGET_NR_unlink:
8349 if (!(p = lock_user_string(arg1)))
8350 return -TARGET_EFAULT;
8351 ret = get_errno(unlink(p));
8352 unlock_user(p, arg1, 0);
8355 #if defined(TARGET_NR_unlinkat)
8356 case TARGET_NR_unlinkat:
8357 if (!(p = lock_user_string(arg2)))
8358 return -TARGET_EFAULT;
8359 ret = get_errno(unlinkat(arg1, p, arg3));
8360 unlock_user(p, arg2, 0);
8363 case TARGET_NR_execve:
8365 char **argp, **envp;
8368 abi_ulong guest_argp;
8369 abi_ulong guest_envp;
8375 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8376 if (get_user_ual(addr, gp))
8377 return -TARGET_EFAULT;
8384 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8385 if (get_user_ual(addr, gp))
8386 return -TARGET_EFAULT;
8392 argp = g_new0(char *, argc + 1);
8393 envp = g_new0(char *, envc + 1);
8395 for (gp = guest_argp, q = argp; gp;
8396 gp += sizeof(abi_ulong), q++) {
8397 if (get_user_ual(addr, gp))
8401 if (!(*q = lock_user_string(addr)))
8406 for (gp = guest_envp, q = envp; gp;
8407 gp += sizeof(abi_ulong), q++) {
8408 if (get_user_ual(addr, gp))
8412 if (!(*q = lock_user_string(addr)))
8417 if (!(p = lock_user_string(arg1)))
8419 /* Although execve() is not an interruptible syscall it is
8420 * a special case where we must use the safe_syscall wrapper:
8421 * if we allow a signal to happen before we make the host
8422 * syscall then we will 'lose' it, because at the point of
8423 * execve the process leaves QEMU's control. So we use the
8424 * safe syscall wrapper to ensure that we either take the
8425 * signal as a guest signal, or else it does not happen
8426 * before the execve completes and makes it the other
8427 * program's problem.
8429 ret = get_errno(safe_execve(p, argp, envp));
8430 unlock_user(p, arg1, 0);
8435 ret = -TARGET_EFAULT;
8438 for (gp = guest_argp, q = argp; *q;
8439 gp += sizeof(abi_ulong), q++) {
8440 if (get_user_ual(addr, gp)
8443 unlock_user(*q, addr, 0);
8445 for (gp = guest_envp, q = envp; *q;
8446 gp += sizeof(abi_ulong), q++) {
8447 if (get_user_ual(addr, gp)
8450 unlock_user(*q, addr, 0);
8457 case TARGET_NR_chdir:
8458 if (!(p = lock_user_string(arg1)))
8459 return -TARGET_EFAULT;
8460 ret = get_errno(chdir(p));
8461 unlock_user(p, arg1, 0);
8463 #ifdef TARGET_NR_time
8464 case TARGET_NR_time:
8467 ret = get_errno(time(&host_time));
8470 && put_user_sal(host_time, arg1))
8471 return -TARGET_EFAULT;
8475 #ifdef TARGET_NR_mknod
8476 case TARGET_NR_mknod:
8477 if (!(p = lock_user_string(arg1)))
8478 return -TARGET_EFAULT;
8479 ret = get_errno(mknod(p, arg2, arg3));
8480 unlock_user(p, arg1, 0);
8483 #if defined(TARGET_NR_mknodat)
8484 case TARGET_NR_mknodat:
8485 if (!(p = lock_user_string(arg2)))
8486 return -TARGET_EFAULT;
8487 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8488 unlock_user(p, arg2, 0);
8491 #ifdef TARGET_NR_chmod
8492 case TARGET_NR_chmod:
8493 if (!(p = lock_user_string(arg1)))
8494 return -TARGET_EFAULT;
8495 ret = get_errno(chmod(p, arg2));
8496 unlock_user(p, arg1, 0);
8499 #ifdef TARGET_NR_lseek
8500 case TARGET_NR_lseek:
8501 return get_errno(lseek(arg1, arg2, arg3));
8503 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8504 /* Alpha specific */
8505 case TARGET_NR_getxpid:
8506 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8507 return get_errno(getpid());
8509 #ifdef TARGET_NR_getpid
8510 case TARGET_NR_getpid:
8511 return get_errno(getpid());
8513 case TARGET_NR_mount:
8515 /* need to look at the data field */
8519 p = lock_user_string(arg1);
8521 return -TARGET_EFAULT;
8527 p2 = lock_user_string(arg2);
8530 unlock_user(p, arg1, 0);
8532 return -TARGET_EFAULT;
8536 p3 = lock_user_string(arg3);
8539 unlock_user(p, arg1, 0);
8541 unlock_user(p2, arg2, 0);
8542 return -TARGET_EFAULT;
8548 /* FIXME - arg5 should be locked, but it isn't clear how to
8549 * do that since it's not guaranteed to be a NULL-terminated
8553 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8555 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8557 ret = get_errno(ret);
8560 unlock_user(p, arg1, 0);
8562 unlock_user(p2, arg2, 0);
8564 unlock_user(p3, arg3, 0);
8568 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8569 #if defined(TARGET_NR_umount)
8570 case TARGET_NR_umount:
8572 #if defined(TARGET_NR_oldumount)
8573 case TARGET_NR_oldumount:
8575 if (!(p = lock_user_string(arg1)))
8576 return -TARGET_EFAULT;
8577 ret = get_errno(umount(p));
8578 unlock_user(p, arg1, 0);
8581 #ifdef TARGET_NR_stime /* not on alpha */
8582 case TARGET_NR_stime:
8586 if (get_user_sal(ts.tv_sec, arg1)) {
8587 return -TARGET_EFAULT;
8589 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8592 #ifdef TARGET_NR_alarm /* not on alpha */
8593 case TARGET_NR_alarm:
8596 #ifdef TARGET_NR_pause /* not on alpha */
8597 case TARGET_NR_pause:
8598 if (!block_signals()) {
8599 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8601 return -TARGET_EINTR;
8603 #ifdef TARGET_NR_utime
8604 case TARGET_NR_utime:
8606 struct utimbuf tbuf, *host_tbuf;
8607 struct target_utimbuf *target_tbuf;
8609 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8610 return -TARGET_EFAULT;
8611 tbuf.actime = tswapal(target_tbuf->actime);
8612 tbuf.modtime = tswapal(target_tbuf->modtime);
8613 unlock_user_struct(target_tbuf, arg2, 0);
8618 if (!(p = lock_user_string(arg1)))
8619 return -TARGET_EFAULT;
8620 ret = get_errno(utime(p, host_tbuf));
8621 unlock_user(p, arg1, 0);
8625 #ifdef TARGET_NR_utimes
8626 case TARGET_NR_utimes:
8628 struct timeval *tvp, tv[2];
8630 if (copy_from_user_timeval(&tv[0], arg2)
8631 || copy_from_user_timeval(&tv[1],
8632 arg2 + sizeof(struct target_timeval)))
8633 return -TARGET_EFAULT;
8638 if (!(p = lock_user_string(arg1)))
8639 return -TARGET_EFAULT;
8640 ret = get_errno(utimes(p, tvp));
8641 unlock_user(p, arg1, 0);
8645 #if defined(TARGET_NR_futimesat)
8646 case TARGET_NR_futimesat:
8648 struct timeval *tvp, tv[2];
8650 if (copy_from_user_timeval(&tv[0], arg3)
8651 || copy_from_user_timeval(&tv[1],
8652 arg3 + sizeof(struct target_timeval)))
8653 return -TARGET_EFAULT;
8658 if (!(p = lock_user_string(arg2))) {
8659 return -TARGET_EFAULT;
8661 ret = get_errno(futimesat(arg1, path(p), tvp));
8662 unlock_user(p, arg2, 0);
8666 #ifdef TARGET_NR_access
8667 case TARGET_NR_access:
8668 if (!(p = lock_user_string(arg1))) {
8669 return -TARGET_EFAULT;
8671 ret = get_errno(access(path(p), arg2));
8672 unlock_user(p, arg1, 0);
8675 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8676 case TARGET_NR_faccessat:
8677 if (!(p = lock_user_string(arg2))) {
8678 return -TARGET_EFAULT;
8680 ret = get_errno(faccessat(arg1, p, arg3, 0));
8681 unlock_user(p, arg2, 0);
8684 #ifdef TARGET_NR_nice /* not on alpha */
8685 case TARGET_NR_nice:
8686 return get_errno(nice(arg1));
8688 case TARGET_NR_sync:
8691 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8692 case TARGET_NR_syncfs:
8693 return get_errno(syncfs(arg1));
8695 case TARGET_NR_kill:
8696 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8697 #ifdef TARGET_NR_rename
8698 case TARGET_NR_rename:
8701 p = lock_user_string(arg1);
8702 p2 = lock_user_string(arg2);
8704 ret = -TARGET_EFAULT;
8706 ret = get_errno(rename(p, p2));
8707 unlock_user(p2, arg2, 0);
8708 unlock_user(p, arg1, 0);
8712 #if defined(TARGET_NR_renameat)
8713 case TARGET_NR_renameat:
8716 p = lock_user_string(arg2);
8717 p2 = lock_user_string(arg4);
8719 ret = -TARGET_EFAULT;
8721 ret = get_errno(renameat(arg1, p, arg3, p2));
8722 unlock_user(p2, arg4, 0);
8723 unlock_user(p, arg2, 0);
8727 #if defined(TARGET_NR_renameat2)
8728 case TARGET_NR_renameat2:
8731 p = lock_user_string(arg2);
8732 p2 = lock_user_string(arg4);
8734 ret = -TARGET_EFAULT;
8736 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8738 unlock_user(p2, arg4, 0);
8739 unlock_user(p, arg2, 0);
8743 #ifdef TARGET_NR_mkdir
8744 case TARGET_NR_mkdir:
8745 if (!(p = lock_user_string(arg1)))
8746 return -TARGET_EFAULT;
8747 ret = get_errno(mkdir(p, arg2));
8748 unlock_user(p, arg1, 0);
8751 #if defined(TARGET_NR_mkdirat)
8752 case TARGET_NR_mkdirat:
8753 if (!(p = lock_user_string(arg2)))
8754 return -TARGET_EFAULT;
8755 ret = get_errno(mkdirat(arg1, p, arg3));
8756 unlock_user(p, arg2, 0);
8759 #ifdef TARGET_NR_rmdir
8760 case TARGET_NR_rmdir:
8761 if (!(p = lock_user_string(arg1)))
8762 return -TARGET_EFAULT;
8763 ret = get_errno(rmdir(p));
8764 unlock_user(p, arg1, 0);
8768 ret = get_errno(dup(arg1));
8770 fd_trans_dup(arg1, ret);
8773 #ifdef TARGET_NR_pipe
8774 case TARGET_NR_pipe:
8775 return do_pipe(cpu_env, arg1, 0, 0);
8777 #ifdef TARGET_NR_pipe2
8778 case TARGET_NR_pipe2:
8779 return do_pipe(cpu_env, arg1,
8780 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8782 case TARGET_NR_times:
8784 struct target_tms *tmsp;
8786 ret = get_errno(times(&tms));
8788 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8790 return -TARGET_EFAULT;
8791 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8792 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8793 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8794 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8797 ret = host_to_target_clock_t(ret);
8800 case TARGET_NR_acct:
8802 ret = get_errno(acct(NULL));
8804 if (!(p = lock_user_string(arg1))) {
8805 return -TARGET_EFAULT;
8807 ret = get_errno(acct(path(p)));
8808 unlock_user(p, arg1, 0);
8811 #ifdef TARGET_NR_umount2
8812 case TARGET_NR_umount2:
8813 if (!(p = lock_user_string(arg1)))
8814 return -TARGET_EFAULT;
8815 ret = get_errno(umount2(p, arg2));
8816 unlock_user(p, arg1, 0);
8819 case TARGET_NR_ioctl:
8820 return do_ioctl(arg1, arg2, arg3);
8821 #ifdef TARGET_NR_fcntl
8822 case TARGET_NR_fcntl:
8823 return do_fcntl(arg1, arg2, arg3);
8825 case TARGET_NR_setpgid:
8826 return get_errno(setpgid(arg1, arg2));
8827 case TARGET_NR_umask:
8828 return get_errno(umask(arg1));
8829 case TARGET_NR_chroot:
8830 if (!(p = lock_user_string(arg1)))
8831 return -TARGET_EFAULT;
8832 ret = get_errno(chroot(p));
8833 unlock_user(p, arg1, 0);
8835 #ifdef TARGET_NR_dup2
8836 case TARGET_NR_dup2:
8837 ret = get_errno(dup2(arg1, arg2));
8839 fd_trans_dup(arg1, arg2);
8843 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8844 case TARGET_NR_dup3:
8848 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8851 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8852 ret = get_errno(dup3(arg1, arg2, host_flags));
8854 fd_trans_dup(arg1, arg2);
8859 #ifdef TARGET_NR_getppid /* not on alpha */
8860 case TARGET_NR_getppid:
8861 return get_errno(getppid());
8863 #ifdef TARGET_NR_getpgrp
8864 case TARGET_NR_getpgrp:
8865 return get_errno(getpgrp());
8867 case TARGET_NR_setsid:
8868 return get_errno(setsid());
8869 #ifdef TARGET_NR_sigaction
8870 case TARGET_NR_sigaction:
8872 #if defined(TARGET_MIPS)
8873 struct target_sigaction act, oact, *pact, *old_act;
8876 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8877 return -TARGET_EFAULT;
8878 act._sa_handler = old_act->_sa_handler;
8879 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8880 act.sa_flags = old_act->sa_flags;
8881 unlock_user_struct(old_act, arg2, 0);
8887 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
8889 if (!is_error(ret) && arg3) {
8890 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8891 return -TARGET_EFAULT;
8892 old_act->_sa_handler = oact._sa_handler;
8893 old_act->sa_flags = oact.sa_flags;
8894 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8895 old_act->sa_mask.sig[1] = 0;
8896 old_act->sa_mask.sig[2] = 0;
8897 old_act->sa_mask.sig[3] = 0;
8898 unlock_user_struct(old_act, arg3, 1);
8901 struct target_old_sigaction *old_act;
8902 struct target_sigaction act, oact, *pact;
8904 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8905 return -TARGET_EFAULT;
8906 act._sa_handler = old_act->_sa_handler;
8907 target_siginitset(&act.sa_mask, old_act->sa_mask);
8908 act.sa_flags = old_act->sa_flags;
8909 #ifdef TARGET_ARCH_HAS_SA_RESTORER
8910 act.sa_restorer = old_act->sa_restorer;
8912 unlock_user_struct(old_act, arg2, 0);
8917 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
8918 if (!is_error(ret) && arg3) {
8919 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8920 return -TARGET_EFAULT;
8921 old_act->_sa_handler = oact._sa_handler;
8922 old_act->sa_mask = oact.sa_mask.sig[0];
8923 old_act->sa_flags = oact.sa_flags;
8924 #ifdef TARGET_ARCH_HAS_SA_RESTORER
8925 old_act->sa_restorer = oact.sa_restorer;
8927 unlock_user_struct(old_act, arg3, 1);
8933 case TARGET_NR_rt_sigaction:
8936 * For Alpha and SPARC this is a 5 argument syscall, with
8937 * a 'restorer' parameter which must be copied into the
8938 * sa_restorer field of the sigaction struct.
8939 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8940 * and arg5 is the sigsetsize.
8942 #if defined(TARGET_ALPHA)
8943 target_ulong sigsetsize = arg4;
8944 target_ulong restorer = arg5;
8945 #elif defined(TARGET_SPARC)
8946 target_ulong restorer = arg4;
8947 target_ulong sigsetsize = arg5;
8949 target_ulong sigsetsize = arg4;
8950 target_ulong restorer = 0;
8952 struct target_sigaction *act = NULL;
8953 struct target_sigaction *oact = NULL;
8955 if (sigsetsize != sizeof(target_sigset_t)) {
8956 return -TARGET_EINVAL;
8958 if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8959 return -TARGET_EFAULT;
8961 if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8962 ret = -TARGET_EFAULT;
8964 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
8966 unlock_user_struct(oact, arg3, 1);
8970 unlock_user_struct(act, arg2, 0);
8974 #ifdef TARGET_NR_sgetmask /* not on alpha */
8975 case TARGET_NR_sgetmask:
8978 abi_ulong target_set;
8979 ret = do_sigprocmask(0, NULL, &cur_set);
8981 host_to_target_old_sigset(&target_set, &cur_set);
8987 #ifdef TARGET_NR_ssetmask /* not on alpha */
8988 case TARGET_NR_ssetmask:
8991 abi_ulong target_set = arg1;
8992 target_to_host_old_sigset(&set, &target_set);
8993 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8995 host_to_target_old_sigset(&target_set, &oset);
9001 #ifdef TARGET_NR_sigprocmask
9002 case TARGET_NR_sigprocmask:
9004 #if defined(TARGET_ALPHA)
9005 sigset_t set, oldset;
9010 case TARGET_SIG_BLOCK:
9013 case TARGET_SIG_UNBLOCK:
9016 case TARGET_SIG_SETMASK:
9020 return -TARGET_EINVAL;
9023 target_to_host_old_sigset(&set, &mask);
9025 ret = do_sigprocmask(how, &set, &oldset);
9026 if (!is_error(ret)) {
9027 host_to_target_old_sigset(&mask, &oldset);
9029 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9032 sigset_t set, oldset, *set_ptr;
9037 case TARGET_SIG_BLOCK:
9040 case TARGET_SIG_UNBLOCK:
9043 case TARGET_SIG_SETMASK:
9047 return -TARGET_EINVAL;
9049 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9050 return -TARGET_EFAULT;
9051 target_to_host_old_sigset(&set, p);
9052 unlock_user(p, arg2, 0);
9058 ret = do_sigprocmask(how, set_ptr, &oldset);
9059 if (!is_error(ret) && arg3) {
9060 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9061 return -TARGET_EFAULT;
9062 host_to_target_old_sigset(p, &oldset);
9063 unlock_user(p, arg3, sizeof(target_sigset_t));
9069 case TARGET_NR_rt_sigprocmask:
9072 sigset_t set, oldset, *set_ptr;
9074 if (arg4 != sizeof(target_sigset_t)) {
9075 return -TARGET_EINVAL;
9080 case TARGET_SIG_BLOCK:
9083 case TARGET_SIG_UNBLOCK:
9086 case TARGET_SIG_SETMASK:
9090 return -TARGET_EINVAL;
9092 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9093 return -TARGET_EFAULT;
9094 target_to_host_sigset(&set, p);
9095 unlock_user(p, arg2, 0);
9101 ret = do_sigprocmask(how, set_ptr, &oldset);
9102 if (!is_error(ret) && arg3) {
9103 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9104 return -TARGET_EFAULT;
9105 host_to_target_sigset(p, &oldset);
9106 unlock_user(p, arg3, sizeof(target_sigset_t));
9110 #ifdef TARGET_NR_sigpending
9111 case TARGET_NR_sigpending:
9114 ret = get_errno(sigpending(&set));
9115 if (!is_error(ret)) {
9116 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9117 return -TARGET_EFAULT;
9118 host_to_target_old_sigset(p, &set);
9119 unlock_user(p, arg1, sizeof(target_sigset_t));
9124 case TARGET_NR_rt_sigpending:
9128 /* Yes, this check is >, not != like most. We follow the kernel's
9129 * logic and it does it like this because it implements
9130 * NR_sigpending through the same code path, and in that case
9131 * the old_sigset_t is smaller in size.
9133 if (arg2 > sizeof(target_sigset_t)) {
9134 return -TARGET_EINVAL;
9137 ret = get_errno(sigpending(&set));
9138 if (!is_error(ret)) {
9139 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9140 return -TARGET_EFAULT;
9141 host_to_target_sigset(p, &set);
9142 unlock_user(p, arg1, sizeof(target_sigset_t));
9146 #ifdef TARGET_NR_sigsuspend
9147 case TARGET_NR_sigsuspend:
9149 TaskState *ts = cpu->opaque;
9150 #if defined(TARGET_ALPHA)
9151 abi_ulong mask = arg1;
9152 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9154 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9155 return -TARGET_EFAULT;
9156 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9157 unlock_user(p, arg1, 0);
9159 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9161 if (ret != -TARGET_ERESTARTSYS) {
9162 ts->in_sigsuspend = 1;
9167 case TARGET_NR_rt_sigsuspend:
9169 TaskState *ts = cpu->opaque;
9171 if (arg2 != sizeof(target_sigset_t)) {
9172 return -TARGET_EINVAL;
9174 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9175 return -TARGET_EFAULT;
9176 target_to_host_sigset(&ts->sigsuspend_mask, p);
9177 unlock_user(p, arg1, 0);
9178 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9180 if (ret != -TARGET_ERESTARTSYS) {
9181 ts->in_sigsuspend = 1;
9185 #ifdef TARGET_NR_rt_sigtimedwait
9186 case TARGET_NR_rt_sigtimedwait:
9189 struct timespec uts, *puts;
9192 if (arg4 != sizeof(target_sigset_t)) {
9193 return -TARGET_EINVAL;
9196 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9197 return -TARGET_EFAULT;
9198 target_to_host_sigset(&set, p);
9199 unlock_user(p, arg1, 0);
9202 if (target_to_host_timespec(puts, arg3)) {
9203 return -TARGET_EFAULT;
9208 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9210 if (!is_error(ret)) {
9212 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9215 return -TARGET_EFAULT;
9217 host_to_target_siginfo(p, &uinfo);
9218 unlock_user(p, arg2, sizeof(target_siginfo_t));
9220 ret = host_to_target_signal(ret);
9225 #ifdef TARGET_NR_rt_sigtimedwait_time64
9226 case TARGET_NR_rt_sigtimedwait_time64:
9229 struct timespec uts, *puts;
9232 if (arg4 != sizeof(target_sigset_t)) {
9233 return -TARGET_EINVAL;
9236 p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9238 return -TARGET_EFAULT;
9240 target_to_host_sigset(&set, p);
9241 unlock_user(p, arg1, 0);
9244 if (target_to_host_timespec64(puts, arg3)) {
9245 return -TARGET_EFAULT;
9250 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9252 if (!is_error(ret)) {
9254 p = lock_user(VERIFY_WRITE, arg2,
9255 sizeof(target_siginfo_t), 0);
9257 return -TARGET_EFAULT;
9259 host_to_target_siginfo(p, &uinfo);
9260 unlock_user(p, arg2, sizeof(target_siginfo_t));
9262 ret = host_to_target_signal(ret);
9267 case TARGET_NR_rt_sigqueueinfo:
9271 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9273 return -TARGET_EFAULT;
9275 target_to_host_siginfo(&uinfo, p);
9276 unlock_user(p, arg3, 0);
9277 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9280 case TARGET_NR_rt_tgsigqueueinfo:
9284 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9286 return -TARGET_EFAULT;
9288 target_to_host_siginfo(&uinfo, p);
9289 unlock_user(p, arg4, 0);
9290 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9293 #ifdef TARGET_NR_sigreturn
9294 case TARGET_NR_sigreturn:
9295 if (block_signals()) {
9296 return -TARGET_ERESTARTSYS;
9298 return do_sigreturn(cpu_env);
9300 case TARGET_NR_rt_sigreturn:
9301 if (block_signals()) {
9302 return -TARGET_ERESTARTSYS;
9304 return do_rt_sigreturn(cpu_env);
9305 case TARGET_NR_sethostname:
9306 if (!(p = lock_user_string(arg1)))
9307 return -TARGET_EFAULT;
9308 ret = get_errno(sethostname(p, arg2));
9309 unlock_user(p, arg1, 0);
9311 #ifdef TARGET_NR_setrlimit
9312 case TARGET_NR_setrlimit:
9314 int resource = target_to_host_resource(arg1);
9315 struct target_rlimit *target_rlim;
9317 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9318 return -TARGET_EFAULT;
9319 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9320 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9321 unlock_user_struct(target_rlim, arg2, 0);
9323 * If we just passed through resource limit settings for memory then
9324 * they would also apply to QEMU's own allocations, and QEMU will
9325 * crash or hang or die if its allocations fail. Ideally we would
9326 * track the guest allocations in QEMU and apply the limits ourselves.
9327 * For now, just tell the guest the call succeeded but don't actually
9330 if (resource != RLIMIT_AS &&
9331 resource != RLIMIT_DATA &&
9332 resource != RLIMIT_STACK) {
9333 return get_errno(setrlimit(resource, &rlim));
9339 #ifdef TARGET_NR_getrlimit
9340 case TARGET_NR_getrlimit:
9342 int resource = target_to_host_resource(arg1);
9343 struct target_rlimit *target_rlim;
9346 ret = get_errno(getrlimit(resource, &rlim));
9347 if (!is_error(ret)) {
9348 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9349 return -TARGET_EFAULT;
9350 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9351 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9352 unlock_user_struct(target_rlim, arg2, 1);
9357 case TARGET_NR_getrusage:
9359 struct rusage rusage;
9360 ret = get_errno(getrusage(arg1, &rusage));
9361 if (!is_error(ret)) {
9362 ret = host_to_target_rusage(arg2, &rusage);
9366 #if defined(TARGET_NR_gettimeofday)
9367 case TARGET_NR_gettimeofday:
9372 ret = get_errno(gettimeofday(&tv, &tz));
9373 if (!is_error(ret)) {
9374 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9375 return -TARGET_EFAULT;
9377 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9378 return -TARGET_EFAULT;
9384 #if defined(TARGET_NR_settimeofday)
9385 case TARGET_NR_settimeofday:
9387 struct timeval tv, *ptv = NULL;
9388 struct timezone tz, *ptz = NULL;
9391 if (copy_from_user_timeval(&tv, arg1)) {
9392 return -TARGET_EFAULT;
9398 if (copy_from_user_timezone(&tz, arg2)) {
9399 return -TARGET_EFAULT;
9404 return get_errno(settimeofday(ptv, ptz));
9407 #if defined(TARGET_NR_select)
9408 case TARGET_NR_select:
9409 #if defined(TARGET_WANT_NI_OLD_SELECT)
9410 /* some architectures used to have old_select here
9411 * but now ENOSYS it.
9413 ret = -TARGET_ENOSYS;
9414 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9415 ret = do_old_select(arg1);
9417 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9421 #ifdef TARGET_NR_pselect6
9422 case TARGET_NR_pselect6:
9423 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9425 #ifdef TARGET_NR_pselect6_time64
9426 case TARGET_NR_pselect6_time64:
9427 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9429 #ifdef TARGET_NR_symlink
9430 case TARGET_NR_symlink:
9433 p = lock_user_string(arg1);
9434 p2 = lock_user_string(arg2);
9436 ret = -TARGET_EFAULT;
9438 ret = get_errno(symlink(p, p2));
9439 unlock_user(p2, arg2, 0);
9440 unlock_user(p, arg1, 0);
9444 #if defined(TARGET_NR_symlinkat)
9445 case TARGET_NR_symlinkat:
9448 p = lock_user_string(arg1);
9449 p2 = lock_user_string(arg3);
9451 ret = -TARGET_EFAULT;
9453 ret = get_errno(symlinkat(p, arg2, p2));
9454 unlock_user(p2, arg3, 0);
9455 unlock_user(p, arg1, 0);
9459 #ifdef TARGET_NR_readlink
9460 case TARGET_NR_readlink:
9463 p = lock_user_string(arg1);
9464 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9466 ret = -TARGET_EFAULT;
9468 /* Short circuit this for the magic exe check. */
9469 ret = -TARGET_EINVAL;
9470 } else if (is_proc_myself((const char *)p, "exe")) {
9471 char real[PATH_MAX], *temp;
9472 temp = realpath(exec_path, real);
9473 /* Return value is # of bytes that we wrote to the buffer. */
9475 ret = get_errno(-1);
9477 /* Don't worry about sign mismatch as earlier mapping
9478 * logic would have thrown a bad address error. */
9479 ret = MIN(strlen(real), arg3);
9480 /* We cannot NUL terminate the string. */
9481 memcpy(p2, real, ret);
9484 ret = get_errno(readlink(path(p), p2, arg3));
9486 unlock_user(p2, arg2, ret);
9487 unlock_user(p, arg1, 0);
9491 #if defined(TARGET_NR_readlinkat)
9492 case TARGET_NR_readlinkat:
9495 p = lock_user_string(arg2);
9496 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9498 ret = -TARGET_EFAULT;
9499 } else if (is_proc_myself((const char *)p, "exe")) {
9500 char real[PATH_MAX], *temp;
9501 temp = realpath(exec_path, real);
9502 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9503 snprintf((char *)p2, arg4, "%s", real);
9505 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9507 unlock_user(p2, arg3, ret);
9508 unlock_user(p, arg2, 0);
9512 #ifdef TARGET_NR_swapon
9513 case TARGET_NR_swapon:
9514 if (!(p = lock_user_string(arg1)))
9515 return -TARGET_EFAULT;
9516 ret = get_errno(swapon(p, arg2));
9517 unlock_user(p, arg1, 0);
9520 case TARGET_NR_reboot:
9521 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9522 /* arg4 must be ignored in all other cases */
9523 p = lock_user_string(arg4);
9525 return -TARGET_EFAULT;
9527 ret = get_errno(reboot(arg1, arg2, arg3, p));
9528 unlock_user(p, arg4, 0);
9530 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9533 #ifdef TARGET_NR_mmap
9534 case TARGET_NR_mmap:
9535 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9536 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9537 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9538 || defined(TARGET_S390X)
9541 abi_ulong v1, v2, v3, v4, v5, v6;
9542 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9543 return -TARGET_EFAULT;
9550 unlock_user(v, arg1, 0);
9551 ret = get_errno(target_mmap(v1, v2, v3,
9552 target_to_host_bitmask(v4, mmap_flags_tbl),
9556 /* mmap pointers are always untagged */
9557 ret = get_errno(target_mmap(arg1, arg2, arg3,
9558 target_to_host_bitmask(arg4, mmap_flags_tbl),
9564 #ifdef TARGET_NR_mmap2
9565 case TARGET_NR_mmap2:
9567 #define MMAP_SHIFT 12
9569 ret = target_mmap(arg1, arg2, arg3,
9570 target_to_host_bitmask(arg4, mmap_flags_tbl),
9571 arg5, arg6 << MMAP_SHIFT);
9572 return get_errno(ret);
9574 case TARGET_NR_munmap:
9575 arg1 = cpu_untagged_addr(cpu, arg1);
9576 return get_errno(target_munmap(arg1, arg2));
9577 case TARGET_NR_mprotect:
9578 arg1 = cpu_untagged_addr(cpu, arg1);
9580 TaskState *ts = cpu->opaque;
9581 /* Special hack to detect libc making the stack executable. */
9582 if ((arg3 & PROT_GROWSDOWN)
9583 && arg1 >= ts->info->stack_limit
9584 && arg1 <= ts->info->start_stack) {
9585 arg3 &= ~PROT_GROWSDOWN;
9586 arg2 = arg2 + arg1 - ts->info->stack_limit;
9587 arg1 = ts->info->stack_limit;
9590 return get_errno(target_mprotect(arg1, arg2, arg3));
9591 #ifdef TARGET_NR_mremap
9592 case TARGET_NR_mremap:
9593 arg1 = cpu_untagged_addr(cpu, arg1);
9594 /* mremap new_addr (arg5) is always untagged */
9595 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9597 /* ??? msync/mlock/munlock are broken for softmmu. */
9598 #ifdef TARGET_NR_msync
9599 case TARGET_NR_msync:
9600 return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
9602 #ifdef TARGET_NR_mlock
9603 case TARGET_NR_mlock:
9604 return get_errno(mlock(g2h(cpu, arg1), arg2));
9606 #ifdef TARGET_NR_munlock
9607 case TARGET_NR_munlock:
9608 return get_errno(munlock(g2h(cpu, arg1), arg2));
9610 #ifdef TARGET_NR_mlockall
9611 case TARGET_NR_mlockall:
9612 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9614 #ifdef TARGET_NR_munlockall
9615 case TARGET_NR_munlockall:
9616 return get_errno(munlockall());
9618 #ifdef TARGET_NR_truncate
9619 case TARGET_NR_truncate:
9620 if (!(p = lock_user_string(arg1)))
9621 return -TARGET_EFAULT;
9622 ret = get_errno(truncate(p, arg2));
9623 unlock_user(p, arg1, 0);
9626 #ifdef TARGET_NR_ftruncate
9627 case TARGET_NR_ftruncate:
9628 return get_errno(ftruncate(arg1, arg2));
9630 case TARGET_NR_fchmod:
9631 return get_errno(fchmod(arg1, arg2));
9632 #if defined(TARGET_NR_fchmodat)
9633 case TARGET_NR_fchmodat:
9634 if (!(p = lock_user_string(arg2)))
9635 return -TARGET_EFAULT;
9636 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9637 unlock_user(p, arg2, 0);
9640 case TARGET_NR_getpriority:
9641 /* Note that negative values are valid for getpriority, so we must
9642 differentiate based on errno settings. */
9644 ret = getpriority(arg1, arg2);
9645 if (ret == -1 && errno != 0) {
9646 return -host_to_target_errno(errno);
9649 /* Return value is the unbiased priority. Signal no error. */
9650 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9652 /* Return value is a biased priority to avoid negative numbers. */
9656 case TARGET_NR_setpriority:
9657 return get_errno(setpriority(arg1, arg2, arg3));
9658 #ifdef TARGET_NR_statfs
9659 case TARGET_NR_statfs:
9660 if (!(p = lock_user_string(arg1))) {
9661 return -TARGET_EFAULT;
9663 ret = get_errno(statfs(path(p), &stfs));
9664 unlock_user(p, arg1, 0);
9666 if (!is_error(ret)) {
9667 struct target_statfs *target_stfs;
9669 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9670 return -TARGET_EFAULT;
9671 __put_user(stfs.f_type, &target_stfs->f_type);
9672 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9673 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9674 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9675 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9676 __put_user(stfs.f_files, &target_stfs->f_files);
9677 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9678 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9679 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9680 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9681 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9682 #ifdef _STATFS_F_FLAGS
9683 __put_user(stfs.f_flags, &target_stfs->f_flags);
9685 __put_user(0, &target_stfs->f_flags);
9687 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9688 unlock_user_struct(target_stfs, arg2, 1);
9692 #ifdef TARGET_NR_fstatfs
9693 case TARGET_NR_fstatfs:
9694 ret = get_errno(fstatfs(arg1, &stfs));
9695 goto convert_statfs;
9697 #ifdef TARGET_NR_statfs64
9698 case TARGET_NR_statfs64:
9699 if (!(p = lock_user_string(arg1))) {
9700 return -TARGET_EFAULT;
9702 ret = get_errno(statfs(path(p), &stfs));
9703 unlock_user(p, arg1, 0);
9705 if (!is_error(ret)) {
9706 struct target_statfs64 *target_stfs;
9708 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9709 return -TARGET_EFAULT;
9710 __put_user(stfs.f_type, &target_stfs->f_type);
9711 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9712 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9713 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9714 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9715 __put_user(stfs.f_files, &target_stfs->f_files);
9716 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9717 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9718 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9719 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9720 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9721 #ifdef _STATFS_F_FLAGS
9722 __put_user(stfs.f_flags, &target_stfs->f_flags);
9724 __put_user(0, &target_stfs->f_flags);
9726 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9727 unlock_user_struct(target_stfs, arg3, 1);
9730 case TARGET_NR_fstatfs64:
9731 ret = get_errno(fstatfs(arg1, &stfs));
9732 goto convert_statfs64;
9734 #ifdef TARGET_NR_socketcall
9735 case TARGET_NR_socketcall:
9736 return do_socketcall(arg1, arg2);
9738 #ifdef TARGET_NR_accept
9739 case TARGET_NR_accept:
9740 return do_accept4(arg1, arg2, arg3, 0);
9742 #ifdef TARGET_NR_accept4
9743 case TARGET_NR_accept4:
9744 return do_accept4(arg1, arg2, arg3, arg4);
9746 #ifdef TARGET_NR_bind
9747 case TARGET_NR_bind:
9748 return do_bind(arg1, arg2, arg3);
9750 #ifdef TARGET_NR_connect
9751 case TARGET_NR_connect:
9752 return do_connect(arg1, arg2, arg3);
9754 #ifdef TARGET_NR_getpeername
9755 case TARGET_NR_getpeername:
9756 return do_getpeername(arg1, arg2, arg3);
9758 #ifdef TARGET_NR_getsockname
9759 case TARGET_NR_getsockname:
9760 return do_getsockname(arg1, arg2, arg3);
9762 #ifdef TARGET_NR_getsockopt
9763 case TARGET_NR_getsockopt:
9764 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9766 #ifdef TARGET_NR_listen
9767 case TARGET_NR_listen:
9768 return get_errno(listen(arg1, arg2));
9770 #ifdef TARGET_NR_recv
9771 case TARGET_NR_recv:
9772 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9774 #ifdef TARGET_NR_recvfrom
9775 case TARGET_NR_recvfrom:
9776 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9778 #ifdef TARGET_NR_recvmsg
9779 case TARGET_NR_recvmsg:
9780 return do_sendrecvmsg(arg1, arg2, arg3, 0);
9782 #ifdef TARGET_NR_send
9783 case TARGET_NR_send:
9784 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9786 #ifdef TARGET_NR_sendmsg
9787 case TARGET_NR_sendmsg:
9788 return do_sendrecvmsg(arg1, arg2, arg3, 1);
9790 #ifdef TARGET_NR_sendmmsg
9791 case TARGET_NR_sendmmsg:
9792 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9794 #ifdef TARGET_NR_recvmmsg
9795 case TARGET_NR_recvmmsg:
9796 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9798 #ifdef TARGET_NR_sendto
9799 case TARGET_NR_sendto:
9800 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9802 #ifdef TARGET_NR_shutdown
9803 case TARGET_NR_shutdown:
9804 return get_errno(shutdown(arg1, arg2));
9806 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9807 case TARGET_NR_getrandom:
9808 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9810 return -TARGET_EFAULT;
9812 ret = get_errno(getrandom(p, arg2, arg3));
9813 unlock_user(p, arg1, ret);
9816 #ifdef TARGET_NR_socket
9817 case TARGET_NR_socket:
9818 return do_socket(arg1, arg2, arg3);
9820 #ifdef TARGET_NR_socketpair
9821 case TARGET_NR_socketpair:
9822 return do_socketpair(arg1, arg2, arg3, arg4);
9824 #ifdef TARGET_NR_setsockopt
9825 case TARGET_NR_setsockopt:
9826 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9828 #if defined(TARGET_NR_syslog)
9829 case TARGET_NR_syslog:
9834 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9835 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9836 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9837 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9838 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9839 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9840 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9841 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9842 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9843 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9844 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9845 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9848 return -TARGET_EINVAL;
9853 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9855 return -TARGET_EFAULT;
9857 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9858 unlock_user(p, arg2, arg3);
9862 return -TARGET_EINVAL;
9867 case TARGET_NR_setitimer:
9869 struct itimerval value, ovalue, *pvalue;
9873 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9874 || copy_from_user_timeval(&pvalue->it_value,
9875 arg2 + sizeof(struct target_timeval)))
9876 return -TARGET_EFAULT;
9880 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9881 if (!is_error(ret) && arg3) {
9882 if (copy_to_user_timeval(arg3,
9883 &ovalue.it_interval)
9884 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9886 return -TARGET_EFAULT;
9890 case TARGET_NR_getitimer:
9892 struct itimerval value;
9894 ret = get_errno(getitimer(arg1, &value));
9895 if (!is_error(ret) && arg2) {
9896 if (copy_to_user_timeval(arg2,
9898 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9900 return -TARGET_EFAULT;
9904 #ifdef TARGET_NR_stat
9905 case TARGET_NR_stat:
9906 if (!(p = lock_user_string(arg1))) {
9907 return -TARGET_EFAULT;
9909 ret = get_errno(stat(path(p), &st));
9910 unlock_user(p, arg1, 0);
9913 #ifdef TARGET_NR_lstat
9914 case TARGET_NR_lstat:
9915 if (!(p = lock_user_string(arg1))) {
9916 return -TARGET_EFAULT;
9918 ret = get_errno(lstat(path(p), &st));
9919 unlock_user(p, arg1, 0);
9922 #ifdef TARGET_NR_fstat
9923 case TARGET_NR_fstat:
9925 ret = get_errno(fstat(arg1, &st));
9926 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9929 if (!is_error(ret)) {
9930 struct target_stat *target_st;
9932 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9933 return -TARGET_EFAULT;
9934 memset(target_st, 0, sizeof(*target_st));
9935 __put_user(st.st_dev, &target_st->st_dev);
9936 __put_user(st.st_ino, &target_st->st_ino);
9937 __put_user(st.st_mode, &target_st->st_mode);
9938 __put_user(st.st_uid, &target_st->st_uid);
9939 __put_user(st.st_gid, &target_st->st_gid);
9940 __put_user(st.st_nlink, &target_st->st_nlink);
9941 __put_user(st.st_rdev, &target_st->st_rdev);
9942 __put_user(st.st_size, &target_st->st_size);
9943 __put_user(st.st_blksize, &target_st->st_blksize);
9944 __put_user(st.st_blocks, &target_st->st_blocks);
9945 __put_user(st.st_atime, &target_st->target_st_atime);
9946 __put_user(st.st_mtime, &target_st->target_st_mtime);
9947 __put_user(st.st_ctime, &target_st->target_st_ctime);
9948 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
9949 __put_user(st.st_atim.tv_nsec,
9950 &target_st->target_st_atime_nsec);
9951 __put_user(st.st_mtim.tv_nsec,
9952 &target_st->target_st_mtime_nsec);
9953 __put_user(st.st_ctim.tv_nsec,
9954 &target_st->target_st_ctime_nsec);
9956 unlock_user_struct(target_st, arg2, 1);
9961 case TARGET_NR_vhangup:
9962 return get_errno(vhangup());
9963 #ifdef TARGET_NR_syscall
9964 case TARGET_NR_syscall:
9965 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9966 arg6, arg7, arg8, 0);
9968 #if defined(TARGET_NR_wait4)
9969 case TARGET_NR_wait4:
9972 abi_long status_ptr = arg2;
9973 struct rusage rusage, *rusage_ptr;
9974 abi_ulong target_rusage = arg4;
9975 abi_long rusage_err;
9977 rusage_ptr = &rusage;
9980 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9981 if (!is_error(ret)) {
9982 if (status_ptr && ret) {
9983 status = host_to_target_waitstatus(status);
9984 if (put_user_s32(status, status_ptr))
9985 return -TARGET_EFAULT;
9987 if (target_rusage) {
9988 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9997 #ifdef TARGET_NR_swapoff
9998 case TARGET_NR_swapoff:
9999 if (!(p = lock_user_string(arg1)))
10000 return -TARGET_EFAULT;
10001 ret = get_errno(swapoff(p));
10002 unlock_user(p, arg1, 0);
10005 case TARGET_NR_sysinfo:
10007 struct target_sysinfo *target_value;
10008 struct sysinfo value;
10009 ret = get_errno(sysinfo(&value));
10010 if (!is_error(ret) && arg1)
10012 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10013 return -TARGET_EFAULT;
10014 __put_user(value.uptime, &target_value->uptime);
10015 __put_user(value.loads[0], &target_value->loads[0]);
10016 __put_user(value.loads[1], &target_value->loads[1]);
10017 __put_user(value.loads[2], &target_value->loads[2]);
10018 __put_user(value.totalram, &target_value->totalram);
10019 __put_user(value.freeram, &target_value->freeram);
10020 __put_user(value.sharedram, &target_value->sharedram);
10021 __put_user(value.bufferram, &target_value->bufferram);
10022 __put_user(value.totalswap, &target_value->totalswap);
10023 __put_user(value.freeswap, &target_value->freeswap);
10024 __put_user(value.procs, &target_value->procs);
10025 __put_user(value.totalhigh, &target_value->totalhigh);
10026 __put_user(value.freehigh, &target_value->freehigh);
10027 __put_user(value.mem_unit, &target_value->mem_unit);
10028 unlock_user_struct(target_value, arg1, 1);
10032 #ifdef TARGET_NR_ipc
10033 case TARGET_NR_ipc:
10034 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10036 #ifdef TARGET_NR_semget
10037 case TARGET_NR_semget:
10038 return get_errno(semget(arg1, arg2, arg3));
10040 #ifdef TARGET_NR_semop
10041 case TARGET_NR_semop:
10042 return do_semtimedop(arg1, arg2, arg3, 0, false);
10044 #ifdef TARGET_NR_semtimedop
10045 case TARGET_NR_semtimedop:
10046 return do_semtimedop(arg1, arg2, arg3, arg4, false);
10048 #ifdef TARGET_NR_semtimedop_time64
10049 case TARGET_NR_semtimedop_time64:
10050 return do_semtimedop(arg1, arg2, arg3, arg4, true);
10052 #ifdef TARGET_NR_semctl
10053 case TARGET_NR_semctl:
10054 return do_semctl(arg1, arg2, arg3, arg4);
10056 #ifdef TARGET_NR_msgctl
10057 case TARGET_NR_msgctl:
10058 return do_msgctl(arg1, arg2, arg3);
10060 #ifdef TARGET_NR_msgget
10061 case TARGET_NR_msgget:
10062 return get_errno(msgget(arg1, arg2));
10064 #ifdef TARGET_NR_msgrcv
10065 case TARGET_NR_msgrcv:
10066 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10068 #ifdef TARGET_NR_msgsnd
10069 case TARGET_NR_msgsnd:
10070 return do_msgsnd(arg1, arg2, arg3, arg4);
10072 #ifdef TARGET_NR_shmget
10073 case TARGET_NR_shmget:
10074 return get_errno(shmget(arg1, arg2, arg3));
10076 #ifdef TARGET_NR_shmctl
10077 case TARGET_NR_shmctl:
10078 return do_shmctl(arg1, arg2, arg3);
10080 #ifdef TARGET_NR_shmat
10081 case TARGET_NR_shmat:
10082 return do_shmat(cpu_env, arg1, arg2, arg3);
10084 #ifdef TARGET_NR_shmdt
10085 case TARGET_NR_shmdt:
10086 return do_shmdt(arg1);
10088 case TARGET_NR_fsync:
10089 return get_errno(fsync(arg1));
10090 case TARGET_NR_clone:
10091 /* Linux manages to have three different orderings for its
10092 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10093 * match the kernel's CONFIG_CLONE_* settings.
10094 * Microblaze is further special in that it uses a sixth
10095 * implicit argument to clone for the TLS pointer.
10097 #if defined(TARGET_MICROBLAZE)
10098 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10099 #elif defined(TARGET_CLONE_BACKWARDS)
10100 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10101 #elif defined(TARGET_CLONE_BACKWARDS2)
10102 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10104 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10107 #ifdef __NR_exit_group
10108 /* new thread calls */
10109 case TARGET_NR_exit_group:
10110 preexit_cleanup(cpu_env, arg1);
10111 return get_errno(exit_group(arg1));
10113 case TARGET_NR_setdomainname:
10114 if (!(p = lock_user_string(arg1)))
10115 return -TARGET_EFAULT;
10116 ret = get_errno(setdomainname(p, arg2));
10117 unlock_user(p, arg1, 0);
10119 case TARGET_NR_uname:
10120 /* no need to transcode because we use the linux syscall */
10122 struct new_utsname * buf;
10124 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10125 return -TARGET_EFAULT;
10126 ret = get_errno(sys_uname(buf));
10127 if (!is_error(ret)) {
10128 /* Overwrite the native machine name with whatever is being
10130 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10131 sizeof(buf->machine));
10132 /* Allow the user to override the reported release. */
10133 if (qemu_uname_release && *qemu_uname_release) {
10134 g_strlcpy(buf->release, qemu_uname_release,
10135 sizeof(buf->release));
10138 unlock_user_struct(buf, arg1, 1);
10142 case TARGET_NR_modify_ldt:
10143 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10144 #if !defined(TARGET_X86_64)
10145 case TARGET_NR_vm86:
10146 return do_vm86(cpu_env, arg1, arg2);
10149 #if defined(TARGET_NR_adjtimex)
10150 case TARGET_NR_adjtimex:
10152 struct timex host_buf;
10154 if (target_to_host_timex(&host_buf, arg1) != 0) {
10155 return -TARGET_EFAULT;
10157 ret = get_errno(adjtimex(&host_buf));
10158 if (!is_error(ret)) {
10159 if (host_to_target_timex(arg1, &host_buf) != 0) {
10160 return -TARGET_EFAULT;
10166 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10167 case TARGET_NR_clock_adjtime:
10169 struct timex htx, *phtx = &htx;
10171 if (target_to_host_timex(phtx, arg2) != 0) {
10172 return -TARGET_EFAULT;
10174 ret = get_errno(clock_adjtime(arg1, phtx));
10175 if (!is_error(ret) && phtx) {
10176 if (host_to_target_timex(arg2, phtx) != 0) {
10177 return -TARGET_EFAULT;
10183 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10184 case TARGET_NR_clock_adjtime64:
10188 if (target_to_host_timex64(&htx, arg2) != 0) {
10189 return -TARGET_EFAULT;
10191 ret = get_errno(clock_adjtime(arg1, &htx));
10192 if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10193 return -TARGET_EFAULT;
10198 case TARGET_NR_getpgid:
10199 return get_errno(getpgid(arg1));
10200 case TARGET_NR_fchdir:
10201 return get_errno(fchdir(arg1));
10202 case TARGET_NR_personality:
10203 return get_errno(personality(arg1));
10204 #ifdef TARGET_NR__llseek /* Not on alpha */
10205 case TARGET_NR__llseek:
10208 #if !defined(__NR_llseek)
10209 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10211 ret = get_errno(res);
10216 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10218 if ((ret == 0) && put_user_s64(res, arg4)) {
10219 return -TARGET_EFAULT;
10224 #ifdef TARGET_NR_getdents
10225 case TARGET_NR_getdents:
10226 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10227 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10229 struct target_dirent *target_dirp;
10230 struct linux_dirent *dirp;
10231 abi_long count = arg3;
10233 dirp = g_try_malloc(count);
10235 return -TARGET_ENOMEM;
10238 ret = get_errno(sys_getdents(arg1, dirp, count));
10239 if (!is_error(ret)) {
10240 struct linux_dirent *de;
10241 struct target_dirent *tde;
10243 int reclen, treclen;
10244 int count1, tnamelen;
10248 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10249 return -TARGET_EFAULT;
10252 reclen = de->d_reclen;
10253 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10254 assert(tnamelen >= 0);
10255 treclen = tnamelen + offsetof(struct target_dirent, d_name);
10256 assert(count1 + treclen <= count);
10257 tde->d_reclen = tswap16(treclen);
10258 tde->d_ino = tswapal(de->d_ino);
10259 tde->d_off = tswapal(de->d_off);
10260 memcpy(tde->d_name, de->d_name, tnamelen);
10261 de = (struct linux_dirent *)((char *)de + reclen);
10263 tde = (struct target_dirent *)((char *)tde + treclen);
10267 unlock_user(target_dirp, arg2, ret);
10273 struct linux_dirent *dirp;
10274 abi_long count = arg3;
10276 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10277 return -TARGET_EFAULT;
10278 ret = get_errno(sys_getdents(arg1, dirp, count));
10279 if (!is_error(ret)) {
10280 struct linux_dirent *de;
10285 reclen = de->d_reclen;
10288 de->d_reclen = tswap16(reclen);
10289 tswapls(&de->d_ino);
10290 tswapls(&de->d_off);
10291 de = (struct linux_dirent *)((char *)de + reclen);
10295 unlock_user(dirp, arg2, ret);
10299 /* Implement getdents in terms of getdents64 */
10301 struct linux_dirent64 *dirp;
10302 abi_long count = arg3;
10304 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10306 return -TARGET_EFAULT;
10308 ret = get_errno(sys_getdents64(arg1, dirp, count));
10309 if (!is_error(ret)) {
10310 /* Convert the dirent64 structs to target dirent. We do this
10311 * in-place, since we can guarantee that a target_dirent is no
10312 * larger than a dirent64; however this means we have to be
10313 * careful to read everything before writing in the new format.
10315 struct linux_dirent64 *de;
10316 struct target_dirent *tde;
10321 tde = (struct target_dirent *)dirp;
10323 int namelen, treclen;
10324 int reclen = de->d_reclen;
10325 uint64_t ino = de->d_ino;
10326 int64_t off = de->d_off;
10327 uint8_t type = de->d_type;
10329 namelen = strlen(de->d_name);
10330 treclen = offsetof(struct target_dirent, d_name)
10332 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10334 memmove(tde->d_name, de->d_name, namelen + 1);
10335 tde->d_ino = tswapal(ino);
10336 tde->d_off = tswapal(off);
10337 tde->d_reclen = tswap16(treclen);
10338 /* The target_dirent type is in what was formerly a padding
10339 * byte at the end of the structure:
10341 *(((char *)tde) + treclen - 1) = type;
10343 de = (struct linux_dirent64 *)((char *)de + reclen);
10344 tde = (struct target_dirent *)((char *)tde + treclen);
10350 unlock_user(dirp, arg2, ret);
10354 #endif /* TARGET_NR_getdents */
10355 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10356 case TARGET_NR_getdents64:
10358 struct linux_dirent64 *dirp;
10359 abi_long count = arg3;
10360 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10361 return -TARGET_EFAULT;
10362 ret = get_errno(sys_getdents64(arg1, dirp, count));
10363 if (!is_error(ret)) {
10364 struct linux_dirent64 *de;
10369 reclen = de->d_reclen;
10372 de->d_reclen = tswap16(reclen);
10373 tswap64s((uint64_t *)&de->d_ino);
10374 tswap64s((uint64_t *)&de->d_off);
10375 de = (struct linux_dirent64 *)((char *)de + reclen);
10379 unlock_user(dirp, arg2, ret);
10382 #endif /* TARGET_NR_getdents64 */
10383 #if defined(TARGET_NR__newselect)
10384 case TARGET_NR__newselect:
10385 return do_select(arg1, arg2, arg3, arg4, arg5);
10387 #ifdef TARGET_NR_poll
10388 case TARGET_NR_poll:
10389 return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10391 #ifdef TARGET_NR_ppoll
10392 case TARGET_NR_ppoll:
10393 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10395 #ifdef TARGET_NR_ppoll_time64
10396 case TARGET_NR_ppoll_time64:
10397 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10399 case TARGET_NR_flock:
10400 /* NOTE: the flock constant seems to be the same for every
10402 return get_errno(safe_flock(arg1, arg2));
10403 case TARGET_NR_readv:
10405 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10407 ret = get_errno(safe_readv(arg1, vec, arg3));
10408 unlock_iovec(vec, arg2, arg3, 1);
10410 ret = -host_to_target_errno(errno);
10414 case TARGET_NR_writev:
10416 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10418 ret = get_errno(safe_writev(arg1, vec, arg3));
10419 unlock_iovec(vec, arg2, arg3, 0);
10421 ret = -host_to_target_errno(errno);
10425 #if defined(TARGET_NR_preadv)
10426 case TARGET_NR_preadv:
10428 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10430 unsigned long low, high;
10432 target_to_host_low_high(arg4, arg5, &low, &high);
10433 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10434 unlock_iovec(vec, arg2, arg3, 1);
10436 ret = -host_to_target_errno(errno);
10441 #if defined(TARGET_NR_pwritev)
10442 case TARGET_NR_pwritev:
10444 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10446 unsigned long low, high;
10448 target_to_host_low_high(arg4, arg5, &low, &high);
10449 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10450 unlock_iovec(vec, arg2, arg3, 0);
10452 ret = -host_to_target_errno(errno);
10457 case TARGET_NR_getsid:
10458 return get_errno(getsid(arg1));
10459 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10460 case TARGET_NR_fdatasync:
10461 return get_errno(fdatasync(arg1));
10463 case TARGET_NR_sched_getaffinity:
10465 unsigned int mask_size;
10466 unsigned long *mask;
10469 * sched_getaffinity needs multiples of ulong, so need to take
10470 * care of mismatches between target ulong and host ulong sizes.
10472 if (arg2 & (sizeof(abi_ulong) - 1)) {
10473 return -TARGET_EINVAL;
10475 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10477 mask = alloca(mask_size);
10478 memset(mask, 0, mask_size);
10479 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10481 if (!is_error(ret)) {
10483 /* More data returned than the caller's buffer will fit.
10484 * This only happens if sizeof(abi_long) < sizeof(long)
10485 * and the caller passed us a buffer holding an odd number
10486 * of abi_longs. If the host kernel is actually using the
10487 * extra 4 bytes then fail EINVAL; otherwise we can just
10488 * ignore them and only copy the interesting part.
10490 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10491 if (numcpus > arg2 * 8) {
10492 return -TARGET_EINVAL;
10497 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10498 return -TARGET_EFAULT;
10503 case TARGET_NR_sched_setaffinity:
10505 unsigned int mask_size;
10506 unsigned long *mask;
10509 * sched_setaffinity needs multiples of ulong, so need to take
10510 * care of mismatches between target ulong and host ulong sizes.
10512 if (arg2 & (sizeof(abi_ulong) - 1)) {
10513 return -TARGET_EINVAL;
10515 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10516 mask = alloca(mask_size);
10518 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10523 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10525 case TARGET_NR_getcpu:
10527 unsigned cpu, node;
10528 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10529 arg2 ? &node : NULL,
10531 if (is_error(ret)) {
10534 if (arg1 && put_user_u32(cpu, arg1)) {
10535 return -TARGET_EFAULT;
10537 if (arg2 && put_user_u32(node, arg2)) {
10538 return -TARGET_EFAULT;
10542 case TARGET_NR_sched_setparam:
10544 struct sched_param *target_schp;
10545 struct sched_param schp;
10548 return -TARGET_EINVAL;
10550 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10551 return -TARGET_EFAULT;
10552 schp.sched_priority = tswap32(target_schp->sched_priority);
10553 unlock_user_struct(target_schp, arg2, 0);
10554 return get_errno(sched_setparam(arg1, &schp));
10556 case TARGET_NR_sched_getparam:
10558 struct sched_param *target_schp;
10559 struct sched_param schp;
10562 return -TARGET_EINVAL;
10564 ret = get_errno(sched_getparam(arg1, &schp));
10565 if (!is_error(ret)) {
10566 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10567 return -TARGET_EFAULT;
10568 target_schp->sched_priority = tswap32(schp.sched_priority);
10569 unlock_user_struct(target_schp, arg2, 1);
10573 case TARGET_NR_sched_setscheduler:
10575 struct sched_param *target_schp;
10576 struct sched_param schp;
10578 return -TARGET_EINVAL;
10580 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10581 return -TARGET_EFAULT;
10582 schp.sched_priority = tswap32(target_schp->sched_priority);
10583 unlock_user_struct(target_schp, arg3, 0);
10584 return get_errno(sched_setscheduler(arg1, arg2, &schp));
10586 case TARGET_NR_sched_getscheduler:
10587 return get_errno(sched_getscheduler(arg1));
10588 case TARGET_NR_sched_yield:
10589 return get_errno(sched_yield());
10590 case TARGET_NR_sched_get_priority_max:
10591 return get_errno(sched_get_priority_max(arg1));
10592 case TARGET_NR_sched_get_priority_min:
10593 return get_errno(sched_get_priority_min(arg1));
10594 #ifdef TARGET_NR_sched_rr_get_interval
10595 case TARGET_NR_sched_rr_get_interval:
10597 struct timespec ts;
10598 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10599 if (!is_error(ret)) {
10600 ret = host_to_target_timespec(arg2, &ts);
10605 #ifdef TARGET_NR_sched_rr_get_interval_time64
10606 case TARGET_NR_sched_rr_get_interval_time64:
10608 struct timespec ts;
10609 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10610 if (!is_error(ret)) {
10611 ret = host_to_target_timespec64(arg2, &ts);
10616 #if defined(TARGET_NR_nanosleep)
10617 case TARGET_NR_nanosleep:
10619 struct timespec req, rem;
10620 target_to_host_timespec(&req, arg1);
10621 ret = get_errno(safe_nanosleep(&req, &rem));
10622 if (is_error(ret) && arg2) {
10623 host_to_target_timespec(arg2, &rem);
10628 case TARGET_NR_prctl:
10630 case PR_GET_PDEATHSIG:
10633 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10634 if (!is_error(ret) && arg2
10635 && put_user_s32(deathsig, arg2)) {
10636 return -TARGET_EFAULT;
10643 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10645 return -TARGET_EFAULT;
10647 ret = get_errno(prctl(arg1, (unsigned long)name,
10648 arg3, arg4, arg5));
10649 unlock_user(name, arg2, 16);
10654 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10656 return -TARGET_EFAULT;
10658 ret = get_errno(prctl(arg1, (unsigned long)name,
10659 arg3, arg4, arg5));
10660 unlock_user(name, arg2, 0);
10665 case TARGET_PR_GET_FP_MODE:
10667 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10669 if (env->CP0_Status & (1 << CP0St_FR)) {
10670 ret |= TARGET_PR_FP_MODE_FR;
10672 if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10673 ret |= TARGET_PR_FP_MODE_FRE;
10677 case TARGET_PR_SET_FP_MODE:
10679 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10680 bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10681 bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10682 bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10683 bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10685 const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10686 TARGET_PR_FP_MODE_FRE;
10688 /* If nothing to change, return right away, successfully. */
10689 if (old_fr == new_fr && old_fre == new_fre) {
10692 /* Check the value is valid */
10693 if (arg2 & ~known_bits) {
10694 return -TARGET_EOPNOTSUPP;
10696 /* Setting FRE without FR is not supported. */
10697 if (new_fre && !new_fr) {
10698 return -TARGET_EOPNOTSUPP;
10700 if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10701 /* FR1 is not supported */
10702 return -TARGET_EOPNOTSUPP;
10704 if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10705 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10706 /* cannot set FR=0 */
10707 return -TARGET_EOPNOTSUPP;
10709 if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10710 /* Cannot set FRE=1 */
10711 return -TARGET_EOPNOTSUPP;
10715 fpr_t *fpr = env->active_fpu.fpr;
10716 for (i = 0; i < 32 ; i += 2) {
10717 if (!old_fr && new_fr) {
10718 fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10719 } else if (old_fr && !new_fr) {
10720 fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10725 env->CP0_Status |= (1 << CP0St_FR);
10726 env->hflags |= MIPS_HFLAG_F64;
10728 env->CP0_Status &= ~(1 << CP0St_FR);
10729 env->hflags &= ~MIPS_HFLAG_F64;
10732 env->CP0_Config5 |= (1 << CP0C5_FRE);
10733 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10734 env->hflags |= MIPS_HFLAG_FRE;
10737 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10738 env->hflags &= ~MIPS_HFLAG_FRE;
10744 #ifdef TARGET_AARCH64
10745 case TARGET_PR_SVE_SET_VL:
10747 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10748 * PR_SVE_VL_INHERIT. Note the kernel definition
10749 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10750 * even though the current architectural maximum is VQ=16.
10752 ret = -TARGET_EINVAL;
10753 if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10754 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10755 CPUARMState *env = cpu_env;
10756 ARMCPU *cpu = env_archcpu(env);
10757 uint32_t vq, old_vq;
10759 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10760 vq = MAX(arg2 / 16, 1);
10761 vq = MIN(vq, cpu->sve_max_vq);
10764 aarch64_sve_narrow_vq(env, vq);
10766 env->vfp.zcr_el[1] = vq - 1;
10767 arm_rebuild_hflags(env);
10771 case TARGET_PR_SVE_GET_VL:
10772 ret = -TARGET_EINVAL;
10774 ARMCPU *cpu = env_archcpu(cpu_env);
10775 if (cpu_isar_feature(aa64_sve, cpu)) {
10776 ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10780 case TARGET_PR_PAC_RESET_KEYS:
10782 CPUARMState *env = cpu_env;
10783 ARMCPU *cpu = env_archcpu(env);
10785 if (arg3 || arg4 || arg5) {
10786 return -TARGET_EINVAL;
10788 if (cpu_isar_feature(aa64_pauth, cpu)) {
10789 int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10790 TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10791 TARGET_PR_PAC_APGAKEY);
10797 } else if (arg2 & ~all) {
10798 return -TARGET_EINVAL;
10800 if (arg2 & TARGET_PR_PAC_APIAKEY) {
10801 ret |= qemu_guest_getrandom(&env->keys.apia,
10802 sizeof(ARMPACKey), &err);
10804 if (arg2 & TARGET_PR_PAC_APIBKEY) {
10805 ret |= qemu_guest_getrandom(&env->keys.apib,
10806 sizeof(ARMPACKey), &err);
10808 if (arg2 & TARGET_PR_PAC_APDAKEY) {
10809 ret |= qemu_guest_getrandom(&env->keys.apda,
10810 sizeof(ARMPACKey), &err);
10812 if (arg2 & TARGET_PR_PAC_APDBKEY) {
10813 ret |= qemu_guest_getrandom(&env->keys.apdb,
10814 sizeof(ARMPACKey), &err);
10816 if (arg2 & TARGET_PR_PAC_APGAKEY) {
10817 ret |= qemu_guest_getrandom(&env->keys.apga,
10818 sizeof(ARMPACKey), &err);
10822 * Some unknown failure in the crypto. The best
10823 * we can do is log it and fail the syscall.
10824 * The real syscall cannot fail this way.
10826 qemu_log_mask(LOG_UNIMP,
10827 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10828 error_get_pretty(err));
10830 return -TARGET_EIO;
10835 return -TARGET_EINVAL;
10836 case TARGET_PR_SET_TAGGED_ADDR_CTRL:
10838 abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
10839 CPUARMState *env = cpu_env;
10840 ARMCPU *cpu = env_archcpu(env);
10842 if (cpu_isar_feature(aa64_mte, cpu)) {
10843 valid_mask |= TARGET_PR_MTE_TCF_MASK;
10844 valid_mask |= TARGET_PR_MTE_TAG_MASK;
10847 if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
10848 return -TARGET_EINVAL;
10850 env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
10852 if (cpu_isar_feature(aa64_mte, cpu)) {
10853 switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
10854 case TARGET_PR_MTE_TCF_NONE:
10855 case TARGET_PR_MTE_TCF_SYNC:
10856 case TARGET_PR_MTE_TCF_ASYNC:
10863 * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
10864 * Note that the syscall values are consistent with hw.
10866 env->cp15.sctlr_el[1] =
10867 deposit64(env->cp15.sctlr_el[1], 38, 2,
10868 arg2 >> TARGET_PR_MTE_TCF_SHIFT);
10871 * Write PR_MTE_TAG to GCR_EL1[Exclude].
10872 * Note that the syscall uses an include mask,
10873 * and hardware uses an exclude mask -- invert.
10875 env->cp15.gcr_el1 =
10876 deposit64(env->cp15.gcr_el1, 0, 16,
10877 ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
10878 arm_rebuild_hflags(env);
10882 case TARGET_PR_GET_TAGGED_ADDR_CTRL:
10885 CPUARMState *env = cpu_env;
10886 ARMCPU *cpu = env_archcpu(env);
10888 if (arg2 || arg3 || arg4 || arg5) {
10889 return -TARGET_EINVAL;
10891 if (env->tagged_addr_enable) {
10892 ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
10894 if (cpu_isar_feature(aa64_mte, cpu)) {
10896 ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
10897 << TARGET_PR_MTE_TCF_SHIFT);
10898 ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
10899 ~env->cp15.gcr_el1);
10903 #endif /* AARCH64 */
10904 case PR_GET_SECCOMP:
10905 case PR_SET_SECCOMP:
10906 /* Disable seccomp to prevent the target disabling syscalls we
10908 return -TARGET_EINVAL;
10910 /* Most prctl options have no pointer arguments */
10911 return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10914 #ifdef TARGET_NR_arch_prctl
10915 case TARGET_NR_arch_prctl:
10916 return do_arch_prctl(cpu_env, arg1, arg2);
10918 #ifdef TARGET_NR_pread64
10919 case TARGET_NR_pread64:
10920 if (regpairs_aligned(cpu_env, num)) {
10924 if (arg2 == 0 && arg3 == 0) {
10925 /* Special-case NULL buffer and zero length, which should succeed */
10928 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10930 return -TARGET_EFAULT;
10933 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10934 unlock_user(p, arg2, ret);
10936 case TARGET_NR_pwrite64:
10937 if (regpairs_aligned(cpu_env, num)) {
10941 if (arg2 == 0 && arg3 == 0) {
10942 /* Special-case NULL buffer and zero length, which should succeed */
10945 p = lock_user(VERIFY_READ, arg2, arg3, 1);
10947 return -TARGET_EFAULT;
10950 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10951 unlock_user(p, arg2, 0);
10954 case TARGET_NR_getcwd:
10955 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10956 return -TARGET_EFAULT;
10957 ret = get_errno(sys_getcwd1(p, arg2));
10958 unlock_user(p, arg1, ret);
10960 case TARGET_NR_capget:
10961 case TARGET_NR_capset:
10963 struct target_user_cap_header *target_header;
10964 struct target_user_cap_data *target_data = NULL;
10965 struct __user_cap_header_struct header;
10966 struct __user_cap_data_struct data[2];
10967 struct __user_cap_data_struct *dataptr = NULL;
10968 int i, target_datalen;
10969 int data_items = 1;
10971 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10972 return -TARGET_EFAULT;
10974 header.version = tswap32(target_header->version);
10975 header.pid = tswap32(target_header->pid);
10977 if (header.version != _LINUX_CAPABILITY_VERSION) {
10978 /* Version 2 and up takes pointer to two user_data structs */
10982 target_datalen = sizeof(*target_data) * data_items;
10985 if (num == TARGET_NR_capget) {
10986 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10988 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10990 if (!target_data) {
10991 unlock_user_struct(target_header, arg1, 0);
10992 return -TARGET_EFAULT;
10995 if (num == TARGET_NR_capset) {
10996 for (i = 0; i < data_items; i++) {
10997 data[i].effective = tswap32(target_data[i].effective);
10998 data[i].permitted = tswap32(target_data[i].permitted);
10999 data[i].inheritable = tswap32(target_data[i].inheritable);
11006 if (num == TARGET_NR_capget) {
11007 ret = get_errno(capget(&header, dataptr));
11009 ret = get_errno(capset(&header, dataptr));
11012 /* The kernel always updates version for both capget and capset */
11013 target_header->version = tswap32(header.version);
11014 unlock_user_struct(target_header, arg1, 1);
11017 if (num == TARGET_NR_capget) {
11018 for (i = 0; i < data_items; i++) {
11019 target_data[i].effective = tswap32(data[i].effective);
11020 target_data[i].permitted = tswap32(data[i].permitted);
11021 target_data[i].inheritable = tswap32(data[i].inheritable);
11023 unlock_user(target_data, arg2, target_datalen);
11025 unlock_user(target_data, arg2, 0);
11030 case TARGET_NR_sigaltstack:
11031 return do_sigaltstack(arg1, arg2, cpu_env);
11033 #ifdef CONFIG_SENDFILE
11034 #ifdef TARGET_NR_sendfile
11035 case TARGET_NR_sendfile:
11037 off_t *offp = NULL;
11040 ret = get_user_sal(off, arg3);
11041 if (is_error(ret)) {
11046 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11047 if (!is_error(ret) && arg3) {
11048 abi_long ret2 = put_user_sal(off, arg3);
11049 if (is_error(ret2)) {
11056 #ifdef TARGET_NR_sendfile64
11057 case TARGET_NR_sendfile64:
11059 off_t *offp = NULL;
11062 ret = get_user_s64(off, arg3);
11063 if (is_error(ret)) {
11068 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11069 if (!is_error(ret) && arg3) {
11070 abi_long ret2 = put_user_s64(off, arg3);
11071 if (is_error(ret2)) {
11079 #ifdef TARGET_NR_vfork
11080 case TARGET_NR_vfork:
11081 return get_errno(do_fork(cpu_env,
11082 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11085 #ifdef TARGET_NR_ugetrlimit
11086 case TARGET_NR_ugetrlimit:
11088 struct rlimit rlim;
11089 int resource = target_to_host_resource(arg1);
11090 ret = get_errno(getrlimit(resource, &rlim));
11091 if (!is_error(ret)) {
11092 struct target_rlimit *target_rlim;
11093 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11094 return -TARGET_EFAULT;
11095 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11096 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11097 unlock_user_struct(target_rlim, arg2, 1);
11102 #ifdef TARGET_NR_truncate64
11103 case TARGET_NR_truncate64:
11104 if (!(p = lock_user_string(arg1)))
11105 return -TARGET_EFAULT;
11106 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11107 unlock_user(p, arg1, 0);
11110 #ifdef TARGET_NR_ftruncate64
11111 case TARGET_NR_ftruncate64:
11112 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11114 #ifdef TARGET_NR_stat64
11115 case TARGET_NR_stat64:
11116 if (!(p = lock_user_string(arg1))) {
11117 return -TARGET_EFAULT;
11119 ret = get_errno(stat(path(p), &st));
11120 unlock_user(p, arg1, 0);
11121 if (!is_error(ret))
11122 ret = host_to_target_stat64(cpu_env, arg2, &st);
11125 #ifdef TARGET_NR_lstat64
11126 case TARGET_NR_lstat64:
11127 if (!(p = lock_user_string(arg1))) {
11128 return -TARGET_EFAULT;
11130 ret = get_errno(lstat(path(p), &st));
11131 unlock_user(p, arg1, 0);
11132 if (!is_error(ret))
11133 ret = host_to_target_stat64(cpu_env, arg2, &st);
11136 #ifdef TARGET_NR_fstat64
11137 case TARGET_NR_fstat64:
11138 ret = get_errno(fstat(arg1, &st));
11139 if (!is_error(ret))
11140 ret = host_to_target_stat64(cpu_env, arg2, &st);
11143 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11144 #ifdef TARGET_NR_fstatat64
11145 case TARGET_NR_fstatat64:
11147 #ifdef TARGET_NR_newfstatat
11148 case TARGET_NR_newfstatat:
11150 if (!(p = lock_user_string(arg2))) {
11151 return -TARGET_EFAULT;
11153 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11154 unlock_user(p, arg2, 0);
11155 if (!is_error(ret))
11156 ret = host_to_target_stat64(cpu_env, arg3, &st);
11159 #if defined(TARGET_NR_statx)
11160 case TARGET_NR_statx:
11162 struct target_statx *target_stx;
11166 p = lock_user_string(arg2);
11168 return -TARGET_EFAULT;
11170 #if defined(__NR_statx)
11173 * It is assumed that struct statx is architecture independent.
11175 struct target_statx host_stx;
11178 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11179 if (!is_error(ret)) {
11180 if (host_to_target_statx(&host_stx, arg5) != 0) {
11181 unlock_user(p, arg2, 0);
11182 return -TARGET_EFAULT;
11186 if (ret != -TARGET_ENOSYS) {
11187 unlock_user(p, arg2, 0);
11192 ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11193 unlock_user(p, arg2, 0);
11195 if (!is_error(ret)) {
11196 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11197 return -TARGET_EFAULT;
11199 memset(target_stx, 0, sizeof(*target_stx));
11200 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11201 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11202 __put_user(st.st_ino, &target_stx->stx_ino);
11203 __put_user(st.st_mode, &target_stx->stx_mode);
11204 __put_user(st.st_uid, &target_stx->stx_uid);
11205 __put_user(st.st_gid, &target_stx->stx_gid);
11206 __put_user(st.st_nlink, &target_stx->stx_nlink);
11207 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11208 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11209 __put_user(st.st_size, &target_stx->stx_size);
11210 __put_user(st.st_blksize, &target_stx->stx_blksize);
11211 __put_user(st.st_blocks, &target_stx->stx_blocks);
11212 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11213 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11214 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11215 unlock_user_struct(target_stx, arg5, 1);
11220 #ifdef TARGET_NR_lchown
11221 case TARGET_NR_lchown:
11222 if (!(p = lock_user_string(arg1)))
11223 return -TARGET_EFAULT;
11224 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11225 unlock_user(p, arg1, 0);
11228 #ifdef TARGET_NR_getuid
11229 case TARGET_NR_getuid:
11230 return get_errno(high2lowuid(getuid()));
11232 #ifdef TARGET_NR_getgid
11233 case TARGET_NR_getgid:
11234 return get_errno(high2lowgid(getgid()));
11236 #ifdef TARGET_NR_geteuid
11237 case TARGET_NR_geteuid:
11238 return get_errno(high2lowuid(geteuid()));
11240 #ifdef TARGET_NR_getegid
11241 case TARGET_NR_getegid:
11242 return get_errno(high2lowgid(getegid()));
11244 case TARGET_NR_setreuid:
11245 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11246 case TARGET_NR_setregid:
11247 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11248 case TARGET_NR_getgroups:
11250 int gidsetsize = arg1;
11251 target_id *target_grouplist;
11255 grouplist = alloca(gidsetsize * sizeof(gid_t));
11256 ret = get_errno(getgroups(gidsetsize, grouplist));
11257 if (gidsetsize == 0)
11259 if (!is_error(ret)) {
11260 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11261 if (!target_grouplist)
11262 return -TARGET_EFAULT;
11263 for(i = 0;i < ret; i++)
11264 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11265 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11269 case TARGET_NR_setgroups:
11271 int gidsetsize = arg1;
11272 target_id *target_grouplist;
11273 gid_t *grouplist = NULL;
11276 grouplist = alloca(gidsetsize * sizeof(gid_t));
11277 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11278 if (!target_grouplist) {
11279 return -TARGET_EFAULT;
11281 for (i = 0; i < gidsetsize; i++) {
11282 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11284 unlock_user(target_grouplist, arg2, 0);
11286 return get_errno(setgroups(gidsetsize, grouplist));
11288 case TARGET_NR_fchown:
11289 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11290 #if defined(TARGET_NR_fchownat)
11291 case TARGET_NR_fchownat:
11292 if (!(p = lock_user_string(arg2)))
11293 return -TARGET_EFAULT;
11294 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11295 low2highgid(arg4), arg5));
11296 unlock_user(p, arg2, 0);
11299 #ifdef TARGET_NR_setresuid
11300 case TARGET_NR_setresuid:
11301 return get_errno(sys_setresuid(low2highuid(arg1),
11303 low2highuid(arg3)));
11305 #ifdef TARGET_NR_getresuid
11306 case TARGET_NR_getresuid:
11308 uid_t ruid, euid, suid;
11309 ret = get_errno(getresuid(&ruid, &euid, &suid));
11310 if (!is_error(ret)) {
11311 if (put_user_id(high2lowuid(ruid), arg1)
11312 || put_user_id(high2lowuid(euid), arg2)
11313 || put_user_id(high2lowuid(suid), arg3))
11314 return -TARGET_EFAULT;
11319 #ifdef TARGET_NR_getresgid
11320 case TARGET_NR_setresgid:
11321 return get_errno(sys_setresgid(low2highgid(arg1),
11323 low2highgid(arg3)));
11325 #ifdef TARGET_NR_getresgid
11326 case TARGET_NR_getresgid:
11328 gid_t rgid, egid, sgid;
11329 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11330 if (!is_error(ret)) {
11331 if (put_user_id(high2lowgid(rgid), arg1)
11332 || put_user_id(high2lowgid(egid), arg2)
11333 || put_user_id(high2lowgid(sgid), arg3))
11334 return -TARGET_EFAULT;
11339 #ifdef TARGET_NR_chown
11340 case TARGET_NR_chown:
11341 if (!(p = lock_user_string(arg1)))
11342 return -TARGET_EFAULT;
11343 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11344 unlock_user(p, arg1, 0);
11347 case TARGET_NR_setuid:
11348 return get_errno(sys_setuid(low2highuid(arg1)));
11349 case TARGET_NR_setgid:
11350 return get_errno(sys_setgid(low2highgid(arg1)));
11351 case TARGET_NR_setfsuid:
11352 return get_errno(setfsuid(arg1));
11353 case TARGET_NR_setfsgid:
11354 return get_errno(setfsgid(arg1));
11356 #ifdef TARGET_NR_lchown32
11357 case TARGET_NR_lchown32:
11358 if (!(p = lock_user_string(arg1)))
11359 return -TARGET_EFAULT;
11360 ret = get_errno(lchown(p, arg2, arg3));
11361 unlock_user(p, arg1, 0);
11364 #ifdef TARGET_NR_getuid32
11365 case TARGET_NR_getuid32:
11366 return get_errno(getuid());
11369 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11370 /* Alpha specific */
11371 case TARGET_NR_getxuid:
11375 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11377 return get_errno(getuid());
11379 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11380 /* Alpha specific */
11381 case TARGET_NR_getxgid:
11385 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11387 return get_errno(getgid());
11389 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11390 /* Alpha specific */
11391 case TARGET_NR_osf_getsysinfo:
11392 ret = -TARGET_EOPNOTSUPP;
11394 case TARGET_GSI_IEEE_FP_CONTROL:
11396 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11397 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11399 swcr &= ~SWCR_STATUS_MASK;
11400 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11402 if (put_user_u64 (swcr, arg2))
11403 return -TARGET_EFAULT;
11408 /* case GSI_IEEE_STATE_AT_SIGNAL:
11409 -- Not implemented in linux kernel.
11411 -- Retrieves current unaligned access state; not much used.
11412 case GSI_PROC_TYPE:
11413 -- Retrieves implver information; surely not used.
11414 case GSI_GET_HWRPB:
11415 -- Grabs a copy of the HWRPB; surely not used.
11420 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11421 /* Alpha specific */
11422 case TARGET_NR_osf_setsysinfo:
11423 ret = -TARGET_EOPNOTSUPP;
11425 case TARGET_SSI_IEEE_FP_CONTROL:
11427 uint64_t swcr, fpcr;
11429 if (get_user_u64 (swcr, arg2)) {
11430 return -TARGET_EFAULT;
11434 * The kernel calls swcr_update_status to update the
11435 * status bits from the fpcr at every point that it
11436 * could be queried. Therefore, we store the status
11437 * bits only in FPCR.
11439 ((CPUAlphaState *)cpu_env)->swcr
11440 = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11442 fpcr = cpu_alpha_load_fpcr(cpu_env);
11443 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11444 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11445 cpu_alpha_store_fpcr(cpu_env, fpcr);
11450 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11452 uint64_t exc, fpcr, fex;
11454 if (get_user_u64(exc, arg2)) {
11455 return -TARGET_EFAULT;
11457 exc &= SWCR_STATUS_MASK;
11458 fpcr = cpu_alpha_load_fpcr(cpu_env);
11460 /* Old exceptions are not signaled. */
11461 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11463 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11464 fex &= ((CPUArchState *)cpu_env)->swcr;
11466 /* Update the hardware fpcr. */
11467 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11468 cpu_alpha_store_fpcr(cpu_env, fpcr);
11471 int si_code = TARGET_FPE_FLTUNK;
11472 target_siginfo_t info;
11474 if (fex & SWCR_TRAP_ENABLE_DNO) {
11475 si_code = TARGET_FPE_FLTUND;
11477 if (fex & SWCR_TRAP_ENABLE_INE) {
11478 si_code = TARGET_FPE_FLTRES;
11480 if (fex & SWCR_TRAP_ENABLE_UNF) {
11481 si_code = TARGET_FPE_FLTUND;
11483 if (fex & SWCR_TRAP_ENABLE_OVF) {
11484 si_code = TARGET_FPE_FLTOVF;
11486 if (fex & SWCR_TRAP_ENABLE_DZE) {
11487 si_code = TARGET_FPE_FLTDIV;
11489 if (fex & SWCR_TRAP_ENABLE_INV) {
11490 si_code = TARGET_FPE_FLTINV;
11493 info.si_signo = SIGFPE;
11495 info.si_code = si_code;
11496 info._sifields._sigfault._addr
11497 = ((CPUArchState *)cpu_env)->pc;
11498 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11499 QEMU_SI_FAULT, &info);
11505 /* case SSI_NVPAIRS:
11506 -- Used with SSIN_UACPROC to enable unaligned accesses.
11507 case SSI_IEEE_STATE_AT_SIGNAL:
11508 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11509 -- Not implemented in linux kernel
11514 #ifdef TARGET_NR_osf_sigprocmask
11515 /* Alpha specific. */
11516 case TARGET_NR_osf_sigprocmask:
11520 sigset_t set, oldset;
11523 case TARGET_SIG_BLOCK:
11526 case TARGET_SIG_UNBLOCK:
11529 case TARGET_SIG_SETMASK:
11533 return -TARGET_EINVAL;
11536 target_to_host_old_sigset(&set, &mask);
11537 ret = do_sigprocmask(how, &set, &oldset);
11539 host_to_target_old_sigset(&mask, &oldset);
11546 #ifdef TARGET_NR_getgid32
11547 case TARGET_NR_getgid32:
11548 return get_errno(getgid());
11550 #ifdef TARGET_NR_geteuid32
11551 case TARGET_NR_geteuid32:
11552 return get_errno(geteuid());
11554 #ifdef TARGET_NR_getegid32
11555 case TARGET_NR_getegid32:
11556 return get_errno(getegid());
11558 #ifdef TARGET_NR_setreuid32
11559 case TARGET_NR_setreuid32:
11560 return get_errno(setreuid(arg1, arg2));
11562 #ifdef TARGET_NR_setregid32
11563 case TARGET_NR_setregid32:
11564 return get_errno(setregid(arg1, arg2));
11566 #ifdef TARGET_NR_getgroups32
11567 case TARGET_NR_getgroups32:
11569 int gidsetsize = arg1;
11570 uint32_t *target_grouplist;
11574 grouplist = alloca(gidsetsize * sizeof(gid_t));
11575 ret = get_errno(getgroups(gidsetsize, grouplist));
11576 if (gidsetsize == 0)
11578 if (!is_error(ret)) {
11579 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11580 if (!target_grouplist) {
11581 return -TARGET_EFAULT;
11583 for(i = 0;i < ret; i++)
11584 target_grouplist[i] = tswap32(grouplist[i]);
11585 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11590 #ifdef TARGET_NR_setgroups32
11591 case TARGET_NR_setgroups32:
11593 int gidsetsize = arg1;
11594 uint32_t *target_grouplist;
11598 grouplist = alloca(gidsetsize * sizeof(gid_t));
11599 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11600 if (!target_grouplist) {
11601 return -TARGET_EFAULT;
11603 for(i = 0;i < gidsetsize; i++)
11604 grouplist[i] = tswap32(target_grouplist[i]);
11605 unlock_user(target_grouplist, arg2, 0);
11606 return get_errno(setgroups(gidsetsize, grouplist));
11609 #ifdef TARGET_NR_fchown32
11610 case TARGET_NR_fchown32:
11611 return get_errno(fchown(arg1, arg2, arg3));
11613 #ifdef TARGET_NR_setresuid32
11614 case TARGET_NR_setresuid32:
11615 return get_errno(sys_setresuid(arg1, arg2, arg3));
11617 #ifdef TARGET_NR_getresuid32
11618 case TARGET_NR_getresuid32:
11620 uid_t ruid, euid, suid;
11621 ret = get_errno(getresuid(&ruid, &euid, &suid));
11622 if (!is_error(ret)) {
11623 if (put_user_u32(ruid, arg1)
11624 || put_user_u32(euid, arg2)
11625 || put_user_u32(suid, arg3))
11626 return -TARGET_EFAULT;
11631 #ifdef TARGET_NR_setresgid32
11632 case TARGET_NR_setresgid32:
11633 return get_errno(sys_setresgid(arg1, arg2, arg3));
11635 #ifdef TARGET_NR_getresgid32
11636 case TARGET_NR_getresgid32:
11638 gid_t rgid, egid, sgid;
11639 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11640 if (!is_error(ret)) {
11641 if (put_user_u32(rgid, arg1)
11642 || put_user_u32(egid, arg2)
11643 || put_user_u32(sgid, arg3))
11644 return -TARGET_EFAULT;
11649 #ifdef TARGET_NR_chown32
11650 case TARGET_NR_chown32:
11651 if (!(p = lock_user_string(arg1)))
11652 return -TARGET_EFAULT;
11653 ret = get_errno(chown(p, arg2, arg3));
11654 unlock_user(p, arg1, 0);
11657 #ifdef TARGET_NR_setuid32
11658 case TARGET_NR_setuid32:
11659 return get_errno(sys_setuid(arg1));
11661 #ifdef TARGET_NR_setgid32
11662 case TARGET_NR_setgid32:
11663 return get_errno(sys_setgid(arg1));
11665 #ifdef TARGET_NR_setfsuid32
11666 case TARGET_NR_setfsuid32:
11667 return get_errno(setfsuid(arg1));
11669 #ifdef TARGET_NR_setfsgid32
11670 case TARGET_NR_setfsgid32:
11671 return get_errno(setfsgid(arg1));
11673 #ifdef TARGET_NR_mincore
11674 case TARGET_NR_mincore:
11676 void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11678 return -TARGET_ENOMEM;
11680 p = lock_user_string(arg3);
11682 ret = -TARGET_EFAULT;
11684 ret = get_errno(mincore(a, arg2, p));
11685 unlock_user(p, arg3, ret);
11687 unlock_user(a, arg1, 0);
11691 #ifdef TARGET_NR_arm_fadvise64_64
11692 case TARGET_NR_arm_fadvise64_64:
11693 /* arm_fadvise64_64 looks like fadvise64_64 but
11694 * with different argument order: fd, advice, offset, len
11695 * rather than the usual fd, offset, len, advice.
11696 * Note that offset and len are both 64-bit so appear as
11697 * pairs of 32-bit registers.
11699 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11700 target_offset64(arg5, arg6), arg2);
11701 return -host_to_target_errno(ret);
11704 #if TARGET_ABI_BITS == 32
11706 #ifdef TARGET_NR_fadvise64_64
11707 case TARGET_NR_fadvise64_64:
11708 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11709 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11717 /* 6 args: fd, offset (high, low), len (high, low), advice */
11718 if (regpairs_aligned(cpu_env, num)) {
11719 /* offset is in (3,4), len in (5,6) and advice in 7 */
11727 ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11728 target_offset64(arg4, arg5), arg6);
11729 return -host_to_target_errno(ret);
11732 #ifdef TARGET_NR_fadvise64
11733 case TARGET_NR_fadvise64:
11734 /* 5 args: fd, offset (high, low), len, advice */
11735 if (regpairs_aligned(cpu_env, num)) {
11736 /* offset is in (3,4), len in 5 and advice in 6 */
11742 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11743 return -host_to_target_errno(ret);
11746 #else /* not a 32-bit ABI */
11747 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11748 #ifdef TARGET_NR_fadvise64_64
11749 case TARGET_NR_fadvise64_64:
11751 #ifdef TARGET_NR_fadvise64
11752 case TARGET_NR_fadvise64:
11754 #ifdef TARGET_S390X
11756 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11757 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11758 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11759 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11763 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11765 #endif /* end of 64-bit ABI fadvise handling */
11767 #ifdef TARGET_NR_madvise
11768 case TARGET_NR_madvise:
11769 /* A straight passthrough may not be safe because qemu sometimes
11770 turns private file-backed mappings into anonymous mappings.
11771 This will break MADV_DONTNEED.
11772 This is a hint, so ignoring and returning success is ok. */
11775 #ifdef TARGET_NR_fcntl64
11776 case TARGET_NR_fcntl64:
11780 from_flock64_fn *copyfrom = copy_from_user_flock64;
11781 to_flock64_fn *copyto = copy_to_user_flock64;
11784 if (!((CPUARMState *)cpu_env)->eabi) {
11785 copyfrom = copy_from_user_oabi_flock64;
11786 copyto = copy_to_user_oabi_flock64;
11790 cmd = target_to_host_fcntl_cmd(arg2);
11791 if (cmd == -TARGET_EINVAL) {
11796 case TARGET_F_GETLK64:
11797 ret = copyfrom(&fl, arg3);
11801 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11803 ret = copyto(arg3, &fl);
11807 case TARGET_F_SETLK64:
11808 case TARGET_F_SETLKW64:
11809 ret = copyfrom(&fl, arg3);
11813 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11816 ret = do_fcntl(arg1, arg2, arg3);
11822 #ifdef TARGET_NR_cacheflush
11823 case TARGET_NR_cacheflush:
11824 /* self-modifying code is handled automatically, so nothing needed */
11827 #ifdef TARGET_NR_getpagesize
11828 case TARGET_NR_getpagesize:
11829 return TARGET_PAGE_SIZE;
11831 case TARGET_NR_gettid:
11832 return get_errno(sys_gettid());
11833 #ifdef TARGET_NR_readahead
11834 case TARGET_NR_readahead:
11835 #if TARGET_ABI_BITS == 32
11836 if (regpairs_aligned(cpu_env, num)) {
11841 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11843 ret = get_errno(readahead(arg1, arg2, arg3));
11848 #ifdef TARGET_NR_setxattr
11849 case TARGET_NR_listxattr:
11850 case TARGET_NR_llistxattr:
11854 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11856 return -TARGET_EFAULT;
11859 p = lock_user_string(arg1);
11861 if (num == TARGET_NR_listxattr) {
11862 ret = get_errno(listxattr(p, b, arg3));
11864 ret = get_errno(llistxattr(p, b, arg3));
11867 ret = -TARGET_EFAULT;
11869 unlock_user(p, arg1, 0);
11870 unlock_user(b, arg2, arg3);
11873 case TARGET_NR_flistxattr:
11877 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11879 return -TARGET_EFAULT;
11882 ret = get_errno(flistxattr(arg1, b, arg3));
11883 unlock_user(b, arg2, arg3);
11886 case TARGET_NR_setxattr:
11887 case TARGET_NR_lsetxattr:
11889 void *p, *n, *v = 0;
11891 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11893 return -TARGET_EFAULT;
11896 p = lock_user_string(arg1);
11897 n = lock_user_string(arg2);
11899 if (num == TARGET_NR_setxattr) {
11900 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11902 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11905 ret = -TARGET_EFAULT;
11907 unlock_user(p, arg1, 0);
11908 unlock_user(n, arg2, 0);
11909 unlock_user(v, arg3, 0);
11912 case TARGET_NR_fsetxattr:
11916 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11918 return -TARGET_EFAULT;
11921 n = lock_user_string(arg2);
11923 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11925 ret = -TARGET_EFAULT;
11927 unlock_user(n, arg2, 0);
11928 unlock_user(v, arg3, 0);
11931 case TARGET_NR_getxattr:
11932 case TARGET_NR_lgetxattr:
11934 void *p, *n, *v = 0;
11936 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11938 return -TARGET_EFAULT;
11941 p = lock_user_string(arg1);
11942 n = lock_user_string(arg2);
11944 if (num == TARGET_NR_getxattr) {
11945 ret = get_errno(getxattr(p, n, v, arg4));
11947 ret = get_errno(lgetxattr(p, n, v, arg4));
11950 ret = -TARGET_EFAULT;
11952 unlock_user(p, arg1, 0);
11953 unlock_user(n, arg2, 0);
11954 unlock_user(v, arg3, arg4);
11957 case TARGET_NR_fgetxattr:
11961 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11963 return -TARGET_EFAULT;
11966 n = lock_user_string(arg2);
11968 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11970 ret = -TARGET_EFAULT;
11972 unlock_user(n, arg2, 0);
11973 unlock_user(v, arg3, arg4);
11976 case TARGET_NR_removexattr:
11977 case TARGET_NR_lremovexattr:
11980 p = lock_user_string(arg1);
11981 n = lock_user_string(arg2);
11983 if (num == TARGET_NR_removexattr) {
11984 ret = get_errno(removexattr(p, n));
11986 ret = get_errno(lremovexattr(p, n));
11989 ret = -TARGET_EFAULT;
11991 unlock_user(p, arg1, 0);
11992 unlock_user(n, arg2, 0);
11995 case TARGET_NR_fremovexattr:
11998 n = lock_user_string(arg2);
12000 ret = get_errno(fremovexattr(arg1, n));
12002 ret = -TARGET_EFAULT;
12004 unlock_user(n, arg2, 0);
12008 #endif /* CONFIG_ATTR */
12009 #ifdef TARGET_NR_set_thread_area
12010 case TARGET_NR_set_thread_area:
12011 #if defined(TARGET_MIPS)
12012 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12014 #elif defined(TARGET_CRIS)
12016 ret = -TARGET_EINVAL;
12018 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12022 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12023 return do_set_thread_area(cpu_env, arg1);
12024 #elif defined(TARGET_M68K)
12026 TaskState *ts = cpu->opaque;
12027 ts->tp_value = arg1;
12031 return -TARGET_ENOSYS;
12034 #ifdef TARGET_NR_get_thread_area
12035 case TARGET_NR_get_thread_area:
12036 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12037 return do_get_thread_area(cpu_env, arg1);
12038 #elif defined(TARGET_M68K)
12040 TaskState *ts = cpu->opaque;
12041 return ts->tp_value;
12044 return -TARGET_ENOSYS;
12047 #ifdef TARGET_NR_getdomainname
12048 case TARGET_NR_getdomainname:
12049 return -TARGET_ENOSYS;
12052 #ifdef TARGET_NR_clock_settime
12053 case TARGET_NR_clock_settime:
12055 struct timespec ts;
12057 ret = target_to_host_timespec(&ts, arg2);
12058 if (!is_error(ret)) {
12059 ret = get_errno(clock_settime(arg1, &ts));
12064 #ifdef TARGET_NR_clock_settime64
12065 case TARGET_NR_clock_settime64:
12067 struct timespec ts;
12069 ret = target_to_host_timespec64(&ts, arg2);
12070 if (!is_error(ret)) {
12071 ret = get_errno(clock_settime(arg1, &ts));
12076 #ifdef TARGET_NR_clock_gettime
12077 case TARGET_NR_clock_gettime:
12079 struct timespec ts;
12080 ret = get_errno(clock_gettime(arg1, &ts));
12081 if (!is_error(ret)) {
12082 ret = host_to_target_timespec(arg2, &ts);
12087 #ifdef TARGET_NR_clock_gettime64
12088 case TARGET_NR_clock_gettime64:
12090 struct timespec ts;
12091 ret = get_errno(clock_gettime(arg1, &ts));
12092 if (!is_error(ret)) {
12093 ret = host_to_target_timespec64(arg2, &ts);
12098 #ifdef TARGET_NR_clock_getres
12099 case TARGET_NR_clock_getres:
12101 struct timespec ts;
12102 ret = get_errno(clock_getres(arg1, &ts));
12103 if (!is_error(ret)) {
12104 host_to_target_timespec(arg2, &ts);
12109 #ifdef TARGET_NR_clock_getres_time64
12110 case TARGET_NR_clock_getres_time64:
12112 struct timespec ts;
12113 ret = get_errno(clock_getres(arg1, &ts));
12114 if (!is_error(ret)) {
12115 host_to_target_timespec64(arg2, &ts);
12120 #ifdef TARGET_NR_clock_nanosleep
12121 case TARGET_NR_clock_nanosleep:
12123 struct timespec ts;
12124 if (target_to_host_timespec(&ts, arg3)) {
12125 return -TARGET_EFAULT;
12127 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12128 &ts, arg4 ? &ts : NULL));
12130 * if the call is interrupted by a signal handler, it fails
12131 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12132 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12134 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12135 host_to_target_timespec(arg4, &ts)) {
12136 return -TARGET_EFAULT;
12142 #ifdef TARGET_NR_clock_nanosleep_time64
12143 case TARGET_NR_clock_nanosleep_time64:
12145 struct timespec ts;
12147 if (target_to_host_timespec64(&ts, arg3)) {
12148 return -TARGET_EFAULT;
12151 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12152 &ts, arg4 ? &ts : NULL));
12154 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12155 host_to_target_timespec64(arg4, &ts)) {
12156 return -TARGET_EFAULT;
12162 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12163 case TARGET_NR_set_tid_address:
12164 return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
12167 case TARGET_NR_tkill:
12168 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12170 case TARGET_NR_tgkill:
12171 return get_errno(safe_tgkill((int)arg1, (int)arg2,
12172 target_to_host_signal(arg3)));
12174 #ifdef TARGET_NR_set_robust_list
12175 case TARGET_NR_set_robust_list:
12176 case TARGET_NR_get_robust_list:
12177 /* The ABI for supporting robust futexes has userspace pass
12178 * the kernel a pointer to a linked list which is updated by
12179 * userspace after the syscall; the list is walked by the kernel
12180 * when the thread exits. Since the linked list in QEMU guest
12181 * memory isn't a valid linked list for the host and we have
12182 * no way to reliably intercept the thread-death event, we can't
12183 * support these. Silently return ENOSYS so that guest userspace
12184 * falls back to a non-robust futex implementation (which should
12185 * be OK except in the corner case of the guest crashing while
12186 * holding a mutex that is shared with another process via
12189 return -TARGET_ENOSYS;
12192 #if defined(TARGET_NR_utimensat)
12193 case TARGET_NR_utimensat:
12195 struct timespec *tsp, ts[2];
12199 if (target_to_host_timespec(ts, arg3)) {
12200 return -TARGET_EFAULT;
12202 if (target_to_host_timespec(ts + 1, arg3 +
12203 sizeof(struct target_timespec))) {
12204 return -TARGET_EFAULT;
12209 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12211 if (!(p = lock_user_string(arg2))) {
12212 return -TARGET_EFAULT;
12214 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12215 unlock_user(p, arg2, 0);
12220 #ifdef TARGET_NR_utimensat_time64
12221 case TARGET_NR_utimensat_time64:
12223 struct timespec *tsp, ts[2];
12227 if (target_to_host_timespec64(ts, arg3)) {
12228 return -TARGET_EFAULT;
12230 if (target_to_host_timespec64(ts + 1, arg3 +
12231 sizeof(struct target__kernel_timespec))) {
12232 return -TARGET_EFAULT;
12237 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12239 p = lock_user_string(arg2);
12241 return -TARGET_EFAULT;
12243 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12244 unlock_user(p, arg2, 0);
12249 #ifdef TARGET_NR_futex
12250 case TARGET_NR_futex:
12251 return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12253 #ifdef TARGET_NR_futex_time64
12254 case TARGET_NR_futex_time64:
12255 return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12257 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12258 case TARGET_NR_inotify_init:
12259 ret = get_errno(sys_inotify_init());
12261 fd_trans_register(ret, &target_inotify_trans);
12265 #ifdef CONFIG_INOTIFY1
12266 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12267 case TARGET_NR_inotify_init1:
12268 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12269 fcntl_flags_tbl)));
12271 fd_trans_register(ret, &target_inotify_trans);
12276 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12277 case TARGET_NR_inotify_add_watch:
12278 p = lock_user_string(arg2);
12279 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12280 unlock_user(p, arg2, 0);
12283 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12284 case TARGET_NR_inotify_rm_watch:
12285 return get_errno(sys_inotify_rm_watch(arg1, arg2));
12288 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12289 case TARGET_NR_mq_open:
12291 struct mq_attr posix_mq_attr;
12292 struct mq_attr *pposix_mq_attr;
12295 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12296 pposix_mq_attr = NULL;
12298 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12299 return -TARGET_EFAULT;
12301 pposix_mq_attr = &posix_mq_attr;
12303 p = lock_user_string(arg1 - 1);
12305 return -TARGET_EFAULT;
12307 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12308 unlock_user (p, arg1, 0);
12312 case TARGET_NR_mq_unlink:
12313 p = lock_user_string(arg1 - 1);
12315 return -TARGET_EFAULT;
12317 ret = get_errno(mq_unlink(p));
12318 unlock_user (p, arg1, 0);
12321 #ifdef TARGET_NR_mq_timedsend
12322 case TARGET_NR_mq_timedsend:
12324 struct timespec ts;
12326 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12328 if (target_to_host_timespec(&ts, arg5)) {
12329 return -TARGET_EFAULT;
12331 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12332 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12333 return -TARGET_EFAULT;
12336 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12338 unlock_user (p, arg2, arg3);
12342 #ifdef TARGET_NR_mq_timedsend_time64
12343 case TARGET_NR_mq_timedsend_time64:
12345 struct timespec ts;
12347 p = lock_user(VERIFY_READ, arg2, arg3, 1);
12349 if (target_to_host_timespec64(&ts, arg5)) {
12350 return -TARGET_EFAULT;
12352 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12353 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12354 return -TARGET_EFAULT;
12357 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12359 unlock_user(p, arg2, arg3);
12364 #ifdef TARGET_NR_mq_timedreceive
12365 case TARGET_NR_mq_timedreceive:
12367 struct timespec ts;
12370 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12372 if (target_to_host_timespec(&ts, arg5)) {
12373 return -TARGET_EFAULT;
12375 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12377 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12378 return -TARGET_EFAULT;
12381 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12384 unlock_user (p, arg2, arg3);
12386 put_user_u32(prio, arg4);
12390 #ifdef TARGET_NR_mq_timedreceive_time64
12391 case TARGET_NR_mq_timedreceive_time64:
12393 struct timespec ts;
12396 p = lock_user(VERIFY_READ, arg2, arg3, 1);
12398 if (target_to_host_timespec64(&ts, arg5)) {
12399 return -TARGET_EFAULT;
12401 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12403 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12404 return -TARGET_EFAULT;
12407 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12410 unlock_user(p, arg2, arg3);
12412 put_user_u32(prio, arg4);
12418 /* Not implemented for now... */
12419 /* case TARGET_NR_mq_notify: */
12422 case TARGET_NR_mq_getsetattr:
12424 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12427 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12428 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12429 &posix_mq_attr_out));
12430 } else if (arg3 != 0) {
12431 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12433 if (ret == 0 && arg3 != 0) {
12434 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12440 #ifdef CONFIG_SPLICE
12441 #ifdef TARGET_NR_tee
12442 case TARGET_NR_tee:
12444 ret = get_errno(tee(arg1,arg2,arg3,arg4));
12448 #ifdef TARGET_NR_splice
12449 case TARGET_NR_splice:
12451 loff_t loff_in, loff_out;
12452 loff_t *ploff_in = NULL, *ploff_out = NULL;
12454 if (get_user_u64(loff_in, arg2)) {
12455 return -TARGET_EFAULT;
12457 ploff_in = &loff_in;
12460 if (get_user_u64(loff_out, arg4)) {
12461 return -TARGET_EFAULT;
12463 ploff_out = &loff_out;
12465 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12467 if (put_user_u64(loff_in, arg2)) {
12468 return -TARGET_EFAULT;
12472 if (put_user_u64(loff_out, arg4)) {
12473 return -TARGET_EFAULT;
12479 #ifdef TARGET_NR_vmsplice
12480 case TARGET_NR_vmsplice:
12482 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12484 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12485 unlock_iovec(vec, arg2, arg3, 0);
12487 ret = -host_to_target_errno(errno);
12492 #endif /* CONFIG_SPLICE */
12493 #ifdef CONFIG_EVENTFD
12494 #if defined(TARGET_NR_eventfd)
12495 case TARGET_NR_eventfd:
12496 ret = get_errno(eventfd(arg1, 0));
12498 fd_trans_register(ret, &target_eventfd_trans);
12502 #if defined(TARGET_NR_eventfd2)
12503 case TARGET_NR_eventfd2:
12505 int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12506 if (arg2 & TARGET_O_NONBLOCK) {
12507 host_flags |= O_NONBLOCK;
12509 if (arg2 & TARGET_O_CLOEXEC) {
12510 host_flags |= O_CLOEXEC;
12512 ret = get_errno(eventfd(arg1, host_flags));
12514 fd_trans_register(ret, &target_eventfd_trans);
12519 #endif /* CONFIG_EVENTFD */
12520 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12521 case TARGET_NR_fallocate:
12522 #if TARGET_ABI_BITS == 32
12523 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12524 target_offset64(arg5, arg6)));
12526 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12530 #if defined(CONFIG_SYNC_FILE_RANGE)
12531 #if defined(TARGET_NR_sync_file_range)
12532 case TARGET_NR_sync_file_range:
12533 #if TARGET_ABI_BITS == 32
12534 #if defined(TARGET_MIPS)
12535 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12536 target_offset64(arg5, arg6), arg7));
12538 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12539 target_offset64(arg4, arg5), arg6));
12540 #endif /* !TARGET_MIPS */
12542 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12546 #if defined(TARGET_NR_sync_file_range2) || \
12547 defined(TARGET_NR_arm_sync_file_range)
12548 #if defined(TARGET_NR_sync_file_range2)
12549 case TARGET_NR_sync_file_range2:
12551 #if defined(TARGET_NR_arm_sync_file_range)
12552 case TARGET_NR_arm_sync_file_range:
12554 /* This is like sync_file_range but the arguments are reordered */
12555 #if TARGET_ABI_BITS == 32
12556 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12557 target_offset64(arg5, arg6), arg2));
12559 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12564 #if defined(TARGET_NR_signalfd4)
12565 case TARGET_NR_signalfd4:
12566 return do_signalfd4(arg1, arg2, arg4);
12568 #if defined(TARGET_NR_signalfd)
12569 case TARGET_NR_signalfd:
12570 return do_signalfd4(arg1, arg2, 0);
12572 #if defined(CONFIG_EPOLL)
12573 #if defined(TARGET_NR_epoll_create)
12574 case TARGET_NR_epoll_create:
12575 return get_errno(epoll_create(arg1));
12577 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12578 case TARGET_NR_epoll_create1:
12579 return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12581 #if defined(TARGET_NR_epoll_ctl)
12582 case TARGET_NR_epoll_ctl:
12584 struct epoll_event ep;
12585 struct epoll_event *epp = 0;
12587 if (arg2 != EPOLL_CTL_DEL) {
12588 struct target_epoll_event *target_ep;
12589 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12590 return -TARGET_EFAULT;
12592 ep.events = tswap32(target_ep->events);
12594 * The epoll_data_t union is just opaque data to the kernel,
12595 * so we transfer all 64 bits across and need not worry what
12596 * actual data type it is.
12598 ep.data.u64 = tswap64(target_ep->data.u64);
12599 unlock_user_struct(target_ep, arg4, 0);
12602 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12603 * non-null pointer, even though this argument is ignored.
12608 return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12612 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12613 #if defined(TARGET_NR_epoll_wait)
12614 case TARGET_NR_epoll_wait:
12616 #if defined(TARGET_NR_epoll_pwait)
12617 case TARGET_NR_epoll_pwait:
12620 struct target_epoll_event *target_ep;
12621 struct epoll_event *ep;
12623 int maxevents = arg3;
12624 int timeout = arg4;
12626 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12627 return -TARGET_EINVAL;
12630 target_ep = lock_user(VERIFY_WRITE, arg2,
12631 maxevents * sizeof(struct target_epoll_event), 1);
12633 return -TARGET_EFAULT;
12636 ep = g_try_new(struct epoll_event, maxevents);
12638 unlock_user(target_ep, arg2, 0);
12639 return -TARGET_ENOMEM;
12643 #if defined(TARGET_NR_epoll_pwait)
12644 case TARGET_NR_epoll_pwait:
12646 target_sigset_t *target_set;
12647 sigset_t _set, *set = &_set;
12650 if (arg6 != sizeof(target_sigset_t)) {
12651 ret = -TARGET_EINVAL;
12655 target_set = lock_user(VERIFY_READ, arg5,
12656 sizeof(target_sigset_t), 1);
12658 ret = -TARGET_EFAULT;
12661 target_to_host_sigset(set, target_set);
12662 unlock_user(target_set, arg5, 0);
12667 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12668 set, SIGSET_T_SIZE));
12672 #if defined(TARGET_NR_epoll_wait)
12673 case TARGET_NR_epoll_wait:
12674 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12679 ret = -TARGET_ENOSYS;
12681 if (!is_error(ret)) {
12683 for (i = 0; i < ret; i++) {
12684 target_ep[i].events = tswap32(ep[i].events);
12685 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12687 unlock_user(target_ep, arg2,
12688 ret * sizeof(struct target_epoll_event));
12690 unlock_user(target_ep, arg2, 0);
12697 #ifdef TARGET_NR_prlimit64
12698 case TARGET_NR_prlimit64:
12700 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12701 struct target_rlimit64 *target_rnew, *target_rold;
12702 struct host_rlimit64 rnew, rold, *rnewp = 0;
12703 int resource = target_to_host_resource(arg2);
12705 if (arg3 && (resource != RLIMIT_AS &&
12706 resource != RLIMIT_DATA &&
12707 resource != RLIMIT_STACK)) {
12708 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12709 return -TARGET_EFAULT;
12711 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12712 rnew.rlim_max = tswap64(target_rnew->rlim_max);
12713 unlock_user_struct(target_rnew, arg3, 0);
12717 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12718 if (!is_error(ret) && arg4) {
12719 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12720 return -TARGET_EFAULT;
12722 target_rold->rlim_cur = tswap64(rold.rlim_cur);
12723 target_rold->rlim_max = tswap64(rold.rlim_max);
12724 unlock_user_struct(target_rold, arg4, 1);
12729 #ifdef TARGET_NR_gethostname
12730 case TARGET_NR_gethostname:
12732 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12734 ret = get_errno(gethostname(name, arg2));
12735 unlock_user(name, arg1, arg2);
12737 ret = -TARGET_EFAULT;
12742 #ifdef TARGET_NR_atomic_cmpxchg_32
12743 case TARGET_NR_atomic_cmpxchg_32:
12745 /* should use start_exclusive from main.c */
12746 abi_ulong mem_value;
12747 if (get_user_u32(mem_value, arg6)) {
12748 target_siginfo_t info;
12749 info.si_signo = SIGSEGV;
12751 info.si_code = TARGET_SEGV_MAPERR;
12752 info._sifields._sigfault._addr = arg6;
12753 queue_signal((CPUArchState *)cpu_env, info.si_signo,
12754 QEMU_SI_FAULT, &info);
12758 if (mem_value == arg2)
12759 put_user_u32(arg1, arg6);
12763 #ifdef TARGET_NR_atomic_barrier
12764 case TARGET_NR_atomic_barrier:
12765 /* Like the kernel implementation and the
12766 qemu arm barrier, no-op this? */
12770 #ifdef TARGET_NR_timer_create
12771 case TARGET_NR_timer_create:
12773 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12775 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12778 int timer_index = next_free_host_timer();
12780 if (timer_index < 0) {
12781 ret = -TARGET_EAGAIN;
12783 timer_t *phtimer = g_posix_timers + timer_index;
12786 phost_sevp = &host_sevp;
12787 ret = target_to_host_sigevent(phost_sevp, arg2);
12793 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12797 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12798 return -TARGET_EFAULT;
12806 #ifdef TARGET_NR_timer_settime
12807 case TARGET_NR_timer_settime:
12809 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12810 * struct itimerspec * old_value */
12811 target_timer_t timerid = get_timer_id(arg1);
12815 } else if (arg3 == 0) {
12816 ret = -TARGET_EINVAL;
12818 timer_t htimer = g_posix_timers[timerid];
12819 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12821 if (target_to_host_itimerspec(&hspec_new, arg3)) {
12822 return -TARGET_EFAULT;
12825 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12826 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12827 return -TARGET_EFAULT;
12834 #ifdef TARGET_NR_timer_settime64
12835 case TARGET_NR_timer_settime64:
12837 target_timer_t timerid = get_timer_id(arg1);
12841 } else if (arg3 == 0) {
12842 ret = -TARGET_EINVAL;
12844 timer_t htimer = g_posix_timers[timerid];
12845 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12847 if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12848 return -TARGET_EFAULT;
12851 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12852 if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12853 return -TARGET_EFAULT;
12860 #ifdef TARGET_NR_timer_gettime
12861 case TARGET_NR_timer_gettime:
12863 /* args: timer_t timerid, struct itimerspec *curr_value */
12864 target_timer_t timerid = get_timer_id(arg1);
12868 } else if (!arg2) {
12869 ret = -TARGET_EFAULT;
12871 timer_t htimer = g_posix_timers[timerid];
12872 struct itimerspec hspec;
12873 ret = get_errno(timer_gettime(htimer, &hspec));
12875 if (host_to_target_itimerspec(arg2, &hspec)) {
12876 ret = -TARGET_EFAULT;
12883 #ifdef TARGET_NR_timer_gettime64
12884 case TARGET_NR_timer_gettime64:
12886 /* args: timer_t timerid, struct itimerspec64 *curr_value */
12887 target_timer_t timerid = get_timer_id(arg1);
12891 } else if (!arg2) {
12892 ret = -TARGET_EFAULT;
12894 timer_t htimer = g_posix_timers[timerid];
12895 struct itimerspec hspec;
12896 ret = get_errno(timer_gettime(htimer, &hspec));
12898 if (host_to_target_itimerspec64(arg2, &hspec)) {
12899 ret = -TARGET_EFAULT;
12906 #ifdef TARGET_NR_timer_getoverrun
12907 case TARGET_NR_timer_getoverrun:
12909 /* args: timer_t timerid */
12910 target_timer_t timerid = get_timer_id(arg1);
12915 timer_t htimer = g_posix_timers[timerid];
12916 ret = get_errno(timer_getoverrun(htimer));
12922 #ifdef TARGET_NR_timer_delete
12923 case TARGET_NR_timer_delete:
12925 /* args: timer_t timerid */
12926 target_timer_t timerid = get_timer_id(arg1);
12931 timer_t htimer = g_posix_timers[timerid];
12932 ret = get_errno(timer_delete(htimer));
12933 g_posix_timers[timerid] = 0;
12939 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12940 case TARGET_NR_timerfd_create:
12941 return get_errno(timerfd_create(arg1,
12942 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12945 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12946 case TARGET_NR_timerfd_gettime:
12948 struct itimerspec its_curr;
12950 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12952 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12953 return -TARGET_EFAULT;
12959 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12960 case TARGET_NR_timerfd_gettime64:
12962 struct itimerspec its_curr;
12964 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12966 if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12967 return -TARGET_EFAULT;
12973 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12974 case TARGET_NR_timerfd_settime:
12976 struct itimerspec its_new, its_old, *p_new;
12979 if (target_to_host_itimerspec(&its_new, arg3)) {
12980 return -TARGET_EFAULT;
12987 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12989 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12990 return -TARGET_EFAULT;
12996 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
12997 case TARGET_NR_timerfd_settime64:
12999 struct itimerspec its_new, its_old, *p_new;
13002 if (target_to_host_itimerspec64(&its_new, arg3)) {
13003 return -TARGET_EFAULT;
13010 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13012 if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13013 return -TARGET_EFAULT;
13019 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13020 case TARGET_NR_ioprio_get:
13021 return get_errno(ioprio_get(arg1, arg2));
13024 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13025 case TARGET_NR_ioprio_set:
13026 return get_errno(ioprio_set(arg1, arg2, arg3));
13029 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13030 case TARGET_NR_setns:
13031 return get_errno(setns(arg1, arg2));
13033 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13034 case TARGET_NR_unshare:
13035 return get_errno(unshare(arg1));
13037 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13038 case TARGET_NR_kcmp:
13039 return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13041 #ifdef TARGET_NR_swapcontext
13042 case TARGET_NR_swapcontext:
13043 /* PowerPC specific. */
13044 return do_swapcontext(cpu_env, arg1, arg2, arg3);
13046 #ifdef TARGET_NR_memfd_create
13047 case TARGET_NR_memfd_create:
13048 p = lock_user_string(arg1);
13050 return -TARGET_EFAULT;
13052 ret = get_errno(memfd_create(p, arg2));
13053 fd_trans_unregister(ret);
13054 unlock_user(p, arg1, 0);
13057 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13058 case TARGET_NR_membarrier:
13059 return get_errno(membarrier(arg1, arg2));
13062 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13063 case TARGET_NR_copy_file_range:
13065 loff_t inoff, outoff;
13066 loff_t *pinoff = NULL, *poutoff = NULL;
13069 if (get_user_u64(inoff, arg2)) {
13070 return -TARGET_EFAULT;
13075 if (get_user_u64(outoff, arg4)) {
13076 return -TARGET_EFAULT;
13080 /* Do not sign-extend the count parameter. */
13081 ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13082 (abi_ulong)arg5, arg6));
13083 if (!is_error(ret) && ret > 0) {
13085 if (put_user_u64(inoff, arg2)) {
13086 return -TARGET_EFAULT;
13090 if (put_user_u64(outoff, arg4)) {
13091 return -TARGET_EFAULT;
13099 #if defined(TARGET_NR_pivot_root)
13100 case TARGET_NR_pivot_root:
13103 p = lock_user_string(arg1); /* new_root */
13104 p2 = lock_user_string(arg2); /* put_old */
13106 ret = -TARGET_EFAULT;
13108 ret = get_errno(pivot_root(p, p2));
13110 unlock_user(p2, arg2, 0);
13111 unlock_user(p, arg1, 0);
13117 qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13118 return -TARGET_ENOSYS;
13123 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13124 abi_long arg2, abi_long arg3, abi_long arg4,
13125 abi_long arg5, abi_long arg6, abi_long arg7,
13128 CPUState *cpu = env_cpu(cpu_env);
13131 #ifdef DEBUG_ERESTARTSYS
13132 /* Debug-only code for exercising the syscall-restart code paths
13133 * in the per-architecture cpu main loops: restart every syscall
13134 * the guest makes once before letting it through.
13140 return -TARGET_ERESTARTSYS;
13145 record_syscall_start(cpu, num, arg1,
13146 arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13148 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13149 print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13152 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13153 arg5, arg6, arg7, arg8);
13155 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13156 print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13157 arg3, arg4, arg5, arg6);
13160 record_syscall_return(cpu, num, ret);