 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <sys/mount.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <linux/capability.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_tun.h>
#include <linux/in6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#include <sys/timerfd.h>
#include <sys/eventfd.h>
#include <sys/epoll.h>
#include "qemu/xattr.h"
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef HAVE_SYS_KCOV_H
#include <sys/kcov.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef CONFIG_BTRFS
#include <linux/btrfs.h>
#endif
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#endif
#include "linux_loop.h"
#include "user-internals.h"
#include "signal-common.h"
#include "user-mmap.h"
#include "user/safe-syscall.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "special-errno.h"
#include "qapi/error.h"
#include "fd-trans.h"
#ifndef CLONE_IO
#define CLONE_IO 0x80000000 /* Clone io context */
#endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))
/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH \
    _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
#define VFAT_IOCTL_READDIR_SHORT \
    _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)

#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6
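/*
 * Note: the two definitions above are a local copy of the VFAT readdir
 * ioctl numbers that would normally come from <linux/msdos_fs.h> (left
 * commented out above, presumably to avoid header conflicts). The _IOC
 * size argument encodes room for two linux_dirent records plus a
 * 256-byte name each, matching what the kernel returns for these calls.
 */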
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
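/*
 * Illustrative expansion: _syscall2(int, ioprio_get, int, which, int, who)
 * defines
 *     static int ioprio_get(int which, int who)
 *     { return syscall(__NR_ioprio_get, which, who); }
 * i.e. a local wrapper that invokes the raw host syscall even when libc
 * does not export a binding for it.
 */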
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
#ifndef TARGET_O_NONBLOCK_MASK
#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
/* sched_attr is not defined in glibc */
struct sched_attr {
    uint32_t size;
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min;
    uint32_t sched_util_max;
};
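/*
 * Note: this struct mirrors the kernel's UAPI struct sched_attr as
 * consumed by sched_getattr()/sched_setattr(); the field order and
 * widths must stay in sync with the host kernel's definition.
 */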
#define __NR_sys_sched_getattr __NR_sched_getattr
_syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, size, unsigned int, flags);
#define __NR_sys_sched_setattr __NR_sched_setattr
_syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, flags);
#define __NR_sys_sched_getscheduler __NR_sched_getscheduler
_syscall1(int, sys_sched_getscheduler, pid_t, pid);
#define __NR_sys_sched_setscheduler __NR_sched_setscheduler
_syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
          const struct sched_param *, param);
#define __NR_sys_sched_getparam __NR_sched_getparam
_syscall2(int, sys_sched_getparam, pid_t, pid,
          struct sched_param *, param);
#define __NR_sys_sched_setparam __NR_sched_setparam
_syscall2(int, sys_sched_setparam, pid_t, pid,
          const struct sched_param *, param);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif
/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
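/*
 * Each entry above is (target_mask, target_bits, host_mask, host_bits);
 * the generic target_to_host_bitmask()/host_to_target_bitmask() helpers
 * walk this table to translate open(2) flags in either direction, which
 * is why the list must remain zero-terminated.
 */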
_syscall2(int, sys_getcwd1, char *, buf, size_t, size)

#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}

static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1) {
        return -host_to_target_errno(errno);
    } else {
        return ret;
    }
}
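/*
 * Convention used throughout this file: host syscalls report failure by
 * returning -1 with the error in errno, whereas the value handed back to
 * the guest encodes the error kernel-style as a small negative number,
 * so get_errno() folds errno into -TARGET_Exxx at the syscall boundary.
 */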
const char *target_strerror(int err)
{
    if (err == QEMU_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    return strerror(target_to_host_errno(err));
}
static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
{
    int i;
    uint8_t b;

    if (usize <= ksize) {
        return 1;
    }

    for (i = ksize; i < usize; i++) {
        if (get_user_u8(b, addr + i)) {
            return -TARGET_EFAULT;
        }
        if (b) {
            return 0;
        }
    }

    return 1;
}
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
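/*
 * The safe_syscall wrappers differ from the plain _syscall ones in that
 * they cooperate with the guest signal handling code: if a guest signal
 * arrives while the thread is about to block in the host syscall, the
 * wrapper fails with errno set to QEMU_ERESTARTSYS so the caller can
 * unwind, deliver the signal, and restart the guest syscall (see the
 * contract described in "user/safe-syscall.h", included above).
 */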
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#ifdef __s390x__
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    /* brk pointers are always untagged */

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
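/*
 * The fd_set conversion above is done bit by bit because the guest
 * bitmap is an array of abi_ulong in guest byte order: neither the word
 * size nor the endianness necessarily matches the host fd_set layout,
 * so a plain memcpy would be wrong whenever host and target ABIs differ.
 */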
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
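/*
 * Example: an Alpha host ticks at HOST_HZ == 1024, so for a 100 Hz target
 * 2048 host ticks are reported to the guest as 2048 * 100 / 1024 = 200.
 * The widening to int64_t keeps the multiplication from overflowing.
 */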
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif
#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif
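/*
 * Note the asymmetry with target_to_host_rlim() above: any host value
 * that does not fit in the guest's abi_ulong (including RLIM_INFINITY
 * itself) is reported as TARGET_RLIM_INFINITY rather than being
 * silently truncated.
 */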
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
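/*
 * Note that do_select() is implemented on top of the host pselect6()
 * (with a NULL signal mask): the guest's timeval is converted to the
 * timespec that pselect6 expects and converted back afterwards, so the
 * guest still sees the remaining-time semantics of Linux select(2).
 */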
#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    sigset_t set;
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;
    target_sigset_t *target_sigset;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    if (arg6) {
        sig_ptr = &sig;
        sig.size = SIGSET_T_SIZE;

        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            sig.set = &set;
            if (arg_sigsize != sizeof(*target_sigset)) {
                /* Like the kernel, we enforce correct size sigsets */
                return -TARGET_EINVAL;
            }
            target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                      sizeof(*target_sigset), 1);
            if (!target_sigset) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, target_sigset);
            unlock_user(target_sigset, arg_sigset, 0);
        } else {
            sig.set = NULL;
        }
    } else {
        sig_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        target_sigset_t *target_set;
        sigset_t _set, *set = &_set;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            if (arg5 != sizeof(target_sigset_t)) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EINVAL;
            }

            target_set = lock_user(VERIFY_READ, arg4,
                                   sizeof(target_sigset_t), 1);
            if (!target_set) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(set, target_set);
        } else {
            set = NULL;
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        if (arg4) {
            unlock_user(target_set, arg4, 0);
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
            (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
2084 /* do_setsockopt() Must return target values and target errnos. */
2085 static abi_long do_setsockopt(int sockfd, int level, int optname,
2086 abi_ulong optval_addr, socklen_t optlen)
2090 struct ip_mreqn *ip_mreq;
2091 struct ip_mreq_source *ip_mreq_source;
2096 /* TCP and UDP options all take an 'int' value. */
2097 if (optlen < sizeof(uint32_t))
2098 return -TARGET_EINVAL;
2100 if (get_user_u32(val, optval_addr))
2101 return -TARGET_EFAULT;
2102 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2109 case IP_ROUTER_ALERT:
2113 case IP_MTU_DISCOVER:
2120 case IP_MULTICAST_TTL:
2121 case IP_MULTICAST_LOOP:
2123 if (optlen >= sizeof(uint32_t)) {
2124 if (get_user_u32(val, optval_addr))
2125 return -TARGET_EFAULT;
2126 } else if (optlen >= 1) {
2127 if (get_user_u8(val, optval_addr))
2128 return -TARGET_EFAULT;
2130 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2132 case IP_ADD_MEMBERSHIP:
2133 case IP_DROP_MEMBERSHIP:
2134 if (optlen < sizeof (struct target_ip_mreq) ||
2135 optlen > sizeof (struct target_ip_mreqn))
2136 return -TARGET_EINVAL;
2138 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2139 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2140 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2143 case IP_BLOCK_SOURCE:
2144 case IP_UNBLOCK_SOURCE:
2145 case IP_ADD_SOURCE_MEMBERSHIP:
2146 case IP_DROP_SOURCE_MEMBERSHIP:
2147 if (optlen != sizeof (struct target_ip_mreq_source))
2148 return -TARGET_EINVAL;
2150 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2151 if (!ip_mreq_source) {
2152 return -TARGET_EFAULT;
2154 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
        unlock_user(ip_mreq_source, optval_addr, 0);
2164 case IPV6_MTU_DISCOVER:
2167 case IPV6_RECVPKTINFO:
2168 case IPV6_UNICAST_HOPS:
2169 case IPV6_MULTICAST_HOPS:
2170 case IPV6_MULTICAST_LOOP:
2172 case IPV6_RECVHOPLIMIT:
2173 case IPV6_2292HOPLIMIT:
2176 case IPV6_2292PKTINFO:
2177 case IPV6_RECVTCLASS:
2178 case IPV6_RECVRTHDR:
2179 case IPV6_2292RTHDR:
2180 case IPV6_RECVHOPOPTS:
2181 case IPV6_2292HOPOPTS:
2182 case IPV6_RECVDSTOPTS:
2183 case IPV6_2292DSTOPTS:
2185 case IPV6_ADDR_PREFERENCES:
2186 #ifdef IPV6_RECVPATHMTU
2187 case IPV6_RECVPATHMTU:
2189 #ifdef IPV6_TRANSPARENT
2190 case IPV6_TRANSPARENT:
2192 #ifdef IPV6_FREEBIND
2195 #ifdef IPV6_RECVORIGDSTADDR
2196 case IPV6_RECVORIGDSTADDR:
2199 if (optlen < sizeof(uint32_t)) {
2200 return -TARGET_EINVAL;
2202 if (get_user_u32(val, optval_addr)) {
2203 return -TARGET_EFAULT;
2205 ret = get_errno(setsockopt(sockfd, level, optname,
2206 &val, sizeof(val)));
2210 struct in6_pktinfo pki;
2212 if (optlen < sizeof(pki)) {
2213 return -TARGET_EINVAL;
2216 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2217 return -TARGET_EFAULT;
2220 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2222 ret = get_errno(setsockopt(sockfd, level, optname,
2223 &pki, sizeof(pki)));
2226 case IPV6_ADD_MEMBERSHIP:
2227 case IPV6_DROP_MEMBERSHIP:
2229 struct ipv6_mreq ipv6mreq;
2231 if (optlen < sizeof(ipv6mreq)) {
2232 return -TARGET_EINVAL;
2235 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2236 return -TARGET_EFAULT;
2239 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2241 ret = get_errno(setsockopt(sockfd, level, optname,
2242 &ipv6mreq, sizeof(ipv6mreq)));
2253 struct icmp6_filter icmp6f;
2255 if (optlen > sizeof(icmp6f)) {
2256 optlen = sizeof(icmp6f);
2259 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2260 return -TARGET_EFAULT;
2263 for (val = 0; val < 8; val++) {
2264 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2267 ret = get_errno(setsockopt(sockfd, level, optname,
        /* these take a u32 value */
2280 if (optlen < sizeof(uint32_t)) {
2281 return -TARGET_EINVAL;
2284 if (get_user_u32(val, optval_addr)) {
2285 return -TARGET_EFAULT;
2287 ret = get_errno(setsockopt(sockfd, level, optname,
2288 &val, sizeof(val)));
2295 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2300 char *alg_key = g_malloc(optlen);
2303 return -TARGET_ENOMEM;
2305 if (copy_from_user(alg_key, optval_addr, optlen)) {
2307 return -TARGET_EFAULT;
2309 ret = get_errno(setsockopt(sockfd, level, optname,
2314 case ALG_SET_AEAD_AUTHSIZE:
2316 ret = get_errno(setsockopt(sockfd, level, optname,
2325 case TARGET_SOL_SOCKET:
2327 case TARGET_SO_RCVTIMEO:
2331 optname = SO_RCVTIMEO;
2334 if (optlen != sizeof(struct target_timeval)) {
2335 return -TARGET_EINVAL;
2338 if (copy_from_user_timeval(&tv, optval_addr)) {
2339 return -TARGET_EFAULT;
2342 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2346 case TARGET_SO_SNDTIMEO:
2347 optname = SO_SNDTIMEO;
2349 case TARGET_SO_ATTACH_FILTER:
2351 struct target_sock_fprog *tfprog;
2352 struct target_sock_filter *tfilter;
2353 struct sock_fprog fprog;
2354 struct sock_filter *filter;
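        /* A classic BPF instruction is { u16 code, u8 jt, u8 jf, u32 k },
         * so only the 16-bit and 32-bit fields need byte-swapping when
         * converting from target to host order; the one-byte jt/jf jump
         * offsets are copied through unchanged in the loop below. */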
2357 if (optlen != sizeof(*tfprog)) {
2358 return -TARGET_EINVAL;
2360 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2361 return -TARGET_EFAULT;
2363 if (!lock_user_struct(VERIFY_READ, tfilter,
2364 tswapal(tfprog->filter), 0)) {
2365 unlock_user_struct(tfprog, optval_addr, 1);
2366 return -TARGET_EFAULT;
2369 fprog.len = tswap16(tfprog->len);
2370 filter = g_try_new(struct sock_filter, fprog.len);
2371 if (filter == NULL) {
2372 unlock_user_struct(tfilter, tfprog->filter, 1);
2373 unlock_user_struct(tfprog, optval_addr, 1);
2374 return -TARGET_ENOMEM;
2376 for (i = 0; i < fprog.len; i++) {
2377 filter[i].code = tswap16(tfilter[i].code);
2378 filter[i].jt = tfilter[i].jt;
2379 filter[i].jf = tfilter[i].jf;
2380 filter[i].k = tswap32(tfilter[i].k);
2382 fprog.filter = filter;
2384 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2385 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2388 unlock_user_struct(tfilter, tfprog->filter, 1);
2389 unlock_user_struct(tfprog, optval_addr, 1);
2392 case TARGET_SO_BINDTODEVICE:
2394 char *dev_ifname, *addr_ifname;
2396 if (optlen > IFNAMSIZ - 1) {
2397 optlen = IFNAMSIZ - 1;
2399 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2401 return -TARGET_EFAULT;
2403 optname = SO_BINDTODEVICE;
2404 addr_ifname = alloca(IFNAMSIZ);
2405 memcpy(addr_ifname, dev_ifname, optlen);
2406 addr_ifname[optlen] = 0;
2407 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2408 addr_ifname, optlen));
        unlock_user(dev_ifname, optval_addr, 0);
2412 case TARGET_SO_LINGER:
2415 struct target_linger *tlg;
2417 if (optlen != sizeof(struct target_linger)) {
2418 return -TARGET_EINVAL;
2420 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2421 return -TARGET_EFAULT;
2423 __get_user(lg.l_onoff, &tlg->l_onoff);
2424 __get_user(lg.l_linger, &tlg->l_linger);
2425 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2427 unlock_user_struct(tlg, optval_addr, 0);
2430 /* Options with 'int' argument. */
2431 case TARGET_SO_DEBUG:
2434 case TARGET_SO_REUSEADDR:
2435 optname = SO_REUSEADDR;
2438 case TARGET_SO_REUSEPORT:
2439 optname = SO_REUSEPORT;
2442 case TARGET_SO_TYPE:
2445 case TARGET_SO_ERROR:
2448 case TARGET_SO_DONTROUTE:
2449 optname = SO_DONTROUTE;
2451 case TARGET_SO_BROADCAST:
2452 optname = SO_BROADCAST;
2454 case TARGET_SO_SNDBUF:
2455 optname = SO_SNDBUF;
2457 case TARGET_SO_SNDBUFFORCE:
2458 optname = SO_SNDBUFFORCE;
2460 case TARGET_SO_RCVBUF:
2461 optname = SO_RCVBUF;
2463 case TARGET_SO_RCVBUFFORCE:
2464 optname = SO_RCVBUFFORCE;
2466 case TARGET_SO_KEEPALIVE:
2467 optname = SO_KEEPALIVE;
2469 case TARGET_SO_OOBINLINE:
2470 optname = SO_OOBINLINE;
2472 case TARGET_SO_NO_CHECK:
2473 optname = SO_NO_CHECK;
2475 case TARGET_SO_PRIORITY:
2476 optname = SO_PRIORITY;
2479 case TARGET_SO_BSDCOMPAT:
2480 optname = SO_BSDCOMPAT;
2483 case TARGET_SO_PASSCRED:
2484 optname = SO_PASSCRED;
2486 case TARGET_SO_PASSSEC:
2487 optname = SO_PASSSEC;
2489 case TARGET_SO_TIMESTAMP:
2490 optname = SO_TIMESTAMP;
2492 case TARGET_SO_RCVLOWAT:
2493 optname = SO_RCVLOWAT;
2498 if (optlen < sizeof(uint32_t))
2499 return -TARGET_EINVAL;
2501 if (get_user_u32(val, optval_addr))
2502 return -TARGET_EFAULT;
2503 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2508 case NETLINK_PKTINFO:
2509 case NETLINK_ADD_MEMBERSHIP:
2510 case NETLINK_DROP_MEMBERSHIP:
2511 case NETLINK_BROADCAST_ERROR:
2512 case NETLINK_NO_ENOBUFS:
2513 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2514 case NETLINK_LISTEN_ALL_NSID:
2515 case NETLINK_CAP_ACK:
2516 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2517 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2518 case NETLINK_EXT_ACK:
2519 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2520 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2521 case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2528 if (optlen < sizeof(uint32_t)) {
2529 return -TARGET_EINVAL;
2531 if (get_user_u32(val, optval_addr)) {
2532 return -TARGET_EFAULT;
2534 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2537 #endif /* SOL_NETLINK */
2540 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2542 ret = -TARGET_ENOPROTOOPT;
2547 /* do_getsockopt() Must return target values and target errnos. */
2548 static abi_long do_getsockopt(int sockfd, int level, int optname,
2549 abi_ulong optval_addr, abi_ulong optlen)
2556 case TARGET_SOL_SOCKET:
2559 /* These don't just return a single integer */
2560 case TARGET_SO_PEERNAME:
2562 case TARGET_SO_RCVTIMEO: {
2566 optname = SO_RCVTIMEO;
2569 if (get_user_u32(len, optlen)) {
2570 return -TARGET_EFAULT;
2573 return -TARGET_EINVAL;
2577 ret = get_errno(getsockopt(sockfd, level, optname,
2582 if (len > sizeof(struct target_timeval)) {
2583 len = sizeof(struct target_timeval);
2585 if (copy_to_user_timeval(optval_addr, &tv)) {
2586 return -TARGET_EFAULT;
2588 if (put_user_u32(len, optlen)) {
2589 return -TARGET_EFAULT;
2593 case TARGET_SO_SNDTIMEO:
2594 optname = SO_SNDTIMEO;
2596 case TARGET_SO_PEERCRED: {
2599 struct target_ucred *tcr;
2601 if (get_user_u32(len, optlen)) {
2602 return -TARGET_EFAULT;
2605 return -TARGET_EINVAL;
2609 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2617 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2618 return -TARGET_EFAULT;
2620 __put_user(cr.pid, &tcr->pid);
2621 __put_user(cr.uid, &tcr->uid);
2622 __put_user(cr.gid, &tcr->gid);
2623 unlock_user_struct(tcr, optval_addr, 1);
2624 if (put_user_u32(len, optlen)) {
2625 return -TARGET_EFAULT;
2629 case TARGET_SO_PEERSEC: {
2632 if (get_user_u32(len, optlen)) {
2633 return -TARGET_EFAULT;
2636 return -TARGET_EINVAL;
2638 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2640 return -TARGET_EFAULT;
2643 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2645 if (put_user_u32(lv, optlen)) {
2646 ret = -TARGET_EFAULT;
2648 unlock_user(name, optval_addr, lv);
2651 case TARGET_SO_LINGER:
2655 struct target_linger *tlg;
2657 if (get_user_u32(len, optlen)) {
2658 return -TARGET_EFAULT;
2661 return -TARGET_EINVAL;
2665 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2673 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2674 return -TARGET_EFAULT;
2676 __put_user(lg.l_onoff, &tlg->l_onoff);
2677 __put_user(lg.l_linger, &tlg->l_linger);
2678 unlock_user_struct(tlg, optval_addr, 1);
2679 if (put_user_u32(len, optlen)) {
2680 return -TARGET_EFAULT;
2684 /* Options with 'int' argument. */
2685 case TARGET_SO_DEBUG:
2688 case TARGET_SO_REUSEADDR:
2689 optname = SO_REUSEADDR;
2692 case TARGET_SO_REUSEPORT:
2693 optname = SO_REUSEPORT;
2696 case TARGET_SO_TYPE:
2699 case TARGET_SO_ERROR:
2702 case TARGET_SO_DONTROUTE:
2703 optname = SO_DONTROUTE;
2705 case TARGET_SO_BROADCAST:
2706 optname = SO_BROADCAST;
2708 case TARGET_SO_SNDBUF:
2709 optname = SO_SNDBUF;
2711 case TARGET_SO_RCVBUF:
2712 optname = SO_RCVBUF;
2714 case TARGET_SO_KEEPALIVE:
2715 optname = SO_KEEPALIVE;
2717 case TARGET_SO_OOBINLINE:
2718 optname = SO_OOBINLINE;
2720 case TARGET_SO_NO_CHECK:
2721 optname = SO_NO_CHECK;
2723 case TARGET_SO_PRIORITY:
2724 optname = SO_PRIORITY;
2727 case TARGET_SO_BSDCOMPAT:
2728 optname = SO_BSDCOMPAT;
2731 case TARGET_SO_PASSCRED:
2732 optname = SO_PASSCRED;
2734 case TARGET_SO_TIMESTAMP:
2735 optname = SO_TIMESTAMP;
2737 case TARGET_SO_RCVLOWAT:
2738 optname = SO_RCVLOWAT;
2740 case TARGET_SO_ACCEPTCONN:
2741 optname = SO_ACCEPTCONN;
2743 case TARGET_SO_PROTOCOL:
2744 optname = SO_PROTOCOL;
2746 case TARGET_SO_DOMAIN:
2747 optname = SO_DOMAIN;
2755 /* TCP and UDP options all take an 'int' value. */
2757 if (get_user_u32(len, optlen))
2758 return -TARGET_EFAULT;
2760 return -TARGET_EINVAL;
2762 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2765 if (optname == SO_TYPE) {
2766 val = host_to_target_sock_type(val);
2771 if (put_user_u32(val, optval_addr))
2772 return -TARGET_EFAULT;
2774 if (put_user_u8(val, optval_addr))
2775 return -TARGET_EFAULT;
2777 if (put_user_u32(len, optlen))
2778 return -TARGET_EFAULT;
2785 case IP_ROUTER_ALERT:
2789 case IP_MTU_DISCOVER:
2795 case IP_MULTICAST_TTL:
2796 case IP_MULTICAST_LOOP:
2797 if (get_user_u32(len, optlen))
2798 return -TARGET_EFAULT;
2800 return -TARGET_EINVAL;
2802 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
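        /* This mirrors the kernel's ip_getsockopt() behaviour: if the
         * guest asked for less than an int and the value fits in a byte,
         * store a single byte rather than a full int. */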
2805 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2807 if (put_user_u32(len, optlen)
2808 || put_user_u8(val, optval_addr))
2809 return -TARGET_EFAULT;
        if (len > sizeof(int))
            len = sizeof(int);
2813 if (put_user_u32(len, optlen)
2814 || put_user_u32(val, optval_addr))
2815 return -TARGET_EFAULT;
2819 ret = -TARGET_ENOPROTOOPT;
2825 case IPV6_MTU_DISCOVER:
2828 case IPV6_RECVPKTINFO:
2829 case IPV6_UNICAST_HOPS:
2830 case IPV6_MULTICAST_HOPS:
2831 case IPV6_MULTICAST_LOOP:
2833 case IPV6_RECVHOPLIMIT:
2834 case IPV6_2292HOPLIMIT:
2837 case IPV6_2292PKTINFO:
2838 case IPV6_RECVTCLASS:
2839 case IPV6_RECVRTHDR:
2840 case IPV6_2292RTHDR:
2841 case IPV6_RECVHOPOPTS:
2842 case IPV6_2292HOPOPTS:
2843 case IPV6_RECVDSTOPTS:
2844 case IPV6_2292DSTOPTS:
2846 case IPV6_ADDR_PREFERENCES:
2847 #ifdef IPV6_RECVPATHMTU
2848 case IPV6_RECVPATHMTU:
2850 #ifdef IPV6_TRANSPARENT
2851 case IPV6_TRANSPARENT:
2853 #ifdef IPV6_FREEBIND
2856 #ifdef IPV6_RECVORIGDSTADDR
2857 case IPV6_RECVORIGDSTADDR:
2859 if (get_user_u32(len, optlen))
2860 return -TARGET_EFAULT;
2862 return -TARGET_EINVAL;
2864 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2867 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2869 if (put_user_u32(len, optlen)
2870 || put_user_u8(val, optval_addr))
2871 return -TARGET_EFAULT;
        if (len > sizeof(int))
            len = sizeof(int);
2875 if (put_user_u32(len, optlen)
2876 || put_user_u32(val, optval_addr))
2877 return -TARGET_EFAULT;
2881 ret = -TARGET_ENOPROTOOPT;
2888 case NETLINK_PKTINFO:
2889 case NETLINK_BROADCAST_ERROR:
2890 case NETLINK_NO_ENOBUFS:
2891 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2892 case NETLINK_LISTEN_ALL_NSID:
2893 case NETLINK_CAP_ACK:
2894 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2895 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2896 case NETLINK_EXT_ACK:
2897 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2898 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2899 case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2901 if (get_user_u32(len, optlen)) {
2902 return -TARGET_EFAULT;
2904 if (len != sizeof(val)) {
2905 return -TARGET_EINVAL;
2908 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2912 if (put_user_u32(lv, optlen)
2913 || put_user_u32(val, optval_addr)) {
2914 return -TARGET_EFAULT;
2917 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2918 case NETLINK_LIST_MEMBERSHIPS:
2922 if (get_user_u32(len, optlen)) {
2923 return -TARGET_EFAULT;
2926 return -TARGET_EINVAL;
2928 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2929 if (!results && len > 0) {
2930 return -TARGET_EFAULT;
2933 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2935 unlock_user(results, optval_addr, 0);
            /* swap host endianness to target endianness. */
2939 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2940 results[i] = tswap32(results[i]);
2942 if (put_user_u32(lv, optlen)) {
2943 return -TARGET_EFAULT;
2945 unlock_user(results, optval_addr, 0);
2948 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2953 #endif /* SOL_NETLINK */
2956 qemu_log_mask(LOG_UNIMP,
2957 "getsockopt level=%d optname=%d not yet supported\n",
2959 ret = -TARGET_EOPNOTSUPP;
2965 /* Convert target low/high pair representing file offset into the host
2966 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
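/* A worked example with illustrative values: for a 32-bit target on a
 * 64-bit host, tlow = 0x89abcdef and thigh = 0x01234567 combine into
 * off = 0x0123456789abcdef, so *hlow = 0x0123456789abcdef and
 * *hhigh = 0, since the host can hold the whole offset in one long. */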
2969 static void target_to_host_low_high(abi_ulong tlow,
2971 unsigned long *hlow,
2972 unsigned long *hhigh)
2974 uint64_t off = tlow |
2975 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2976 TARGET_LONG_BITS / 2;
    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2982 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2983 abi_ulong count, int copy)
2985 struct target_iovec *target_vec;
2987 abi_ulong total_len, max_len;
2990 bool bad_address = false;
2996 if (count > IOV_MAX) {
3001 vec = g_try_new0(struct iovec, count);
3007 target_vec = lock_user(VERIFY_READ, target_addr,
3008 count * sizeof(struct target_iovec), 1);
3009 if (target_vec == NULL) {
3014 /* ??? If host page size > target page size, this will result in a
3015 value larger than what we can actually support. */
3016 max_len = 0x7fffffff & TARGET_PAGE_MASK;
3019 for (i = 0; i < count; i++) {
3020 abi_ulong base = tswapal(target_vec[i].iov_base);
3021 abi_long len = tswapal(target_vec[i].iov_len);
3026 } else if (len == 0) {
3027 /* Zero length pointer is ignored. */
3028 vec[i].iov_base = 0;
3030 vec[i].iov_base = lock_user(type, base, len, copy);
3031 /* If the first buffer pointer is bad, this is a fault. But
3032 * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
3035 if (!vec[i].iov_base) {
3046 if (len > max_len - total_len) {
3047 len = max_len - total_len;
3050 vec[i].iov_len = len;
3054 unlock_user(target_vec, target_addr, 0);
3059 if (tswapal(target_vec[i].iov_len) > 0) {
3060 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3063 unlock_user(target_vec, target_addr, 0);
3070 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3071 abi_ulong count, int copy)
3073 struct target_iovec *target_vec;
3076 target_vec = lock_user(VERIFY_READ, target_addr,
3077 count * sizeof(struct target_iovec), 1);
3079 for (i = 0; i < count; i++) {
3080 abi_ulong base = tswapal(target_vec[i].iov_base);
3081 abi_long len = tswapal(target_vec[i].iov_len);
3085 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3087 unlock_user(target_vec, target_addr, 0);
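/* Translate a guest socket type into the host's: the base type is mapped
 * explicitly, and the TARGET_SOCK_CLOEXEC / TARGET_SOCK_NONBLOCK creation
 * flags are converted where the host defines them. A flag the host cannot
 * express at all yields -TARGET_EINVAL; a missing SOCK_NONBLOCK is
 * tolerated when O_NONBLOCK exists, because sock_flags_fixup() below
 * emulates it with fcntl() after the socket is created. */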
3093 static inline int target_to_host_sock_type(int *type)
3096 int target_type = *type;
3098 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3099 case TARGET_SOCK_DGRAM:
3100 host_type = SOCK_DGRAM;
3102 case TARGET_SOCK_STREAM:
3103 host_type = SOCK_STREAM;
3106 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3109 if (target_type & TARGET_SOCK_CLOEXEC) {
3110 #if defined(SOCK_CLOEXEC)
3111 host_type |= SOCK_CLOEXEC;
3113 return -TARGET_EINVAL;
3116 if (target_type & TARGET_SOCK_NONBLOCK) {
3117 #if defined(SOCK_NONBLOCK)
3118 host_type |= SOCK_NONBLOCK;
3119 #elif !defined(O_NONBLOCK)
3120 return -TARGET_EINVAL;
3127 /* Try to emulate socket type flags after socket creation. */
3128 static int sock_flags_fixup(int fd, int target_type)
3130 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3131 if (target_type & TARGET_SOCK_NONBLOCK) {
3132 int flags = fcntl(fd, F_GETFL);
3133 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3135 return -TARGET_EINVAL;
3142 /* do_socket() Must return target values and target errnos. */
3143 static abi_long do_socket(int domain, int type, int protocol)
3145 int target_type = type;
3148 ret = target_to_host_sock_type(&type);
3153 if (domain == PF_NETLINK && !(
3154 #ifdef CONFIG_RTNETLINK
3155 protocol == NETLINK_ROUTE ||
3157 protocol == NETLINK_KOBJECT_UEVENT ||
3158 protocol == NETLINK_AUDIT)) {
3159 return -TARGET_EPROTONOSUPPORT;
3162 if (domain == AF_PACKET ||
3163 (domain == AF_INET && type == SOCK_PACKET)) {
3164 protocol = tswap16(protocol);
3167 ret = get_errno(socket(domain, type, protocol));
3169 ret = sock_flags_fixup(ret, target_type);
3170 if (type == SOCK_PACKET) {
            /* Handle an obsolete case: if the socket type is
             * SOCK_PACKET, bind by name. */
3174 fd_trans_register(ret, &target_packet_trans);
3175 } else if (domain == PF_NETLINK) {
3177 #ifdef CONFIG_RTNETLINK
3179 fd_trans_register(ret, &target_netlink_route_trans);
3182 case NETLINK_KOBJECT_UEVENT:
3183 /* nothing to do: messages are strings */
3186 fd_trans_register(ret, &target_netlink_audit_trans);
3189 g_assert_not_reached();
3196 /* do_bind() Must return target values and target errnos. */
3197 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3203 if ((int)addrlen < 0) {
3204 return -TARGET_EINVAL;
3207 addr = alloca(addrlen+1);
3209 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3213 return get_errno(bind(sockfd, addr, addrlen));
3216 /* do_connect() Must return target values and target errnos. */
3217 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3223 if ((int)addrlen < 0) {
3224 return -TARGET_EINVAL;
3227 addr = alloca(addrlen+1);
3229 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3233 return get_errno(safe_connect(sockfd, addr, addrlen));
3236 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3237 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3238 int flags, int send)
3244 abi_ulong target_vec;
3246 if (msgp->msg_name) {
3247 msg.msg_namelen = tswap32(msgp->msg_namelen);
3248 msg.msg_name = alloca(msg.msg_namelen+1);
3249 ret = target_to_host_sockaddr(fd, msg.msg_name,
3250 tswapal(msgp->msg_name),
3252 if (ret == -TARGET_EFAULT) {
3253 /* For connected sockets msg_name and msg_namelen must
3254 * be ignored, so returning EFAULT immediately is wrong.
3255 * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not. */
3258 msg.msg_name = (void *)-1;
3263 msg.msg_name = NULL;
3264 msg.msg_namelen = 0;
3266 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3267 msg.msg_control = alloca(msg.msg_controllen);
3268 memset(msg.msg_control, 0, msg.msg_controllen);
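    /* msg_controllen is doubled above because translated control messages
     * can legitimately grow: host cmsg headers and alignment may be larger
     * than the target's (e.g. a 64-bit host running a 32-bit guest). */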
3270 msg.msg_flags = tswap32(msgp->msg_flags);
3272 count = tswapal(msgp->msg_iovlen);
3273 target_vec = tswapal(msgp->msg_iov);
3275 if (count > IOV_MAX) {
        /* sendmsg/recvmsg return a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does. */
3279 ret = -TARGET_EMSGSIZE;
3283 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3284 target_vec, count, send);
3286 ret = -host_to_target_errno(errno);
3289 msg.msg_iovlen = count;
3293 if (fd_trans_target_to_host_data(fd)) {
3296 host_msg = g_malloc(msg.msg_iov->iov_len);
3297 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3298 ret = fd_trans_target_to_host_data(fd)(host_msg,
3299 msg.msg_iov->iov_len);
3301 msg.msg_iov->iov_base = host_msg;
3302 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3306 ret = target_to_host_cmsg(&msg, msgp);
3308 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3312 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3313 if (!is_error(ret)) {
3315 if (fd_trans_host_to_target_data(fd)) {
3316 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3317 MIN(msg.msg_iov->iov_len, len));
3319 ret = host_to_target_cmsg(msgp, &msg);
3321 if (!is_error(ret)) {
3322 msgp->msg_namelen = tswap32(msg.msg_namelen);
3323 msgp->msg_flags = tswap32(msg.msg_flags);
3324 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3325 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3326 msg.msg_name, msg.msg_namelen);
3338 unlock_iovec(vec, target_vec, count, !send);
3343 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3344 int flags, int send)
3347 struct target_msghdr *msgp;
3349 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3353 return -TARGET_EFAULT;
3355 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3356 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3360 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either. */
3363 #ifndef MSG_WAITFORONE
3364 #define MSG_WAITFORONE 0x10000
3367 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3368 unsigned int vlen, unsigned int flags,
3371 struct target_mmsghdr *mmsgp;
3375 if (vlen > UIO_MAXIOV) {
3379 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3381 return -TARGET_EFAULT;
3384 for (i = 0; i < vlen; i++) {
3385 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3386 if (is_error(ret)) {
3389 mmsgp[i].msg_len = tswap32(ret);
3390 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3391 if (flags & MSG_WAITFORONE) {
3392 flags |= MSG_DONTWAIT;
3396 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3398 /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error. */
3407 /* do_accept4() Must return target values and target errnos. */
3408 static abi_long do_accept4(int fd, abi_ulong target_addr,
3409 abi_ulong target_addrlen_addr, int flags)
3411 socklen_t addrlen, ret_addrlen;
3416 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3418 if (target_addr == 0) {
3419 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    /* Linux returns EFAULT if the addrlen pointer is invalid */
3423 if (get_user_u32(addrlen, target_addrlen_addr))
3424 return -TARGET_EFAULT;
3426 if ((int)addrlen < 0) {
3427 return -TARGET_EINVAL;
3430 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3431 return -TARGET_EFAULT;
3434 addr = alloca(addrlen);
3436 ret_addrlen = addrlen;
3437 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3438 if (!is_error(ret)) {
3439 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3440 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3441 ret = -TARGET_EFAULT;
3447 /* do_getpeername() Must return target values and target errnos. */
3448 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3449 abi_ulong target_addrlen_addr)
3451 socklen_t addrlen, ret_addrlen;
3455 if (get_user_u32(addrlen, target_addrlen_addr))
3456 return -TARGET_EFAULT;
3458 if ((int)addrlen < 0) {
3459 return -TARGET_EINVAL;
3462 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3463 return -TARGET_EFAULT;
3466 addr = alloca(addrlen);
3468 ret_addrlen = addrlen;
3469 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3470 if (!is_error(ret)) {
3471 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3472 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3473 ret = -TARGET_EFAULT;
3479 /* do_getsockname() Must return target values and target errnos. */
3480 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3481 abi_ulong target_addrlen_addr)
3483 socklen_t addrlen, ret_addrlen;
3487 if (get_user_u32(addrlen, target_addrlen_addr))
3488 return -TARGET_EFAULT;
3490 if ((int)addrlen < 0) {
3491 return -TARGET_EINVAL;
3494 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3495 return -TARGET_EFAULT;
3498 addr = alloca(addrlen);
3500 ret_addrlen = addrlen;
3501 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3502 if (!is_error(ret)) {
3503 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3504 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3505 ret = -TARGET_EFAULT;
3511 /* do_socketpair() Must return target values and target errnos. */
3512 static abi_long do_socketpair(int domain, int type, int protocol,
3513 abi_ulong target_tab_addr)
3518 target_to_host_sock_type(&type);
3520 ret = get_errno(socketpair(domain, type, protocol, tab));
3521 if (!is_error(ret)) {
3522 if (put_user_s32(tab[0], target_tab_addr)
3523 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3524 ret = -TARGET_EFAULT;
3529 /* do_sendto() Must return target values and target errnos. */
3530 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3531 abi_ulong target_addr, socklen_t addrlen)
3535 void *copy_msg = NULL;
3538 if ((int)addrlen < 0) {
3539 return -TARGET_EINVAL;
3542 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3544 return -TARGET_EFAULT;
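    /* The fd translator may rewrite the payload in place (for example,
     * byte-swapping netlink headers), so it must work on a private copy
     * rather than on the guest's locked buffer. */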
3545 if (fd_trans_target_to_host_data(fd)) {
3546 copy_msg = host_msg;
3547 host_msg = g_malloc(len);
3548 memcpy(host_msg, copy_msg, len);
3549 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3555 addr = alloca(addrlen+1);
3556 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3560 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3562 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3567 host_msg = copy_msg;
3569 unlock_user(host_msg, msg, 0);
3573 /* do_recvfrom() Must return target values and target errnos. */
3574 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3575 abi_ulong target_addr,
3576 abi_ulong target_addrlen)
3578 socklen_t addrlen, ret_addrlen;
3586 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3588 return -TARGET_EFAULT;
3592 if (get_user_u32(addrlen, target_addrlen)) {
3593 ret = -TARGET_EFAULT;
3596 if ((int)addrlen < 0) {
3597 ret = -TARGET_EINVAL;
3600 addr = alloca(addrlen);
3601 ret_addrlen = addrlen;
3602 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3603 addr, &ret_addrlen));
3605 addr = NULL; /* To keep compiler quiet. */
3606 addrlen = 0; /* To keep compiler quiet. */
3607 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3609 if (!is_error(ret)) {
3610 if (fd_trans_host_to_target_data(fd)) {
3612 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3613 if (is_error(trans)) {
3619 host_to_target_sockaddr(target_addr, addr,
3620 MIN(addrlen, ret_addrlen));
3621 if (put_user_u32(ret_addrlen, target_addrlen)) {
3622 ret = -TARGET_EFAULT;
3626 unlock_user(host_msg, msg, len);
3629 unlock_user(host_msg, msg, 0);
3634 #ifdef TARGET_NR_socketcall
3635 /* do_socketcall() must return target values and target errnos. */
3636 static abi_long do_socketcall(int num, abi_ulong vptr)
3638 static const unsigned nargs[] = { /* number of arguments per operation */
3639 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3640 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3641 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3642 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3643 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3644 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3645 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3646 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3647 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3648 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3649 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3650 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3651 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3652 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3653 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3654 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3655 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3656 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3657 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3658 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3660 abi_long a[6]; /* max 6 args */
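    /* For example, a guest connect() issued via socketcall arrives here
     * as num == TARGET_SYS_CONNECT with vptr pointing at three abi_longs:
     * { sockfd, addr, addrlen }. */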
3663 /* check the range of the first argument num */
3664 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3665 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3666 return -TARGET_EINVAL;
3668 /* ensure we have space for args */
3669 if (nargs[num] > ARRAY_SIZE(a)) {
3670 return -TARGET_EINVAL;
3672 /* collect the arguments in a[] according to nargs[] */
3673 for (i = 0; i < nargs[num]; ++i) {
3674 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3675 return -TARGET_EFAULT;
    /* now that we have the args, invoke the appropriate underlying function */
3680 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3681 return do_socket(a[0], a[1], a[2]);
3682 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3683 return do_bind(a[0], a[1], a[2]);
3684 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3685 return do_connect(a[0], a[1], a[2]);
3686 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3687 return get_errno(listen(a[0], a[1]));
3688 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3689 return do_accept4(a[0], a[1], a[2], 0);
3690 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3691 return do_getsockname(a[0], a[1], a[2]);
3692 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3693 return do_getpeername(a[0], a[1], a[2]);
3694 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3695 return do_socketpair(a[0], a[1], a[2], a[3]);
3696 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3697 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3698 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3699 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3700 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3701 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3702 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3703 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3704 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3705 return get_errno(shutdown(a[0], a[1]));
3706 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3707 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3708 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3709 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3710 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3711 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3712 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3713 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3714 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3715 return do_accept4(a[0], a[1], a[2], a[3]);
3716 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3717 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3718 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3719 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3721 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3722 return -TARGET_EINVAL;
3727 #define N_SHM_REGIONS 32
3729 static struct shm_region {
3733 } shm_regions[N_SHM_REGIONS];
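/* This table remembers guest shmat() mappings so that do_shmdt() can
 * clear page flags over the correct length; the host shmdt() call itself
 * does not report the size of the detached segment. */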
3735 #ifndef TARGET_SEMID64_DS
3736 /* asm-generic version of this struct */
3737 struct target_semid64_ds
3739 struct target_ipc_perm sem_perm;
3740 abi_ulong sem_otime;
3741 #if TARGET_ABI_BITS == 32
3742 abi_ulong __unused1;
3744 abi_ulong sem_ctime;
3745 #if TARGET_ABI_BITS == 32
3746 abi_ulong __unused2;
3748 abi_ulong sem_nsems;
3749 abi_ulong __unused3;
3750 abi_ulong __unused4;
3754 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3755 abi_ulong target_addr)
3757 struct target_ipc_perm *target_ip;
3758 struct target_semid64_ds *target_sd;
3760 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3761 return -TARGET_EFAULT;
3762 target_ip = &(target_sd->sem_perm);
3763 host_ip->__key = tswap32(target_ip->__key);
3764 host_ip->uid = tswap32(target_ip->uid);
3765 host_ip->gid = tswap32(target_ip->gid);
3766 host_ip->cuid = tswap32(target_ip->cuid);
3767 host_ip->cgid = tswap32(target_ip->cgid);
3768 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3769 host_ip->mode = tswap32(target_ip->mode);
3771 host_ip->mode = tswap16(target_ip->mode);
3773 #if defined(TARGET_PPC)
3774 host_ip->__seq = tswap32(target_ip->__seq);
3776 host_ip->__seq = tswap16(target_ip->__seq);
3778 unlock_user_struct(target_sd, target_addr, 0);
3782 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3783 struct ipc_perm *host_ip)
3785 struct target_ipc_perm *target_ip;
3786 struct target_semid64_ds *target_sd;
3788 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3789 return -TARGET_EFAULT;
3790 target_ip = &(target_sd->sem_perm);
3791 target_ip->__key = tswap32(host_ip->__key);
3792 target_ip->uid = tswap32(host_ip->uid);
3793 target_ip->gid = tswap32(host_ip->gid);
3794 target_ip->cuid = tswap32(host_ip->cuid);
3795 target_ip->cgid = tswap32(host_ip->cgid);
3796 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3797 target_ip->mode = tswap32(host_ip->mode);
3799 target_ip->mode = tswap16(host_ip->mode);
3801 #if defined(TARGET_PPC)
3802 target_ip->__seq = tswap32(host_ip->__seq);
3804 target_ip->__seq = tswap16(host_ip->__seq);
3806 unlock_user_struct(target_sd, target_addr, 1);
3810 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3811 abi_ulong target_addr)
3813 struct target_semid64_ds *target_sd;
3815 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3816 return -TARGET_EFAULT;
3817 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3818 return -TARGET_EFAULT;
3819 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3820 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3821 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3822 unlock_user_struct(target_sd, target_addr, 0);
3826 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3827 struct semid_ds *host_sd)
3829 struct target_semid64_ds *target_sd;
3831 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3832 return -TARGET_EFAULT;
3833 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3834 return -TARGET_EFAULT;
3835 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3836 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3837 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3838 unlock_user_struct(target_sd, target_addr, 1);
3842 struct target_seminfo {
3855 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3856 struct seminfo *host_seminfo)
3858 struct target_seminfo *target_seminfo;
3859 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3860 return -TARGET_EFAULT;
3861 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3862 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3863 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3864 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3865 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3866 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3867 __put_user(host_seminfo->semume, &target_seminfo->semume);
3868 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3869 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3870 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3871 unlock_user_struct(target_seminfo, target_addr, 1);
3877 struct semid_ds *buf;
3878 unsigned short *array;
3879 struct seminfo *__buf;
3882 union target_semun {
3889 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3890 abi_ulong target_addr)
3893 unsigned short *array;
3895 struct semid_ds semid_ds;
3898 semun.buf = &semid_ds;
3900 ret = semctl(semid, 0, IPC_STAT, semun);
3902 return get_errno(ret);
3904 nsems = semid_ds.sem_nsems;
3906 *host_array = g_try_new(unsigned short, nsems);
3908 return -TARGET_ENOMEM;
3910 array = lock_user(VERIFY_READ, target_addr,
3911 nsems*sizeof(unsigned short), 1);
3913 g_free(*host_array);
3914 return -TARGET_EFAULT;
    for (i = 0; i < nsems; i++) {
3918 __get_user((*host_array)[i], &array[i]);
3920 unlock_user(array, target_addr, 0);
3925 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3926 unsigned short **host_array)
3929 unsigned short *array;
3931 struct semid_ds semid_ds;
3934 semun.buf = &semid_ds;
3936 ret = semctl(semid, 0, IPC_STAT, semun);
3938 return get_errno(ret);
3940 nsems = semid_ds.sem_nsems;
3942 array = lock_user(VERIFY_WRITE, target_addr,
3943 nsems*sizeof(unsigned short), 0);
3945 return -TARGET_EFAULT;
    for (i = 0; i < nsems; i++) {
3948 __put_user((*host_array)[i], &array[i]);
3950 g_free(*host_array);
3951 unlock_user(array, target_addr, 1);
3956 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3957 abi_ulong target_arg)
3959 union target_semun target_su = { .buf = target_arg };
3961 struct semid_ds dsarg;
3962 unsigned short *array = NULL;
3963 struct seminfo seminfo;
3964 abi_long ret = -TARGET_EINVAL;
        /* In 64-bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element. To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4-byte val field. In other cases, the data is
         * already in proper host byte order. */
3976 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3977 target_su.buf = tswapal(target_su.buf);
3978 arg.val = tswap32(target_su.val);
3980 arg.val = target_su.val;
3982 ret = get_errno(semctl(semid, semnum, cmd, arg));
3986 err = target_to_host_semarray(semid, &array, target_su.array);
3990 ret = get_errno(semctl(semid, semnum, cmd, arg));
3991 err = host_to_target_semarray(semid, target_su.array, &array);
3998 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4002 ret = get_errno(semctl(semid, semnum, cmd, arg));
4003 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4009 arg.__buf = &seminfo;
4010 ret = get_errno(semctl(semid, semnum, cmd, arg));
4011 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4019 ret = get_errno(semctl(semid, semnum, cmd, NULL));
4026 struct target_sembuf {
4027 unsigned short sem_num;
4032 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4033 abi_ulong target_addr,
4036 struct target_sembuf *target_sembuf;
4039 target_sembuf = lock_user(VERIFY_READ, target_addr,
4040 nsops*sizeof(struct target_sembuf), 1);
4042 return -TARGET_EFAULT;
    for (i = 0; i < nsops; i++) {
4045 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4046 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4047 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4050 unlock_user(target_sembuf, target_addr, 0);
4055 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4056 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
 * This macro is required to handle the s390 variants, which pass the
 * arguments in a different order than default.
 */
#if defined(__s390x__)
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
    (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
    (__nsops), 0, (__sops), (__timeout)
#endif
4070 static inline abi_long do_semtimedop(int semid,
4073 abi_long timeout, bool time64)
4075 struct sembuf *sops;
4076 struct timespec ts, *pts = NULL;
4082 if (target_to_host_timespec64(pts, timeout)) {
4083 return -TARGET_EFAULT;
4086 if (target_to_host_timespec(pts, timeout)) {
4087 return -TARGET_EFAULT;
4092 if (nsops > TARGET_SEMOPM) {
4093 return -TARGET_E2BIG;
4096 sops = g_new(struct sembuf, nsops);
4098 if (target_to_host_sembuf(sops, ptr, nsops)) {
4100 return -TARGET_EFAULT;
4103 ret = -TARGET_ENOSYS;
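    /* Prefer the host's dedicated semtimedop syscall where it exists;
     * otherwise fall back to the multiplexed ipc syscall below. */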
4104 #ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
4108 if (ret == -TARGET_ENOSYS) {
4109 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4110 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4118 struct target_msqid_ds
4120 struct target_ipc_perm msg_perm;
4121 abi_ulong msg_stime;
4122 #if TARGET_ABI_BITS == 32
4123 abi_ulong __unused1;
4125 abi_ulong msg_rtime;
4126 #if TARGET_ABI_BITS == 32
4127 abi_ulong __unused2;
4129 abi_ulong msg_ctime;
4130 #if TARGET_ABI_BITS == 32
4131 abi_ulong __unused3;
4133 abi_ulong __msg_cbytes;
4135 abi_ulong msg_qbytes;
4136 abi_ulong msg_lspid;
4137 abi_ulong msg_lrpid;
4138 abi_ulong __unused4;
4139 abi_ulong __unused5;
4142 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4143 abi_ulong target_addr)
4145 struct target_msqid_ds *target_md;
4147 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4148 return -TARGET_EFAULT;
4149 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4150 return -TARGET_EFAULT;
4151 host_md->msg_stime = tswapal(target_md->msg_stime);
4152 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4153 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4154 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4155 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4156 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4157 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4158 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4159 unlock_user_struct(target_md, target_addr, 0);
4163 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4164 struct msqid_ds *host_md)
4166 struct target_msqid_ds *target_md;
4168 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4169 return -TARGET_EFAULT;
4170 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4171 return -TARGET_EFAULT;
4172 target_md->msg_stime = tswapal(host_md->msg_stime);
4173 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4174 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4175 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4176 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4177 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4178 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4179 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4180 unlock_user_struct(target_md, target_addr, 1);
4184 struct target_msginfo {
4192 unsigned short int msgseg;
4195 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4196 struct msginfo *host_msginfo)
4198 struct target_msginfo *target_msginfo;
4199 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4200 return -TARGET_EFAULT;
4201 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4202 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4203 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4204 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4205 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4206 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4207 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4208 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4209 unlock_user_struct(target_msginfo, target_addr, 1);
4213 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4215 struct msqid_ds dsarg;
4216 struct msginfo msginfo;
4217 abi_long ret = -TARGET_EINVAL;
4225 if (target_to_host_msqid_ds(&dsarg,ptr))
4226 return -TARGET_EFAULT;
4227 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4228 if (host_to_target_msqid_ds(ptr,&dsarg))
4229 return -TARGET_EFAULT;
4232 ret = get_errno(msgctl(msgid, cmd, NULL));
4236 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4237 if (host_to_target_msginfo(ptr, &msginfo))
4238 return -TARGET_EFAULT;
4245 struct target_msgbuf {
4250 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4251 ssize_t msgsz, int msgflg)
4253 struct target_msgbuf *target_mb;
4254 struct msgbuf *host_mb;
4258 return -TARGET_EINVAL;
4261 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4262 return -TARGET_EFAULT;
4263 host_mb = g_try_malloc(msgsz + sizeof(long));
4265 unlock_user_struct(target_mb, msgp, 0);
4266 return -TARGET_ENOMEM;
4268 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4269 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4270 ret = -TARGET_ENOSYS;
4272 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4275 if (ret == -TARGET_ENOSYS) {
4277 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4280 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4286 unlock_user_struct(target_mb, msgp, 0);
4292 #if defined(__sparc__)
/* On SPARC, msgrcv does not use the kludge on the final 2 arguments. */
4294 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4295 #elif defined(__s390x__)
4296 /* The s390 sys_ipc variant has only five parameters. */
4297 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4298 ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
4305 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4306 ssize_t msgsz, abi_long msgtyp,
4309 struct target_msgbuf *target_mb;
4311 struct msgbuf *host_mb;
4315 return -TARGET_EINVAL;
4318 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4319 return -TARGET_EFAULT;
4321 host_mb = g_try_malloc(msgsz + sizeof(long));
4323 ret = -TARGET_ENOMEM;
4326 ret = -TARGET_ENOSYS;
4328 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4331 if (ret == -TARGET_ENOSYS) {
4332 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4333 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4338 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4339 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4340 if (!target_mtext) {
4341 ret = -TARGET_EFAULT;
4344 memcpy(target_mb->mtext, host_mb->mtext, ret);
4345 unlock_user(target_mtext, target_mtext_addr, ret);
4348 target_mb->mtype = tswapal(host_mb->mtype);
4352 unlock_user_struct(target_mb, msgp, 1);
4357 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4358 abi_ulong target_addr)
4360 struct target_shmid_ds *target_sd;
4362 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4363 return -TARGET_EFAULT;
4364 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4365 return -TARGET_EFAULT;
4366 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4367 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4368 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4369 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4370 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4371 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4372 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4373 unlock_user_struct(target_sd, target_addr, 0);
4377 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4378 struct shmid_ds *host_sd)
4380 struct target_shmid_ds *target_sd;
4382 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4383 return -TARGET_EFAULT;
4384 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4385 return -TARGET_EFAULT;
4386 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4387 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4388 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4389 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4390 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4391 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4392 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4393 unlock_user_struct(target_sd, target_addr, 1);
4397 struct target_shminfo {
4405 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4406 struct shminfo *host_shminfo)
4408 struct target_shminfo *target_shminfo;
4409 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4410 return -TARGET_EFAULT;
4411 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4412 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4413 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4414 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4415 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4416 unlock_user_struct(target_shminfo, target_addr, 1);
4420 struct target_shm_info {
4425 abi_ulong swap_attempts;
4426 abi_ulong swap_successes;
4429 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4430 struct shm_info *host_shm_info)
4432 struct target_shm_info *target_shm_info;
4433 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4434 return -TARGET_EFAULT;
4435 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4436 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4437 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4438 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4439 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4440 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4441 unlock_user_struct(target_shm_info, target_addr, 1);
4445 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4447 struct shmid_ds dsarg;
4448 struct shminfo shminfo;
4449 struct shm_info shm_info;
4450 abi_long ret = -TARGET_EINVAL;
4458 if (target_to_host_shmid_ds(&dsarg, buf))
4459 return -TARGET_EFAULT;
4460 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4461 if (host_to_target_shmid_ds(buf, &dsarg))
4462 return -TARGET_EFAULT;
4465 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4466 if (host_to_target_shminfo(buf, &shminfo))
4467 return -TARGET_EFAULT;
4470 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4471 if (host_to_target_shm_info(buf, &shm_info))
4472 return -TARGET_EFAULT;
4477 ret = get_errno(shmctl(shmid, cmd, NULL));
4484 #ifndef TARGET_FORCE_SHMLBA
4485 /* For most architectures, SHMLBA is the same as the page size;
4486 * some architectures have larger values, in which case they should
4487 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4488 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4489 * and defining its own value for SHMLBA.
4491 * The kernel also permits SHMLBA to be set by the architecture to a
4492 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4493 * this means that addresses are rounded to the large size if
4494 * SHM_RND is set but addresses not aligned to that size are not rejected
4495 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64, this code doesn't provide for that oddity.
 */
4498 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4500 return TARGET_PAGE_SIZE;
4504 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4505 int shmid, abi_ulong shmaddr, int shmflg)
4507 CPUState *cpu = env_cpu(cpu_env);
4510 struct shmid_ds shm_info;
4514 /* shmat pointers are always untagged */
4516 /* find out the length of the shared memory segment */
4517 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4518 if (is_error(ret)) {
4519 /* can't get length, bail out */
4523 shmlba = target_shmlba(cpu_env);
4525 if (shmaddr & (shmlba - 1)) {
4526 if (shmflg & SHM_RND) {
4527 shmaddr &= ~(shmlba - 1);
4529 return -TARGET_EINVAL;
4532 if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4533 return -TARGET_EINVAL;
    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations. This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
4544 if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4545 cpu->tcg_cflags |= CF_PARALLEL;
4550 host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4552 abi_ulong mmap_start;
4554 /* In order to use the host shmat, we need to honor host SHMLBA. */
4555 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4557 if (mmap_start == -1) {
4559 host_raddr = (void *)-1;
4561 host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4562 shmflg | SHM_REMAP);
4565 if (host_raddr == (void *)-1) {
4567 return get_errno((long)host_raddr);
    raddr = h2g((unsigned long)host_raddr);
4571 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4572 PAGE_VALID | PAGE_RESET | PAGE_READ |
4573 (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4575 for (i = 0; i < N_SHM_REGIONS; i++) {
4576 if (!shm_regions[i].in_use) {
4577 shm_regions[i].in_use = true;
4578 shm_regions[i].start = raddr;
4579 shm_regions[i].size = shm_info.shm_segsz;
4589 static inline abi_long do_shmdt(abi_ulong shmaddr)
4594 /* shmdt pointers are always untagged */
4598 for (i = 0; i < N_SHM_REGIONS; ++i) {
4599 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4600 shm_regions[i].in_use = false;
4601 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4605 rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4612 #ifdef TARGET_NR_ipc
4613 /* ??? This only works with linear mappings. */
4614 /* do_ipc() must return target values and target errnos. */
4615 static abi_long do_ipc(CPUArchState *cpu_env,
4616 unsigned int call, abi_long first,
4617 abi_long second, abi_long third,
4618 abi_long ptr, abi_long fifth)
4623 version = call >> 16;
4628 ret = do_semtimedop(first, ptr, second, 0, false);
4630 case IPCOP_semtimedop:
4632 * The s390 sys_ipc variant has only five parameters instead of six
4633 * (as in the default variant); the only difference is the handling of
4634 * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4635 * to a struct timespec while the generic variant uses the fifth parameter.
4637 #if defined(TARGET_S390X)
4638 ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4640 ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4645 ret = get_errno(semget(first, second, third));
4648 case IPCOP_semctl: {
4649 /* The semun argument to semctl is passed by value, so dereference the ptr. */
4652 get_user_ual(atptr, ptr);
4653 ret = do_semctl(first, second, third, atptr);
4658 ret = get_errno(msgget(first, second));
4662 ret = do_msgsnd(first, ptr, second, third);
4666 ret = do_msgctl(first, second, ptr);
4673 struct target_ipc_kludge {
4678 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4679 ret = -TARGET_EFAULT;
4683 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4685 unlock_user_struct(tmp, ptr, 0);
4689 ret = do_msgrcv(first, ptr, second, fifth, third);
4698 raddr = do_shmat(cpu_env, first, ptr, second);
4699 if (is_error(raddr))
4700 return get_errno(raddr);
4701 if (put_user_ual(raddr, third))
4702 return -TARGET_EFAULT;
4706 ret = -TARGET_EINVAL;
4711 ret = do_shmdt(ptr);
4715 /* IPC_* flag values are the same on all Linux platforms */
4716 ret = get_errno(shmget(first, second, third));
4719 /* IPC_* and SHM_* command values are the same on all Linux platforms */
4721 ret = do_shmctl(first, second, ptr);
4724 qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4726 ret = -TARGET_ENOSYS;
4733 /* kernel structure type definitions */
4735 #define STRUCT(name, ...) STRUCT_ ## name,
4736 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4738 #include "syscall_types.h"
4742 #undef STRUCT_SPECIAL
4744 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4745 #define STRUCT_SPECIAL(name)
4746 #include "syscall_types.h"
4748 #undef STRUCT_SPECIAL
4750 #define MAX_STRUCT_SIZE 4096
4752 #ifdef CONFIG_FIEMAP
4753 /* So fiemap access checks don't overflow on 32-bit systems.
4754 * This is very slightly smaller than the limit imposed by
4755 * the underlying kernel.
4757 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4758 / sizeof(struct fiemap_extent))
4760 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4761 int fd, int cmd, abi_long arg)
4763 /* The parameter for this ioctl is a struct fiemap followed
4764 * by an array of struct fiemap_extent whose size is set
4765 * in fiemap->fm_extent_count. The array is filled in by the ioctl. */
4768 int target_size_in, target_size_out;
4770 const argtype *arg_type = ie->arg_type;
4771 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4774 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4778 assert(arg_type[0] == TYPE_PTR);
4779 assert(ie->access == IOC_RW);
4781 target_size_in = thunk_type_size(arg_type, 0);
4782 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4784 return -TARGET_EFAULT;
4786 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4787 unlock_user(argptr, arg, 0);
4788 fm = (struct fiemap *)buf_temp;
4789 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4790 return -TARGET_EINVAL;
4793 outbufsz = sizeof(*fm) +
4794 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4796 if (outbufsz > MAX_STRUCT_SIZE) {
4797 /* We can't fit all the extents into the fixed size buffer.
4798 * Allocate one that is large enough and use it instead.
4800 fm = g_try_malloc(outbufsz);
4802 return -TARGET_ENOMEM;
4804 memcpy(fm, buf_temp, sizeof(struct fiemap));
4807 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4808 if (!is_error(ret)) {
4809 target_size_out = target_size_in;
4810 /* An extent_count of 0 means we were only counting the extents
4811 * so there are no structs to copy
4813 if (fm->fm_extent_count != 0) {
4814 target_size_out += fm->fm_mapped_extents * extent_size;
4816 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4818 ret = -TARGET_EFAULT;
4820 /* Convert the struct fiemap */
4821 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4822 if (fm->fm_extent_count != 0) {
4823 p = argptr + target_size_in;
4824 /* ...and then all the struct fiemap_extents */
4825 for (i = 0; i < fm->fm_mapped_extents; i++) {
4826 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4831 unlock_user(argptr, arg, target_size_out);
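/*
 * SIOCGIFCONF: the ifconf struct carries a pointer to an array of
 * ifreq structures. The target and host ifreq layouts can differ in
 * size, so ifc_len must be rescaled in both directions and each
 * array element converted individually.
 */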
4841 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4842 int fd, int cmd, abi_long arg)
4844 const argtype *arg_type = ie->arg_type;
4848 struct ifconf *host_ifconf;
4850 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4851 const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4852 int target_ifreq_size;
4857 abi_long target_ifc_buf;
4861 assert(arg_type[0] == TYPE_PTR);
4862 assert(ie->access == IOC_RW);
4865 target_size = thunk_type_size(arg_type, 0);
4867 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4869 return -TARGET_EFAULT;
4870 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4871 unlock_user(argptr, arg, 0);
4873 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4874 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4875 target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4877 if (target_ifc_buf != 0) {
4878 target_ifc_len = host_ifconf->ifc_len;
4879 nb_ifreq = target_ifc_len / target_ifreq_size;
4880 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4882 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4883 if (outbufsz > MAX_STRUCT_SIZE) {
4885 * We can't fit all the ifreq entries into the fixed size buffer.
4886 * Allocate one that is large enough and use it instead.
4888 host_ifconf = g_try_malloc(outbufsz);
4890 return -TARGET_ENOMEM;
4892 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4895 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4897 host_ifconf->ifc_len = host_ifc_len;
4899 host_ifc_buf = NULL;
4901 host_ifconf->ifc_buf = host_ifc_buf;
4903 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4904 if (!is_error(ret)) {
4905 /* convert host ifc_len to target ifc_len */
4907 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4908 target_ifc_len = nb_ifreq * target_ifreq_size;
4909 host_ifconf->ifc_len = target_ifc_len;
4911 /* restore target ifc_buf */
4913 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4915 /* copy struct ifconf to target user */
4917 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4919 return -TARGET_EFAULT;
4920 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4921 unlock_user(argptr, arg, target_size);
4923 if (target_ifc_buf != 0) {
4924 /* copy ifreq[] to target user */
4925 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4926 for (i = 0; i < nb_ifreq ; i++) {
4927 thunk_convert(argptr + i * target_ifreq_size,
4928 host_ifc_buf + i * sizeof(struct ifreq),
4929 ifreq_arg_type, THUNK_TARGET);
4931 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4936 g_free(host_ifconf);
4942 #if defined(CONFIG_USBFS)
4943 #if HOST_LONG_BITS > 64
4944 #error USBDEVFS thunks do not support >64 bit hosts yet.
4947 uint64_t target_urb_adr;
4948 uint64_t target_buf_adr;
4949 char *target_buf_ptr;
4950 struct usbdevfs_urb host_urb;
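/*
 * Live URBs are tracked in a hash table keyed by the guest URB
 * address, so that REAPURB and DISCARDURB can map the guest's handle
 * back to the host-side urb and its metadata.
 */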
4953 static GHashTable *usbdevfs_urb_hashtable(void)
4955 static GHashTable *urb_hashtable;
4957 if (!urb_hashtable) {
4958 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4960 return urb_hashtable;
4963 static void urb_hashtable_insert(struct live_urb *urb)
4965 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4966 g_hash_table_insert(urb_hashtable, urb, urb);
4969 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4971 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4972 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4975 static void urb_hashtable_remove(struct live_urb *urb)
4977 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4978 g_hash_table_remove(urb_hashtable, urb);
4982 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4983 int fd, int cmd, abi_long arg)
4985 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4986 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4987 struct live_urb *lurb;
4991 uintptr_t target_urb_adr;
4994 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4996 memset(buf_temp, 0, sizeof(uint64_t));
4997 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4998 if (is_error(ret)) {
5002 memcpy(&hurb, buf_temp, sizeof(uint64_t));
5003 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5004 if (!lurb->target_urb_adr) {
5005 return -TARGET_EFAULT;
5007 urb_hashtable_remove(lurb);
5008 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5009 lurb->host_urb.buffer_length);
5010 lurb->target_buf_ptr = NULL;
5012 /* restore the guest buffer pointer */
5013 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5015 /* update the guest urb struct */
5016 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5019 return -TARGET_EFAULT;
5021 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5022 unlock_user(argptr, lurb->target_urb_adr, target_size);
5024 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5025 /* write back the urb handle */
5026 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5029 return -TARGET_EFAULT;
5032 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5033 target_urb_adr = lurb->target_urb_adr;
5034 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5035 unlock_user(argptr, arg, target_size);
5042 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5043 uint8_t *buf_temp __attribute__((unused)),
5044 int fd, int cmd, abi_long arg)
5046 struct live_urb *lurb;
5048 /* Map the target address back to the host URB with its metadata. */
5049 lurb = urb_hashtable_lookup(arg);
5051 return -TARGET_EFAULT;
5053 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5057 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5058 int fd, int cmd, abi_long arg)
5060 const argtype *arg_type = ie->arg_type;
5065 struct live_urb *lurb;
5068 * Each submitted URB needs to map to a unique ID for the
5069 * kernel, and that unique ID needs to be a pointer to
5070 * host memory. Hence, we need to malloc for each URB.
5071 * Isochronous transfers have a variable-length struct.
5074 target_size = thunk_type_size(arg_type, THUNK_TARGET);
5076 /* construct host copy of urb and metadata */
5077 lurb = g_try_malloc0(sizeof(struct live_urb));
5079 return -TARGET_ENOMEM;
5082 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5085 return -TARGET_EFAULT;
5087 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5088 unlock_user(argptr, arg, 0);
5090 lurb->target_urb_adr = arg;
5091 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5093 /* Buffer space used depends on the endpoint type, so lock the entire buffer. */
5094 /* Control-type URBs should check the buffer contents for the true direction. */
5095 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5096 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5097 lurb->host_urb.buffer_length, 1);
5098 if (lurb->target_buf_ptr == NULL) {
5100 return -TARGET_EFAULT;
5103 /* update buffer pointer in host copy */
5104 lurb->host_urb.buffer = lurb->target_buf_ptr;
5106 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5107 if (is_error(ret)) {
5108 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5111 urb_hashtable_insert(lurb);
5116 #endif /* CONFIG_USBFS */
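/*
 * Device-mapper ioctls use a single struct dm_ioctl header followed by
 * a variable-size payload whose layout depends on the command, so both
 * directions of the conversion have to be done by hand per command.
 */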
5118 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5119 int cmd, abi_long arg)
5122 struct dm_ioctl *host_dm;
5123 abi_long guest_data;
5124 uint32_t guest_data_size;
5126 const argtype *arg_type = ie->arg_type;
5128 void *big_buf = NULL;
5132 target_size = thunk_type_size(arg_type, 0);
5133 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5135 ret = -TARGET_EFAULT;
5138 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5139 unlock_user(argptr, arg, 0);
5141 /* buf_temp is too small, so fetch things into a bigger buffer */
5142 big_buf = g_malloc0(((struct dm_ioctl *)buf_temp)->data_size * 2);
5143 memcpy(big_buf, buf_temp, target_size);
5147 guest_data = arg + host_dm->data_start;
5148 if ((guest_data - arg) < 0) {
5149 ret = -TARGET_EINVAL;
5152 guest_data_size = host_dm->data_size - host_dm->data_start;
5153 host_data = (char *)host_dm + host_dm->data_start;
5155 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5157 ret = -TARGET_EFAULT;
5161 switch (ie->host_cmd) {
5163 case DM_LIST_DEVICES:
5166 case DM_DEV_SUSPEND:
5169 case DM_TABLE_STATUS:
5170 case DM_TABLE_CLEAR:
5172 case DM_LIST_VERSIONS:
5176 case DM_DEV_SET_GEOMETRY:
5177 /* data contains only strings */
5178 memcpy(host_data, argptr, guest_data_size);
5181 memcpy(host_data, argptr, guest_data_size);
5182 *(uint64_t *)host_data = tswap64(*(uint64_t *)argptr);
5186 void *gspec = argptr;
5187 void *cur_data = host_data;
5188 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5189 int spec_size = thunk_type_size(arg_type, 0);
5192 for (i = 0; i < host_dm->target_count; i++) {
5193 struct dm_target_spec *spec = cur_data;
5197 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5198 slen = strlen((char *)gspec + spec_size) + 1;
5200 spec->next = sizeof(*spec) + slen;
5201 strcpy((char *)&spec[1], gspec + spec_size);
5203 cur_data += spec->next;
5208 ret = -TARGET_EINVAL;
5209 unlock_user(argptr, guest_data, 0);
5212 unlock_user(argptr, guest_data, 0);
5214 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5215 if (!is_error(ret)) {
5216 guest_data = arg + host_dm->data_start;
5217 guest_data_size = host_dm->data_size - host_dm->data_start;
5218 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5219 switch (ie->host_cmd) {
5224 case DM_DEV_SUSPEND:
5227 case DM_TABLE_CLEAR:
5229 case DM_DEV_SET_GEOMETRY:
5230 /* no return data */
5232 case DM_LIST_DEVICES:
5234 struct dm_name_list *nl = (void *)host_dm + host_dm->data_start;
5235 uint32_t remaining_data = guest_data_size;
5236 void *cur_data = argptr;
5237 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5238 int nl_size = 12; /* can't use thunk_size due to alignment */
5241 uint32_t next = nl->next;
5243 nl->next = nl_size + (strlen(nl->name) + 1);
5245 if (remaining_data < nl->next) {
5246 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5249 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5250 strcpy(cur_data + nl_size, nl->name);
5251 cur_data += nl->next;
5252 remaining_data -= nl->next;
5256 nl = (void *)nl + next;
5261 case DM_TABLE_STATUS:
5263 struct dm_target_spec *spec = (void *)host_dm + host_dm->data_start;
5264 void *cur_data = argptr;
5265 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5266 int spec_size = thunk_type_size(arg_type, 0);
5269 for (i = 0; i < host_dm->target_count; i++) {
5270 uint32_t next = spec->next;
5271 int slen = strlen((char *)&spec[1]) + 1;
5272 spec->next = (cur_data - argptr) + spec_size + slen;
5273 if (guest_data_size < spec->next) {
5274 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5277 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5278 strcpy(cur_data + spec_size, (char *)&spec[1]);
5279 cur_data = argptr + spec->next;
5280 spec = (void *)host_dm + host_dm->data_start + next;
5286 void *hdata = (void *)host_dm + host_dm->data_start;
5287 int count = *(uint32_t *)hdata;
5288 uint64_t *hdev = hdata + 8;
5289 uint64_t *gdev = argptr + 8;
5292 *(uint32_t *)argptr = tswap32(count);
5293 for (i = 0; i < count; i++) {
5294 *gdev = tswap64(*hdev);
5300 case DM_LIST_VERSIONS:
5302 struct dm_target_versions *vers = (void *)host_dm + host_dm->data_start;
5303 uint32_t remaining_data = guest_data_size;
5304 void *cur_data = argptr;
5305 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5306 int vers_size = thunk_type_size(arg_type, 0);
5309 uint32_t next = vers->next;
5311 vers->next = vers_size + (strlen(vers->name) + 1);
5313 if (remaining_data < vers->next) {
5314 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5317 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5318 strcpy(cur_data + vers_size, vers->name);
5319 cur_data += vers->next;
5320 remaining_data -= vers->next;
5324 vers = (void *)vers + next;
5329 unlock_user(argptr, guest_data, 0);
5330 ret = -TARGET_EINVAL;
5333 unlock_user(argptr, guest_data, guest_data_size);
5335 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5337 ret = -TARGET_EFAULT;
5340 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5341 unlock_user(argptr, arg, target_size);
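/*
 * BLKPG is a two-level conversion: the blkpg_ioctl_arg itself, plus
 * the blkpg_partition payload that its data pointer refers to.
 */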
5348 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5349 int cmd, abi_long arg)
5353 const argtype *arg_type = ie->arg_type;
5354 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5357 struct blkpg_ioctl_arg *host_blkpg = (void *)buf_temp;
5358 struct blkpg_partition host_part;
5360 /* Read and convert blkpg */
5362 target_size = thunk_type_size(arg_type, 0);
5363 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5365 ret = -TARGET_EFAULT;
5368 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5369 unlock_user(argptr, arg, 0);
5371 switch (host_blkpg->op) {
5372 case BLKPG_ADD_PARTITION:
5373 case BLKPG_DEL_PARTITION:
5374 /* payload is struct blkpg_partition */
5377 /* Unknown opcode */
5378 ret = -TARGET_EINVAL;
5382 /* Read and convert blkpg->data */
5383 arg = (abi_long)(uintptr_t)host_blkpg->data;
5384 target_size = thunk_type_size(part_arg_type, 0);
5385 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5387 ret = -TARGET_EFAULT;
5390 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5391 unlock_user(argptr, arg, 0);
5393 /* Swizzle the data pointer to our local copy and call! */
5394 host_blkpg->data = &host_part;
5395 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
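/*
 * SIOCADDRT/SIOCDELRT: struct rtentry contains an embedded rt_dev
 * string pointer, so the generic thunk conversion is done field by
 * field here in order to lock and translate that string separately.
 */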
5401 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5402 int fd, int cmd, abi_long arg)
5404 const argtype *arg_type = ie->arg_type;
5405 const StructEntry *se;
5406 const argtype *field_types;
5407 const int *dst_offsets, *src_offsets;
5410 abi_ulong *target_rt_dev_ptr = NULL;
5411 unsigned long *host_rt_dev_ptr = NULL;
5415 assert(ie->access == IOC_W);
5416 assert(*arg_type == TYPE_PTR);
5418 assert(*arg_type == TYPE_STRUCT);
5419 target_size = thunk_type_size(arg_type, 0);
5420 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5422 return -TARGET_EFAULT;
5425 assert(*arg_type == (int)STRUCT_rtentry);
5426 se = struct_entries + *arg_type++;
5427 assert(se->convert[0] == NULL);
5428 /* Convert the struct here so we can catch the rt_dev string. */
5429 field_types = se->field_types;
5430 dst_offsets = se->field_offsets[THUNK_HOST];
5431 src_offsets = se->field_offsets[THUNK_TARGET];
5432 for (i = 0; i < se->nb_fields; i++) {
5433 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5434 assert(*field_types == TYPE_PTRVOID);
5435 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5436 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5437 if (*target_rt_dev_ptr != 0) {
5438 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5439 tswapal(*target_rt_dev_ptr));
5440 if (!*host_rt_dev_ptr) {
5441 unlock_user(argptr, arg, 0);
5442 return -TARGET_EFAULT;
5445 *host_rt_dev_ptr = 0;
5450 field_types = thunk_convert(buf_temp + dst_offsets[i],
5451 argptr + src_offsets[i],
5452 field_types, THUNK_HOST);
5454 unlock_user(argptr, arg, 0);
5456 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5458 assert(host_rt_dev_ptr != NULL);
5459 assert(target_rt_dev_ptr != NULL);
5460 if (*host_rt_dev_ptr != 0) {
5461 unlock_user((void *)*host_rt_dev_ptr,
5462 *target_rt_dev_ptr, 0);
5467 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5468 int fd, int cmd, abi_long arg)
5470 int sig = target_to_host_signal(arg);
5471 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5474 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5475 int fd, int cmd, abi_long arg)
5480 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5481 if (is_error(ret)) {
5485 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5486 if (copy_to_user_timeval(arg, &tv)) {
5487 return -TARGET_EFAULT;
5490 if (copy_to_user_timeval64(arg, &tv)) {
5491 return -TARGET_EFAULT;
5498 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5499 int fd, int cmd, abi_long arg)
5504 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5505 if (is_error(ret)) {
5509 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5510 if (host_to_target_timespec(arg, &ts)) {
5511 return -TARGET_EFAULT;
5514 if (host_to_target_timespec64(arg, &ts)) {
5515 return -TARGET_EFAULT;
5523 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5524 int fd, int cmd, abi_long arg)
5526 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5527 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
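/*
 * DRM_IOCTL_VERSION returns data through three guest-supplied buffers
 * (name, date, desc); the helpers below lock and unlock all three as
 * a unit, copying the contents back only on success.
 */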
5533 static void unlock_drm_version(struct drm_version *host_ver,
5534 struct target_drm_version *target_ver,
5537 unlock_user(host_ver->name, target_ver->name,
5538 copy ? host_ver->name_len : 0);
5539 unlock_user(host_ver->date, target_ver->date,
5540 copy ? host_ver->date_len : 0);
5541 unlock_user(host_ver->desc, target_ver->desc,
5542 copy ? host_ver->desc_len : 0);
5545 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5546 struct target_drm_version *target_ver)
5548 memset(host_ver, 0, sizeof(*host_ver));
5550 __get_user(host_ver->name_len, &target_ver->name_len);
5551 if (host_ver->name_len) {
5552 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5553 target_ver->name_len, 0);
5554 if (!host_ver->name) {
5559 __get_user(host_ver->date_len, &target_ver->date_len);
5560 if (host_ver->date_len) {
5561 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5562 target_ver->date_len, 0);
5563 if (!host_ver->date) {
5568 __get_user(host_ver->desc_len, &target_ver->desc_len);
5569 if (host_ver->desc_len) {
5570 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5571 target_ver->desc_len, 0);
5572 if (!host_ver->desc) {
5579 unlock_drm_version(host_ver, target_ver, false);
5583 static inline void host_to_target_drmversion(
5584 struct target_drm_version *target_ver,
5585 struct drm_version *host_ver)
5587 __put_user(host_ver->version_major, &target_ver->version_major);
5588 __put_user(host_ver->version_minor, &target_ver->version_minor);
5589 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5590 __put_user(host_ver->name_len, &target_ver->name_len);
5591 __put_user(host_ver->date_len, &target_ver->date_len);
5592 __put_user(host_ver->desc_len, &target_ver->desc_len);
5593 unlock_drm_version(host_ver, target_ver, true);
5596 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5597 int fd, int cmd, abi_long arg)
5599 struct drm_version *ver;
5600 struct target_drm_version *target_ver;
5603 switch (ie->host_cmd) {
5604 case DRM_IOCTL_VERSION:
5605 if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5606 return -TARGET_EFAULT;
5608 ver = (struct drm_version *)buf_temp;
5609 ret = target_to_host_drmversion(ver, target_ver);
5610 if (!is_error(ret)) {
5611 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5612 if (is_error(ret)) {
5613 unlock_drm_version(ver, target_ver, false);
5615 host_to_target_drmversion(target_ver, ver);
5618 unlock_user_struct(target_ver, arg, 0);
5621 return -TARGET_ENOSYS;
5624 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5625 struct drm_i915_getparam *gparam,
5626 int fd, abi_long arg)
5630 struct target_drm_i915_getparam *target_gparam;
5632 if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5633 return -TARGET_EFAULT;
5636 __get_user(gparam->param, &target_gparam->param);
5637 gparam->value = &value;
5638 ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5639 put_user_s32(value, target_gparam->value);
5641 unlock_user_struct(target_gparam, arg, 0);
5645 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5646 int fd, int cmd, abi_long arg)
5648 switch (ie->host_cmd) {
5649 case DRM_IOCTL_I915_GETPARAM:
5650 return do_ioctl_drm_i915_getparam(ie,
5651 (struct drm_i915_getparam *)buf_temp,
5654 return -TARGET_ENOSYS;
5660 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5661 int fd, int cmd, abi_long arg)
5663 struct tun_filter *filter = (struct tun_filter *)buf_temp;
5664 struct tun_filter *target_filter;
5667 assert(ie->access == IOC_W);
5669 target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5670 if (!target_filter) {
5671 return -TARGET_EFAULT;
5673 filter->flags = tswap16(target_filter->flags);
5674 filter->count = tswap16(target_filter->count);
5675 unlock_user(target_filter, arg, 0);
5677 if (filter->count) {
5678 if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5680 return -TARGET_EFAULT;
5683 target_addr = lock_user(VERIFY_READ,
5684 arg + offsetof(struct tun_filter, addr),
5685 filter->count * ETH_ALEN, 1);
5687 return -TARGET_EFAULT;
5689 memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5690 unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5693 return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
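/*
 * The ioctl table is built by expanding these macros over the list of
 * supported ioctls: IOCTL() describes a command whose argument the
 * generic thunk machinery can convert, IOCTL_SPECIAL() routes a
 * command to one of the do_ioctl_* helpers above, and IOCTL_IGNORE()
 * registers a command that is recognized but not implemented.
 */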
5696 IOCTLEntry ioctl_entries[] = {
5697 #define IOCTL(cmd, access, ...) \
5698 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5699 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5700 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5701 #define IOCTL_IGNORE(cmd) \
5702 { TARGET_ ## cmd, 0, #cmd },
5707 /* ??? Implement proper locking for ioctls. */
5708 /* do_ioctl() must return target values and target errnos. */
5709 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5711 const IOCTLEntry *ie;
5712 const argtype *arg_type;
5714 uint8_t buf_temp[MAX_STRUCT_SIZE];
5720 if (ie->target_cmd == 0) {
5722 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5723 return -TARGET_ENOSYS;
5725 if (ie->target_cmd == cmd)
5729 arg_type = ie->arg_type;
5731 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5732 } else if (!ie->host_cmd) {
5733 /* Some architectures define BSD ioctls in their headers
5734 that are not implemented in Linux. */
5735 return -TARGET_ENOSYS;
5738 switch (arg_type[0]) {
5741 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5747 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5751 target_size = thunk_type_size(arg_type, 0);
5752 switch (ie->access) {
5754 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5755 if (!is_error(ret)) {
5756 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5758 return -TARGET_EFAULT;
5759 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5760 unlock_user(argptr, arg, target_size);
5764 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5766 return -TARGET_EFAULT;
5767 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5768 unlock_user(argptr, arg, 0);
5769 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5773 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5775 return -TARGET_EFAULT;
5776 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5777 unlock_user(argptr, arg, 0);
5778 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5779 if (!is_error(ret)) {
5780 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5782 return -TARGET_EFAULT;
5783 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5784 unlock_user(argptr, arg, target_size);
5790 qemu_log_mask(LOG_UNIMP,
5791 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5792 (long)cmd, arg_type[0]);
5793 ret = -TARGET_ENOSYS;
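/*
 * Terminal flag translation tables. Each bitmask_transtbl entry is
 * { target_mask, target_bits, host_mask, host_bits }: within the
 * masked field, the target bit pattern maps to the host bit pattern
 * (and back again for host-to-target conversion).
 */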
5799 static const bitmask_transtbl iflag_tbl[] = {
5800 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5801 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5802 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5803 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5804 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5805 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5806 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5807 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5808 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5809 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5810 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5811 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5812 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5813 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5814 { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5818 static const bitmask_transtbl oflag_tbl[] = {
5819 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5820 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5821 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5822 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5823 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5824 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5825 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5826 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5827 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5828 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5829 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5830 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5831 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5832 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5833 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5834 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5835 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5836 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5837 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5838 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5839 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5840 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5841 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5842 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5846 static const bitmask_transtbl cflag_tbl[] = {
5847 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5848 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5849 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5850 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5851 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5852 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5853 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5854 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5855 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5856 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5857 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5858 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5859 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5860 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5861 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5862 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5863 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5864 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5865 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5866 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5867 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5868 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5869 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5870 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5871 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5872 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5873 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5874 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5875 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5876 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5877 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5881 static const bitmask_transtbl lflag_tbl[] = {
5882 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5883 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5884 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5885 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5886 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5887 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5888 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5889 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5890 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5891 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5892 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5893 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5894 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5895 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5896 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5897 { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5901 static void target_to_host_termios (void *dst, const void *src)
5903 struct host_termios *host = dst;
5904 const struct target_termios *target = src;
5907 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5909 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5911 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5913 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5914 host->c_line = target->c_line;
5916 memset(host->c_cc, 0, sizeof(host->c_cc));
5917 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5918 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5919 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5920 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5921 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5922 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5923 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5924 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5925 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5926 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5927 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5928 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5929 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5930 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5931 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5932 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5933 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5936 static void host_to_target_termios (void *dst, const void *src)
5938 struct target_termios *target = dst;
5939 const struct host_termios *host = src;
5942 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5944 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5946 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5948 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5949 target->c_line = host->c_line;
5951 memset(target->c_cc, 0, sizeof(target->c_cc));
5952 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5953 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5954 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5955 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5956 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5957 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5958 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5959 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5960 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5961 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5962 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5963 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5964 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5965 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5966 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5967 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5968 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5971 static const StructEntry struct_termios_def = {
5972 .convert = { host_to_target_termios, target_to_host_termios },
5973 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5974 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5975 .print = print_termios,
5978 static const bitmask_transtbl mmap_flags_tbl[] = {
5979 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5980 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5981 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5982 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5983 MAP_ANONYMOUS, MAP_ANONYMOUS },
5984 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5985 MAP_GROWSDOWN, MAP_GROWSDOWN },
5986 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5987 MAP_DENYWRITE, MAP_DENYWRITE },
5988 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5989 MAP_EXECUTABLE, MAP_EXECUTABLE },
5990 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5991 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5992 MAP_NORESERVE, MAP_NORESERVE },
5993 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5994 /* MAP_STACK has been ignored by the kernel for quite some time.
5995 Recognize it for the target insofar as we do not want to pass
5996 it through to the host. */
5997 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6002 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6003 * TARGET_I386 is defined if TARGET_X86_64 is defined
6005 #if defined(TARGET_I386)
6007 /* NOTE: there is really only one LDT shared by all the threads */
6008 static uint8_t *ldt_table;
6010 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6017 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6018 if (size > bytecount)
6020 p = lock_user(VERIFY_WRITE, ptr, size, 0);
6022 return -TARGET_EFAULT;
6023 /* ??? Should this be byteswapped? */
6024 memcpy(p, ldt_table, size);
6025 unlock_user(p, ptr, size);
6029 /* XXX: add locking support */
6030 static abi_long write_ldt(CPUX86State *env,
6031 abi_ulong ptr, unsigned long bytecount, int oldmode)
6033 struct target_modify_ldt_ldt_s ldt_info;
6034 struct target_modify_ldt_ldt_s *target_ldt_info;
6035 int seg_32bit, contents, read_exec_only, limit_in_pages;
6036 int seg_not_present, useable, lm;
6037 uint32_t *lp, entry_1, entry_2;
6039 if (bytecount != sizeof(ldt_info))
6040 return -TARGET_EINVAL;
6041 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6042 return -TARGET_EFAULT;
6043 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6044 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6045 ldt_info.limit = tswap32(target_ldt_info->limit);
6046 ldt_info.flags = tswap32(target_ldt_info->flags);
6047 unlock_user_struct(target_ldt_info, ptr, 0);
6049 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6050 return -TARGET_EINVAL;
6051 seg_32bit = ldt_info.flags & 1;
6052 contents = (ldt_info.flags >> 1) & 3;
6053 read_exec_only = (ldt_info.flags >> 3) & 1;
6054 limit_in_pages = (ldt_info.flags >> 4) & 1;
6055 seg_not_present = (ldt_info.flags >> 5) & 1;
6056 useable = (ldt_info.flags >> 6) & 1;
6060 lm = (ldt_info.flags >> 7) & 1;
6062 if (contents == 3) {
6064 return -TARGET_EINVAL;
6065 if (seg_not_present == 0)
6066 return -TARGET_EINVAL;
6068 /* allocate the LDT */
6070 env->ldt.base = target_mmap(0,
6071 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6072 PROT_READ|PROT_WRITE,
6073 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6074 if (env->ldt.base == -1)
6075 return -TARGET_ENOMEM;
6076 memset(g2h_untagged(env->ldt.base), 0,
6077 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6078 env->ldt.limit = 0xffff;
6079 ldt_table = g2h_untagged(env->ldt.base);
6082 /* NOTE: same code as Linux kernel */
6083 /* Allow LDTs to be cleared by the user. */
6084 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6087 read_exec_only == 1 &&
6089 limit_in_pages == 0 &&
6090 seg_not_present == 1 &&
6098 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6099 (ldt_info.limit & 0x0ffff);
6100 entry_2 = (ldt_info.base_addr & 0xff000000) |
6101 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6102 (ldt_info.limit & 0xf0000) |
6103 ((read_exec_only ^ 1) << 9) |
6105 ((seg_not_present ^ 1) << 15) |
6107 (limit_in_pages << 23) |
6111 entry_2 |= (useable << 20);
6113 /* Install the new entry ... */
6115 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6116 lp[0] = tswap32(entry_1);
6117 lp[1] = tswap32(entry_2);
6121 /* specific and weird i386 syscalls */
6122 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6123 unsigned long bytecount)
6129 ret = read_ldt(ptr, bytecount);
6132 ret = write_ldt(env, ptr, bytecount, 1);
6135 ret = write_ldt(env, ptr, bytecount, 0);
6138 ret = -TARGET_ENOSYS;
6144 #if defined(TARGET_ABI32)
6145 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6147 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6148 struct target_modify_ldt_ldt_s ldt_info;
6149 struct target_modify_ldt_ldt_s *target_ldt_info;
6150 int seg_32bit, contents, read_exec_only, limit_in_pages;
6151 int seg_not_present, useable, lm;
6152 uint32_t *lp, entry_1, entry_2;
6155 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6156 if (!target_ldt_info)
6157 return -TARGET_EFAULT;
6158 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6159 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6160 ldt_info.limit = tswap32(target_ldt_info->limit);
6161 ldt_info.flags = tswap32(target_ldt_info->flags);
6162 if (ldt_info.entry_number == -1) {
6163 for (i = TARGET_GDT_ENTRY_TLS_MIN; i <= TARGET_GDT_ENTRY_TLS_MAX; i++) {
6164 if (gdt_table[i] == 0) {
6165 ldt_info.entry_number = i;
6166 target_ldt_info->entry_number = tswap32(i);
6171 unlock_user_struct(target_ldt_info, ptr, 1);
6173 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6174 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6175 return -TARGET_EINVAL;
6176 seg_32bit = ldt_info.flags & 1;
6177 contents = (ldt_info.flags >> 1) & 3;
6178 read_exec_only = (ldt_info.flags >> 3) & 1;
6179 limit_in_pages = (ldt_info.flags >> 4) & 1;
6180 seg_not_present = (ldt_info.flags >> 5) & 1;
6181 useable = (ldt_info.flags >> 6) & 1;
6185 lm = (ldt_info.flags >> 7) & 1;
6188 if (contents == 3) {
6189 if (seg_not_present == 0)
6190 return -TARGET_EINVAL;
6193 /* NOTE: same code as Linux kernel */
6194 /* Allow LDTs to be cleared by the user. */
6195 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6196 if ((contents == 0 &&
6197 read_exec_only == 1 &&
6199 limit_in_pages == 0 &&
6200 seg_not_present == 1 &&
6208 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6209 (ldt_info.limit & 0x0ffff);
6210 entry_2 = (ldt_info.base_addr & 0xff000000) |
6211 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6212 (ldt_info.limit & 0xf0000) |
6213 ((read_exec_only ^ 1) << 9) |
6215 ((seg_not_present ^ 1) << 15) |
6217 (limit_in_pages << 23) |
6222 /* Install the new entry ... */
6224 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6225 lp[0] = tswap32(entry_1);
6226 lp[1] = tswap32(entry_2);
6230 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6232 struct target_modify_ldt_ldt_s *target_ldt_info;
6233 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6234 uint32_t base_addr, limit, flags;
6235 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6236 int seg_not_present, useable, lm;
6237 uint32_t *lp, entry_1, entry_2;
6239 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6240 if (!target_ldt_info)
6241 return -TARGET_EFAULT;
6242 idx = tswap32(target_ldt_info->entry_number);
6243 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6244 idx > TARGET_GDT_ENTRY_TLS_MAX) {
6245 unlock_user_struct(target_ldt_info, ptr, 1);
6246 return -TARGET_EINVAL;
6248 lp = (uint32_t *)(gdt_table + idx);
6249 entry_1 = tswap32(lp[0]);
6250 entry_2 = tswap32(lp[1]);
6252 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6253 contents = (entry_2 >> 10) & 3;
6254 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6255 seg_32bit = (entry_2 >> 22) & 1;
6256 limit_in_pages = (entry_2 >> 23) & 1;
6257 useable = (entry_2 >> 20) & 1;
6261 lm = (entry_2 >> 21) & 1;
6263 flags = (seg_32bit << 0) | (contents << 1) |
6264 (read_exec_only << 3) | (limit_in_pages << 4) |
6265 (seg_not_present << 5) | (useable << 6) | (lm << 7);
6266 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6267 base_addr = (entry_1 >> 16) |
6268 (entry_2 & 0xff000000) |
6269 ((entry_2 & 0xff) << 16);
6270 target_ldt_info->base_addr = tswapal(base_addr);
6271 target_ldt_info->limit = tswap32(limit);
6272 target_ldt_info->flags = tswap32(flags);
6273 unlock_user_struct(target_ldt_info, ptr, 1);
6277 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6279 return -TARGET_ENOSYS;
6282 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6289 case TARGET_ARCH_SET_GS:
6290 case TARGET_ARCH_SET_FS:
6291 if (code == TARGET_ARCH_SET_GS)
6295 cpu_x86_load_seg(env, idx, 0);
6296 env->segs[idx].base = addr;
6298 case TARGET_ARCH_GET_GS:
6299 case TARGET_ARCH_GET_FS:
6300 if (code == TARGET_ARCH_GET_GS)
6304 val = env->segs[idx].base;
6305 if (put_user(val, addr, abi_ulong))
6306 ret = -TARGET_EFAULT;
6309 ret = -TARGET_EINVAL;
6314 #endif /* defined(TARGET_ABI32) */
6315 #endif /* defined(TARGET_I386) */
6318 * These constants are generic. Supply any that are missing from the host.
6321 # define PR_SET_NAME 15
6322 # define PR_GET_NAME 16
6324 #ifndef PR_SET_FP_MODE
6325 # define PR_SET_FP_MODE 45
6326 # define PR_GET_FP_MODE 46
6327 # define PR_FP_MODE_FR (1 << 0)
6328 # define PR_FP_MODE_FRE (1 << 1)
6330 #ifndef PR_SVE_SET_VL
6331 # define PR_SVE_SET_VL 50
6332 # define PR_SVE_GET_VL 51
6333 # define PR_SVE_VL_LEN_MASK 0xffff
6334 # define PR_SVE_VL_INHERIT (1 << 17)
6336 #ifndef PR_PAC_RESET_KEYS
6337 # define PR_PAC_RESET_KEYS 54
6338 # define PR_PAC_APIAKEY (1 << 0)
6339 # define PR_PAC_APIBKEY (1 << 1)
6340 # define PR_PAC_APDAKEY (1 << 2)
6341 # define PR_PAC_APDBKEY (1 << 3)
6342 # define PR_PAC_APGAKEY (1 << 4)
6344 #ifndef PR_SET_TAGGED_ADDR_CTRL
6345 # define PR_SET_TAGGED_ADDR_CTRL 55
6346 # define PR_GET_TAGGED_ADDR_CTRL 56
6347 # define PR_TAGGED_ADDR_ENABLE (1UL << 0)
6349 #ifndef PR_MTE_TCF_SHIFT
6350 # define PR_MTE_TCF_SHIFT 1
6351 # define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
6352 # define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
6353 # define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
6354 # define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
6355 # define PR_MTE_TAG_SHIFT 3
6356 # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
6358 #ifndef PR_SET_IO_FLUSHER
6359 # define PR_SET_IO_FLUSHER 57
6360 # define PR_GET_IO_FLUSHER 58
6362 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6363 # define PR_SET_SYSCALL_USER_DISPATCH 59
6366 #include "target_prctl.h"
6368 static abi_long do_prctl_inval0(CPUArchState *env)
6370 return -TARGET_EINVAL;
6373 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6375 return -TARGET_EINVAL;
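/*
 * Each target may provide its own implementation for the prctl options
 * below via target_prctl.h; anything it leaves undefined falls back to
 * these stubs, which fail with EINVAL.
 */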
6378 #ifndef do_prctl_get_fp_mode
6379 #define do_prctl_get_fp_mode do_prctl_inval0
6381 #ifndef do_prctl_set_fp_mode
6382 #define do_prctl_set_fp_mode do_prctl_inval1
6384 #ifndef do_prctl_get_vl
6385 #define do_prctl_get_vl do_prctl_inval0
6387 #ifndef do_prctl_set_vl
6388 #define do_prctl_set_vl do_prctl_inval1
6390 #ifndef do_prctl_reset_keys
6391 #define do_prctl_reset_keys do_prctl_inval1
6393 #ifndef do_prctl_set_tagged_addr_ctrl
6394 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6396 #ifndef do_prctl_get_tagged_addr_ctrl
6397 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6399 #ifndef do_prctl_get_unalign
6400 #define do_prctl_get_unalign do_prctl_inval1
6402 #ifndef do_prctl_set_unalign
6403 #define do_prctl_set_unalign do_prctl_inval1
6406 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6407 abi_long arg3, abi_long arg4, abi_long arg5)
6412 case PR_GET_PDEATHSIG:
6415 ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6417 if (!is_error(ret) &&
6418 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6419 return -TARGET_EFAULT;
6423 case PR_SET_PDEATHSIG:
6424 return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6428 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6430 return -TARGET_EFAULT;
6432 ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6434 unlock_user(name, arg2, 16);
6439 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6441 return -TARGET_EFAULT;
6443 ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6445 unlock_user(name, arg2, 0);
6448 case PR_GET_FP_MODE:
6449 return do_prctl_get_fp_mode(env);
6450 case PR_SET_FP_MODE:
6451 return do_prctl_set_fp_mode(env, arg2);
6453 return do_prctl_get_vl(env);
6455 return do_prctl_set_vl(env, arg2);
6456 case PR_PAC_RESET_KEYS:
6457 if (arg3 || arg4 || arg5) {
6458 return -TARGET_EINVAL;
6460 return do_prctl_reset_keys(env, arg2);
6461 case PR_SET_TAGGED_ADDR_CTRL:
6462 if (arg3 || arg4 || arg5) {
6463 return -TARGET_EINVAL;
6465 return do_prctl_set_tagged_addr_ctrl(env, arg2);
6466 case PR_GET_TAGGED_ADDR_CTRL:
6467 if (arg2 || arg3 || arg4 || arg5) {
6468 return -TARGET_EINVAL;
6470 return do_prctl_get_tagged_addr_ctrl(env);
6472 case PR_GET_UNALIGN:
6473 return do_prctl_get_unalign(env, arg2);
6474 case PR_SET_UNALIGN:
6475 return do_prctl_set_unalign(env, arg2);
6477 case PR_CAP_AMBIENT:
6478 case PR_CAPBSET_READ:
6479 case PR_CAPBSET_DROP:
6480 case PR_GET_DUMPABLE:
6481 case PR_SET_DUMPABLE:
6482 case PR_GET_KEEPCAPS:
6483 case PR_SET_KEEPCAPS:
6484 case PR_GET_SECUREBITS:
6485 case PR_SET_SECUREBITS:
6488 case PR_GET_TIMERSLACK:
6489 case PR_SET_TIMERSLACK:
6491 case PR_MCE_KILL_GET:
6492 case PR_GET_NO_NEW_PRIVS:
6493 case PR_SET_NO_NEW_PRIVS:
6494 case PR_GET_IO_FLUSHER:
6495 case PR_SET_IO_FLUSHER:
6496 /* Some prctl options have no pointer arguments and we can pass them on. */
6497 return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6499 case PR_GET_CHILD_SUBREAPER:
6500 case PR_SET_CHILD_SUBREAPER:
6501 case PR_GET_SPECULATION_CTRL:
6502 case PR_SET_SPECULATION_CTRL:
6503 case PR_GET_TID_ADDRESS:
6505 return -TARGET_EINVAL;
6509 /* Was used for SPE on PowerPC. */
6510 return -TARGET_EINVAL;
6517 case PR_GET_SECCOMP:
6518 case PR_SET_SECCOMP:
6519 case PR_SET_SYSCALL_USER_DISPATCH:
6520 case PR_GET_THP_DISABLE:
6521 case PR_SET_THP_DISABLE:
6524 /* Disable to prevent the target disabling stuff we need. */
6525 return -TARGET_EINVAL;
6528 qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6530 return -TARGET_EINVAL;
6534 #define NEW_STACK_SIZE 0x40000
6537 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6540 pthread_mutex_t mutex;
6541 pthread_cond_t cond;
6544 abi_ulong child_tidptr;
6545 abi_ulong parent_tidptr;
6549 static void *clone_func(void *arg)
6551 new_thread_info *info = arg;
6556 rcu_register_thread();
6557 tcg_register_thread();
6561 ts = (TaskState *)cpu->opaque;
6562 info->tid = sys_gettid();
6564 if (info->child_tidptr)
6565 put_user_u32(info->tid, info->child_tidptr);
6566 if (info->parent_tidptr)
6567 put_user_u32(info->tid, info->parent_tidptr);
6568 qemu_guest_random_seed_thread_part2(cpu->random_seed);
6569 /* Enable signals. */
6570 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6571 /* Signal to the parent that we're ready. */
6572 pthread_mutex_lock(&info->mutex);
6573 pthread_cond_broadcast(&info->cond);
6574 pthread_mutex_unlock(&info->mutex);
6575 /* Wait until the parent has finished initializing the TLS state. */
6576 pthread_mutex_lock(&clone_lock);
6577 pthread_mutex_unlock(&clone_lock);
6583 /* do_fork() must return host values and target errnos (unlike most
6584 do_*() functions). */
6585 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6586 abi_ulong parent_tidptr, target_ulong newtls,
6587 abi_ulong child_tidptr)
6589 CPUState *cpu = env_cpu(env);
6593 CPUArchState *new_env;
6596 flags &= ~CLONE_IGNORED_FLAGS;
6598 /* Emulate vfork() with fork() */
6599 if (flags & CLONE_VFORK)
6600 flags &= ~(CLONE_VFORK | CLONE_VM);
6602 if (flags & CLONE_VM) {
6603 TaskState *parent_ts = (TaskState *)cpu->opaque;
6604 new_thread_info info;
6605 pthread_attr_t attr;
6607 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6608 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6609 return -TARGET_EINVAL;
6612 ts = g_new0(TaskState, 1);
6613 init_task_state(ts);
6615 /* Grab a mutex so that thread setup appears atomic. */
6616 pthread_mutex_lock(&clone_lock);
6619 * If this is our first additional thread, we need to ensure we
6620 * generate code for parallel execution and flush old translations.
6621 * Do this now so that the copy gets CF_PARALLEL too.
6623 if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6624 cpu->tcg_cflags |= CF_PARALLEL;
6628 /* we create a new CPU instance. */
6629 new_env = cpu_copy(env);
6630 /* Init regs that differ from the parent. */
6631 cpu_clone_regs_child(new_env, newsp, flags);
6632 cpu_clone_regs_parent(env, flags);
6633 new_cpu = env_cpu(new_env);
6634 new_cpu->opaque = ts;
6635 ts->bprm = parent_ts->bprm;
6636 ts->info = parent_ts->info;
6637 ts->signal_mask = parent_ts->signal_mask;
6639 if (flags & CLONE_CHILD_CLEARTID) {
6640 ts->child_tidptr = child_tidptr;
6643 if (flags & CLONE_SETTLS) {
6644 cpu_set_tls(new_env, newtls);
6647 memset(&info, 0, sizeof(info));
6648 pthread_mutex_init(&info.mutex, NULL);
6649 pthread_mutex_lock(&info.mutex);
6650 pthread_cond_init(&info.cond, NULL);
6652 if (flags & CLONE_CHILD_SETTID) {
6653 info.child_tidptr = child_tidptr;
6655 if (flags & CLONE_PARENT_SETTID) {
6656 info.parent_tidptr = parent_tidptr;
6659 ret = pthread_attr_init(&attr);
6660 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6661 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6662 /* It is not safe to deliver signals until the child has finished
6663 initializing, so temporarily block all signals. */
6664 sigfillset(&sigmask);
6665 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6666 cpu->random_seed = qemu_guest_random_seed_thread_part1();
6668 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6669 /* TODO: Free new CPU state if thread creation failed. */
6671 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6672 pthread_attr_destroy(&attr);
6674 /* Wait for the child to initialize. */
6675 pthread_cond_wait(&info.cond, &info.mutex);
6680 pthread_mutex_unlock(&info.mutex);
6681 pthread_cond_destroy(&info.cond);
6682 pthread_mutex_destroy(&info.mutex);
6683 pthread_mutex_unlock(&clone_lock);
6685 /* If CLONE_VM is not set, we consider it a fork. */
6686 if (flags & CLONE_INVALID_FORK_FLAGS) {
6687 return -TARGET_EINVAL;
6690 /* We can't support custom termination signals */
6691 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6692 return -TARGET_EINVAL;
6695 if (block_signals()) {
6696 return -QEMU_ERESTARTSYS;
6702 /* Child Process. */
6703 cpu_clone_regs_child(env, newsp, flags);
6705 /* There is a race condition here. The parent process could
6706 theoretically read the TID in the child process before the child
6707 tid is set. This would require using either ptrace
6708 (not implemented) or having *_tidptr point at a shared memory
6709 mapping. We can't repeat the spinlock hack used above because
6710 the child process gets its own copy of the lock. */
6711 if (flags & CLONE_CHILD_SETTID)
6712 put_user_u32(sys_gettid(), child_tidptr);
6713 if (flags & CLONE_PARENT_SETTID)
6714 put_user_u32(sys_gettid(), parent_tidptr);
6715 ts = (TaskState *)cpu->opaque;
6716 if (flags & CLONE_SETTLS)
6717 cpu_set_tls(env, newtls);
6718 if (flags & CLONE_CHILD_CLEARTID)
6719 ts->child_tidptr = child_tidptr;
6721 cpu_clone_regs_parent(env, flags);
6728 /* warning: doesn't handle Linux-specific flags... */
6729 static int target_to_host_fcntl_cmd(int cmd)
6734 case TARGET_F_DUPFD:
6735 case TARGET_F_GETFD:
6736 case TARGET_F_SETFD:
6737 case TARGET_F_GETFL:
6738 case TARGET_F_SETFL:
6739 case TARGET_F_OFD_GETLK:
6740 case TARGET_F_OFD_SETLK:
6741 case TARGET_F_OFD_SETLKW:
6744 case TARGET_F_GETLK:
6747 case TARGET_F_SETLK:
6750 case TARGET_F_SETLKW:
6753 case TARGET_F_GETOWN:
6756 case TARGET_F_SETOWN:
6759 case TARGET_F_GETSIG:
6762 case TARGET_F_SETSIG:
6765 #if TARGET_ABI_BITS == 32
6766 case TARGET_F_GETLK64:
6769 case TARGET_F_SETLK64:
6772 case TARGET_F_SETLKW64:
6776 case TARGET_F_SETLEASE:
6779 case TARGET_F_GETLEASE:
6782 #ifdef F_DUPFD_CLOEXEC
6783 case TARGET_F_DUPFD_CLOEXEC:
6784 ret = F_DUPFD_CLOEXEC;
6787 case TARGET_F_NOTIFY:
6791 case TARGET_F_GETOWN_EX:
6796 case TARGET_F_SETOWN_EX:
6801 case TARGET_F_SETPIPE_SZ:
6804 case TARGET_F_GETPIPE_SZ:
6809 case TARGET_F_ADD_SEALS:
6812 case TARGET_F_GET_SEALS:
6817 ret = -TARGET_EINVAL;
6821 #if defined(__powerpc64__)
6822 /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
6823 * is not supported by the kernel. The glibc fcntl call actually adjusts
6824 * them to 5, 6 and 7 before making the syscall(). Since we make the
6825 * syscall directly, adjust to what is supported by the kernel.
6827 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6828 ret -= F_GETLK64 - 5;
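/*
 * Worked example: with glibc's F_GETLK64 == 12, F_SETLK64 == 13 and
 * F_SETLKW64 == 14, "ret -= F_GETLK64 - 5" subtracts 7 and yields the
 * kernel's 5, 6 and 7 respectively.
 */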
6835 #define FLOCK_TRANSTBL \
6837 TRANSTBL_CONVERT(F_RDLCK); \
6838 TRANSTBL_CONVERT(F_WRLCK); \
6839 TRANSTBL_CONVERT(F_UNLCK); \
6842 static int target_to_host_flock(int type)
6844 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6846 #undef TRANSTBL_CONVERT
6847 return -TARGET_EINVAL;
6850 static int host_to_target_flock(int type)
6852 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6854 #undef TRANSTBL_CONVERT
6855 /* if we don't know how to convert the value coming
6856 * from the host, we copy it to the target field as-is
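/*
 * Illustration: the TRANSTBL_CONVERT X-macro above expands, for
 * target_to_host_flock(), into a plain switch such as:
 *
 *     switch (type) {
 *     case TARGET_F_RDLCK: return F_RDLCK;
 *     case TARGET_F_WRLCK: return F_WRLCK;
 *     case TARGET_F_UNLCK: return F_UNLCK;
 *     }
 *
 * host_to_target_flock() redefines the macro with the operands swapped,
 * so the same FLOCK_TRANSTBL table generates the inverse mapping.
 */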
6861 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6862 abi_ulong target_flock_addr)
6864 struct target_flock *target_fl;
6867 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6868 return -TARGET_EFAULT;
6871 __get_user(l_type, &target_fl->l_type);
6872 l_type = target_to_host_flock(l_type);
6876 fl->l_type = l_type;
6877 __get_user(fl->l_whence, &target_fl->l_whence);
6878 __get_user(fl->l_start, &target_fl->l_start);
6879 __get_user(fl->l_len, &target_fl->l_len);
6880 __get_user(fl->l_pid, &target_fl->l_pid);
6881 unlock_user_struct(target_fl, target_flock_addr, 0);
6885 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6886 const struct flock64 *fl)
6888 struct target_flock *target_fl;
6891 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6892 return -TARGET_EFAULT;
6895 l_type = host_to_target_flock(fl->l_type);
6896 __put_user(l_type, &target_fl->l_type);
6897 __put_user(fl->l_whence, &target_fl->l_whence);
6898 __put_user(fl->l_start, &target_fl->l_start);
6899 __put_user(fl->l_len, &target_fl->l_len);
6900 __put_user(fl->l_pid, &target_fl->l_pid);
6901 unlock_user_struct(target_fl, target_flock_addr, 1);
6905 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6906 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6908 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6909 struct target_oabi_flock64 {
6917 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6918 abi_ulong target_flock_addr)
6920 struct target_oabi_flock64 *target_fl;
6923 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6924 return -TARGET_EFAULT;
6927 __get_user(l_type, &target_fl->l_type);
6928 l_type = target_to_host_flock(l_type);
6932 fl->l_type = l_type;
6933 __get_user(fl->l_whence, &target_fl->l_whence);
6934 __get_user(fl->l_start, &target_fl->l_start);
6935 __get_user(fl->l_len, &target_fl->l_len);
6936 __get_user(fl->l_pid, &target_fl->l_pid);
6937 unlock_user_struct(target_fl, target_flock_addr, 0);
6941 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6942 const struct flock64 *fl)
6944 struct target_oabi_flock64 *target_fl;
6947 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6948 return -TARGET_EFAULT;
6951 l_type = host_to_target_flock(fl->l_type);
6952 __put_user(l_type, &target_fl->l_type);
6953 __put_user(fl->l_whence, &target_fl->l_whence);
6954 __put_user(fl->l_start, &target_fl->l_start);
6955 __put_user(fl->l_len, &target_fl->l_len);
6956 __put_user(fl->l_pid, &target_fl->l_pid);
6957 unlock_user_struct(target_fl, target_flock_addr, 1);
6962 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6963 abi_ulong target_flock_addr)
6965 struct target_flock64 *target_fl;
6968 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6969 return -TARGET_EFAULT;
6972 __get_user(l_type, &target_fl->l_type);
6973 l_type = target_to_host_flock(l_type);
6977 fl->l_type = l_type;
6978 __get_user(fl->l_whence, &target_fl->l_whence);
6979 __get_user(fl->l_start, &target_fl->l_start);
6980 __get_user(fl->l_len, &target_fl->l_len);
6981 __get_user(fl->l_pid, &target_fl->l_pid);
6982 unlock_user_struct(target_fl, target_flock_addr, 0);
6986 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6987 const struct flock64 *fl)
6989 struct target_flock64 *target_fl;
6992 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6993 return -TARGET_EFAULT;
6996 l_type = host_to_target_flock(fl->l_type);
6997 __put_user(l_type, &target_fl->l_type);
6998 __put_user(fl->l_whence, &target_fl->l_whence);
6999 __put_user(fl->l_start, &target_fl->l_start);
7000 __put_user(fl->l_len, &target_fl->l_len);
7001 __put_user(fl->l_pid, &target_fl->l_pid);
7002 unlock_user_struct(target_fl, target_flock_addr, 1);
7006 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7008 struct flock64 fl64;
7010 struct f_owner_ex fox;
7011 struct target_f_owner_ex *target_fox;
7014 int host_cmd = target_to_host_fcntl_cmd(cmd);
7016 if (host_cmd == -TARGET_EINVAL)
7020 case TARGET_F_GETLK:
7021 ret = copy_from_user_flock(&fl64, arg);
7025 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7027 ret = copy_to_user_flock(arg, &fl64);
7031 case TARGET_F_SETLK:
7032 case TARGET_F_SETLKW:
7033 ret = copy_from_user_flock(&fl64, arg);
7037 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7040 case TARGET_F_GETLK64:
7041 case TARGET_F_OFD_GETLK:
7042 ret = copy_from_user_flock64(&fl64, arg);
7046 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7048 ret = copy_to_user_flock64(arg, &fl64);
7051 case TARGET_F_SETLK64:
7052 case TARGET_F_SETLKW64:
7053 case TARGET_F_OFD_SETLK:
7054 case TARGET_F_OFD_SETLKW:
7055 ret = copy_from_user_flock64(&fl64, arg);
7059 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7062 case TARGET_F_GETFL:
7063 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7065 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7069 case TARGET_F_SETFL:
7070 ret = get_errno(safe_fcntl(fd, host_cmd,
7071 target_to_host_bitmask(arg,
7076 case TARGET_F_GETOWN_EX:
7077 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7079 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7080 return -TARGET_EFAULT;
7081 target_fox->type = tswap32(fox.type);
7082 target_fox->pid = tswap32(fox.pid);
7083 unlock_user_struct(target_fox, arg, 1);
7089 case TARGET_F_SETOWN_EX:
7090 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7091 return -TARGET_EFAULT;
7092 fox.type = tswap32(target_fox->type);
7093 fox.pid = tswap32(target_fox->pid);
7094 unlock_user_struct(target_fox, arg, 0);
7095 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7099 case TARGET_F_SETSIG:
7100 ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7103 case TARGET_F_GETSIG:
7104 ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7107 case TARGET_F_SETOWN:
7108 case TARGET_F_GETOWN:
7109 case TARGET_F_SETLEASE:
7110 case TARGET_F_GETLEASE:
7111 case TARGET_F_SETPIPE_SZ:
7112 case TARGET_F_GETPIPE_SZ:
7113 case TARGET_F_ADD_SEALS:
7114 case TARGET_F_GET_SEALS:
7115 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7119 ret = get_errno(safe_fcntl(fd, cmd, arg));
7127 static inline int high2lowuid(int uid)
7135 static inline int high2lowgid(int gid)
7143 static inline int low2highuid(int uid)
7145 if ((int16_t)uid == -1)
7151 static inline int low2highgid(int gid)
7153 if ((int16_t)gid == -1)
7158 static inline int tswapid(int id)
7163 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7165 #else /* !USE_UID16 */
7166 static inline int high2lowuid(int uid)
7170 static inline int high2lowgid(int gid)
7174 static inline int low2highuid(int uid)
7178 static inline int low2highgid(int gid)
7182 static inline int tswapid(int id)
7187 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7189 #endif /* USE_UID16 */
7191 /* We must do direct syscalls for setting UID/GID, because we want to
7192 * implement the Linux system call semantics of "change only for this thread",
7193 * not the libc/POSIX semantics of "change for all threads in process".
7194 * (See http://ewontfix.com/17/ for more details.)
7195 * We use the 32-bit version of the syscalls if present; if it is not
7196 * then either the host architecture supports 32-bit UIDs natively with
7197 * the standard syscall, or the 16-bit UID is the best we can do.
7199 #ifdef __NR_setuid32
7200 #define __NR_sys_setuid __NR_setuid32
7202 #define __NR_sys_setuid __NR_setuid
7204 #ifdef __NR_setgid32
7205 #define __NR_sys_setgid __NR_setgid32
7207 #define __NR_sys_setgid __NR_setgid
7209 #ifdef __NR_setresuid32
7210 #define __NR_sys_setresuid __NR_setresuid32
7212 #define __NR_sys_setresuid __NR_setresuid
7214 #ifdef __NR_setresgid32
7215 #define __NR_sys_setresgid __NR_setresgid32
7217 #define __NR_sys_setresgid __NR_setresgid
7220 _syscall1(int, sys_setuid, uid_t, uid)
7221 _syscall1(int, sys_setgid, gid_t, gid)
7222 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7223 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
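/*
 * Illustration of the semantic difference described above; a minimal
 * sketch assuming a Linux host (the helper name is made up):
 *
 *     #include <unistd.h>      // syscall()
 *     #include <sys/syscall.h> // __NR_* numbers
 *
 *     static int setuid_this_thread_only(uid_t uid)
 *     {
 *         // glibc's setuid() would broadcast the credential change to
 *         // every thread in the process (POSIX semantics); the raw
 *         // syscall changes only the calling thread, which matches the
 *         // kernel-level semantics the guest expects.
 *         return syscall(__NR_sys_setuid, uid);
 *     }
 */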
7225 void syscall_init(void)
7228 const argtype *arg_type;
7231 thunk_init(STRUCT_MAX);
7233 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7234 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7235 #include "syscall_types.h"
7237 #undef STRUCT_SPECIAL
7239 /* We patch the ioctl size if necessary. We rely on the fact that
7240 no ioctl has all the bits set to '1' in the size field */
7242 while (ie->target_cmd != 0) {
7243 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7244 TARGET_IOC_SIZEMASK) {
7245 arg_type = ie->arg_type;
7246 if (arg_type[0] != TYPE_PTR) {
7247 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7252 size = thunk_type_size(arg_type, 0);
7253 ie->target_cmd = (ie->target_cmd &
7254 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7255 (size << TARGET_IOC_SIZESHIFT);
7258 /* automatic consistency check if host and target are the same arch */
7259 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7260 (defined(__x86_64__) && defined(TARGET_X86_64))
7261 if (unlikely(ie->target_cmd != ie->host_cmd)) {
7262 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7263 ie->name, ie->target_cmd, ie->host_cmd);
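/*
 * Illustration of the size patching above: a Linux ioctl number packs
 * four bitfields, conventionally dir|size|type|nr (2+14+8+8 bits in the
 * asm-generic layout; the exact widths are per-architecture, so that is
 * an assumption for this sketch). Rewriting the size field is then:
 *
 *     // Clear the placeholder (all-ones) size, then insert the real
 *     // thunk-computed size of the pointed-to structure.
 *     cmd &= ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT);
 *     cmd |= size << TARGET_IOC_SIZESHIFT;
 */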
7270 #ifdef TARGET_NR_truncate64
7271 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7276 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7280 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7284 #ifdef TARGET_NR_ftruncate64
7285 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7290 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7294 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7298 #if defined(TARGET_NR_timer_settime) || \
7299 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7300 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7301 abi_ulong target_addr)
7303 if (target_to_host_timespec(&host_its->it_interval, target_addr +
7304 offsetof(struct target_itimerspec,
7306 target_to_host_timespec(&host_its->it_value, target_addr +
7307 offsetof(struct target_itimerspec,
7309 return -TARGET_EFAULT;
7316 #if defined(TARGET_NR_timer_settime64) || \
7317 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7318 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7319 abi_ulong target_addr)
7321 if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7322 offsetof(struct target__kernel_itimerspec,
7324 target_to_host_timespec64(&host_its->it_value, target_addr +
7325 offsetof(struct target__kernel_itimerspec,
7327 return -TARGET_EFAULT;
7334 #if ((defined(TARGET_NR_timerfd_gettime) || \
7335 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7336 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7337 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7338 struct itimerspec *host_its)
7340 if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7342 &host_its->it_interval) ||
7343 host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7345 &host_its->it_value)) {
7346 return -TARGET_EFAULT;
7352 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7353 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7354 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7355 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7356 struct itimerspec *host_its)
7358 if (host_to_target_timespec64(target_addr +
7359 offsetof(struct target__kernel_itimerspec,
7361 &host_its->it_interval) ||
7362 host_to_target_timespec64(target_addr +
7363 offsetof(struct target__kernel_itimerspec,
7365 &host_its->it_value)) {
7366 return -TARGET_EFAULT;
7372 #if defined(TARGET_NR_adjtimex) || \
7373 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7374 static inline abi_long target_to_host_timex(struct timex *host_tx,
7375 abi_long target_addr)
7377 struct target_timex *target_tx;
7379 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7380 return -TARGET_EFAULT;
7383 __get_user(host_tx->modes, &target_tx->modes);
7384 __get_user(host_tx->offset, &target_tx->offset);
7385 __get_user(host_tx->freq, &target_tx->freq);
7386 __get_user(host_tx->maxerror, &target_tx->maxerror);
7387 __get_user(host_tx->esterror, &target_tx->esterror);
7388 __get_user(host_tx->status, &target_tx->status);
7389 __get_user(host_tx->constant, &target_tx->constant);
7390 __get_user(host_tx->precision, &target_tx->precision);
7391 __get_user(host_tx->tolerance, &target_tx->tolerance);
7392 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7393 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7394 __get_user(host_tx->tick, &target_tx->tick);
7395 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7396 __get_user(host_tx->jitter, &target_tx->jitter);
7397 __get_user(host_tx->shift, &target_tx->shift);
7398 __get_user(host_tx->stabil, &target_tx->stabil);
7399 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7400 __get_user(host_tx->calcnt, &target_tx->calcnt);
7401 __get_user(host_tx->errcnt, &target_tx->errcnt);
7402 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7403 __get_user(host_tx->tai, &target_tx->tai);
7405 unlock_user_struct(target_tx, target_addr, 0);
7409 static inline abi_long host_to_target_timex(abi_long target_addr,
7410 struct timex *host_tx)
7412 struct target_timex *target_tx;
7414 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7415 return -TARGET_EFAULT;
7418 __put_user(host_tx->modes, &target_tx->modes);
7419 __put_user(host_tx->offset, &target_tx->offset);
7420 __put_user(host_tx->freq, &target_tx->freq);
7421 __put_user(host_tx->maxerror, &target_tx->maxerror);
7422 __put_user(host_tx->esterror, &target_tx->esterror);
7423 __put_user(host_tx->status, &target_tx->status);
7424 __put_user(host_tx->constant, &target_tx->constant);
7425 __put_user(host_tx->precision, &target_tx->precision);
7426 __put_user(host_tx->tolerance, &target_tx->tolerance);
7427 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7428 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7429 __put_user(host_tx->tick, &target_tx->tick);
7430 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7431 __put_user(host_tx->jitter, &target_tx->jitter);
7432 __put_user(host_tx->shift, &target_tx->shift);
7433 __put_user(host_tx->stabil, &target_tx->stabil);
7434 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7435 __put_user(host_tx->calcnt, &target_tx->calcnt);
7436 __put_user(host_tx->errcnt, &target_tx->errcnt);
7437 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7438 __put_user(host_tx->tai, &target_tx->tai);
7440 unlock_user_struct(target_tx, target_addr, 1);
7446 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7447 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7448 abi_long target_addr)
7450 struct target__kernel_timex *target_tx;
7452 if (copy_from_user_timeval64(&host_tx->time, target_addr +
7453 offsetof(struct target__kernel_timex,
7455 return -TARGET_EFAULT;
7458 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7459 return -TARGET_EFAULT;
7462 __get_user(host_tx->modes, &target_tx->modes);
7463 __get_user(host_tx->offset, &target_tx->offset);
7464 __get_user(host_tx->freq, &target_tx->freq);
7465 __get_user(host_tx->maxerror, &target_tx->maxerror);
7466 __get_user(host_tx->esterror, &target_tx->esterror);
7467 __get_user(host_tx->status, &target_tx->status);
7468 __get_user(host_tx->constant, &target_tx->constant);
7469 __get_user(host_tx->precision, &target_tx->precision);
7470 __get_user(host_tx->tolerance, &target_tx->tolerance);
7471 __get_user(host_tx->tick, &target_tx->tick);
7472 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7473 __get_user(host_tx->jitter, &target_tx->jitter);
7474 __get_user(host_tx->shift, &target_tx->shift);
7475 __get_user(host_tx->stabil, &target_tx->stabil);
7476 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7477 __get_user(host_tx->calcnt, &target_tx->calcnt);
7478 __get_user(host_tx->errcnt, &target_tx->errcnt);
7479 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7480 __get_user(host_tx->tai, &target_tx->tai);
7482 unlock_user_struct(target_tx, target_addr, 0);
7486 static inline abi_long host_to_target_timex64(abi_long target_addr,
7487 struct timex *host_tx)
7489 struct target__kernel_timex *target_tx;
7491 if (copy_to_user_timeval64(target_addr +
7492 offsetof(struct target__kernel_timex, time),
7494 return -TARGET_EFAULT;
7497 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7498 return -TARGET_EFAULT;
7501 __put_user(host_tx->modes, &target_tx->modes);
7502 __put_user(host_tx->offset, &target_tx->offset);
7503 __put_user(host_tx->freq, &target_tx->freq);
7504 __put_user(host_tx->maxerror, &target_tx->maxerror);
7505 __put_user(host_tx->esterror, &target_tx->esterror);
7506 __put_user(host_tx->status, &target_tx->status);
7507 __put_user(host_tx->constant, &target_tx->constant);
7508 __put_user(host_tx->precision, &target_tx->precision);
7509 __put_user(host_tx->tolerance, &target_tx->tolerance);
7510 __put_user(host_tx->tick, &target_tx->tick);
7511 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7512 __put_user(host_tx->jitter, &target_tx->jitter);
7513 __put_user(host_tx->shift, &target_tx->shift);
7514 __put_user(host_tx->stabil, &target_tx->stabil);
7515 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7516 __put_user(host_tx->calcnt, &target_tx->calcnt);
7517 __put_user(host_tx->errcnt, &target_tx->errcnt);
7518 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7519 __put_user(host_tx->tai, &target_tx->tai);
7521 unlock_user_struct(target_tx, target_addr, 1);
7526 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7527 #define sigev_notify_thread_id _sigev_un._tid
7530 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7531 abi_ulong target_addr)
7533 struct target_sigevent *target_sevp;
7535 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7536 return -TARGET_EFAULT;
7539 /* This union is awkward on 64-bit systems because it has a 32-bit
7540 * integer and a pointer in it; we follow the conversion approach
7541 * used for handling sigval types in signal.c so the guest should get
7542 * the correct value back even if we did a 64-bit byteswap and it's
7543 * using the 32-bit integer.
7545 host_sevp->sigev_value.sival_ptr =
7546 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7547 host_sevp->sigev_signo =
7548 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7549 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7550 host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7552 unlock_user_struct(target_sevp, target_addr, 1);
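/*
 * Illustration of the awkward union mentioned above; a sigval is
 * effectively:
 *
 *     union sigval {
 *         int   sival_int;   // always 32 bits
 *         void *sival_ptr;   // 32 or 64 bits depending on the ABI
 *     };
 *
 * Swapping the stored value as a whole abi_long (tswapal) keeps
 * sival_ptr correct across a 64-bit byteswap, and because signal.c
 * applies the same convention on delivery, a guest that only ever
 * reads sival_int still sees its original 32-bit value.
 */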
7556 #if defined(TARGET_NR_mlockall)
7557 static inline int target_to_host_mlockall_arg(int arg)
7561 if (arg & TARGET_MCL_CURRENT) {
7562 result |= MCL_CURRENT;
7564 if (arg & TARGET_MCL_FUTURE) {
7565 result |= MCL_FUTURE;
7568 if (arg & TARGET_MCL_ONFAULT) {
7569 result |= MCL_ONFAULT;
7577 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7578 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7579 defined(TARGET_NR_newfstatat))
7580 static inline abi_long host_to_target_stat64(void *cpu_env,
7581 abi_ulong target_addr,
7582 struct stat *host_st)
7584 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7585 if (((CPUARMState *)cpu_env)->eabi) {
7586 struct target_eabi_stat64 *target_st;
7588 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7589 return -TARGET_EFAULT;
7590 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7591 __put_user(host_st->st_dev, &target_st->st_dev);
7592 __put_user(host_st->st_ino, &target_st->st_ino);
7593 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7594 __put_user(host_st->st_ino, &target_st->__st_ino);
7596 __put_user(host_st->st_mode, &target_st->st_mode);
7597 __put_user(host_st->st_nlink, &target_st->st_nlink);
7598 __put_user(host_st->st_uid, &target_st->st_uid);
7599 __put_user(host_st->st_gid, &target_st->st_gid);
7600 __put_user(host_st->st_rdev, &target_st->st_rdev);
7601 __put_user(host_st->st_size, &target_st->st_size);
7602 __put_user(host_st->st_blksize, &target_st->st_blksize);
7603 __put_user(host_st->st_blocks, &target_st->st_blocks);
7604 __put_user(host_st->st_atime, &target_st->target_st_atime);
7605 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7606 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7607 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7608 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7609 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7610 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7612 unlock_user_struct(target_st, target_addr, 1);
7616 #if defined(TARGET_HAS_STRUCT_STAT64)
7617 struct target_stat64 *target_st;
7619 struct target_stat *target_st;
7622 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7623 return -TARGET_EFAULT;
7624 memset(target_st, 0, sizeof(*target_st));
7625 __put_user(host_st->st_dev, &target_st->st_dev);
7626 __put_user(host_st->st_ino, &target_st->st_ino);
7627 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7628 __put_user(host_st->st_ino, &target_st->__st_ino);
7630 __put_user(host_st->st_mode, &target_st->st_mode);
7631 __put_user(host_st->st_nlink, &target_st->st_nlink);
7632 __put_user(host_st->st_uid, &target_st->st_uid);
7633 __put_user(host_st->st_gid, &target_st->st_gid);
7634 __put_user(host_st->st_rdev, &target_st->st_rdev);
7635 /* XXX: better use of kernel struct */
7636 __put_user(host_st->st_size, &target_st->st_size);
7637 __put_user(host_st->st_blksize, &target_st->st_blksize);
7638 __put_user(host_st->st_blocks, &target_st->st_blocks);
7639 __put_user(host_st->st_atime, &target_st->target_st_atime);
7640 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7641 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7642 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7643 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7644 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7645 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7647 unlock_user_struct(target_st, target_addr, 1);
7654 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7655 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7656 abi_ulong target_addr)
7658 struct target_statx *target_stx;
7660 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7661 return -TARGET_EFAULT;
7663 memset(target_stx, 0, sizeof(*target_stx));
7665 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7666 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7667 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7668 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7669 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7670 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7671 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7672 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7673 __put_user(host_stx->stx_size, &target_stx->stx_size);
7674 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7675 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7676 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7677 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7678 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7679 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7680 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7681 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7682 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7683 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7684 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7685 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7686 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7687 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7689 unlock_user_struct(target_stx, target_addr, 1);
7695 static int do_sys_futex(int *uaddr, int op, int val,
7696 const struct timespec *timeout, int *uaddr2,
7699 #if HOST_LONG_BITS == 64
7700 #if defined(__NR_futex)
7701 /* time_t is always 64-bit here, so no _time64 variant is defined */
7702 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7705 #else /* HOST_LONG_BITS == 64 */
7706 #if defined(__NR_futex_time64)
7707 if (sizeof(timeout->tv_sec) == 8) {
7708 /* _time64 function on a 32-bit arch */
7709 return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7712 #if defined(__NR_futex)
7713 /* old function on a 32-bit arch */
7714 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7716 #endif /* HOST_LONG_BITS == 64 */
7717 g_assert_not_reached();
7720 static int do_safe_futex(int *uaddr, int op, int val,
7721 const struct timespec *timeout, int *uaddr2,
7724 #if HOST_LONG_BITS == 64
7725 #if defined(__NR_futex)
7726 /* time_t is always 64-bit here, so no _time64 variant is defined */
7727 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7729 #else /* HOST_LONG_BITS == 64 */
7730 #if defined(__NR_futex_time64)
7731 if (sizeof(timeout->tv_sec) == 8) {
7732 /* _time64 function on a 32-bit arch */
7733 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7737 #if defined(__NR_futex)
7738 /* old function on a 32-bit arch */
7739 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7741 #endif /* HOST_LONG_BITS == 64 */
7742 return -TARGET_ENOSYS;
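/*
 * Usage sketch for the helper above (hypothetical, host pointers only;
 * error handling elided). FUTEX_WAIT atomically checks that *uaddr
 * still equals the expected value and sleeps; FUTEX_WAKE wakes up to
 * val waiters:
 *
 *     static int flag;
 *
 *     // waiter: sleep while flag is still 0 (callers re-check the
 *     // condition on spurious wakeups)
 *     do_safe_futex(&flag, FUTEX_WAIT, 0, NULL, NULL, 0);
 *
 *     // waker: publish the new value (atomically, in real code),
 *     // then wake one waiter
 *     flag = 1;
 *     do_safe_futex(&flag, FUTEX_WAKE, 1, NULL, NULL, 0);
 */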
7745 /* ??? Using host futex calls even when target atomic operations
7746 are not really atomic probably breaks things. However, implementing
7747 futexes locally would make futexes shared between multiple processes
7748 tricky. That said, they're probably useless anyway, because guest
7749 atomic operations won't work either. */
7750 #if defined(TARGET_NR_futex)
7751 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7752 target_ulong timeout, target_ulong uaddr2, int val3)
7754 struct timespec ts, *pts;
7757 /* ??? We assume FUTEX_* constants are the same on both host and target. */
7759 #ifdef FUTEX_CMD_MASK
7760 base_op = op & FUTEX_CMD_MASK;
7766 case FUTEX_WAIT_BITSET:
7769 target_to_host_timespec(pts, timeout);
7773 return do_safe_futex(g2h(cpu, uaddr),
7774 op, tswap32(val), pts, NULL, val3);
7776 return do_safe_futex(g2h(cpu, uaddr),
7777 op, val, NULL, NULL, 0);
7779 return do_safe_futex(g2h(cpu, uaddr),
7780 op, val, NULL, NULL, 0);
7782 case FUTEX_CMP_REQUEUE:
7784 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7785 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7786 But the prototype takes a `struct timespec *'; insert casts
7787 to satisfy the compiler. We do not need to tswap TIMEOUT
7788 since it's not compared to guest memory. */
7789 pts = (struct timespec *)(uintptr_t) timeout;
7790 return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7791 (base_op == FUTEX_CMP_REQUEUE
7792 ? tswap32(val3) : val3));
7794 return -TARGET_ENOSYS;
7799 #if defined(TARGET_NR_futex_time64)
7800 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7801 int val, target_ulong timeout,
7802 target_ulong uaddr2, int val3)
7804 struct timespec ts, *pts;
7807 /* ??? We assume FUTEX_* constants are the same on both host and target. */
7809 #ifdef FUTEX_CMD_MASK
7810 base_op = op & FUTEX_CMD_MASK;
7816 case FUTEX_WAIT_BITSET:
7819 if (target_to_host_timespec64(pts, timeout)) {
7820 return -TARGET_EFAULT;
7825 return do_safe_futex(g2h(cpu, uaddr), op,
7826 tswap32(val), pts, NULL, val3);
7828 return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7830 return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7832 case FUTEX_CMP_REQUEUE:
7834 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7835 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7836 But the prototype takes a `struct timespec *'; insert casts
7837 to satisfy the compiler. We do not need to tswap TIMEOUT
7838 since it's not compared to guest memory. */
7839 pts = (struct timespec *)(uintptr_t) timeout;
7840 return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7841 (base_op == FUTEX_CMP_REQUEUE
7842 ? tswap32(val3) : val3));
7844 return -TARGET_ENOSYS;
7849 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7850 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7851 abi_long handle, abi_long mount_id,
7854 struct file_handle *target_fh;
7855 struct file_handle *fh;
7859 unsigned int size, total_size;
7861 if (get_user_s32(size, handle)) {
7862 return -TARGET_EFAULT;
7865 name = lock_user_string(pathname);
7867 return -TARGET_EFAULT;
7870 total_size = sizeof(struct file_handle) + size;
7871 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7873 unlock_user(name, pathname, 0);
7874 return -TARGET_EFAULT;
7877 fh = g_malloc0(total_size);
7878 fh->handle_bytes = size;
7880 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7881 unlock_user(name, pathname, 0);
7883 /* man name_to_handle_at(2):
7884 * Other than the use of the handle_bytes field, the caller should treat
7885 * the file_handle structure as an opaque data type
7888 memcpy(target_fh, fh, total_size);
7889 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7890 target_fh->handle_type = tswap32(fh->handle_type);
7892 unlock_user(target_fh, handle, total_size);
7894 if (put_user_s32(mid, mount_id)) {
7895 return -TARGET_EFAULT;
7903 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7904 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7907 struct file_handle *target_fh;
7908 struct file_handle *fh;
7909 unsigned int size, total_size;
7912 if (get_user_s32(size, handle)) {
7913 return -TARGET_EFAULT;
7916 total_size = sizeof(struct file_handle) + size;
7917 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7919 return -TARGET_EFAULT;
7922 fh = g_memdup(target_fh, total_size);
7923 fh->handle_bytes = size;
7924 fh->handle_type = tswap32(target_fh->handle_type);
7926 ret = get_errno(open_by_handle_at(mount_fd, fh,
7927 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7931 unlock_user(target_fh, handle, total_size);
7937 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7939 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7942 target_sigset_t *target_mask;
7946 if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7947 return -TARGET_EINVAL;
7949 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7950 return -TARGET_EFAULT;
7953 target_to_host_sigset(&host_mask, target_mask);
7955 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7957 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7959 fd_trans_register(ret, &target_signalfd_trans);
7962 unlock_user_struct(target_mask, mask, 0);
7968 /* Map host to target signal numbers for the wait family of syscalls.
7969 Assume all other status bits are the same. */
7970 int host_to_target_waitstatus(int status)
7972 if (WIFSIGNALED(status)) {
7973 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7975 if (WIFSTOPPED(status)) {
7976 return (host_to_target_signal(WSTOPSIG(status)) << 8)
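/*
 * Illustration of the status layout relied on here (the common Linux
 * encoding): bits 0-6 hold the terminating signal (0x7f marks a
 * stopped child), bit 7 is the core-dump flag, and bits 8-15 hold the
 * exit code or the stopping signal. Hence only the signal bits need
 * translating (s is the host status word):
 *
 *     killed : host_to_target_signal(WTERMSIG(s)) | (s & ~0x7f)
 *     stopped: (host_to_target_signal(WSTOPSIG(s)) << 8) | (s & 0xff)
 */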
7982 static int open_self_cmdline(void *cpu_env, int fd)
7984 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7985 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7988 for (i = 0; i < bprm->argc; i++) {
7989 size_t len = strlen(bprm->argv[i]) + 1;
7991 if (write(fd, bprm->argv[i], len) != len) {
7999 static int open_self_maps(void *cpu_env, int fd)
8001 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
8002 TaskState *ts = cpu->opaque;
8003 GSList *map_info = read_self_maps();
8007 for (s = map_info; s; s = g_slist_next(s)) {
8008 MapInfo *e = (MapInfo *) s->data;
8010 if (h2g_valid(e->start)) {
8011 unsigned long min = e->start;
8012 unsigned long max = e->end;
8013 int flags = page_get_flags(h2g(min));
8016 max = h2g_valid(max - 1) ?
8017 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8019 if (page_check_range(h2g(min), max - min, flags) == -1) {
8023 if (h2g(min) == ts->info->stack_limit) {
8029 count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8030 " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8031 h2g(min), h2g(max - 1) + 1,
8032 (flags & PAGE_READ) ? 'r' : '-',
8033 (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8034 (flags & PAGE_EXEC) ? 'x' : '-',
8035 e->is_priv ? 'p' : 's',
8036 (uint64_t) e->offset, e->dev, e->inode);
8038 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8045 free_self_maps(map_info);
8047 #ifdef TARGET_VSYSCALL_PAGE
8049 * We only support execution from the vsyscall page.
8050 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8052 count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8053 " --xp 00000000 00:00 0",
8054 TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8055 dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
8061 static int open_self_stat(void *cpu_env, int fd)
8063 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
8064 TaskState *ts = cpu->opaque;
8065 g_autoptr(GString) buf = g_string_new(NULL);
8068 for (i = 0; i < 44; i++) {
8071 g_string_printf(buf, FMT_pid " ", getpid());
8072 } else if (i == 1) {
8074 gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8075 bin = bin ? bin + 1 : ts->bprm->argv[0];
8076 g_string_printf(buf, "(%.15s) ", bin);
8077 } else if (i == 3) {
8079 g_string_printf(buf, FMT_pid " ", getppid());
8080 } else if (i == 27) {
8082 g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8084 /* for all other fields, just report 0 */
8085 g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8088 if (write(fd, buf->str, buf->len) != buf->len) {
8096 static int open_self_auxv(void *cpu_env, int fd)
8098 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
8099 TaskState *ts = cpu->opaque;
8100 abi_ulong auxv = ts->info->saved_auxv;
8101 abi_ulong len = ts->info->auxv_len;
8105 * The auxiliary vector is stored on the target process stack;
8106 * read the whole auxv vector and copy it to the file.
8108 ptr = lock_user(VERIFY_READ, auxv, len, 0);
8112 r = write(fd, ptr, len);
8119 lseek(fd, 0, SEEK_SET);
8120 unlock_user(ptr, auxv, len);
8126 static int is_proc_myself(const char *filename, const char *entry)
8128 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8129 filename += strlen("/proc/");
8130 if (!strncmp(filename, "self/", strlen("self/"))) {
8131 filename += strlen("self/");
8132 } else if (*filename >= '1' && *filename <= '9') {
8134 snprintf(myself, sizeof(myself), "%d/", getpid());
8135 if (!strncmp(filename, myself, strlen(myself))) {
8136 filename += strlen(myself);
8143 if (!strcmp(filename, entry)) {
8150 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
8151 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8152 static int is_proc(const char *filename, const char *entry)
8154 return strcmp(filename, entry) == 0;
8158 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8159 static int open_net_route(void *cpu_env, int fd)
8166 fp = fopen("/proc/net/route", "r");
8173 read = getline(&line, &len, fp);
8174 dprintf(fd, "%s", line);
8178 while ((read = getline(&line, &len, fp)) != -1) {
8180 uint32_t dest, gw, mask;
8181 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8184 fields = sscanf(line,
8185 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8186 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8187 &mask, &mtu, &window, &irtt);
8191 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8192 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8193 metric, tswap32(mask), mtu, window, irtt);
8203 #if defined(TARGET_SPARC)
8204 static int open_cpuinfo(void *cpu_env, int fd)
8206 dprintf(fd, "type\t\t: sun4u\n");
8211 #if defined(TARGET_HPPA)
8212 static int open_cpuinfo(void *cpu_env, int fd)
8214 dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8215 dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8216 dprintf(fd, "capabilities\t: os32\n");
8217 dprintf(fd, "model\t\t: 9000/778/B160L\n");
8218 dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8223 #if defined(TARGET_M68K)
8224 static int open_hardware(void *cpu_env, int fd)
8226 dprintf(fd, "Model:\t\tqemu-m68k\n");
8231 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8234 const char *filename;
8235 int (*fill)(void *cpu_env, int fd);
8236 int (*cmp)(const char *s1, const char *s2);
8238 const struct fake_open *fake_open;
8239 static const struct fake_open fakes[] = {
8240 { "maps", open_self_maps, is_proc_myself },
8241 { "stat", open_self_stat, is_proc_myself },
8242 { "auxv", open_self_auxv, is_proc_myself },
8243 { "cmdline", open_self_cmdline, is_proc_myself },
8244 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8245 { "/proc/net/route", open_net_route, is_proc },
8247 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8248 { "/proc/cpuinfo", open_cpuinfo, is_proc },
8250 #if defined(TARGET_M68K)
8251 { "/proc/hardware", open_hardware, is_proc },
8253 { NULL, NULL, NULL }
8256 if (is_proc_myself(pathname, "exe")) {
8257 int execfd = qemu_getauxval(AT_EXECFD);
8258 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8261 for (fake_open = fakes; fake_open->filename; fake_open++) {
8262 if (fake_open->cmp(pathname, fake_open->filename)) {
8267 if (fake_open->filename) {
8269 char filename[PATH_MAX];
8272 /* create a temporary file to hold the synthesized contents */
8273 tmpdir = getenv("TMPDIR");
8276 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8277 fd = mkstemp(filename);
8283 if ((r = fake_open->fill(cpu_env, fd))) {
8289 lseek(fd, 0, SEEK_SET);
8294 return safe_openat(dirfd, path(pathname), flags, mode);
8297 #define TIMER_MAGIC 0x0caf0000
8298 #define TIMER_MAGIC_MASK 0xffff0000
8300 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8301 static target_timer_t get_timer_id(abi_long arg)
8303 target_timer_t timerid = arg;
8305 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8306 return -TARGET_EINVAL;
8311 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8312 return -TARGET_EINVAL;
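/*
 * Illustration: guest-visible timer IDs are the internal array index
 * tagged with TIMER_MAGIC in the upper 16 bits, so stray integers are
 * rejected cheaply:
 *
 *     abi_long guest_id = TIMER_MAGIC | index;           // on create
 *     uint16_t index    = guest_id & ~TIMER_MAGIC_MASK;  // on lookup
 *
 * e.g. index 3 is handed out as 0x0caf0003, and any value whose top
 * half is not 0x0caf fails the magic check above.
 */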
8318 static int target_to_host_cpu_mask(unsigned long *host_mask,
8320 abi_ulong target_addr,
8323 unsigned target_bits = sizeof(abi_ulong) * 8;
8324 unsigned host_bits = sizeof(*host_mask) * 8;
8325 abi_ulong *target_mask;
8328 assert(host_size >= target_size);
8330 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8332 return -TARGET_EFAULT;
8334 memset(host_mask, 0, host_size);
8336 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8337 unsigned bit = i * target_bits;
8340 __get_user(val, &target_mask[i]);
8341 for (j = 0; j < target_bits; j++, bit++) {
8342 if (val & (1UL << j)) {
8343 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8348 unlock_user(target_mask, target_addr, 0);
8352 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8354 abi_ulong target_addr,
8357 unsigned target_bits = sizeof(abi_ulong) * 8;
8358 unsigned host_bits = sizeof(*host_mask) * 8;
8359 abi_ulong *target_mask;
8362 assert(host_size >= target_size);
8364 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8366 return -TARGET_EFAULT;
8369 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8370 unsigned bit = i * target_bits;
8373 for (j = 0; j < target_bits; j++, bit++) {
8374 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8378 __put_user(val, &target_mask[i]);
8381 unlock_user(target_mask, target_addr, target_size);
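/*
 * Worked example for the two converters above, assuming a 32-bit guest
 * (abi_ulong is 32 bits) on a 64-bit host (unsigned long is 64 bits):
 * the guest words { 0x00000001, 0x80000000 } name CPUs 0 and 63; bit
 * i*32+j of the guest mask becomes bit (i*32+j) % 64 of host word
 * (i*32+j) / 64, so both land in host_mask[0] == 0x8000000000000001.
 */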
8385 #ifdef TARGET_NR_getdents
8386 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8388 g_autofree void *hdirp = NULL;
8390 int hlen, hoff, toff;
8391 int hreclen, treclen;
8392 off64_t prev_diroff = 0;
8394 hdirp = g_try_malloc(count);
8396 return -TARGET_ENOMEM;
8399 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8400 hlen = sys_getdents(dirfd, hdirp, count);
8402 hlen = sys_getdents64(dirfd, hdirp, count);
8405 hlen = get_errno(hlen);
8406 if (is_error(hlen)) {
8410 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8412 return -TARGET_EFAULT;
8415 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8416 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8417 struct linux_dirent *hde = hdirp + hoff;
8419 struct linux_dirent64 *hde = hdirp + hoff;
8421 struct target_dirent *tde = tdirp + toff;
8425 namelen = strlen(hde->d_name);
8426 hreclen = hde->d_reclen;
8427 treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8428 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8430 if (toff + treclen > count) {
8432 * If the host struct is smaller than the target struct, or
8433 * requires less alignment and thus packs into less space,
8434 * then the host can return more entries than we can pass on to the guest.
8438 toff = -TARGET_EINVAL; /* result buffer is too small */
8442 * Return what we have, resetting the file pointer to the
8443 * location of the first record not returned.
8445 lseek64(dirfd, prev_diroff, SEEK_SET);
8449 prev_diroff = hde->d_off;
8450 tde->d_ino = tswapal(hde->d_ino);
8451 tde->d_off = tswapal(hde->d_off);
8452 tde->d_reclen = tswap16(treclen);
8453 memcpy(tde->d_name, hde->d_name, namelen + 1);
8456 * The getdents type is in what was formerly a padding byte at the
8457 * end of the structure.
8459 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8460 type = *((uint8_t *)hde + hreclen - 1);
8464 *((uint8_t *)tde + treclen - 1) = type;
8467 unlock_user(tdirp, arg2, toff);
8470 #endif /* TARGET_NR_getdents */
8472 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8473 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8475 g_autofree void *hdirp = NULL;
8477 int hlen, hoff, toff;
8478 int hreclen, treclen;
8479 off64_t prev_diroff = 0;
8481 hdirp = g_try_malloc(count);
8483 return -TARGET_ENOMEM;
8486 hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8487 if (is_error(hlen)) {
8491 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8493 return -TARGET_EFAULT;
8496 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8497 struct linux_dirent64 *hde = hdirp + hoff;
8498 struct target_dirent64 *tde = tdirp + toff;
8501 namelen = strlen(hde->d_name) + 1;
8502 hreclen = hde->d_reclen;
8503 treclen = offsetof(struct target_dirent64, d_name) + namelen;
8504 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8506 if (toff + treclen > count) {
8508 * If the host struct is smaller than the target struct, or
8509 * requires less alignment and thus packs into less space,
8510 * then the host can return more entries than we can pass on to the guest.
8514 toff = -TARGET_EINVAL; /* result buffer is too small */
8518 * Return what we have, resetting the file pointer to the
8519 * location of the first record not returned.
8521 lseek64(dirfd, prev_diroff, SEEK_SET);
8525 prev_diroff = hde->d_off;
8526 tde->d_ino = tswap64(hde->d_ino);
8527 tde->d_off = tswap64(hde->d_off);
8528 tde->d_reclen = tswap16(treclen);
8529 tde->d_type = hde->d_type;
8530 memcpy(tde->d_name, hde->d_name, namelen);
8533 unlock_user(tdirp, arg2, toff);
8536 #endif /* TARGET_NR_getdents64 */
8538 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8539 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8542 /* This is an internal helper for do_syscall so that it is easier
8543 * to have a single return point, allowing actions such as logging
8544 * of syscall results to be performed.
8545 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8547 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8548 abi_long arg2, abi_long arg3, abi_long arg4,
8549 abi_long arg5, abi_long arg6, abi_long arg7,
8552 CPUState *cpu = env_cpu(cpu_env);
8554 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8555 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8556 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8557 || defined(TARGET_NR_statx)
8560 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8561 || defined(TARGET_NR_fstatfs)
8567 case TARGET_NR_exit:
8568 /* In old applications this may be used to implement _exit(2).
8569 However, in threaded applications it is used for thread termination,
8570 and _exit_group is used for application termination.
8571 Do thread termination if we have more than one thread. */
8573 if (block_signals()) {
8574 return -QEMU_ERESTARTSYS;
8577 pthread_mutex_lock(&clone_lock);
8579 if (CPU_NEXT(first_cpu)) {
8580 TaskState *ts = cpu->opaque;
8582 object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8583 object_unref(OBJECT(cpu));
8585 * At this point the CPU should be unrealized and removed
8586 * from cpu lists. We can clean-up the rest of the thread
8587 * data without the lock held.
8590 pthread_mutex_unlock(&clone_lock);
8592 if (ts->child_tidptr) {
8593 put_user_u32(0, ts->child_tidptr);
8594 do_sys_futex(g2h(cpu, ts->child_tidptr),
8595 FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8599 rcu_unregister_thread();
8603 pthread_mutex_unlock(&clone_lock);
8604 preexit_cleanup(cpu_env, arg1);
8606 return 0; /* avoid warning */
8607 case TARGET_NR_read:
8608 if (arg2 == 0 && arg3 == 0) {
8609 return get_errno(safe_read(arg1, 0, 0));
8611 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8612 return -TARGET_EFAULT;
8613 ret = get_errno(safe_read(arg1, p, arg3));
8615 fd_trans_host_to_target_data(arg1)) {
8616 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8618 unlock_user(p, arg2, ret);
8621 case TARGET_NR_write:
8622 if (arg2 == 0 && arg3 == 0) {
8623 return get_errno(safe_write(arg1, 0, 0));
8625 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8626 return -TARGET_EFAULT;
8627 if (fd_trans_target_to_host_data(arg1)) {
8628 void *copy = g_malloc(arg3);
8629 memcpy(copy, p, arg3);
8630 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8632 ret = get_errno(safe_write(arg1, copy, ret));
8636 ret = get_errno(safe_write(arg1, p, arg3));
8638 unlock_user(p, arg2, 0);
8641 #ifdef TARGET_NR_open
8642 case TARGET_NR_open:
8643 if (!(p = lock_user_string(arg1)))
8644 return -TARGET_EFAULT;
8645 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8646 target_to_host_bitmask(arg2, fcntl_flags_tbl),
8648 fd_trans_unregister(ret);
8649 unlock_user(p, arg1, 0);
8652 case TARGET_NR_openat:
8653 if (!(p = lock_user_string(arg2)))
8654 return -TARGET_EFAULT;
8655 ret = get_errno(do_openat(cpu_env, arg1, p,
8656 target_to_host_bitmask(arg3, fcntl_flags_tbl),
8658 fd_trans_unregister(ret);
8659 unlock_user(p, arg2, 0);
8661 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8662 case TARGET_NR_name_to_handle_at:
8663 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8666 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8667 case TARGET_NR_open_by_handle_at:
8668 ret = do_open_by_handle_at(arg1, arg2, arg3);
8669 fd_trans_unregister(ret);
8672 case TARGET_NR_close:
8673 fd_trans_unregister(arg1);
8674 return get_errno(close(arg1));
8677 return do_brk(arg1);
8678 #ifdef TARGET_NR_fork
8679 case TARGET_NR_fork:
8680 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8682 #ifdef TARGET_NR_waitpid
8683 case TARGET_NR_waitpid:
8686 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8687 if (!is_error(ret) && arg2 && ret
8688 && put_user_s32(host_to_target_waitstatus(status), arg2))
8689 return -TARGET_EFAULT;
8693 #ifdef TARGET_NR_waitid
8694 case TARGET_NR_waitid:
8698 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8699 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8700 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8701 return -TARGET_EFAULT;
8702 host_to_target_siginfo(p, &info);
8703 unlock_user(p, arg3, sizeof(target_siginfo_t));
8708 #ifdef TARGET_NR_creat /* not on alpha */
8709 case TARGET_NR_creat:
8710 if (!(p = lock_user_string(arg1)))
8711 return -TARGET_EFAULT;
8712 ret = get_errno(creat(p, arg2));
8713 fd_trans_unregister(ret);
8714 unlock_user(p, arg1, 0);
8717 #ifdef TARGET_NR_link
8718 case TARGET_NR_link:
8721 p = lock_user_string(arg1);
8722 p2 = lock_user_string(arg2);
8724 ret = -TARGET_EFAULT;
8726 ret = get_errno(link(p, p2));
8727 unlock_user(p2, arg2, 0);
8728 unlock_user(p, arg1, 0);
8732 #if defined(TARGET_NR_linkat)
8733 case TARGET_NR_linkat:
8737 return -TARGET_EFAULT;
8738 p = lock_user_string(arg2);
8739 p2 = lock_user_string(arg4);
8741 ret = -TARGET_EFAULT;
8743 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8744 unlock_user(p, arg2, 0);
8745 unlock_user(p2, arg4, 0);
8749 #ifdef TARGET_NR_unlink
8750 case TARGET_NR_unlink:
8751 if (!(p = lock_user_string(arg1)))
8752 return -TARGET_EFAULT;
8753 ret = get_errno(unlink(p));
8754 unlock_user(p, arg1, 0);
8757 #if defined(TARGET_NR_unlinkat)
8758 case TARGET_NR_unlinkat:
8759 if (!(p = lock_user_string(arg2)))
8760 return -TARGET_EFAULT;
8761 ret = get_errno(unlinkat(arg1, p, arg3));
8762 unlock_user(p, arg2, 0);
8765 case TARGET_NR_execve:
8767 char **argp, **envp;
8770 abi_ulong guest_argp;
8771 abi_ulong guest_envp;
8777 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8778 if (get_user_ual(addr, gp))
8779 return -TARGET_EFAULT;
8786 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8787 if (get_user_ual(addr, gp))
8788 return -TARGET_EFAULT;
8794 argp = g_new0(char *, argc + 1);
8795 envp = g_new0(char *, envc + 1);
8797 for (gp = guest_argp, q = argp; gp;
8798 gp += sizeof(abi_ulong), q++) {
8799 if (get_user_ual(addr, gp))
8803 if (!(*q = lock_user_string(addr)))
8808 for (gp = guest_envp, q = envp; gp;
8809 gp += sizeof(abi_ulong), q++) {
8810 if (get_user_ual(addr, gp))
8814 if (!(*q = lock_user_string(addr)))
8819 if (!(p = lock_user_string(arg1)))
8821 /* Although execve() is not an interruptible syscall, it is
8822 * a special case where we must use the safe_syscall wrapper:
8823 * if we allow a signal to happen before we make the host
8824 * syscall then we will 'lose' it, because at the point of
8825 * execve the process leaves QEMU's control. So we use the
8826 * safe syscall wrapper to ensure that we either take the
8827 * signal as a guest signal, or else it does not happen
8828 * before the execve completes and makes it the other
8829 * program's problem.
8831 ret = get_errno(safe_execve(p, argp, envp));
8832 unlock_user(p, arg1, 0);
8837 ret = -TARGET_EFAULT;
8840 for (gp = guest_argp, q = argp; *q;
8841 gp += sizeof(abi_ulong), q++) {
8842 if (get_user_ual(addr, gp)
8845 unlock_user(*q, addr, 0);
8847 for (gp = guest_envp, q = envp; *q;
8848 gp += sizeof(abi_ulong), q++) {
8849 if (get_user_ual(addr, gp)
8852 unlock_user(*q, addr, 0);
8859 case TARGET_NR_chdir:
8860 if (!(p = lock_user_string(arg1)))
8861 return -TARGET_EFAULT;
8862 ret = get_errno(chdir(p));
8863 unlock_user(p, arg1, 0);
8865 #ifdef TARGET_NR_time
8866 case TARGET_NR_time:
8869 ret = get_errno(time(&host_time));
8872 && put_user_sal(host_time, arg1))
8873 return -TARGET_EFAULT;
8877 #ifdef TARGET_NR_mknod
8878 case TARGET_NR_mknod:
8879 if (!(p = lock_user_string(arg1)))
8880 return -TARGET_EFAULT;
8881 ret = get_errno(mknod(p, arg2, arg3));
8882 unlock_user(p, arg1, 0);
8885 #if defined(TARGET_NR_mknodat)
8886 case TARGET_NR_mknodat:
8887 if (!(p = lock_user_string(arg2)))
8888 return -TARGET_EFAULT;
8889 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8890 unlock_user(p, arg2, 0);
8893 #ifdef TARGET_NR_chmod
8894 case TARGET_NR_chmod:
8895 if (!(p = lock_user_string(arg1)))
8896 return -TARGET_EFAULT;
8897 ret = get_errno(chmod(p, arg2));
8898 unlock_user(p, arg1, 0);
8901 #ifdef TARGET_NR_lseek
8902 case TARGET_NR_lseek:
8903 return get_errno(lseek(arg1, arg2, arg3));
8905 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8906 /* Alpha specific */
8907 case TARGET_NR_getxpid:
8908 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8909 return get_errno(getpid());
8911 #ifdef TARGET_NR_getpid
8912 case TARGET_NR_getpid:
8913 return get_errno(getpid());
8915 case TARGET_NR_mount:
8917 /* need to look at the data field */
8921 p = lock_user_string(arg1);
8923 return -TARGET_EFAULT;
8929 p2 = lock_user_string(arg2);
8932 unlock_user(p, arg1, 0);
8934 return -TARGET_EFAULT;
8938 p3 = lock_user_string(arg3);
8941 unlock_user(p, arg1, 0);
8943 unlock_user(p2, arg2, 0);
8944 return -TARGET_EFAULT;
8950 /* FIXME - arg5 should be locked, but it isn't clear how to
8951 * do that since it's not guaranteed to be a NULL-terminated string.
8955 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8957 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8959 ret = get_errno(ret);
8962 unlock_user(p, arg1, 0);
8964 unlock_user(p2, arg2, 0);
8966 unlock_user(p3, arg3, 0);
8970 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8971 #if defined(TARGET_NR_umount)
8972 case TARGET_NR_umount:
8974 #if defined(TARGET_NR_oldumount)
8975 case TARGET_NR_oldumount:
8977 if (!(p = lock_user_string(arg1)))
8978 return -TARGET_EFAULT;
8979 ret = get_errno(umount(p));
8980 unlock_user(p, arg1, 0);
8983 #ifdef TARGET_NR_stime /* not on alpha */
8984 case TARGET_NR_stime:
8988 if (get_user_sal(ts.tv_sec, arg1)) {
8989 return -TARGET_EFAULT;
8991 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8994 #ifdef TARGET_NR_alarm /* not on alpha */
8995 case TARGET_NR_alarm:
8998 #ifdef TARGET_NR_pause /* not on alpha */
8999 case TARGET_NR_pause:
9000 if (!block_signals()) {
9001 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9003 return -TARGET_EINTR;
9005 #ifdef TARGET_NR_utime
9006 case TARGET_NR_utime:
9008 struct utimbuf tbuf, *host_tbuf;
9009 struct target_utimbuf *target_tbuf;
9011 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9012 return -TARGET_EFAULT;
9013 tbuf.actime = tswapal(target_tbuf->actime);
9014 tbuf.modtime = tswapal(target_tbuf->modtime);
9015 unlock_user_struct(target_tbuf, arg2, 0);
9020 if (!(p = lock_user_string(arg1)))
9021 return -TARGET_EFAULT;
9022 ret = get_errno(utime(p, host_tbuf));
9023 unlock_user(p, arg1, 0);
9027 #ifdef TARGET_NR_utimes
9028 case TARGET_NR_utimes:
9030 struct timeval *tvp, tv[2];
9032 if (copy_from_user_timeval(&tv[0], arg2)
9033 || copy_from_user_timeval(&tv[1],
9034 arg2 + sizeof(struct target_timeval)))
9035 return -TARGET_EFAULT;
9040 if (!(p = lock_user_string(arg1)))
9041 return -TARGET_EFAULT;
9042 ret = get_errno(utimes(p, tvp));
9043 unlock_user(p, arg1, 0);
9047 #if defined(TARGET_NR_futimesat)
9048 case TARGET_NR_futimesat:
9050 struct timeval *tvp, tv[2];
9052 if (copy_from_user_timeval(&tv[0], arg3)
9053 || copy_from_user_timeval(&tv[1],
9054 arg3 + sizeof(struct target_timeval)))
9055 return -TARGET_EFAULT;
9060 if (!(p = lock_user_string(arg2))) {
9061 return -TARGET_EFAULT;
9063 ret = get_errno(futimesat(arg1, path(p), tvp));
9064 unlock_user(p, arg2, 0);
9068 #ifdef TARGET_NR_access
9069 case TARGET_NR_access:
9070 if (!(p = lock_user_string(arg1))) {
9071 return -TARGET_EFAULT;
9073 ret = get_errno(access(path(p), arg2));
9074 unlock_user(p, arg1, 0);
9077 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9078 case TARGET_NR_faccessat:
9079 if (!(p = lock_user_string(arg2))) {
9080 return -TARGET_EFAULT;
9082 ret = get_errno(faccessat(arg1, p, arg3, 0));
9083 unlock_user(p, arg2, 0);
9086 #ifdef TARGET_NR_nice /* not on alpha */
9087 case TARGET_NR_nice:
9088 return get_errno(nice(arg1));
9090 case TARGET_NR_sync:
9093 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9094 case TARGET_NR_syncfs:
9095 return get_errno(syncfs(arg1));
9097 case TARGET_NR_kill:
9098 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9099 #ifdef TARGET_NR_rename
9100 case TARGET_NR_rename:
9103 p = lock_user_string(arg1);
9104 p2 = lock_user_string(arg2);
9106 ret = -TARGET_EFAULT;
9108 ret = get_errno(rename(p, p2));
9109 unlock_user(p2, arg2, 0);
9110 unlock_user(p, arg1, 0);
9114 #if defined(TARGET_NR_renameat)
9115 case TARGET_NR_renameat:
9118 p = lock_user_string(arg2);
9119 p2 = lock_user_string(arg4);
9121 ret = -TARGET_EFAULT;
9123 ret = get_errno(renameat(arg1, p, arg3, p2));
9124 unlock_user(p2, arg4, 0);
9125 unlock_user(p, arg2, 0);
9129 #if defined(TARGET_NR_renameat2)
9130 case TARGET_NR_renameat2:
9133 p = lock_user_string(arg2);
9134 p2 = lock_user_string(arg4);
9136 ret = -TARGET_EFAULT;
9138 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9140 unlock_user(p2, arg4, 0);
9141 unlock_user(p, arg2, 0);
9145 #ifdef TARGET_NR_mkdir
9146 case TARGET_NR_mkdir:
9147 if (!(p = lock_user_string(arg1)))
9148 return -TARGET_EFAULT;
9149 ret = get_errno(mkdir(p, arg2));
9150 unlock_user(p, arg1, 0);
9153 #if defined(TARGET_NR_mkdirat)
9154 case TARGET_NR_mkdirat:
9155 if (!(p = lock_user_string(arg2)))
9156 return -TARGET_EFAULT;
9157 ret = get_errno(mkdirat(arg1, p, arg3));
9158 unlock_user(p, arg2, 0);
9161 #ifdef TARGET_NR_rmdir
9162 case TARGET_NR_rmdir:
9163 if (!(p = lock_user_string(arg1)))
9164 return -TARGET_EFAULT;
9165 ret = get_errno(rmdir(p));
9166 unlock_user(p, arg1, 0);
9170 ret = get_errno(dup(arg1));
9172 fd_trans_dup(arg1, ret);
9175 #ifdef TARGET_NR_pipe
9176 case TARGET_NR_pipe:
9177 return do_pipe(cpu_env, arg1, 0, 0);
9179 #ifdef TARGET_NR_pipe2
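/* target_to_host_bitmask() remaps individual flag bits through
 * fcntl_flags_tbl, since values such as O_NONBLOCK and O_CLOEXEC can
 * differ numerically between guest and host ABIs. */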
9180 case TARGET_NR_pipe2:
9181 return do_pipe(cpu_env, arg1,
9182 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9184 case TARGET_NR_times:
9186 struct target_tms *tmsp;
9188 ret = get_errno(times(&tms));
9190 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9192 return -TARGET_EFAULT;
9193 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9194 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9195 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9196 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
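/* The return value of times() is itself a clock_t (elapsed ticks since
 * an arbitrary point in the past), so it needs the same conversion as
 * the struct fields above. */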
9199 ret = host_to_target_clock_t(ret);
9202 case TARGET_NR_acct:
9204 ret = get_errno(acct(NULL));
9206 if (!(p = lock_user_string(arg1))) {
9207 return -TARGET_EFAULT;
9209 ret = get_errno(acct(path(p)));
9210 unlock_user(p, arg1, 0);
9213 #ifdef TARGET_NR_umount2
9214 case TARGET_NR_umount2:
9215 if (!(p = lock_user_string(arg1)))
9216 return -TARGET_EFAULT;
9217 ret = get_errno(umount2(p, arg2));
9218 unlock_user(p, arg1, 0);
9221 case TARGET_NR_ioctl:
9222 return do_ioctl(arg1, arg2, arg3);
9223 #ifdef TARGET_NR_fcntl
9224 case TARGET_NR_fcntl:
9225 return do_fcntl(arg1, arg2, arg3);
9227 case TARGET_NR_setpgid:
9228 return get_errno(setpgid(arg1, arg2));
9229 case TARGET_NR_umask:
9230 return get_errno(umask(arg1));
9231 case TARGET_NR_chroot:
9232 if (!(p = lock_user_string(arg1)))
9233 return -TARGET_EFAULT;
9234 ret = get_errno(chroot(p));
9235 unlock_user(p, arg1, 0);
9237 #ifdef TARGET_NR_dup2
9238 case TARGET_NR_dup2:
9239 ret = get_errno(dup2(arg1, arg2));
9241 fd_trans_dup(arg1, arg2);
9245 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9246 case TARGET_NR_dup3:
9250 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9253 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9254 ret = get_errno(dup3(arg1, arg2, host_flags));
9256 fd_trans_dup(arg1, arg2);
9261 #ifdef TARGET_NR_getppid /* not on alpha */
9262 case TARGET_NR_getppid:
9263 return get_errno(getppid());
9265 #ifdef TARGET_NR_getpgrp
9266 case TARGET_NR_getpgrp:
9267 return get_errno(getpgrp());
9269 case TARGET_NR_setsid:
9270 return get_errno(setsid());
9271 #ifdef TARGET_NR_sigaction
9272 case TARGET_NR_sigaction:
9274 #if defined(TARGET_MIPS)
9275 struct target_sigaction act, oact, *pact, *old_act;
9278 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9279 return -TARGET_EFAULT;
9280 act._sa_handler = old_act->_sa_handler;
9281 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9282 act.sa_flags = old_act->sa_flags;
9283 unlock_user_struct(old_act, arg2, 0);
9289 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9291 if (!is_error(ret) && arg3) {
9292 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9293 return -TARGET_EFAULT;
9294 old_act->_sa_handler = oact._sa_handler;
9295 old_act->sa_flags = oact.sa_flags;
9296 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9297 old_act->sa_mask.sig[1] = 0;
9298 old_act->sa_mask.sig[2] = 0;
9299 old_act->sa_mask.sig[3] = 0;
9300 unlock_user_struct(old_act, arg3, 1);
9303 struct target_old_sigaction *old_act;
9304 struct target_sigaction act, oact, *pact;
9306 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9307 return -TARGET_EFAULT;
9308 act._sa_handler = old_act->_sa_handler;
9309 target_siginitset(&act.sa_mask, old_act->sa_mask);
9310 act.sa_flags = old_act->sa_flags;
9311 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9312 act.sa_restorer = old_act->sa_restorer;
9314 unlock_user_struct(old_act, arg2, 0);
9319 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9320 if (!is_error(ret) && arg3) {
9321 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9322 return -TARGET_EFAULT;
9323 old_act->_sa_handler = oact._sa_handler;
9324 old_act->sa_mask = oact.sa_mask.sig[0];
9325 old_act->sa_flags = oact.sa_flags;
9326 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9327 old_act->sa_restorer = oact.sa_restorer;
9329 unlock_user_struct(old_act, arg3, 1);
9335 case TARGET_NR_rt_sigaction:
9338 * For Alpha and SPARC this is a 5 argument syscall, with
9339 * a 'restorer' parameter which must be copied into the
9340 * sa_restorer field of the sigaction struct.
9341 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9342 * and arg5 is the sigsetsize.
9344 #if defined(TARGET_ALPHA)
9345 target_ulong sigsetsize = arg4;
9346 target_ulong restorer = arg5;
9347 #elif defined(TARGET_SPARC)
9348 target_ulong restorer = arg4;
9349 target_ulong sigsetsize = arg5;
9351 target_ulong sigsetsize = arg4;
9352 target_ulong restorer = 0;
9354 struct target_sigaction *act = NULL;
9355 struct target_sigaction *oact = NULL;
9357 if (sigsetsize != sizeof(target_sigset_t)) {
9358 return -TARGET_EINVAL;
9360 if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9361 return -TARGET_EFAULT;
9363 if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9364 ret = -TARGET_EFAULT;
9366 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9368 unlock_user_struct(oact, arg3, 1);
9372 unlock_user_struct(act, arg2, 0);
9376 #ifdef TARGET_NR_sgetmask /* not on alpha */
9377 case TARGET_NR_sgetmask:
9380 abi_ulong target_set;
9381 ret = do_sigprocmask(0, NULL, &cur_set);
9383 host_to_target_old_sigset(&target_set, &cur_set);
9389 #ifdef TARGET_NR_ssetmask /* not on alpha */
9390 case TARGET_NR_ssetmask:
9393 abi_ulong target_set = arg1;
9394 target_to_host_old_sigset(&set, &target_set);
9395 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9397 host_to_target_old_sigset(&target_set, &oset);
9403 #ifdef TARGET_NR_sigprocmask
9404 case TARGET_NR_sigprocmask:
9406 #if defined(TARGET_ALPHA)
9407 sigset_t set, oldset;
9412 case TARGET_SIG_BLOCK:
9415 case TARGET_SIG_UNBLOCK:
9418 case TARGET_SIG_SETMASK:
9422 return -TARGET_EINVAL;
9425 target_to_host_old_sigset(&set, &mask);
9427 ret = do_sigprocmask(how, &set, &oldset);
9428 if (!is_error(ret)) {
9429 host_to_target_old_sigset(&mask, &oldset);
9431 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9434 sigset_t set, oldset, *set_ptr;
9438 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9440 return -TARGET_EFAULT;
9442 target_to_host_old_sigset(&set, p);
9443 unlock_user(p, arg2, 0);
9446 case TARGET_SIG_BLOCK:
9449 case TARGET_SIG_UNBLOCK:
9452 case TARGET_SIG_SETMASK:
9456 return -TARGET_EINVAL;
9462 ret = do_sigprocmask(how, set_ptr, &oldset);
9463 if (!is_error(ret) && arg3) {
9464 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9465 return -TARGET_EFAULT;
9466 host_to_target_old_sigset(p, &oldset);
9467 unlock_user(p, arg3, sizeof(target_sigset_t));
9473 case TARGET_NR_rt_sigprocmask:
9476 sigset_t set, oldset, *set_ptr;
9478 if (arg4 != sizeof(target_sigset_t)) {
9479 return -TARGET_EINVAL;
9483 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9485 return -TARGET_EFAULT;
9487 target_to_host_sigset(&set, p);
9488 unlock_user(p, arg2, 0);
9491 case TARGET_SIG_BLOCK:
9494 case TARGET_SIG_UNBLOCK:
9497 case TARGET_SIG_SETMASK:
9501 return -TARGET_EINVAL;
9507 ret = do_sigprocmask(how, set_ptr, &oldset);
9508 if (!is_error(ret) && arg3) {
9509 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9510 return -TARGET_EFAULT;
9511 host_to_target_sigset(p, &oldset);
9512 unlock_user(p, arg3, sizeof(target_sigset_t));
9516 #ifdef TARGET_NR_sigpending
9517 case TARGET_NR_sigpending:
9520 ret = get_errno(sigpending(&set));
9521 if (!is_error(ret)) {
9522 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9523 return -TARGET_EFAULT;
9524 host_to_target_old_sigset(p, &set);
9525 unlock_user(p, arg1, sizeof(target_sigset_t));
9530 case TARGET_NR_rt_sigpending:
9534 /* Yes, this check is >, not != like most. We follow the kernel's
9535 * logic and it does it like this because it implements
9536 * NR_sigpending through the same code path, and in that case
9537 * the old_sigset_t is smaller in size.
9539 if (arg2 > sizeof(target_sigset_t)) {
9540 return -TARGET_EINVAL;
9543 ret = get_errno(sigpending(&set));
9544 if (!is_error(ret)) {
9545 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9546 return -TARGET_EFAULT;
9547 host_to_target_sigset(p, &set);
9548 unlock_user(p, arg1, sizeof(target_sigset_t));
9552 #ifdef TARGET_NR_sigsuspend
9553 case TARGET_NR_sigsuspend:
9555 TaskState *ts = cpu->opaque;
9556 #if defined(TARGET_ALPHA)
9557 abi_ulong mask = arg1;
9558 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9560 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9561 return -TARGET_EFAULT;
9562 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9563 unlock_user(p, arg1, 0);
9565 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9567 if (ret != -QEMU_ERESTARTSYS) {
9568 ts->in_sigsuspend = 1;
9573 case TARGET_NR_rt_sigsuspend:
9575 TaskState *ts = cpu->opaque;
9577 if (arg2 != sizeof(target_sigset_t)) {
9578 return -TARGET_EINVAL;
9580 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9581 return -TARGET_EFAULT;
9582 target_to_host_sigset(&ts->sigsuspend_mask, p);
9583 unlock_user(p, arg1, 0);
9584 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9586 if (ret != -QEMU_ERESTARTSYS) {
9587 ts->in_sigsuspend = 1;
9591 #ifdef TARGET_NR_rt_sigtimedwait
9592 case TARGET_NR_rt_sigtimedwait:
9595 struct timespec uts, *puts;
9598 if (arg4 != sizeof(target_sigset_t)) {
9599 return -TARGET_EINVAL;
9602 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9603 return -TARGET_EFAULT;
9604 target_to_host_sigset(&set, p);
9605 unlock_user(p, arg1, 0);
9608 if (target_to_host_timespec(puts, arg3)) {
9609 return -TARGET_EFAULT;
9614 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9616 if (!is_error(ret)) {
9618 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9621 return -TARGET_EFAULT;
9623 host_to_target_siginfo(p, &uinfo);
9624 unlock_user(p, arg2, sizeof(target_siginfo_t));
9626 ret = host_to_target_signal(ret);
9631 #ifdef TARGET_NR_rt_sigtimedwait_time64
9632 case TARGET_NR_rt_sigtimedwait_time64:
9635 struct timespec uts, *puts;
9638 if (arg4 != sizeof(target_sigset_t)) {
9639 return -TARGET_EINVAL;
9642 p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9644 return -TARGET_EFAULT;
9646 target_to_host_sigset(&set, p);
9647 unlock_user(p, arg1, 0);
9650 if (target_to_host_timespec64(puts, arg3)) {
9651 return -TARGET_EFAULT;
9656 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9658 if (!is_error(ret)) {
9660 p = lock_user(VERIFY_WRITE, arg2,
9661 sizeof(target_siginfo_t), 0);
9663 return -TARGET_EFAULT;
9665 host_to_target_siginfo(p, &uinfo);
9666 unlock_user(p, arg2, sizeof(target_siginfo_t));
9668 ret = host_to_target_signal(ret);
9673 case TARGET_NR_rt_sigqueueinfo:
9677 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9679 return -TARGET_EFAULT;
9681 target_to_host_siginfo(&uinfo, p);
9682 unlock_user(p, arg3, 0);
9683 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9686 case TARGET_NR_rt_tgsigqueueinfo:
9690 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9692 return -TARGET_EFAULT;
9694 target_to_host_siginfo(&uinfo, p);
9695 unlock_user(p, arg4, 0);
9696 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9699 #ifdef TARGET_NR_sigreturn
9700 case TARGET_NR_sigreturn:
9701 if (block_signals()) {
9702 return -QEMU_ERESTARTSYS;
9704 return do_sigreturn(cpu_env);
9706 case TARGET_NR_rt_sigreturn:
9707 if (block_signals()) {
9708 return -QEMU_ERESTARTSYS;
9710 return do_rt_sigreturn(cpu_env);
9711 case TARGET_NR_sethostname:
9712 if (!(p = lock_user_string(arg1)))
9713 return -TARGET_EFAULT;
9714 ret = get_errno(sethostname(p, arg2));
9715 unlock_user(p, arg1, 0);
9717 #ifdef TARGET_NR_setrlimit
9718 case TARGET_NR_setrlimit:
9720 int resource = target_to_host_resource(arg1);
9721 struct target_rlimit *target_rlim;
9723 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9724 return -TARGET_EFAULT;
9725 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9726 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9727 unlock_user_struct(target_rlim, arg2, 0);
9729 * If we just passed through resource limit settings for memory then
9730 * they would also apply to QEMU's own allocations, and QEMU will
9731 * crash or hang or die if its allocations fail. Ideally we would
9732 * track the guest allocations in QEMU and apply the limits ourselves.
9733 * For now, just tell the guest the call succeeded but don't actually
9736 if (resource != RLIMIT_AS &&
9737 resource != RLIMIT_DATA &&
9738 resource != RLIMIT_STACK) {
9739 return get_errno(setrlimit(resource, &rlim));
9745 #ifdef TARGET_NR_getrlimit
9746 case TARGET_NR_getrlimit:
9748 int resource = target_to_host_resource(arg1);
9749 struct target_rlimit *target_rlim;
9752 ret = get_errno(getrlimit(resource, &rlim));
9753 if (!is_error(ret)) {
9754 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9755 return -TARGET_EFAULT;
9756 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9757 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9758 unlock_user_struct(target_rlim, arg2, 1);
9763 case TARGET_NR_getrusage:
9765 struct rusage rusage;
9766 ret = get_errno(getrusage(arg1, &rusage));
9767 if (!is_error(ret)) {
9768 ret = host_to_target_rusage(arg2, &rusage);
9772 #if defined(TARGET_NR_gettimeofday)
9773 case TARGET_NR_gettimeofday:
9778 ret = get_errno(gettimeofday(&tv, &tz));
9779 if (!is_error(ret)) {
9780 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9781 return -TARGET_EFAULT;
9783 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9784 return -TARGET_EFAULT;
9790 #if defined(TARGET_NR_settimeofday)
9791 case TARGET_NR_settimeofday:
9793 struct timeval tv, *ptv = NULL;
9794 struct timezone tz, *ptz = NULL;
9797 if (copy_from_user_timeval(&tv, arg1)) {
9798 return -TARGET_EFAULT;
9804 if (copy_from_user_timezone(&tz, arg2)) {
9805 return -TARGET_EFAULT;
9810 return get_errno(settimeofday(ptv, ptz));
9813 #if defined(TARGET_NR_select)
9814 case TARGET_NR_select:
9815 #if defined(TARGET_WANT_NI_OLD_SELECT)
9816 /* some architectures used to have old_select here
9817 * but now return ENOSYS for it.
9819 ret = -TARGET_ENOSYS;
9820 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9821 ret = do_old_select(arg1);
9823 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9827 #ifdef TARGET_NR_pselect6
9828 case TARGET_NR_pselect6:
9829 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9831 #ifdef TARGET_NR_pselect6_time64
9832 case TARGET_NR_pselect6_time64:
9833 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9835 #ifdef TARGET_NR_symlink
9836 case TARGET_NR_symlink:
9839 p = lock_user_string(arg1);
9840 p2 = lock_user_string(arg2);
9842 ret = -TARGET_EFAULT;
9844 ret = get_errno(symlink(p, p2));
9845 unlock_user(p2, arg2, 0);
9846 unlock_user(p, arg1, 0);
9850 #if defined(TARGET_NR_symlinkat)
9851 case TARGET_NR_symlinkat:
9854 p = lock_user_string(arg1);
9855 p2 = lock_user_string(arg3);
9857 ret = -TARGET_EFAULT;
9859 ret = get_errno(symlinkat(p, arg2, p2));
9860 unlock_user(p2, arg3, 0);
9861 unlock_user(p, arg1, 0);
9865 #ifdef TARGET_NR_readlink
9866 case TARGET_NR_readlink:
9869 p = lock_user_string(arg1);
9870 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9872 ret = -TARGET_EFAULT;
9874 /* Short circuit this for the magic exe check. */
9875 ret = -TARGET_EINVAL;
9876 } else if (is_proc_myself((const char *)p, "exe")) {
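/* /proc/self/exe must name the *guest* binary rather than the qemu
 * interpreter itself, so the answer is synthesized from exec_path
 * instead of forwarding the readlink to the host. */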
9877 char real[PATH_MAX], *temp;
9878 temp = realpath(exec_path, real);
9879 /* Return value is # of bytes that we wrote to the buffer. */
9881 ret = get_errno(-1);
9883 /* Don't worry about sign mismatch as earlier mapping
9884 * logic would have thrown a bad address error. */
9885 ret = MIN(strlen(real), arg3);
9886 /* We cannot NUL terminate the string. */
9887 memcpy(p2, real, ret);
9890 ret = get_errno(readlink(path(p), p2, arg3));
9892 unlock_user(p2, arg2, ret);
9893 unlock_user(p, arg1, 0);
9897 #if defined(TARGET_NR_readlinkat)
9898 case TARGET_NR_readlinkat:
9901 p = lock_user_string(arg2);
9902 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9904 ret = -TARGET_EFAULT;
9905 } else if (is_proc_myself((const char *)p, "exe")) {
9906 char real[PATH_MAX], *temp;
9907 temp = realpath(exec_path, real);
9908 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
9909 snprintf((char *)p2, arg4, "%s", real);
9911 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9913 unlock_user(p2, arg3, ret);
9914 unlock_user(p, arg2, 0);
9918 #ifdef TARGET_NR_swapon
9919 case TARGET_NR_swapon:
9920 if (!(p = lock_user_string(arg1)))
9921 return -TARGET_EFAULT;
9922 ret = get_errno(swapon(p, arg2));
9923 unlock_user(p, arg1, 0);
9926 case TARGET_NR_reboot:
9927 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9928 /* arg4 must be ignored in all other cases */
9929 p = lock_user_string(arg4);
9931 return -TARGET_EFAULT;
9933 ret = get_errno(reboot(arg1, arg2, arg3, p));
9934 unlock_user(p, arg4, 0);
9936 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9939 #ifdef TARGET_NR_mmap
9940 case TARGET_NR_mmap:
9941 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9942 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9943 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9944 || defined(TARGET_S390X)
9947 abi_ulong v1, v2, v3, v4, v5, v6;
9948 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9949 return -TARGET_EFAULT;
9956 unlock_user(v, arg1, 0);
9957 ret = get_errno(target_mmap(v1, v2, v3,
9958 target_to_host_bitmask(v4, mmap_flags_tbl),
9962 /* mmap pointers are always untagged */
9963 ret = get_errno(target_mmap(arg1, arg2, arg3,
9964 target_to_host_bitmask(arg4, mmap_flags_tbl),
9970 #ifdef TARGET_NR_mmap2
9971 case TARGET_NR_mmap2:
9973 #define MMAP_SHIFT 12
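/* mmap2 takes its file offset in 4096-byte units (by default), so the
 * byte offset below is arg6 << MMAP_SHIFT; this lets a single 32-bit
 * register describe offsets beyond 4 GiB. */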
9975 ret = target_mmap(arg1, arg2, arg3,
9976 target_to_host_bitmask(arg4, mmap_flags_tbl),
9977 arg5, arg6 << MMAP_SHIFT);
9978 return get_errno(ret);
9980 case TARGET_NR_munmap:
9981 arg1 = cpu_untagged_addr(cpu, arg1);
9982 return get_errno(target_munmap(arg1, arg2));
9983 case TARGET_NR_mprotect:
9984 arg1 = cpu_untagged_addr(cpu, arg1);
9986 TaskState *ts = cpu->opaque;
9987 /* Special hack to detect libc making the stack executable. */
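/* glibc applies PROT_GROWSDOWN to the lowest stack page only and relies
 * on the kernel to extend the change to the whole grows-down mapping;
 * target_mprotect() has no such logic, so the range is widened to cover
 * the stack by hand below. */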
9988 if ((arg3 & PROT_GROWSDOWN)
9989 && arg1 >= ts->info->stack_limit
9990 && arg1 <= ts->info->start_stack) {
9991 arg3 &= ~PROT_GROWSDOWN;
9992 arg2 = arg2 + arg1 - ts->info->stack_limit;
9993 arg1 = ts->info->stack_limit;
9996 return get_errno(target_mprotect(arg1, arg2, arg3));
9997 #ifdef TARGET_NR_mremap
9998 case TARGET_NR_mremap:
9999 arg1 = cpu_untagged_addr(cpu, arg1);
10000 /* mremap new_addr (arg5) is always untagged */
10001 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10003 /* ??? msync/mlock/munlock are broken for softmmu. */
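/* These pass g2h()-translated host addresses straight to the host
 * syscall, which is only valid while guest memory is directly mapped
 * into the QEMU process, as in user-mode emulation. */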
10004 #ifdef TARGET_NR_msync
10005 case TARGET_NR_msync:
10006 return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
10008 #ifdef TARGET_NR_mlock
10009 case TARGET_NR_mlock:
10010 return get_errno(mlock(g2h(cpu, arg1), arg2));
10012 #ifdef TARGET_NR_munlock
10013 case TARGET_NR_munlock:
10014 return get_errno(munlock(g2h(cpu, arg1), arg2));
10016 #ifdef TARGET_NR_mlockall
10017 case TARGET_NR_mlockall:
10018 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10020 #ifdef TARGET_NR_munlockall
10021 case TARGET_NR_munlockall:
10022 return get_errno(munlockall());
10024 #ifdef TARGET_NR_truncate
10025 case TARGET_NR_truncate:
10026 if (!(p = lock_user_string(arg1)))
10027 return -TARGET_EFAULT;
10028 ret = get_errno(truncate(p, arg2));
10029 unlock_user(p, arg1, 0);
10032 #ifdef TARGET_NR_ftruncate
10033 case TARGET_NR_ftruncate:
10034 return get_errno(ftruncate(arg1, arg2));
10036 case TARGET_NR_fchmod:
10037 return get_errno(fchmod(arg1, arg2));
10038 #if defined(TARGET_NR_fchmodat)
10039 case TARGET_NR_fchmodat:
10040 if (!(p = lock_user_string(arg2)))
10041 return -TARGET_EFAULT;
10042 ret = get_errno(fchmodat(arg1, p, arg3, 0));
10043 unlock_user(p, arg2, 0);
10046 case TARGET_NR_getpriority:
10047 /* Note that negative values are valid for getpriority, so we must
10048 * differentiate based on errno settings. */
10050 ret = getpriority(arg1, arg2);
10051 if (ret == -1 && errno != 0) {
10052 return -host_to_target_errno(errno);
10054 #ifdef TARGET_ALPHA
10055 /* Return value is the unbiased priority. Signal no error. */
10056 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
10058 /* Return value is a biased priority to avoid negative numbers. */
10062 case TARGET_NR_setpriority:
10063 return get_errno(setpriority(arg1, arg2, arg3));
10064 #ifdef TARGET_NR_statfs
10065 case TARGET_NR_statfs:
10066 if (!(p = lock_user_string(arg1))) {
10067 return -TARGET_EFAULT;
10069 ret = get_errno(statfs(path(p), &stfs));
10070 unlock_user(p, arg1, 0);
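/* The field-by-field __put_user() stores below take care of both byte
 * order and field-width differences between the host's struct statfs
 * and the guest's target_statfs layout. */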
10072 if (!is_error(ret)) {
10073 struct target_statfs *target_stfs;
10075 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10076 return -TARGET_EFAULT;
10077 __put_user(stfs.f_type, &target_stfs->f_type);
10078 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10079 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10080 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10081 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10082 __put_user(stfs.f_files, &target_stfs->f_files);
10083 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10084 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10085 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10086 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10087 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10088 #ifdef _STATFS_F_FLAGS
10089 __put_user(stfs.f_flags, &target_stfs->f_flags);
10091 __put_user(0, &target_stfs->f_flags);
10093 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10094 unlock_user_struct(target_stfs, arg2, 1);
10098 #ifdef TARGET_NR_fstatfs
10099 case TARGET_NR_fstatfs:
10100 ret = get_errno(fstatfs(arg1, &stfs));
10101 goto convert_statfs;
10103 #ifdef TARGET_NR_statfs64
10104 case TARGET_NR_statfs64:
10105 if (!(p = lock_user_string(arg1))) {
10106 return -TARGET_EFAULT;
10108 ret = get_errno(statfs(path(p), &stfs));
10109 unlock_user(p, arg1, 0);
10111 if (!is_error(ret)) {
10112 struct target_statfs64 *target_stfs;
10114 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10115 return -TARGET_EFAULT;
10116 __put_user(stfs.f_type, &target_stfs->f_type);
10117 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10118 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10119 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10120 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10121 __put_user(stfs.f_files, &target_stfs->f_files);
10122 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10123 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10124 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10125 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10126 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10127 #ifdef _STATFS_F_FLAGS
10128 __put_user(stfs.f_flags, &target_stfs->f_flags);
10130 __put_user(0, &target_stfs->f_flags);
10132 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10133 unlock_user_struct(target_stfs, arg3, 1);
10136 case TARGET_NR_fstatfs64:
10137 ret = get_errno(fstatfs(arg1, &stfs));
10138 goto convert_statfs64;
10140 #ifdef TARGET_NR_socketcall
10141 case TARGET_NR_socketcall:
10142 return do_socketcall(arg1, arg2);
10144 #ifdef TARGET_NR_accept
10145 case TARGET_NR_accept:
10146 return do_accept4(arg1, arg2, arg3, 0);
10148 #ifdef TARGET_NR_accept4
10149 case TARGET_NR_accept4:
10150 return do_accept4(arg1, arg2, arg3, arg4);
10152 #ifdef TARGET_NR_bind
10153 case TARGET_NR_bind:
10154 return do_bind(arg1, arg2, arg3);
10156 #ifdef TARGET_NR_connect
10157 case TARGET_NR_connect:
10158 return do_connect(arg1, arg2, arg3);
10160 #ifdef TARGET_NR_getpeername
10161 case TARGET_NR_getpeername:
10162 return do_getpeername(arg1, arg2, arg3);
10164 #ifdef TARGET_NR_getsockname
10165 case TARGET_NR_getsockname:
10166 return do_getsockname(arg1, arg2, arg3);
10168 #ifdef TARGET_NR_getsockopt
10169 case TARGET_NR_getsockopt:
10170 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10172 #ifdef TARGET_NR_listen
10173 case TARGET_NR_listen:
10174 return get_errno(listen(arg1, arg2));
10176 #ifdef TARGET_NR_recv
10177 case TARGET_NR_recv:
10178 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10180 #ifdef TARGET_NR_recvfrom
10181 case TARGET_NR_recvfrom:
10182 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10184 #ifdef TARGET_NR_recvmsg
10185 case TARGET_NR_recvmsg:
10186 return do_sendrecvmsg(arg1, arg2, arg3, 0);
10188 #ifdef TARGET_NR_send
10189 case TARGET_NR_send:
10190 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10192 #ifdef TARGET_NR_sendmsg
10193 case TARGET_NR_sendmsg:
10194 return do_sendrecvmsg(arg1, arg2, arg3, 1);
10196 #ifdef TARGET_NR_sendmmsg
10197 case TARGET_NR_sendmmsg:
10198 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10200 #ifdef TARGET_NR_recvmmsg
10201 case TARGET_NR_recvmmsg:
10202 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10204 #ifdef TARGET_NR_sendto
10205 case TARGET_NR_sendto:
10206 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10208 #ifdef TARGET_NR_shutdown
10209 case TARGET_NR_shutdown:
10210 return get_errno(shutdown(arg1, arg2));
10212 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10213 case TARGET_NR_getrandom:
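/* unlock_user() is later handed ret as its length argument, so only the
 * bytes getrandom() actually wrote get copied back to guest memory. */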
10214 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10216 return -TARGET_EFAULT;
10218 ret = get_errno(getrandom(p, arg2, arg3));
10219 unlock_user(p, arg1, ret);
10222 #ifdef TARGET_NR_socket
10223 case TARGET_NR_socket:
10224 return do_socket(arg1, arg2, arg3);
10226 #ifdef TARGET_NR_socketpair
10227 case TARGET_NR_socketpair:
10228 return do_socketpair(arg1, arg2, arg3, arg4);
10230 #ifdef TARGET_NR_setsockopt
10231 case TARGET_NR_setsockopt:
10232 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10234 #if defined(TARGET_NR_syslog)
10235 case TARGET_NR_syslog:
10240 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
10241 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
10242 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
10243 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
10244 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
10245 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10246 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
10247 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
10248 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10249 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
10250 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
10251 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
10254 return -TARGET_EINVAL;
10259 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10261 return -TARGET_EFAULT;
10263 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10264 unlock_user(p, arg2, arg3);
10268 return -TARGET_EINVAL;
10273 case TARGET_NR_setitimer:
10275 struct itimerval value, ovalue, *pvalue;
10279 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10280 || copy_from_user_timeval(&pvalue->it_value,
10281 arg2 + sizeof(struct target_timeval)))
10282 return -TARGET_EFAULT;
10286 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10287 if (!is_error(ret) && arg3) {
10288 if (copy_to_user_timeval(arg3,
10289 &ovalue.it_interval)
10290 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10292 return -TARGET_EFAULT;
10296 case TARGET_NR_getitimer:
10298 struct itimerval value;
10300 ret = get_errno(getitimer(arg1, &value));
10301 if (!is_error(ret) && arg2) {
10302 if (copy_to_user_timeval(arg2,
10303 &value.it_interval)
10304 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10306 return -TARGET_EFAULT;
10310 #ifdef TARGET_NR_stat
10311 case TARGET_NR_stat:
10312 if (!(p = lock_user_string(arg1))) {
10313 return -TARGET_EFAULT;
10315 ret = get_errno(stat(path(p), &st));
10316 unlock_user(p, arg1, 0);
10319 #ifdef TARGET_NR_lstat
10320 case TARGET_NR_lstat:
10321 if (!(p = lock_user_string(arg1))) {
10322 return -TARGET_EFAULT;
10324 ret = get_errno(lstat(path(p), &st));
10325 unlock_user(p, arg1, 0);
10328 #ifdef TARGET_NR_fstat
10329 case TARGET_NR_fstat:
10331 ret = get_errno(fstat(arg1, &st));
10332 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10335 if (!is_error(ret)) {
10336 struct target_stat *target_st;
10338 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10339 return -TARGET_EFAULT;
10340 memset(target_st, 0, sizeof(*target_st));
10341 __put_user(st.st_dev, &target_st->st_dev);
10342 __put_user(st.st_ino, &target_st->st_ino);
10343 __put_user(st.st_mode, &target_st->st_mode);
10344 __put_user(st.st_uid, &target_st->st_uid);
10345 __put_user(st.st_gid, &target_st->st_gid);
10346 __put_user(st.st_nlink, &target_st->st_nlink);
10347 __put_user(st.st_rdev, &target_st->st_rdev);
10348 __put_user(st.st_size, &target_st->st_size);
10349 __put_user(st.st_blksize, &target_st->st_blksize);
10350 __put_user(st.st_blocks, &target_st->st_blocks);
10351 __put_user(st.st_atime, &target_st->target_st_atime);
10352 __put_user(st.st_mtime, &target_st->target_st_mtime);
10353 __put_user(st.st_ctime, &target_st->target_st_ctime);
10354 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10355 __put_user(st.st_atim.tv_nsec,
10356 &target_st->target_st_atime_nsec);
10357 __put_user(st.st_mtim.tv_nsec,
10358 &target_st->target_st_mtime_nsec);
10359 __put_user(st.st_ctim.tv_nsec,
10360 &target_st->target_st_ctime_nsec);
10362 unlock_user_struct(target_st, arg2, 1);
10367 case TARGET_NR_vhangup:
10368 return get_errno(vhangup());
10369 #ifdef TARGET_NR_syscall
10370 case TARGET_NR_syscall:
10371 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10372 arg6, arg7, arg8, 0);
10374 #if defined(TARGET_NR_wait4)
10375 case TARGET_NR_wait4:
10378 abi_long status_ptr = arg2;
10379 struct rusage rusage, *rusage_ptr;
10380 abi_ulong target_rusage = arg4;
10381 abi_long rusage_err;
10383 rusage_ptr = &rusage;
10386 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10387 if (!is_error(ret)) {
10388 if (status_ptr && ret) {
10389 status = host_to_target_waitstatus(status);
10390 if (put_user_s32(status, status_ptr))
10391 return -TARGET_EFAULT;
10393 if (target_rusage) {
10394 rusage_err = host_to_target_rusage(target_rusage, &rusage);
10403 #ifdef TARGET_NR_swapoff
10404 case TARGET_NR_swapoff:
10405 if (!(p = lock_user_string(arg1)))
10406 return -TARGET_EFAULT;
10407 ret = get_errno(swapoff(p));
10408 unlock_user(p, arg1, 0);
10411 case TARGET_NR_sysinfo:
10413 struct target_sysinfo *target_value;
10414 struct sysinfo value;
10415 ret = get_errno(sysinfo(&value));
10416 if (!is_error(ret) && arg1)
10418 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10419 return -TARGET_EFAULT;
10420 __put_user(value.uptime, &target_value->uptime);
10421 __put_user(value.loads[0], &target_value->loads[0]);
10422 __put_user(value.loads[1], &target_value->loads[1]);
10423 __put_user(value.loads[2], &target_value->loads[2]);
10424 __put_user(value.totalram, &target_value->totalram);
10425 __put_user(value.freeram, &target_value->freeram);
10426 __put_user(value.sharedram, &target_value->sharedram);
10427 __put_user(value.bufferram, &target_value->bufferram);
10428 __put_user(value.totalswap, &target_value->totalswap);
10429 __put_user(value.freeswap, &target_value->freeswap);
10430 __put_user(value.procs, &target_value->procs);
10431 __put_user(value.totalhigh, &target_value->totalhigh);
10432 __put_user(value.freehigh, &target_value->freehigh);
10433 __put_user(value.mem_unit, &target_value->mem_unit);
10434 unlock_user_struct(target_value, arg1, 1);
10438 #ifdef TARGET_NR_ipc
10439 case TARGET_NR_ipc:
10440 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10442 #ifdef TARGET_NR_semget
10443 case TARGET_NR_semget:
10444 return get_errno(semget(arg1, arg2, arg3));
10446 #ifdef TARGET_NR_semop
10447 case TARGET_NR_semop:
10448 return do_semtimedop(arg1, arg2, arg3, 0, false);
10450 #ifdef TARGET_NR_semtimedop
10451 case TARGET_NR_semtimedop:
10452 return do_semtimedop(arg1, arg2, arg3, arg4, false);
10454 #ifdef TARGET_NR_semtimedop_time64
10455 case TARGET_NR_semtimedop_time64:
10456 return do_semtimedop(arg1, arg2, arg3, arg4, true);
10458 #ifdef TARGET_NR_semctl
10459 case TARGET_NR_semctl:
10460 return do_semctl(arg1, arg2, arg3, arg4);
10462 #ifdef TARGET_NR_msgctl
10463 case TARGET_NR_msgctl:
10464 return do_msgctl(arg1, arg2, arg3);
10466 #ifdef TARGET_NR_msgget
10467 case TARGET_NR_msgget:
10468 return get_errno(msgget(arg1, arg2));
10470 #ifdef TARGET_NR_msgrcv
10471 case TARGET_NR_msgrcv:
10472 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10474 #ifdef TARGET_NR_msgsnd
10475 case TARGET_NR_msgsnd:
10476 return do_msgsnd(arg1, arg2, arg3, arg4);
10478 #ifdef TARGET_NR_shmget
10479 case TARGET_NR_shmget:
10480 return get_errno(shmget(arg1, arg2, arg3));
10482 #ifdef TARGET_NR_shmctl
10483 case TARGET_NR_shmctl:
10484 return do_shmctl(arg1, arg2, arg3);
10486 #ifdef TARGET_NR_shmat
10487 case TARGET_NR_shmat:
10488 return do_shmat(cpu_env, arg1, arg2, arg3);
10490 #ifdef TARGET_NR_shmdt
10491 case TARGET_NR_shmdt:
10492 return do_shmdt(arg1);
10494 case TARGET_NR_fsync:
10495 return get_errno(fsync(arg1));
10496 case TARGET_NR_clone:
10497 /* Linux manages to have three different orderings for its
10498 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10499 * match the kernel's CONFIG_CLONE_* settings.
10500 * Microblaze is further special in that it uses a sixth
10501 * implicit argument to clone for the TLS pointer.
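 * For reference, the guest register orderings handled below are:
 *   default:     (flags, newsp, parent_tidptr, child_tidptr, tls)
 *   BACKWARDS:   (flags, newsp, parent_tidptr, tls, child_tidptr)
 *   BACKWARDS2:  (newsp, flags, parent_tidptr, child_tidptr, tls)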
10503 #if defined(TARGET_MICROBLAZE)
10504 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10505 #elif defined(TARGET_CLONE_BACKWARDS)
10506 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10507 #elif defined(TARGET_CLONE_BACKWARDS2)
10508 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10510 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10513 #ifdef __NR_exit_group
10514 /* new thread calls */
10515 case TARGET_NR_exit_group:
10516 preexit_cleanup(cpu_env, arg1);
10517 return get_errno(exit_group(arg1));
10519 case TARGET_NR_setdomainname:
10520 if (!(p = lock_user_string(arg1)))
10521 return -TARGET_EFAULT;
10522 ret = get_errno(setdomainname(p, arg2));
10523 unlock_user(p, arg1, 0);
10525 case TARGET_NR_uname:
10526 /* no need to transcode because we use the linux syscall */
10528 struct new_utsname * buf;
10530 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10531 return -TARGET_EFAULT;
10532 ret = get_errno(sys_uname(buf));
10533 if (!is_error(ret)) {
10534 /* Overwrite the native machine name with whatever is being emulated. */
10536 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10537 sizeof(buf->machine));
10538 /* Allow the user to override the reported release. */
10539 if (qemu_uname_release && *qemu_uname_release) {
10540 g_strlcpy(buf->release, qemu_uname_release,
10541 sizeof(buf->release));
10544 unlock_user_struct(buf, arg1, 1);
10548 case TARGET_NR_modify_ldt:
10549 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10550 #if !defined(TARGET_X86_64)
10551 case TARGET_NR_vm86:
10552 return do_vm86(cpu_env, arg1, arg2);
10555 #if defined(TARGET_NR_adjtimex)
10556 case TARGET_NR_adjtimex:
10558 struct timex host_buf;
10560 if (target_to_host_timex(&host_buf, arg1) != 0) {
10561 return -TARGET_EFAULT;
10563 ret = get_errno(adjtimex(&host_buf));
10564 if (!is_error(ret)) {
10565 if (host_to_target_timex(arg1, &host_buf) != 0) {
10566 return -TARGET_EFAULT;
10572 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10573 case TARGET_NR_clock_adjtime:
10575 struct timex htx, *phtx = &htx;
10577 if (target_to_host_timex(phtx, arg2) != 0) {
10578 return -TARGET_EFAULT;
10580 ret = get_errno(clock_adjtime(arg1, phtx));
10581 if (!is_error(ret) && phtx) {
10582 if (host_to_target_timex(arg2, phtx) != 0) {
10583 return -TARGET_EFAULT;
10589 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10590 case TARGET_NR_clock_adjtime64:
10594 if (target_to_host_timex64(&htx, arg2) != 0) {
10595 return -TARGET_EFAULT;
10597 ret = get_errno(clock_adjtime(arg1, &htx));
10598 if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10599 return -TARGET_EFAULT;
10604 case TARGET_NR_getpgid:
10605 return get_errno(getpgid(arg1));
10606 case TARGET_NR_fchdir:
10607 return get_errno(fchdir(arg1));
10608 case TARGET_NR_personality:
10609 return get_errno(personality(arg1));
10610 #ifdef TARGET_NR__llseek /* Not on alpha */
10611 case TARGET_NR__llseek:
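/* The guest splits the 64-bit offset across two registers (arg2 high,
 * arg3 low) and receives the result through the pointer in arg4; arg5
 * is the whence value. */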
10614 #if !defined(__NR_llseek)
10615 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10617 ret = get_errno(res);
10622 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10624 if ((ret == 0) && put_user_s64(res, arg4)) {
10625 return -TARGET_EFAULT;
10630 #ifdef TARGET_NR_getdents
10631 case TARGET_NR_getdents:
10632 return do_getdents(arg1, arg2, arg3);
10633 #endif /* TARGET_NR_getdents */
10634 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10635 case TARGET_NR_getdents64:
10636 return do_getdents64(arg1, arg2, arg3);
10637 #endif /* TARGET_NR_getdents64 */
10638 #if defined(TARGET_NR__newselect)
10639 case TARGET_NR__newselect:
10640 return do_select(arg1, arg2, arg3, arg4, arg5);
10642 #ifdef TARGET_NR_poll
10643 case TARGET_NR_poll:
10644 return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10646 #ifdef TARGET_NR_ppoll
10647 case TARGET_NR_ppoll:
10648 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10650 #ifdef TARGET_NR_ppoll_time64
10651 case TARGET_NR_ppoll_time64:
10652 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10654 case TARGET_NR_flock:
10655 /* NOTE: the flock constant seems to be the same for every Linux platform. */
10657 return get_errno(safe_flock(arg1, arg2));
10658 case TARGET_NR_readv:
10660 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10662 ret = get_errno(safe_readv(arg1, vec, arg3));
10663 unlock_iovec(vec, arg2, arg3, 1);
10665 ret = -host_to_target_errno(errno);
10669 case TARGET_NR_writev:
10671 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10673 ret = get_errno(safe_writev(arg1, vec, arg3));
10674 unlock_iovec(vec, arg2, arg3, 0);
10676 ret = -host_to_target_errno(errno);
10680 #if defined(TARGET_NR_preadv)
10681 case TARGET_NR_preadv:
10683 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10685 unsigned long low, high;
10687 target_to_host_low_high(arg4, arg5, &low, &high);
10688 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10689 unlock_iovec(vec, arg2, arg3, 1);
10691 ret = -host_to_target_errno(errno);
10696 #if defined(TARGET_NR_pwritev)
10697 case TARGET_NR_pwritev:
10699 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10701 unsigned long low, high;
10703 target_to_host_low_high(arg4, arg5, &low, &high);
10704 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10705 unlock_iovec(vec, arg2, arg3, 0);
10707 ret = -host_to_target_errno(errno);
10712 case TARGET_NR_getsid:
10713 return get_errno(getsid(arg1));
10714 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10715 case TARGET_NR_fdatasync:
10716 return get_errno(fdatasync(arg1));
10718 case TARGET_NR_sched_getaffinity:
10720 unsigned int mask_size;
10721 unsigned long *mask;
10724 * sched_getaffinity needs multiples of ulong, so we need to take
10725 * care of mismatches between target ulong and host ulong sizes.
10727 if (arg2 & (sizeof(abi_ulong) - 1)) {
10728 return -TARGET_EINVAL;
10730 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
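/* i.e. arg2 rounded up to a whole number of host unsigned longs; e.g.
 * a 12-byte guest mask becomes a 16-byte host buffer when
 * sizeof(long) == 8. */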
10732 mask = alloca(mask_size);
10733 memset(mask, 0, mask_size);
10734 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10736 if (!is_error(ret)) {
10738 /* More data was returned than the caller's buffer can hold.
10739 * This only happens if sizeof(abi_long) < sizeof(long)
10740 * and the caller passed us a buffer holding an odd number
10741 * of abi_longs. If the host kernel is actually using the
10742 * extra 4 bytes then fail EINVAL; otherwise we can just
10743 * ignore them and only copy the interesting part.
10745 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10746 if (numcpus > arg2 * 8) {
10747 return -TARGET_EINVAL;
10752 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10753 return -TARGET_EFAULT;
10758 case TARGET_NR_sched_setaffinity:
10760 unsigned int mask_size;
10761 unsigned long *mask;
10764 * sched_setaffinity needs multiples of ulong, so we need to take
10765 * care of mismatches between target ulong and host ulong sizes.
10767 if (arg2 & (sizeof(abi_ulong) - 1)) {
10768 return -TARGET_EINVAL;
10770 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10771 mask = alloca(mask_size);
10773 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10778 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10780 case TARGET_NR_getcpu:
10782 unsigned cpu, node;
10783 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10784 arg2 ? &node : NULL,
10786 if (is_error(ret)) {
10789 if (arg1 && put_user_u32(cpu, arg1)) {
10790 return -TARGET_EFAULT;
10792 if (arg2 && put_user_u32(node, arg2)) {
10793 return -TARGET_EFAULT;
10797 case TARGET_NR_sched_setparam:
10799 struct target_sched_param *target_schp;
10800 struct sched_param schp;
10803 return -TARGET_EINVAL;
10805 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10806 return -TARGET_EFAULT;
10808 schp.sched_priority = tswap32(target_schp->sched_priority);
10809 unlock_user_struct(target_schp, arg2, 0);
10810 return get_errno(sys_sched_setparam(arg1, &schp));
10812 case TARGET_NR_sched_getparam:
10814 struct target_sched_param *target_schp;
10815 struct sched_param schp;
10818 return -TARGET_EINVAL;
10820 ret = get_errno(sys_sched_getparam(arg1, &schp));
10821 if (!is_error(ret)) {
10822 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10823 return -TARGET_EFAULT;
10825 target_schp->sched_priority = tswap32(schp.sched_priority);
10826 unlock_user_struct(target_schp, arg2, 1);
10830 case TARGET_NR_sched_setscheduler:
10832 struct target_sched_param *target_schp;
10833 struct sched_param schp;
10835 return -TARGET_EINVAL;
10837 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10838 return -TARGET_EFAULT;
10840 schp.sched_priority = tswap32(target_schp->sched_priority);
10841 unlock_user_struct(target_schp, arg3, 0);
10842 return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10844 case TARGET_NR_sched_getscheduler:
10845 return get_errno(sys_sched_getscheduler(arg1));
10846 case TARGET_NR_sched_getattr:
10848 struct target_sched_attr *target_scha;
10849 struct sched_attr scha;
10851 return -TARGET_EINVAL;
10853 if (arg3 > sizeof(scha)) {
10854 arg3 = sizeof(scha);
10856 ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10857 if (!is_error(ret)) {
10858 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10859 if (!target_scha) {
10860 return -TARGET_EFAULT;
10862 target_scha->size = tswap32(scha.size);
10863 target_scha->sched_policy = tswap32(scha.sched_policy);
10864 target_scha->sched_flags = tswap64(scha.sched_flags);
10865 target_scha->sched_nice = tswap32(scha.sched_nice);
10866 target_scha->sched_priority = tswap32(scha.sched_priority);
10867 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10868 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10869 target_scha->sched_period = tswap64(scha.sched_period);
10870 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10871 target_scha->sched_util_min = tswap32(scha.sched_util_min);
10872 target_scha->sched_util_max = tswap32(scha.sched_util_max);
10874 unlock_user(target_scha, arg2, arg3);
10878 case TARGET_NR_sched_setattr:
10880 struct target_sched_attr *target_scha;
10881 struct sched_attr scha;
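/* The guest announces its structure size up front: zero means the
 * version-0 layout, undersized structs are rejected, and oversized ones
 * are accepted only if every extra byte is zero (E2BIG otherwise),
 * mirroring the kernel's contract. */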
10885 return -TARGET_EINVAL;
10887 if (get_user_u32(size, arg2)) {
10888 return -TARGET_EFAULT;
10891 size = offsetof(struct target_sched_attr, sched_util_min);
10893 if (size < offsetof(struct target_sched_attr, sched_util_min)) {
10894 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10895 return -TARGET_EFAULT;
10897 return -TARGET_E2BIG;
10900 zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
10903 } else if (zeroed == 0) {
10904 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10905 return -TARGET_EFAULT;
10907 return -TARGET_E2BIG;
10909 if (size > sizeof(struct target_sched_attr)) {
10910 size = sizeof(struct target_sched_attr);
10913 target_scha = lock_user(VERIFY_READ, arg2, size, 1);
10914 if (!target_scha) {
10915 return -TARGET_EFAULT;
10918 scha.sched_policy = tswap32(target_scha->sched_policy);
10919 scha.sched_flags = tswap64(target_scha->sched_flags);
10920 scha.sched_nice = tswap32(target_scha->sched_nice);
10921 scha.sched_priority = tswap32(target_scha->sched_priority);
10922 scha.sched_runtime = tswap64(target_scha->sched_runtime);
10923 scha.sched_deadline = tswap64(target_scha->sched_deadline);
10924 scha.sched_period = tswap64(target_scha->sched_period);
10925 if (size > offsetof(struct target_sched_attr, sched_util_min)) {
10926 scha.sched_util_min = tswap32(target_scha->sched_util_min);
10927 scha.sched_util_max = tswap32(target_scha->sched_util_max);
10929 unlock_user(target_scha, arg2, 0);
10930 return get_errno(sys_sched_setattr(arg1, &scha, arg3));
10932 case TARGET_NR_sched_yield:
10933 return get_errno(sched_yield());
10934 case TARGET_NR_sched_get_priority_max:
10935 return get_errno(sched_get_priority_max(arg1));
10936 case TARGET_NR_sched_get_priority_min:
10937 return get_errno(sched_get_priority_min(arg1));
10938 #ifdef TARGET_NR_sched_rr_get_interval
10939 case TARGET_NR_sched_rr_get_interval:
10941 struct timespec ts;
10942 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10943 if (!is_error(ret)) {
10944 ret = host_to_target_timespec(arg2, &ts);
10949 #ifdef TARGET_NR_sched_rr_get_interval_time64
10950 case TARGET_NR_sched_rr_get_interval_time64:
10952 struct timespec ts;
10953 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10954 if (!is_error(ret)) {
10955 ret = host_to_target_timespec64(arg2, &ts);
10960 #if defined(TARGET_NR_nanosleep)
10961 case TARGET_NR_nanosleep:
10963 struct timespec req, rem;
10964 if (target_to_host_timespec(&req, arg1)) {
          return -TARGET_EFAULT;
      }
10965 ret = get_errno(safe_nanosleep(&req, &rem));
10966 if (is_error(ret) && arg2) {
10967 host_to_target_timespec(arg2, &rem);
10972 case TARGET_NR_prctl:
10973 return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
10975 #ifdef TARGET_NR_arch_prctl
10976 case TARGET_NR_arch_prctl:
10977 return do_arch_prctl(cpu_env, arg1, arg2);
10979 #ifdef TARGET_NR_pread64
10980 case TARGET_NR_pread64:
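/* On ABIs that require 64-bit arguments to start in an even register
 * pair, regpairs_aligned() indicates that a padding register was
 * inserted, shifting the offset halves up by one argument slot. */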
10981 if (regpairs_aligned(cpu_env, num)) {
10985 if (arg2 == 0 && arg3 == 0) {
10986 /* Special-case NULL buffer and zero length, which should succeed */
10989 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10991 return -TARGET_EFAULT;
10994 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10995 unlock_user(p, arg2, ret);
10997 case TARGET_NR_pwrite64:
10998 if (regpairs_aligned(cpu_env, num)) {
11002 if (arg2 == 0 && arg3 == 0) {
11003 /* Special-case NULL buffer and zero length, which should succeed */
11006 p = lock_user(VERIFY_READ, arg2, arg3, 1);
11008 return -TARGET_EFAULT;
11011 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11012 unlock_user(p, arg2, 0);
11015 case TARGET_NR_getcwd:
11016 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11017 return -TARGET_EFAULT;
11018 ret = get_errno(sys_getcwd1(p, arg2));
11019 unlock_user(p, arg1, ret);
11021 case TARGET_NR_capget:
11022 case TARGET_NR_capset:
11024 struct target_user_cap_header *target_header;
11025 struct target_user_cap_data *target_data = NULL;
11026 struct __user_cap_header_struct header;
11027 struct __user_cap_data_struct data[2];
11028 struct __user_cap_data_struct *dataptr = NULL;
11029 int i, target_datalen;
11030 int data_items = 1;
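/* _LINUX_CAPABILITY_VERSION (v1) uses a single 32-bit data struct;
 * later versions pass an array of two. */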
11032 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11033 return -TARGET_EFAULT;
11035 header.version = tswap32(target_header->version);
11036 header.pid = tswap32(target_header->pid);
11038 if (header.version != _LINUX_CAPABILITY_VERSION) {
11039 /* Version 2 and up takes pointer to two user_data structs */
11043 target_datalen = sizeof(*target_data) * data_items;
11046 if (num == TARGET_NR_capget) {
11047 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11049 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11051 if (!target_data) {
11052 unlock_user_struct(target_header, arg1, 0);
11053 return -TARGET_EFAULT;
11056 if (num == TARGET_NR_capset) {
11057 for (i = 0; i < data_items; i++) {
11058 data[i].effective = tswap32(target_data[i].effective);
11059 data[i].permitted = tswap32(target_data[i].permitted);
11060 data[i].inheritable = tswap32(target_data[i].inheritable);
11067 if (num == TARGET_NR_capget) {
11068 ret = get_errno(capget(&header, dataptr));
11070 ret = get_errno(capset(&header, dataptr));
11073 /* The kernel always updates version for both capget and capset */
11074 target_header->version = tswap32(header.version);
11075 unlock_user_struct(target_header, arg1, 1);
11078 if (num == TARGET_NR_capget) {
11079 for (i = 0; i < data_items; i++) {
11080 target_data[i].effective = tswap32(data[i].effective);
11081 target_data[i].permitted = tswap32(data[i].permitted);
11082 target_data[i].inheritable = tswap32(data[i].inheritable);
11084 unlock_user(target_data, arg2, target_datalen);
11086 unlock_user(target_data, arg2, 0);
11091 case TARGET_NR_sigaltstack:
11092 return do_sigaltstack(arg1, arg2, cpu_env);
11094 #ifdef CONFIG_SENDFILE
11095 #ifdef TARGET_NR_sendfile
11096 case TARGET_NR_sendfile:
11098 off_t *offp = NULL;
11101 ret = get_user_sal(off, arg3);
11102 if (is_error(ret)) {
11107 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11108 if (!is_error(ret) && arg3) {
11109 abi_long ret2 = put_user_sal(off, arg3);
11110 if (is_error(ret2)) {
11117 #ifdef TARGET_NR_sendfile64
11118 case TARGET_NR_sendfile64:
11120 off_t *offp = NULL;
11123 ret = get_user_s64(off, arg3);
11124 if (is_error(ret)) {
11129 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11130 if (!is_error(ret) && arg3) {
11131 abi_long ret2 = put_user_s64(off, arg3);
11132 if (is_error(ret2)) {
11140 #ifdef TARGET_NR_vfork
11141 case TARGET_NR_vfork:
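/* vfork is emulated as clone(CLONE_VFORK | CLONE_VM | SIGCHLD) with a
 * NULL child stack, letting do_fork() reuse the common fork path. */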
11142 return get_errno(do_fork(cpu_env,
11143 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11146 #ifdef TARGET_NR_ugetrlimit
11147 case TARGET_NR_ugetrlimit:
11149 struct rlimit rlim;
11150 int resource = target_to_host_resource(arg1);
11151 ret = get_errno(getrlimit(resource, &rlim));
11152 if (!is_error(ret)) {
11153 struct target_rlimit *target_rlim;
11154 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11155 return -TARGET_EFAULT;
11156 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11157 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11158 unlock_user_struct(target_rlim, arg2, 1);
11163 #ifdef TARGET_NR_truncate64
11164 case TARGET_NR_truncate64:
11165 if (!(p = lock_user_string(arg1)))
11166 return -TARGET_EFAULT;
11167 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11168 unlock_user(p, arg1, 0);
11171 #ifdef TARGET_NR_ftruncate64
11172 case TARGET_NR_ftruncate64:
11173 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11175 #ifdef TARGET_NR_stat64
11176 case TARGET_NR_stat64:
11177 if (!(p = lock_user_string(arg1))) {
11178 return -TARGET_EFAULT;
11180 ret = get_errno(stat(path(p), &st));
11181 unlock_user(p, arg1, 0);
11182 if (!is_error(ret))
11183 ret = host_to_target_stat64(cpu_env, arg2, &st);
11186 #ifdef TARGET_NR_lstat64
11187 case TARGET_NR_lstat64:
11188 if (!(p = lock_user_string(arg1))) {
11189 return -TARGET_EFAULT;
11191 ret = get_errno(lstat(path(p), &st));
11192 unlock_user(p, arg1, 0);
11193 if (!is_error(ret))
11194 ret = host_to_target_stat64(cpu_env, arg2, &st);
11197 #ifdef TARGET_NR_fstat64
11198 case TARGET_NR_fstat64:
11199 ret = get_errno(fstat(arg1, &st));
11200 if (!is_error(ret))
11201 ret = host_to_target_stat64(cpu_env, arg2, &st);
11204 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11205 #ifdef TARGET_NR_fstatat64
11206 case TARGET_NR_fstatat64:
11208 #ifdef TARGET_NR_newfstatat
11209 case TARGET_NR_newfstatat:
11211 if (!(p = lock_user_string(arg2))) {
11212 return -TARGET_EFAULT;
11214 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11215 unlock_user(p, arg2, 0);
11216 if (!is_error(ret))
11217 ret = host_to_target_stat64(cpu_env, arg3, &st);
11220 #if defined(TARGET_NR_statx)
11221 case TARGET_NR_statx:
11223 struct target_statx *target_stx;
11227 p = lock_user_string(arg2);
11229 return -TARGET_EFAULT;
11231 #if defined(__NR_statx)
11234 * It is assumed that struct statx is architecture independent.
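 * (If the host kernel lacks statx, the fallback further below emulates
 * it with fstatat() and synthesizes the stx_* fields from struct stat.)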
11236 struct target_statx host_stx;
11239 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11240 if (!is_error(ret)) {
11241 if (host_to_target_statx(&host_stx, arg5) != 0) {
11242 unlock_user(p, arg2, 0);
11243 return -TARGET_EFAULT;
11247 if (ret != -TARGET_ENOSYS) {
11248 unlock_user(p, arg2, 0);
11253 ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11254 unlock_user(p, arg2, 0);
11256 if (!is_error(ret)) {
11257 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11258 return -TARGET_EFAULT;
11260 memset(target_stx, 0, sizeof(*target_stx));
11261 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11262 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11263 __put_user(st.st_ino, &target_stx->stx_ino);
11264 __put_user(st.st_mode, &target_stx->stx_mode);
11265 __put_user(st.st_uid, &target_stx->stx_uid);
11266 __put_user(st.st_gid, &target_stx->stx_gid);
11267 __put_user(st.st_nlink, &target_stx->stx_nlink);
11268 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11269 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11270 __put_user(st.st_size, &target_stx->stx_size);
11271 __put_user(st.st_blksize, &target_stx->stx_blksize);
11272 __put_user(st.st_blocks, &target_stx->stx_blocks);
11273 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11274 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11275 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11276 unlock_user_struct(target_stx, arg5, 1);
11281 #ifdef TARGET_NR_lchown
11282 case TARGET_NR_lchown:
11283 if (!(p = lock_user_string(arg1)))
11284 return -TARGET_EFAULT;
11285 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11286 unlock_user(p, arg1, 0);
11289 #ifdef TARGET_NR_getuid
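/* On targets with legacy 16-bit IDs, high2lowuid()/high2lowgid() clamp
 * host values above 65535 to the overflow ID (65534) so they still fit
 * the old ABI. */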
11290 case TARGET_NR_getuid:
11291 return get_errno(high2lowuid(getuid()));
11293 #ifdef TARGET_NR_getgid
11294 case TARGET_NR_getgid:
11295 return get_errno(high2lowgid(getgid()));
11297 #ifdef TARGET_NR_geteuid
11298 case TARGET_NR_geteuid:
11299 return get_errno(high2lowuid(geteuid()));
11301 #ifdef TARGET_NR_getegid
11302 case TARGET_NR_getegid:
11303 return get_errno(high2lowgid(getegid()));
11305 case TARGET_NR_setreuid:
11306 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11307 case TARGET_NR_setregid:
11308 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11309 case TARGET_NR_getgroups:
11311 int gidsetsize = arg1;
11312 target_id *target_grouplist;
11316 grouplist = alloca(gidsetsize * sizeof(gid_t));
11317 ret = get_errno(getgroups(gidsetsize, grouplist));
11318 if (gidsetsize == 0)
11320 if (!is_error(ret)) {
11321 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11322 if (!target_grouplist)
11323 return -TARGET_EFAULT;
11324 for(i = 0;i < ret; i++)
11325 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11326 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11330 case TARGET_NR_setgroups:
11332 int gidsetsize = arg1;
11333 target_id *target_grouplist;
11334 gid_t *grouplist = NULL;
11337 grouplist = alloca(gidsetsize * sizeof(gid_t));
11338 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11339 if (!target_grouplist) {
11340 return -TARGET_EFAULT;
11342 for (i = 0; i < gidsetsize; i++) {
11343 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11345 unlock_user(target_grouplist, arg2, 0);
11347 return get_errno(setgroups(gidsetsize, grouplist));
11349 case TARGET_NR_fchown:
11350 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11351 #if defined(TARGET_NR_fchownat)
11352 case TARGET_NR_fchownat:
11353 if (!(p = lock_user_string(arg2)))
11354 return -TARGET_EFAULT;
11355 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11356 low2highgid(arg4), arg5));
11357 unlock_user(p, arg2, 0);
11360 #ifdef TARGET_NR_setresuid
11361 case TARGET_NR_setresuid:
11362 return get_errno(sys_setresuid(low2highuid(arg1),
11364 low2highuid(arg3)));
11366 #ifdef TARGET_NR_getresuid
11367 case TARGET_NR_getresuid:
11369 uid_t ruid, euid, suid;
11370 ret = get_errno(getresuid(&ruid, &euid, &suid));
11371 if (!is_error(ret)) {
11372 if (put_user_id(high2lowuid(ruid), arg1)
11373 || put_user_id(high2lowuid(euid), arg2)
11374 || put_user_id(high2lowuid(suid), arg3))
11375 return -TARGET_EFAULT;
11380 #ifdef TARGET_NR_getresgid
11381 case TARGET_NR_setresgid:
11382 return get_errno(sys_setresgid(low2highgid(arg1),
11384 low2highgid(arg3)));
11386 #ifdef TARGET_NR_getresgid
11387 case TARGET_NR_getresgid:
11389 gid_t rgid, egid, sgid;
11390 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11391 if (!is_error(ret)) {
11392 if (put_user_id(high2lowgid(rgid), arg1)
11393 || put_user_id(high2lowgid(egid), arg2)
11394 || put_user_id(high2lowgid(sgid), arg3))
11395 return -TARGET_EFAULT;
11400 #ifdef TARGET_NR_chown
11401 case TARGET_NR_chown:
11402 if (!(p = lock_user_string(arg1)))
11403 return -TARGET_EFAULT;
11404 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11405 unlock_user(p, arg1, 0);
11408 case TARGET_NR_setuid:
11409 return get_errno(sys_setuid(low2highuid(arg1)));
11410 case TARGET_NR_setgid:
11411 return get_errno(sys_setgid(low2highgid(arg1)));
11412 case TARGET_NR_setfsuid:
11413 return get_errno(setfsuid(arg1));
11414 case TARGET_NR_setfsgid:
11415 return get_errno(setfsgid(arg1));
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            gid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;

                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64(swcr, arg2))
                    return -TARGET_EFAULT;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64(swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                ((CPUAlphaState *)cpu_env)->swcr
                    = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= ((CPUArchState *)cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            for (i = 0; i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        return -host_to_target_errno(ret);
#endif
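    /*
     * Illustrative sketch (an assumption about target_offset64(), which is
     * defined elsewhere in this file): on a 32-bit ABI a 64-bit offset
     * arrives as two consecutive 32-bit registers, and the handler above
     * reassembles it roughly as
     *
     *   uint64_t off = ((uint64_t)hi << 32) | lo;
     *
     * with which register holds the high half depending on the target's
     * endianness and register-pair alignment rules.
     */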
#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        return 0;
#endif
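    /*
     * Illustrative guest pattern affected by the caveat above (a sketch,
     * not QEMU code): a guest that dirties a private file-backed mapping
     * and then expects MADV_DONTNEED to restore the file contents would
     * not see that happen here, since the hint is swallowed:
     *
     *   char *m = mmap(NULL, len, PROT_READ | PROT_WRITE,
     *                  MAP_PRIVATE, fd, 0);
     *   m[0] = 1;
     *   madvise(m, len, MADV_DONTNEED);   // silently ignored above
     */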
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
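    /*
     * Illustrative guest-side sketch (not part of QEMU): a 32-bit guest
     * taking a 64-bit whole-file write lock via the raw fcntl64 syscall
     * goes through the copyfrom/copyto conversions above, roughly:
     *
     *   struct flock64 fl = {
     *       .l_type = F_WRLCK, .l_whence = SEEK_SET,
     *       .l_start = 0, .l_len = 0,            // 0 len == whole file
     *   };
     *   fcntl(fd, F_SETLK64, &fl);
     */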
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *)cpu_env)->active_tc.CP0_UserLocal = arg1;
        return 0;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *)cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            ts->tp_value = arg1;
            return 0;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * If the call is interrupted by a signal handler, it fails with
         * error -TARGET_EINTR and, if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, the remaining unslept time is returned in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
            return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
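    /*
     * Illustrative guest-side sketch (not part of QEMU) of the EINTR
     * contract implemented above: for a relative sleep the remainder is
     * written back, so a guest can resume where it left off:
     *
     *   struct timespec req = { .tv_sec = 5 }, rem;
     *   while (clock_nanosleep(CLOCK_MONOTONIC, 0, &req, &rem) == EINTR) {
     *       req = rem;   // continue with the unslept remainder
     *   }
     */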
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
#endif

    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                                     target_to_host_signal(arg3)));
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
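    /*
     * For reference (a sketch, not QEMU code): a guest C library typically
     * registers its robust list once per thread, roughly as
     *
     *   struct robust_list_head head;
     *   syscall(SYS_set_robust_list, &head, sizeof(head));
     *
     * and it is that registration which is deliberately refused here so
     * the library falls back to non-robust mutexes.
     */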
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                     sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef CONFIG_INOTIFY
#if defined(TARGET_NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(inotify_rm_watch(arg1, arg2));
#endif
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user(p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        return ret;
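    /*
     * Illustrative guest-side sketch (not part of QEMU; the queue name is
     * hypothetical): the attr pointer is optional, matching the
     * pposix_mq_attr handling above:
     *
     *   mqd_t q = mq_open("/qemu-example", O_CREAT | O_RDWR, 0600, NULL);
     *   ...
     *   mq_close(q);
     *   mq_unlink("/qemu-example");
     */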
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            /* the received message is written into this buffer, so it
             * must be mapped writable */
            p = lock_user(VERIFY_WRITE, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_WRITE, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif
        /* Not implemented for now... */
        /* case TARGET_NR_mq_notify: */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                           &posix_mq_attr_out));
            } else if (arg3 != 0) {
                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
            }
            if (ret == 0 && arg3 != 0) {
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
        }
        return ret;
#endif
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));

        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
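    /*
     * Illustrative guest-side sketch (not part of QEMU): whatever the
     * guest stores in epoll_data_t is byte-swapped as one opaque 64-bit
     * value above and comes back unchanged from epoll_wait():
     *
     *   struct epoll_event ev = { .events = EPOLLIN, .data.fd = fd };
     *   epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
     */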
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
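    /*
     * Illustrative guest-side sketch (not part of QEMU): pid 0 means the
     * calling process; passing both a new limit and a non-NULL old pointer
     * exercises both conversion paths above:
     *
     *   struct rlimit64 newl = { .rlim_cur = 1024, .rlim_max = 1024 };
     *   struct rlimit64 oldl;
     *   prlimit64(0, RLIMIT_NOFILE, &newl, &oldl);
     */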
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
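    /*
     * Note on the handle encoding above (TIMER_MAGIC | timer_index): the
     * guest never sees a host timer_t; it gets a small tagged index that
     * get_timer_id() later validates and maps back into g_posix_timers[].
     * Illustrative guest-side sketch (not part of QEMU):
     *
     *   timer_t t;
     *   timer_create(CLOCK_MONOTONIC, NULL, &t);  // t holds TIMER_MAGIC|idx
     */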
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        return get_errno(timerfd_create(arg1,
                          target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific.  */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
    case TARGET_NR_copy_file_range:
        {
            loff_t inoff, outoff;
            loff_t *pinoff = NULL, *poutoff = NULL;

            if (arg2) {
                if (get_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
                pinoff = &inoff;
            }
            if (arg4) {
                if (get_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
                poutoff = &outoff;
            }
            /* Do not sign-extend the count parameter. */
            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                                 (abi_ulong)arg5, arg6));
            if (!is_error(ret) && ret > 0) {
                if (arg2) {
                    if (put_user_u64(inoff, arg2)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (arg4) {
                    if (put_user_u64(outoff, arg4)) {
                        return -TARGET_EFAULT;
                    }
                }
            }
        }
        return ret;
#endif
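    /*
     * Illustrative guest-side sketch (not part of QEMU): NULL offset
     * pointers use and advance the file descriptors' own positions,
     * matching the pinoff/poutoff handling above:
     *
     *   ssize_t n = copy_file_range(in_fd, NULL, out_fd, NULL, len, 0);
     */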
#if defined(TARGET_NR_pivot_root)
    case TARGET_NR_pivot_root:
        {
            void *p2;
            p = lock_user_string(arg1); /* new_root */
            p2 = lock_user_string(arg2); /* put_old */
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(pivot_root(p, p2));
            }
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -QEMU_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(cpu_env, num, ret, arg1, arg2,
                          arg3, arg4, arg5, arg6);
    }

    record_syscall_return(cpu, num, ret);
    return ret;
}
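/*
 * Usage note: the LOG_STRACE paths above back the user-visible "-strace"
 * option of the user-mode emulators, e.g.
 *
 *   qemu-aarch64 -strace ./guest-binary
 *
 * which prints each syscall and its return value via print_syscall() and
 * print_syscall_ret().
 */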