4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
62 #include <sys/timerfd.h>
65 #include <sys/eventfd.h>
68 #include <sys/epoll.h>
71 #include "qemu/xattr.h"
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/cdrom.h>
87 #include <linux/hdreg.h>
88 #include <linux/soundcard.h>
90 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
97 #if defined(CONFIG_USBFS)
98 #include <linux/usbdevice_fs.h>
99 #include <linux/usb/ch9.h>
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #include <linux/if_alg.h>
110 #include "linux_loop.h"
114 #include "qemu/guest-random.h"
115 #include "qapi/error.h"
116 #include "fd-trans.h"
/*
 * Guest clone(2) flag classification used by do_fork(): flag sets that are
 * required for thread creation, ignorable, optionally emulatable, or invalid.
 * NOTE(review): this is an elided extraction — original source lines are
 * missing between the numbered fragments below (e.g. closing comment lines).
 */
119 #define CLONE_IO 0x80000000 /* Clone io context */
122 /* We can't directly call the host clone syscall, because this will
123 * badly confuse libc (breaking mutexes, for example). So we must
124 * divide clone flags into:
125 * * flag combinations that look like pthread_create()
126 * * flag combinations that look like fork()
127 * * flags we can implement within QEMU itself
128 * * flags we can't support and will return an error for
130 /* For thread creation, all these flags must be present; for
131 * fork, none must be present.
133 #define CLONE_THREAD_FLAGS \
134 (CLONE_VM | CLONE_FS | CLONE_FILES | \
135 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
137 /* These flags are ignored:
138 * CLONE_DETACHED is now ignored by the kernel;
139 * CLONE_IO is just an optimisation hint to the I/O scheduler
141 #define CLONE_IGNORED_FLAGS \
142 (CLONE_DETACHED | CLONE_IO)
144 /* Flags for fork which we can implement within QEMU itself */
145 #define CLONE_OPTIONAL_FORK_FLAGS \
146 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
147 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
149 /* Flags for thread creation which we can implement within QEMU itself */
150 #define CLONE_OPTIONAL_THREAD_FLAGS \
151 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
152 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
/* Invalid masks are the complement of everything we accept (CSIGNAL is the
 * low byte carrying the exit signal number, always permitted). */
154 #define CLONE_INVALID_FORK_FLAGS \
155 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
157 #define CLONE_INVALID_THREAD_FLAGS \
158 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
159 CLONE_IGNORED_FLAGS))
161 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
162 * have almost all been allocated. We cannot support any of
163 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
164 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
165 * The checks against the invalid thread masks above will catch these.
166 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
169 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
170 * once. This exercises the codepaths for restart.
172 //#define DEBUG_ERESTARTSYS
/* VFAT readdir ioctls, defined locally instead of pulling in msdos_fs.h. */
174 //#include <linux/msdos_fs.h>
175 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
176 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/*
 * _syscallN(type, name, typeK, argK...): expand to a static function "name"
 * taking N typed arguments and invoking the raw host syscall __NR_##name via
 * syscall(2), bypassing any libc wrapper.
 * NOTE(review): the macros' opening/closing brace lines are elided in this
 * extraction; only the signature and syscall() lines are visible.
 */
186 #define _syscall0(type,name) \
187 static type name (void) \
189 return syscall(__NR_##name); \
192 #define _syscall1(type,name,type1,arg1) \
193 static type name (type1 arg1) \
195 return syscall(__NR_##name, arg1); \
198 #define _syscall2(type,name,type1,arg1,type2,arg2) \
199 static type name (type1 arg1,type2 arg2) \
201 return syscall(__NR_##name, arg1, arg2); \
204 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
205 static type name (type1 arg1,type2 arg2,type3 arg3) \
207 return syscall(__NR_##name, arg1, arg2, arg3); \
210 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
211 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
213 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
216 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
218 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
220 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
224 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
225 type5,arg5,type6,arg6) \
226 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
229 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/*
 * Aliases so the _syscallN macros can define sys_* wrappers whose names do
 * not collide with the libc functions of the same (unprefixed) name.
 */
233 #define __NR_sys_uname __NR_uname
234 #define __NR_sys_getcwd1 __NR_getcwd
235 #define __NR_sys_getdents __NR_getdents
236 #define __NR_sys_getdents64 __NR_getdents64
237 #define __NR_sys_getpriority __NR_getpriority
238 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
239 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
240 #define __NR_sys_syslog __NR_syslog
241 #define __NR_sys_futex __NR_futex
242 #define __NR_sys_inotify_init __NR_inotify_init
243 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
244 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
245 #define __NR_sys_statx __NR_statx
/* These 64-bit hosts only provide lseek; map the _llseek name onto it. */
247 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
248 #define __NR__llseek __NR_lseek
251 /* Newer kernel ports have llseek() instead of _llseek() */
252 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
253 #define TARGET_NR__llseek TARGET_NR_llseek
/*
 * Raw host syscall wrapper instantiations (via the _syscallN macros above),
 * each guarded so it is only emitted when both the guest exposes the syscall
 * (TARGET_NR_*) and the host kernel provides it (__NR_*).
 * NOTE(review): many #endif and trailing-argument lines are elided in this
 * extraction — the numbering gaps mark the missing lines.
 */
256 #define __NR_sys_gettid __NR_gettid
257 _syscall0(int, sys_gettid)
259 /* For the 64-bit guest on 32-bit host case we must emulate
260 * getdents using getdents64, because otherwise the host
261 * might hand us back more dirent records than we can fit
262 * into the guest buffer after structure format conversion.
263 * Otherwise we emulate getdents with getdents if the host has it.
265 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
266 #define EMULATE_GETDENTS_WITH_GETDENTS
269 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
270 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
272 #if (defined(TARGET_NR_getdents) && \
273 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
274 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
275 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
277 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
278 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
279 loff_t *, res, uint, wh);
281 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
282 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
284 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
285 #ifdef __NR_exit_group
286 _syscall1(int,exit_group,int,error_code)
288 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
289 _syscall1(int,set_tid_address,int *,tidptr)
291 #if defined(TARGET_NR_futex) && defined(__NR_futex)
292 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
293 const struct timespec *,timeout,int *,uaddr2,int,val3)
295 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
296 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
297 unsigned long *, user_mask_ptr);
298 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
299 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
300 unsigned long *, user_mask_ptr);
301 #define __NR_sys_getcpu __NR_getcpu
302 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
303 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
305 _syscall2(int, capget, struct __user_cap_header_struct *, header,
306 struct __user_cap_data_struct *, data);
307 _syscall2(int, capset, struct __user_cap_header_struct *, header,
308 struct __user_cap_data_struct *, data);
309 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
310 _syscall2(int, ioprio_get, int, which, int, who)
312 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
313 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
315 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
316 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
319 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
320 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
321 unsigned long, idx1, unsigned long, idx2)
325 * It is assumed that struct statx is architecture independent.
327 #if defined(TARGET_NR_statx) && defined(__NR_statx)
328 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
329 unsigned int, mask, struct target_statx *, statxbuf)
/*
 * Bidirectional translation table between target and host open(2)/fcntl(2)
 * file status flags: each row is { target_mask, target_bits, host_mask,
 * host_bits }. Flags that may not exist on every host are #ifdef-guarded.
 * NOTE(review): the closing "};" and some #endif lines are elided here.
 */
332 static bitmask_transtbl fcntl_flags_tbl[] = {
333 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
334 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
335 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
336 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
337 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
338 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
339 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
340 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
341 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
342 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
343 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
344 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
345 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
346 #if defined(O_DIRECT)
347 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
349 #if defined(O_NOATIME)
350 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
352 #if defined(O_CLOEXEC)
353 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
356 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
358 #if defined(O_TMPFILE)
359 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
361 /* Don't terminate the list prematurely on 64-bit host+guest. */
362 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
363 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
/*
 * sys_getcwd1: getcwd(2)-style helper returning the string length including
 * the trailing NUL on success. On failure getcwd() sets errno; the error
 * return statement is elided in this extraction (presumably returns -1 —
 * TODO confirm against the full source).
 */
368 static int sys_getcwd1(char *buf, size_t size)
370 if (getcwd(buf, size) == NULL) {
371 /* getcwd() sets errno */
374 return strlen(buf)+1;
/*
 * utimensat/renameat2 host wrappers. When the host kernel provides the
 * syscall, a raw wrapper is generated; otherwise a fallback function is
 * defined (renameat2 falls back to plain renameat when flags permit).
 * NOTE(review): the #else branches' error-handling lines are elided here.
 */
377 #ifdef TARGET_NR_utimensat
378 #if defined(__NR_utimensat)
379 #define __NR_sys_utimensat __NR_utimensat
380 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
381 const struct timespec *,tsp,int,flags)
383 static int sys_utimensat(int dirfd, const char *pathname,
384 const struct timespec times[2], int flags)
390 #endif /* TARGET_NR_utimensat */
392 #ifdef TARGET_NR_renameat2
393 #if defined(__NR_renameat2)
394 #define __NR_sys_renameat2 __NR_renameat2
395 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
396 const char *, new, unsigned int, flags)
398 static int sys_renameat2(int oldfd, const char *old,
399 int newfd, const char *new, int flags)
/* Fallback path: delegate to renameat(2), which ignores the flags arg. */
402 return renameat(oldfd, old, newfd, new);
408 #endif /* TARGET_NR_renameat2 */
/*
 * inotify wrappers: thin pass-throughs to the libc inotify functions,
 * emitted only when both guest and host support each call. Without
 * CONFIG_INOTIFY the TARGET_NR_* defines are removed so the syscall
 * dispatcher reports ENOSYS ("userspace can usually survive" without it).
 * NOTE(review): function brace lines and #endif lines are elided here.
 */
410 #ifdef CONFIG_INOTIFY
411 #include <sys/inotify.h>
413 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
414 static int sys_inotify_init(void)
416 return (inotify_init());
419 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
420 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
422 return (inotify_add_watch(fd, pathname, mask));
425 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
426 static int sys_inotify_rm_watch(int fd, int32_t wd)
428 return (inotify_rm_watch(fd, wd));
431 #ifdef CONFIG_INOTIFY1
432 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
433 static int sys_inotify_init1(int flags)
435 return (inotify_init1(flags));
440 /* Userspace can usually survive runtime without inotify */
441 #undef TARGET_NR_inotify_init
442 #undef TARGET_NR_inotify_init1
443 #undef TARGET_NR_inotify_add_watch
444 #undef TARGET_NR_inotify_rm_watch
445 #endif /* CONFIG_INOTIFY */
/*
 * prlimit64 wrapper. __NR_prlimit64 is forced to -1 when the host lacks the
 * syscall so the wrapper compiles but fails at runtime. host_rlimit64 is
 * declared separately because the glibc rlimit64 layout may differ from the
 * kernel's; its members are elided in this extraction.
 */
447 #if defined(TARGET_NR_prlimit64)
448 #ifndef __NR_prlimit64
449 # define __NR_prlimit64 -1
451 #define __NR_sys_prlimit64 __NR_prlimit64
452 /* The glibc rlimit structure may not be that used by the underlying syscall */
453 struct host_rlimit64 {
457 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
458 const struct host_rlimit64 *, new_limit,
459 struct host_rlimit64 *, old_limit)
/*
 * Fixed pool of host POSIX timers backing guest timer_create(). A slot is
 * claimed by writing a placeholder (timer_t)1; the elided lines presumably
 * return the slot index, or a failure value when the pool is exhausted —
 * TODO confirm against the full source. Note the FIXME: the scan-and-claim
 * is not obviously atomic.
 */
463 #if defined(TARGET_NR_timer_create)
464 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
465 static timer_t g_posix_timers[32] = { 0, } ;
467 static inline int next_free_host_timer(void)
470 /* FIXME: Does finding the next free slot require a lock? */
471 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
472 if (g_posix_timers[k] == 0) {
473 g_posix_timers[k] = (timer_t) 1;
/*
 * regpairs_aligned(cpu_env, num): per-target predicate telling the syscall
 * dispatcher whether 64-bit arguments are passed in aligned (even/odd)
 * register pairs for syscall number "num". Default (last variant) is 0.
 * NOTE(review): several #if/#elif/#endif and switch lines are elided here.
 */
481 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
483 static inline int regpairs_aligned(void *cpu_env, int num)
485 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
487 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
488 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
489 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
490 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
491 * of registers which translates to the same as ARM/MIPS, because we start with
493 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
494 #elif defined(TARGET_SH4)
495 /* SH4 doesn't align register pairs, except for p{read,write}64 */
496 static inline int regpairs_aligned(void *cpu_env, int num)
499 case TARGET_NR_pread64:
500 case TARGET_NR_pwrite64:
507 #elif defined(TARGET_XTENSA)
508 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
510 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
/*
 * host<->target errno translation tables. host_to_target_errno_table[] is
 * populated statically below for errnos whose numeric value differs between
 * architectures; the reverse table is filled in at runtime (per the comment,
 * from syscall_init()). Entries left zero mean "same value on both sides".
 * NOTE(review): a handful of #ifdef guard lines (e.g. around ENOKEY,
 * EKEYEXPIRED, ERFKILL, EHWPOISON) are elided in this extraction.
 */
513 #define ERRNO_TABLE_SIZE 1200
515 /* target_to_host_errno_table[] is initialized from
516 * host_to_target_errno_table[] in syscall_init(). */
517 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
521 * This list is the union of errno values overridden in asm-<arch>/errno.h
522 * minus the errnos that are not actually generic to all archs.
524 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
525 [EAGAIN] = TARGET_EAGAIN,
526 [EIDRM] = TARGET_EIDRM,
527 [ECHRNG] = TARGET_ECHRNG,
528 [EL2NSYNC] = TARGET_EL2NSYNC,
529 [EL3HLT] = TARGET_EL3HLT,
530 [EL3RST] = TARGET_EL3RST,
531 [ELNRNG] = TARGET_ELNRNG,
532 [EUNATCH] = TARGET_EUNATCH,
533 [ENOCSI] = TARGET_ENOCSI,
534 [EL2HLT] = TARGET_EL2HLT,
535 [EDEADLK] = TARGET_EDEADLK,
536 [ENOLCK] = TARGET_ENOLCK,
537 [EBADE] = TARGET_EBADE,
538 [EBADR] = TARGET_EBADR,
539 [EXFULL] = TARGET_EXFULL,
540 [ENOANO] = TARGET_ENOANO,
541 [EBADRQC] = TARGET_EBADRQC,
542 [EBADSLT] = TARGET_EBADSLT,
543 [EBFONT] = TARGET_EBFONT,
544 [ENOSTR] = TARGET_ENOSTR,
545 [ENODATA] = TARGET_ENODATA,
546 [ETIME] = TARGET_ETIME,
547 [ENOSR] = TARGET_ENOSR,
548 [ENONET] = TARGET_ENONET,
549 [ENOPKG] = TARGET_ENOPKG,
550 [EREMOTE] = TARGET_EREMOTE,
551 [ENOLINK] = TARGET_ENOLINK,
552 [EADV] = TARGET_EADV,
553 [ESRMNT] = TARGET_ESRMNT,
554 [ECOMM] = TARGET_ECOMM,
555 [EPROTO] = TARGET_EPROTO,
556 [EDOTDOT] = TARGET_EDOTDOT,
557 [EMULTIHOP] = TARGET_EMULTIHOP,
558 [EBADMSG] = TARGET_EBADMSG,
559 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
560 [EOVERFLOW] = TARGET_EOVERFLOW,
561 [ENOTUNIQ] = TARGET_ENOTUNIQ,
562 [EBADFD] = TARGET_EBADFD,
563 [EREMCHG] = TARGET_EREMCHG,
564 [ELIBACC] = TARGET_ELIBACC,
565 [ELIBBAD] = TARGET_ELIBBAD,
566 [ELIBSCN] = TARGET_ELIBSCN,
567 [ELIBMAX] = TARGET_ELIBMAX,
568 [ELIBEXEC] = TARGET_ELIBEXEC,
569 [EILSEQ] = TARGET_EILSEQ,
570 [ENOSYS] = TARGET_ENOSYS,
571 [ELOOP] = TARGET_ELOOP,
572 [ERESTART] = TARGET_ERESTART,
573 [ESTRPIPE] = TARGET_ESTRPIPE,
574 [ENOTEMPTY] = TARGET_ENOTEMPTY,
575 [EUSERS] = TARGET_EUSERS,
576 [ENOTSOCK] = TARGET_ENOTSOCK,
577 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
578 [EMSGSIZE] = TARGET_EMSGSIZE,
579 [EPROTOTYPE] = TARGET_EPROTOTYPE,
580 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
581 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
582 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
583 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
584 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
585 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
586 [EADDRINUSE] = TARGET_EADDRINUSE,
587 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
588 [ENETDOWN] = TARGET_ENETDOWN,
589 [ENETUNREACH] = TARGET_ENETUNREACH,
590 [ENETRESET] = TARGET_ENETRESET,
591 [ECONNABORTED] = TARGET_ECONNABORTED,
592 [ECONNRESET] = TARGET_ECONNRESET,
593 [ENOBUFS] = TARGET_ENOBUFS,
594 [EISCONN] = TARGET_EISCONN,
595 [ENOTCONN] = TARGET_ENOTCONN,
596 [EUCLEAN] = TARGET_EUCLEAN,
597 [ENOTNAM] = TARGET_ENOTNAM,
598 [ENAVAIL] = TARGET_ENAVAIL,
599 [EISNAM] = TARGET_EISNAM,
600 [EREMOTEIO] = TARGET_EREMOTEIO,
601 [EDQUOT] = TARGET_EDQUOT,
602 [ESHUTDOWN] = TARGET_ESHUTDOWN,
603 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
604 [ETIMEDOUT] = TARGET_ETIMEDOUT,
605 [ECONNREFUSED] = TARGET_ECONNREFUSED,
606 [EHOSTDOWN] = TARGET_EHOSTDOWN,
607 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
608 [EALREADY] = TARGET_EALREADY,
609 [EINPROGRESS] = TARGET_EINPROGRESS,
610 [ESTALE] = TARGET_ESTALE,
611 [ECANCELED] = TARGET_ECANCELED,
612 [ENOMEDIUM] = TARGET_ENOMEDIUM,
613 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
615 [ENOKEY] = TARGET_ENOKEY,
618 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
621 [EKEYREVOKED] = TARGET_EKEYREVOKED,
624 [EKEYREJECTED] = TARGET_EKEYREJECTED,
627 [EOWNERDEAD] = TARGET_EOWNERDEAD,
629 #ifdef ENOTRECOVERABLE
630 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
633 [ENOMSG] = TARGET_ENOMSG,
636 [ERFKILL] = TARGET_ERFKILL,
639 [EHWPOISON] = TARGET_EHWPOISON,
/*
 * Errno translation helpers. Both directions use their table when an entry
 * exists, otherwise (in the elided fall-through lines) presumably return the
 * value unchanged — TODO confirm. get_errno() converts a failing host call
 * into a negative target errno. target_strerror() handles two QEMU-internal
 * pseudo-errnos specially before consulting host strerror().
 */
643 static inline int host_to_target_errno(int err)
645 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
646 host_to_target_errno_table[err]) {
647 return host_to_target_errno_table[err];
652 static inline int target_to_host_errno(int err)
654 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
655 target_to_host_errno_table[err]) {
656 return target_to_host_errno_table[err];
661 static inline abi_long get_errno(abi_long ret)
664 return -host_to_target_errno(errno);
669 const char *target_strerror(int err)
671 if (err == TARGET_ERESTARTSYS) {
672 return "To be restarted";
674 if (err == TARGET_QEMU_ESIGRETURN) {
675 return "Successful exit from sigreturn";
/* Out-of-range errnos have no translation; the elided line returns NULL. */
678 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
681 return strerror(target_to_host_errno(err));
/*
 * safe_syscallN(type, name, ...): like _syscallN but expand to safe_##name,
 * which calls safe_syscall() — QEMU's wrapper that makes the syscall
 * restartable/interruptible-safe with respect to guest signals.
 * NOTE(review): macro brace lines are elided in this extraction.
 */
684 #define safe_syscall0(type, name) \
685 static type safe_##name(void) \
687 return safe_syscall(__NR_##name); \
690 #define safe_syscall1(type, name, type1, arg1) \
691 static type safe_##name(type1 arg1) \
693 return safe_syscall(__NR_##name, arg1); \
696 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
697 static type safe_##name(type1 arg1, type2 arg2) \
699 return safe_syscall(__NR_##name, arg1, arg2); \
702 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
703 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
705 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
708 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
710 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
712 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
715 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
716 type4, arg4, type5, arg5) \
717 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
720 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
723 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
724 type4, arg4, type5, arg5, type6, arg6) \
725 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
726 type5 arg5, type6 arg6) \
728 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/*
 * Instantiations of safe_* wrappers for every blocking syscall the emulator
 * issues on behalf of the guest, so guest signal delivery can interrupt them
 * safely. Conditionally compiled ones depend on guest/host support.
 * NOTE(review): some trailing-argument and #if/#endif lines are elided in
 * this extraction (visible as gaps in the embedded numbering).
 */
731 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
732 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
733 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
734 int, flags, mode_t, mode)
735 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
736 struct rusage *, rusage)
737 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
738 int, options, struct rusage *, rusage)
739 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
740 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
741 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
742 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
743 struct timespec *, tsp, const sigset_t *, sigmask,
745 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
746 int, maxevents, int, timeout, const sigset_t *, sigmask,
748 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
749 const struct timespec *,timeout,int *,uaddr2,int,val3)
750 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
751 safe_syscall2(int, kill, pid_t, pid, int, sig)
752 safe_syscall2(int, tkill, int, tid, int, sig)
753 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
754 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
755 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
756 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
757 unsigned long, pos_l, unsigned long, pos_h)
758 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
759 unsigned long, pos_l, unsigned long, pos_h)
760 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
762 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
763 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
764 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
765 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
766 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
767 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
768 safe_syscall2(int, flock, int, fd, int, operation)
769 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
770 const struct timespec *, uts, size_t, sigsetsize)
771 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
773 safe_syscall2(int, nanosleep, const struct timespec *, req,
774 struct timespec *, rem)
775 #ifdef TARGET_NR_clock_nanosleep
776 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
777 const struct timespec *, req, struct timespec *, rem)
780 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
781 void *, ptr, long, fifth)
784 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
788 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
789 long, msgtype, int, flags)
791 #ifdef __NR_semtimedop
792 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
793 unsigned, nsops, const struct timespec *, timeout)
795 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
796 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
797 size_t, len, unsigned, prio, const struct timespec *, timeout)
798 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
799 size_t, len, unsigned *, prio, const struct timespec *, timeout)
/*
 * Variadic safe wrappers: ioctl's third argument may be absent, an integer,
 * or a pointer, so it cannot go through the fixed-arity safe_syscallN
 * macros. fcntl uses fcntl64 where the host defines it (32-bit hosts) so
 * offsets are always 64-bit.
 */
801 /* We do ioctl like this rather than via safe_syscall3 to preserve the
802 * "third argument might be integer or pointer or not present" behaviour of
805 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
806 /* Similarly for fcntl. Note that callers must always:
807 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
808 * use the flock64 struct rather than unsuffixed flock
809 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
812 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
814 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
/*
 * Translate a host socket type (plus SOCK_CLOEXEC/SOCK_NONBLOCK modifier
 * bits) into the guest's encoding. Unrecognised base types pass through
 * unchanged. NOTE(review): the switch's case labels (SOCK_DGRAM/SOCK_STREAM,
 * inferred from the assignments) and brace lines are elided here.
 */
817 static inline int host_to_target_sock_type(int host_type)
821 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
823 target_type = TARGET_SOCK_DGRAM;
826 target_type = TARGET_SOCK_STREAM;
829 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
833 #if defined(SOCK_CLOEXEC)
834 if (host_type & SOCK_CLOEXEC) {
835 target_type |= TARGET_SOCK_CLOEXEC;
839 #if defined(SOCK_NONBLOCK)
840 if (host_type & SOCK_NONBLOCK) {
841 target_type |= TARGET_SOCK_NONBLOCK;
/*
 * Guest heap (brk) bookkeeping: current break, the initial break set at
 * exec time, and the highest host-page-aligned address actually backed by
 * a mapping. DEBUGF_BRK compiles to nothing unless the commented-out
 * variant is enabled.
 */
848 static abi_ulong target_brk;
849 static abi_ulong target_original_brk;
850 static abi_ulong brk_page;
852 void target_set_brk(abi_ulong new_brk)
854 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
855 brk_page = HOST_PAGE_ALIGN(target_brk);
858 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
859 #define DEBUGF_BRK(message, args...)
861 /* do_brk() must return target values and target errnos. */
862 abi_long do_brk(abi_ulong new_brk)
864 abi_long mapped_addr;
865 abi_ulong new_alloc_size;
867 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
/* Query (new_brk == 0) and shrink-below-original requests return the current
 * break; the actual return statements are elided in this extraction. */
870 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
873 if (new_brk < target_original_brk) {
874 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
879 /* If the new brk is less than the highest page reserved to the
880 * target heap allocation, set it and we're almost done... */
881 if (new_brk <= brk_page) {
882 /* Heap contents are initialized to zero, as for anonymous
884 if (new_brk > target_brk) {
885 memset(g2h(target_brk), 0, new_brk - target_brk);
887 target_brk = new_brk;
888 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
892 /* We need to allocate more memory after the brk... Note that
893 * we don't use MAP_FIXED because that will map over the top of
894 * any existing mapping (like the one with the host libc or qemu
895 * itself); instead we treat "mapped but at wrong address" as
896 * a failure and unmap again.
898 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
899 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
900 PROT_READ|PROT_WRITE,
901 MAP_ANON|MAP_PRIVATE, 0, 0));
903 if (mapped_addr == brk_page) {
904 /* Heap contents are initialized to zero, as for anonymous
905 * mapped pages. Technically the new pages are already
906 * initialized to zero since they *are* anonymous mapped
907 * pages, however we have to take care with the contents that
908 * come from the remaining part of the previous page: it may
909 * contains garbage data due to a previous heap usage (grown
911 memset(g2h(target_brk), 0, brk_page - target_brk);
913 target_brk = new_brk;
914 brk_page = HOST_PAGE_ALIGN(target_brk);
915 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
918 } else if (mapped_addr != -1) {
919 /* Mapped but at wrong address, meaning there wasn't actually
920 * enough space for this brk.
922 target_munmap(mapped_addr, new_alloc_size);
924 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
927 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
930 #if defined(TARGET_ALPHA)
931 /* We (partially) emulate OSF/1 on Alpha, which requires we
932 return a proper errno, not an unchanged brk value. */
933 return -TARGET_ENOMEM;
935 /* For everything else, return the previous break. */
/*
 * copy_from_user_fdset: unpack a guest fd_set (array of abi_ulong words,
 * target byte order) from guest memory into a host fd_set. Returns
 * -TARGET_EFAULT when the guest buffer cannot be locked. The *_ptr variant
 * additionally maps a NULL guest pointer to a NULL host fd_set pointer
 * (the NULL branch's assignment line is elided in this extraction).
 * NOTE(review): the per-bit FD_SET line inside the inner loop is elided.
 */
939 static inline abi_long copy_from_user_fdset(fd_set *fds,
940 abi_ulong target_fds_addr,
944 abi_ulong b, *target_fds;
946 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
947 if (!(target_fds = lock_user(VERIFY_READ,
949 sizeof(abi_ulong) * nw,
951 return -TARGET_EFAULT;
955 for (i = 0; i < nw; i++) {
956 /* grab the abi_ulong */
957 __get_user(b, &target_fds[i]);
958 for (j = 0; j < TARGET_ABI_BITS; j++) {
959 /* check the bit inside the abi_ulong */
966 unlock_user(target_fds, target_fds_addr, 0);
971 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
972 abi_ulong target_fds_addr,
975 if (target_fds_addr) {
976 if (copy_from_user_fdset(fds, target_fds_addr, n))
977 return -TARGET_EFAULT;
/*
 * copy_to_user_fdset: pack a host fd_set back into the guest's abi_ulong
 * word array, one bit per descriptor, swapping to target byte order via
 * __put_user. Returns -TARGET_EFAULT if the guest buffer cannot be locked
 * for writing. NOTE(review): the declarations of v and the k index, and the
 * k increment, are elided in this extraction.
 */
985 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
991 abi_ulong *target_fds;
993 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
994 if (!(target_fds = lock_user(VERIFY_WRITE,
996 sizeof(abi_ulong) * nw,
998 return -TARGET_EFAULT;
1001 for (i = 0; i < nw; i++) {
1003 for (j = 0; j < TARGET_ABI_BITS; j++) {
1004 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1007 __put_user(v, &target_fds[i]);
1010 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
/*
 * Convert host clock ticks to guest ticks. Alpha hosts use HZ=1024; the
 * #else defining the default HOST_HZ (presumably 100) is elided here.
 * When host and target HZ match the value passes through (that branch's
 * return line is also elided); otherwise rescale in 64-bit to avoid
 * overflow.
 */
1015 #if defined(__alpha__)
1016 #define HOST_HZ 1024
1021 static inline abi_long host_to_target_clock_t(long ticks)
1023 #if HOST_HZ == TARGET_HZ
1026 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
/*
 * Copy a host struct rusage into guest memory at target_addr, byteswapping
 * every field to target order with tswapal. Returns -TARGET_EFAULT if the
 * guest struct cannot be locked for writing; the success return (elided
 * after unlock_user_struct) presumably yields 0 — TODO confirm.
 */
1030 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1031 const struct rusage *rusage)
1033 struct target_rusage *target_rusage;
1035 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1036 return -TARGET_EFAULT;
1037 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1038 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1039 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1040 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1041 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1042 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1043 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1044 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1045 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1046 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1047 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1048 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1049 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1050 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1051 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1052 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1053 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1054 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1055 unlock_user_struct(target_rusage, target_addr, 1);
/*
 * rlimit value conversion. target_to_host_rlim maps the guest's
 * RLIM_INFINITY sentinel — and any value that does not survive the round
 * trip through the host rlim_t — to host RLIM_INFINITY. host_to_target_rlim
 * does the reverse, saturating values unrepresentable in abi_ulong.
 * NOTE(review): the final "return result;" lines are elided here.
 */
1060 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1062 abi_ulong target_rlim_swap;
1065 target_rlim_swap = tswapal(target_rlim);
1066 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1067 return RLIM_INFINITY;
1069 result = target_rlim_swap;
1070 if (target_rlim_swap != (rlim_t)result)
1071 return RLIM_INFINITY;
1076 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1078 abi_ulong target_rlim_swap;
1081 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1082 target_rlim_swap = TARGET_RLIM_INFINITY;
1084 target_rlim_swap = rlim;
1085 result = tswapal(target_rlim_swap);
1090 static inline int target_to_host_resource(int code)
1093 case TARGET_RLIMIT_AS:
1095 case TARGET_RLIMIT_CORE:
1097 case TARGET_RLIMIT_CPU:
1099 case TARGET_RLIMIT_DATA:
1101 case TARGET_RLIMIT_FSIZE:
1102 return RLIMIT_FSIZE;
1103 case TARGET_RLIMIT_LOCKS:
1104 return RLIMIT_LOCKS;
1105 case TARGET_RLIMIT_MEMLOCK:
1106 return RLIMIT_MEMLOCK;
1107 case TARGET_RLIMIT_MSGQUEUE:
1108 return RLIMIT_MSGQUEUE;
1109 case TARGET_RLIMIT_NICE:
1111 case TARGET_RLIMIT_NOFILE:
1112 return RLIMIT_NOFILE;
1113 case TARGET_RLIMIT_NPROC:
1114 return RLIMIT_NPROC;
1115 case TARGET_RLIMIT_RSS:
1117 case TARGET_RLIMIT_RTPRIO:
1118 return RLIMIT_RTPRIO;
1119 case TARGET_RLIMIT_SIGPENDING:
1120 return RLIMIT_SIGPENDING;
1121 case TARGET_RLIMIT_STACK:
1122 return RLIMIT_STACK;
1128 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1129 abi_ulong target_tv_addr)
1131 struct target_timeval *target_tv;
1133 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1134 return -TARGET_EFAULT;
1137 __get_user(tv->tv_sec, &target_tv->tv_sec);
1138 __get_user(tv->tv_usec, &target_tv->tv_usec);
1140 unlock_user_struct(target_tv, target_tv_addr, 0);
1145 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1146 const struct timeval *tv)
1148 struct target_timeval *target_tv;
1150 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1151 return -TARGET_EFAULT;
1154 __put_user(tv->tv_sec, &target_tv->tv_sec);
1155 __put_user(tv->tv_usec, &target_tv->tv_usec);
1157 unlock_user_struct(target_tv, target_tv_addr, 1);
1162 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1163 const struct timeval *tv)
1165 struct target__kernel_sock_timeval *target_tv;
1167 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1168 return -TARGET_EFAULT;
1171 __put_user(tv->tv_sec, &target_tv->tv_sec);
1172 __put_user(tv->tv_usec, &target_tv->tv_usec);
1174 unlock_user_struct(target_tv, target_tv_addr, 1);
1179 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1180 abi_ulong target_addr)
1182 struct target_timespec *target_ts;
1184 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1185 return -TARGET_EFAULT;
1187 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1188 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1189 unlock_user_struct(target_ts, target_addr, 0);
1193 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1194 struct timespec *host_ts)
1196 struct target_timespec *target_ts;
1198 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1199 return -TARGET_EFAULT;
1201 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1202 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1203 unlock_user_struct(target_ts, target_addr, 1);
1207 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1208 struct timespec *host_ts)
1210 struct target__kernel_timespec *target_ts;
1212 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1213 return -TARGET_EFAULT;
1215 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1216 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1217 unlock_user_struct(target_ts, target_addr, 1);
1221 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1222 abi_ulong target_tz_addr)
1224 struct target_timezone *target_tz;
1226 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1227 return -TARGET_EFAULT;
1230 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1231 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1233 unlock_user_struct(target_tz, target_tz_addr, 0);
1238 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1241 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1242 abi_ulong target_mq_attr_addr)
1244 struct target_mq_attr *target_mq_attr;
1246 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1247 target_mq_attr_addr, 1))
1248 return -TARGET_EFAULT;
1250 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1251 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1252 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1253 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1255 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1260 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1261 const struct mq_attr *attr)
1263 struct target_mq_attr *target_mq_attr;
1265 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1266 target_mq_attr_addr, 0))
1267 return -TARGET_EFAULT;
1269 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1270 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1271 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1272 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1274 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/*
 * Emulate select(2) for the guest: copy in the three fd sets and the
 * optional timeout, run the host pselect6, then copy the (possibly
 * modified) fd sets and remaining timeout back out.
 *
 * do_select() must return target values and target errnos.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* A zero guest address yields a NULL fd_set pointer to the host. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        /* The host call takes a timespec, so convert usec to nsec. */
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Write back the remaining timeout, as Linux select(2) does. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
#if defined(TARGET_WANT_OLD_SYS_SELECT)
/*
 * Emulate the old single-argument select syscall: the guest passes a
 * pointer to a sel_arg_struct holding the five real arguments.  Unpack
 * it (byte-swapping each member) and forward to do_select().
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
1362 static abi_long do_pipe2(int host_pipe[], int flags)
1365 return pipe2(host_pipe, flags);
1371 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1372 int flags, int is_pipe2)
1376 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1379 return get_errno(ret);
1381 /* Several targets have special calling conventions for the original
1382 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1384 #if defined(TARGET_ALPHA)
1385 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1386 return host_pipe[0];
1387 #elif defined(TARGET_MIPS)
1388 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1389 return host_pipe[0];
1390 #elif defined(TARGET_SH4)
1391 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1392 return host_pipe[0];
1393 #elif defined(TARGET_SPARC)
1394 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1395 return host_pipe[0];
1399 if (put_user_s32(host_pipe[0], pipedes)
1400 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1401 return -TARGET_EFAULT;
1402 return get_errno(ret);
1405 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1406 abi_ulong target_addr,
1409 struct target_ip_mreqn *target_smreqn;
1411 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1413 return -TARGET_EFAULT;
1414 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1415 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1416 if (len == sizeof(struct target_ip_mreqn))
1417 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1418 unlock_user(target_smreqn, target_addr, 0);
1423 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1424 abi_ulong target_addr,
1427 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1428 sa_family_t sa_family;
1429 struct target_sockaddr *target_saddr;
1431 if (fd_trans_target_to_host_addr(fd)) {
1432 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1435 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1437 return -TARGET_EFAULT;
1439 sa_family = tswap16(target_saddr->sa_family);
1441 /* Oops. The caller might send a incomplete sun_path; sun_path
1442 * must be terminated by \0 (see the manual page), but
1443 * unfortunately it is quite common to specify sockaddr_un
1444 * length as "strlen(x->sun_path)" while it should be
1445 * "strlen(...) + 1". We'll fix that here if needed.
1446 * Linux kernel has a similar feature.
1449 if (sa_family == AF_UNIX) {
1450 if (len < unix_maxlen && len > 0) {
1451 char *cp = (char*)target_saddr;
1453 if ( cp[len-1] && !cp[len] )
1456 if (len > unix_maxlen)
1460 memcpy(addr, target_saddr, len);
1461 addr->sa_family = sa_family;
1462 if (sa_family == AF_NETLINK) {
1463 struct sockaddr_nl *nladdr;
1465 nladdr = (struct sockaddr_nl *)addr;
1466 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1467 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1468 } else if (sa_family == AF_PACKET) {
1469 struct target_sockaddr_ll *lladdr;
1471 lladdr = (struct target_sockaddr_ll *)addr;
1472 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1473 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1475 unlock_user(target_saddr, target_addr, 0);
1480 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1481 struct sockaddr *addr,
1484 struct target_sockaddr *target_saddr;
1491 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1493 return -TARGET_EFAULT;
1494 memcpy(target_saddr, addr, len);
1495 if (len >= offsetof(struct target_sockaddr, sa_family) +
1496 sizeof(target_saddr->sa_family)) {
1497 target_saddr->sa_family = tswap16(addr->sa_family);
1499 if (addr->sa_family == AF_NETLINK &&
1500 len >= sizeof(struct target_sockaddr_nl)) {
1501 struct target_sockaddr_nl *target_nl =
1502 (struct target_sockaddr_nl *)target_saddr;
1503 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1504 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1505 } else if (addr->sa_family == AF_PACKET) {
1506 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1507 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1508 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1509 } else if (addr->sa_family == AF_INET6 &&
1510 len >= sizeof(struct target_sockaddr_in6)) {
1511 struct target_sockaddr_in6 *target_in6 =
1512 (struct target_sockaddr_in6 *)target_saddr;
1513 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1515 unlock_user(target_saddr, target_addr, len);
1520 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1521 struct target_msghdr *target_msgh)
1523 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1524 abi_long msg_controllen;
1525 abi_ulong target_cmsg_addr;
1526 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1527 socklen_t space = 0;
1529 msg_controllen = tswapal(target_msgh->msg_controllen);
1530 if (msg_controllen < sizeof (struct target_cmsghdr))
1532 target_cmsg_addr = tswapal(target_msgh->msg_control);
1533 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1534 target_cmsg_start = target_cmsg;
1536 return -TARGET_EFAULT;
1538 while (cmsg && target_cmsg) {
1539 void *data = CMSG_DATA(cmsg);
1540 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1542 int len = tswapal(target_cmsg->cmsg_len)
1543 - sizeof(struct target_cmsghdr);
1545 space += CMSG_SPACE(len);
1546 if (space > msgh->msg_controllen) {
1547 space -= CMSG_SPACE(len);
1548 /* This is a QEMU bug, since we allocated the payload
1549 * area ourselves (unlike overflow in host-to-target
1550 * conversion, which is just the guest giving us a buffer
1551 * that's too small). It can't happen for the payload types
1552 * we currently support; if it becomes an issue in future
1553 * we would need to improve our allocation strategy to
1554 * something more intelligent than "twice the size of the
1555 * target buffer we're reading from".
1557 gemu_log("Host cmsg overflow\n");
1561 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1562 cmsg->cmsg_level = SOL_SOCKET;
1564 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1566 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1567 cmsg->cmsg_len = CMSG_LEN(len);
1569 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1570 int *fd = (int *)data;
1571 int *target_fd = (int *)target_data;
1572 int i, numfds = len / sizeof(int);
1574 for (i = 0; i < numfds; i++) {
1575 __get_user(fd[i], target_fd + i);
1577 } else if (cmsg->cmsg_level == SOL_SOCKET
1578 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1579 struct ucred *cred = (struct ucred *)data;
1580 struct target_ucred *target_cred =
1581 (struct target_ucred *)target_data;
1583 __get_user(cred->pid, &target_cred->pid);
1584 __get_user(cred->uid, &target_cred->uid);
1585 __get_user(cred->gid, &target_cred->gid);
1587 gemu_log("Unsupported ancillary data: %d/%d\n",
1588 cmsg->cmsg_level, cmsg->cmsg_type);
1589 memcpy(data, target_data, len);
1592 cmsg = CMSG_NXTHDR(msgh, cmsg);
1593 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1596 unlock_user(target_cmsg, target_cmsg_addr, 0);
1598 msgh->msg_controllen = space;
1602 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1603 struct msghdr *msgh)
1605 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1606 abi_long msg_controllen;
1607 abi_ulong target_cmsg_addr;
1608 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1609 socklen_t space = 0;
1611 msg_controllen = tswapal(target_msgh->msg_controllen);
1612 if (msg_controllen < sizeof (struct target_cmsghdr))
1614 target_cmsg_addr = tswapal(target_msgh->msg_control);
1615 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1616 target_cmsg_start = target_cmsg;
1618 return -TARGET_EFAULT;
1620 while (cmsg && target_cmsg) {
1621 void *data = CMSG_DATA(cmsg);
1622 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1624 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1625 int tgt_len, tgt_space;
1627 /* We never copy a half-header but may copy half-data;
1628 * this is Linux's behaviour in put_cmsg(). Note that
1629 * truncation here is a guest problem (which we report
1630 * to the guest via the CTRUNC bit), unlike truncation
1631 * in target_to_host_cmsg, which is a QEMU bug.
1633 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1634 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1638 if (cmsg->cmsg_level == SOL_SOCKET) {
1639 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1641 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1643 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1645 /* Payload types which need a different size of payload on
1646 * the target must adjust tgt_len here.
1649 switch (cmsg->cmsg_level) {
1651 switch (cmsg->cmsg_type) {
1653 tgt_len = sizeof(struct target_timeval);
1663 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1664 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1665 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1668 /* We must now copy-and-convert len bytes of payload
1669 * into tgt_len bytes of destination space. Bear in mind
1670 * that in both source and destination we may be dealing
1671 * with a truncated value!
1673 switch (cmsg->cmsg_level) {
1675 switch (cmsg->cmsg_type) {
1678 int *fd = (int *)data;
1679 int *target_fd = (int *)target_data;
1680 int i, numfds = tgt_len / sizeof(int);
1682 for (i = 0; i < numfds; i++) {
1683 __put_user(fd[i], target_fd + i);
1689 struct timeval *tv = (struct timeval *)data;
1690 struct target_timeval *target_tv =
1691 (struct target_timeval *)target_data;
1693 if (len != sizeof(struct timeval) ||
1694 tgt_len != sizeof(struct target_timeval)) {
1698 /* copy struct timeval to target */
1699 __put_user(tv->tv_sec, &target_tv->tv_sec);
1700 __put_user(tv->tv_usec, &target_tv->tv_usec);
1703 case SCM_CREDENTIALS:
1705 struct ucred *cred = (struct ucred *)data;
1706 struct target_ucred *target_cred =
1707 (struct target_ucred *)target_data;
1709 __put_user(cred->pid, &target_cred->pid);
1710 __put_user(cred->uid, &target_cred->uid);
1711 __put_user(cred->gid, &target_cred->gid);
1720 switch (cmsg->cmsg_type) {
1723 uint32_t *v = (uint32_t *)data;
1724 uint32_t *t_int = (uint32_t *)target_data;
1726 if (len != sizeof(uint32_t) ||
1727 tgt_len != sizeof(uint32_t)) {
1730 __put_user(*v, t_int);
1736 struct sock_extended_err ee;
1737 struct sockaddr_in offender;
1739 struct errhdr_t *errh = (struct errhdr_t *)data;
1740 struct errhdr_t *target_errh =
1741 (struct errhdr_t *)target_data;
1743 if (len != sizeof(struct errhdr_t) ||
1744 tgt_len != sizeof(struct errhdr_t)) {
1747 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1748 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1749 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1750 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1751 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1752 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1753 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1754 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1755 (void *) &errh->offender, sizeof(errh->offender));
1764 switch (cmsg->cmsg_type) {
1767 uint32_t *v = (uint32_t *)data;
1768 uint32_t *t_int = (uint32_t *)target_data;
1770 if (len != sizeof(uint32_t) ||
1771 tgt_len != sizeof(uint32_t)) {
1774 __put_user(*v, t_int);
1780 struct sock_extended_err ee;
1781 struct sockaddr_in6 offender;
1783 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1784 struct errhdr6_t *target_errh =
1785 (struct errhdr6_t *)target_data;
1787 if (len != sizeof(struct errhdr6_t) ||
1788 tgt_len != sizeof(struct errhdr6_t)) {
1791 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1792 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1793 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1794 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1795 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1796 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1797 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1798 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1799 (void *) &errh->offender, sizeof(errh->offender));
1809 gemu_log("Unsupported ancillary data: %d/%d\n",
1810 cmsg->cmsg_level, cmsg->cmsg_type);
1811 memcpy(target_data, data, MIN(len, tgt_len));
1812 if (tgt_len > len) {
1813 memset(target_data + len, 0, tgt_len - len);
1817 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1818 tgt_space = TARGET_CMSG_SPACE(tgt_len);
1819 if (msg_controllen < tgt_space) {
1820 tgt_space = msg_controllen;
1822 msg_controllen -= tgt_space;
1824 cmsg = CMSG_NXTHDR(msgh, cmsg);
1825 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1828 unlock_user(target_cmsg, target_cmsg_addr, space);
1830 target_msgh->msg_controllen = tswapal(space);
/*
 * Translate a guest setsockopt(2) call: convert the option level/name
 * and copy-and-convert the option payload from guest memory, then
 * invoke the host setsockopt.  Dispatches on level (SOL_TCP, SOL_IP,
 * SOL_IPV6, SOL_ICMPV6, SOL_ALG if available, TARGET_SOL_SOCKET) and
 * on optname within each level.
 *
 * NOTE(review): this listing is an elided extraction — some case
 * labels, braces and returns between the numbered lines are not shown.
 */
1834 /* do_setsockopt() Must return target values and target errnos. */
1835 static abi_long do_setsockopt(int sockfd, int level, int optname,
1836 abi_ulong optval_addr, socklen_t optlen)
1840 struct ip_mreqn *ip_mreq;
1841 struct ip_mreq_source *ip_mreq_source;
/* SOL_TCP: every supported TCP option is a plain 32-bit int. */
1845 /* TCP options all take an 'int' value. */
1846 if (optlen < sizeof(uint32_t))
1847 return -TARGET_EINVAL;
1849 if (get_user_u32(val, optval_addr))
1850 return -TARGET_EFAULT;
1851 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
/* SOL_IP options accepting either a u32 or a single byte from the guest. */
1858 case IP_ROUTER_ALERT:
1862 case IP_MTU_DISCOVER:
1869 case IP_MULTICAST_TTL:
1870 case IP_MULTICAST_LOOP:
1872 if (optlen >= sizeof(uint32_t)) {
1873 if (get_user_u32(val, optval_addr))
1874 return -TARGET_EFAULT;
1875 } else if (optlen >= 1) {
1876 if (get_user_u8(val, optval_addr))
1877 return -TARGET_EFAULT;
1879 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
/* Multicast membership: payload may be ip_mreq or the larger ip_mreqn. */
1881 case IP_ADD_MEMBERSHIP:
1882 case IP_DROP_MEMBERSHIP:
1883 if (optlen < sizeof (struct target_ip_mreq) ||
1884 optlen > sizeof (struct target_ip_mreqn))
1885 return -TARGET_EINVAL;
1887 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1888 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1889 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
/* Source-specific multicast: struct is all network-order, passed through. */
1892 case IP_BLOCK_SOURCE:
1893 case IP_UNBLOCK_SOURCE:
1894 case IP_ADD_SOURCE_MEMBERSHIP:
1895 case IP_DROP_SOURCE_MEMBERSHIP:
1896 if (optlen != sizeof (struct target_ip_mreq_source))
1897 return -TARGET_EINVAL;
1899 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1900 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1901 unlock_user (ip_mreq_source, optval_addr, 0);
/* SOL_IPV6: int-valued options (some conditional on host header support). */
1910 case IPV6_MTU_DISCOVER:
1913 case IPV6_RECVPKTINFO:
1914 case IPV6_UNICAST_HOPS:
1915 case IPV6_MULTICAST_HOPS:
1916 case IPV6_MULTICAST_LOOP:
1918 case IPV6_RECVHOPLIMIT:
1919 case IPV6_2292HOPLIMIT:
1922 case IPV6_2292PKTINFO:
1923 case IPV6_RECVTCLASS:
1924 case IPV6_RECVRTHDR:
1925 case IPV6_2292RTHDR:
1926 case IPV6_RECVHOPOPTS:
1927 case IPV6_2292HOPOPTS:
1928 case IPV6_RECVDSTOPTS:
1929 case IPV6_2292DSTOPTS:
1931 #ifdef IPV6_RECVPATHMTU
1932 case IPV6_RECVPATHMTU:
1934 #ifdef IPV6_TRANSPARENT
1935 case IPV6_TRANSPARENT:
1937 #ifdef IPV6_FREEBIND
1940 #ifdef IPV6_RECVORIGDSTADDR
1941 case IPV6_RECVORIGDSTADDR:
1944 if (optlen < sizeof(uint32_t)) {
1945 return -TARGET_EINVAL;
1947 if (get_user_u32(val, optval_addr)) {
1948 return -TARGET_EFAULT;
1950 ret = get_errno(setsockopt(sockfd, level, optname,
1951 &val, sizeof(val)));
/* IPV6_PKTINFO: only the interface index needs byte-swapping. */
1955 struct in6_pktinfo pki;
1957 if (optlen < sizeof(pki)) {
1958 return -TARGET_EINVAL;
1961 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1962 return -TARGET_EFAULT;
1965 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1967 ret = get_errno(setsockopt(sockfd, level, optname,
1968 &pki, sizeof(pki)));
1971 case IPV6_ADD_MEMBERSHIP:
1972 case IPV6_DROP_MEMBERSHIP:
1974 struct ipv6_mreq ipv6mreq;
1976 if (optlen < sizeof(ipv6mreq)) {
1977 return -TARGET_EINVAL;
1980 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
1981 return -TARGET_EFAULT;
1984 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
1986 ret = get_errno(setsockopt(sockfd, level, optname,
1987 &ipv6mreq, sizeof(ipv6mreq)));
/* ICMPv6 filter: eight 32-bit mask words, each byte-swapped. */
1998 struct icmp6_filter icmp6f;
2000 if (optlen > sizeof(icmp6f)) {
2001 optlen = sizeof(icmp6f);
2004 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2005 return -TARGET_EFAULT;
2008 for (val = 0; val < 8; val++) {
2009 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2012 ret = get_errno(setsockopt(sockfd, level, optname,
/* Raw-socket options taking a u32. */
2024 /* those take an u32 value */
2025 if (optlen < sizeof(uint32_t)) {
2026 return -TARGET_EINVAL;
2029 if (get_user_u32(val, optval_addr)) {
2030 return -TARGET_EFAULT;
2032 ret = get_errno(setsockopt(sockfd, level, optname,
2033 &val, sizeof(val)));
/* Kernel crypto API (AF_ALG) options, when the host supports them. */
2040 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2045 char *alg_key = g_malloc(optlen);
2048 return -TARGET_ENOMEM;
2050 if (copy_from_user(alg_key, optval_addr, optlen)) {
2052 return -TARGET_EFAULT;
2054 ret = get_errno(setsockopt(sockfd, level, optname,
2059 case ALG_SET_AEAD_AUTHSIZE:
2061 ret = get_errno(setsockopt(sockfd, level, optname,
/* Generic socket level: payload format depends on the option. */
2070 case TARGET_SOL_SOCKET:
2072 case TARGET_SO_RCVTIMEO:
2076 optname = SO_RCVTIMEO;
/* Timeouts carry a struct timeval that must be converted. */
2079 if (optlen != sizeof(struct target_timeval)) {
2080 return -TARGET_EINVAL;
2083 if (copy_from_user_timeval(&tv, optval_addr)) {
2084 return -TARGET_EFAULT;
2087 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2091 case TARGET_SO_SNDTIMEO:
2092 optname = SO_SNDTIMEO;
/* BPF socket filter: convert each instruction of the program. */
2094 case TARGET_SO_ATTACH_FILTER:
2096 struct target_sock_fprog *tfprog;
2097 struct target_sock_filter *tfilter;
2098 struct sock_fprog fprog;
2099 struct sock_filter *filter;
2102 if (optlen != sizeof(*tfprog)) {
2103 return -TARGET_EINVAL;
2105 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2106 return -TARGET_EFAULT;
2108 if (!lock_user_struct(VERIFY_READ, tfilter,
2109 tswapal(tfprog->filter), 0)) {
2110 unlock_user_struct(tfprog, optval_addr, 1);
2111 return -TARGET_EFAULT;
2114 fprog.len = tswap16(tfprog->len);
2115 filter = g_try_new(struct sock_filter, fprog.len);
2116 if (filter == NULL) {
2117 unlock_user_struct(tfilter, tfprog->filter, 1);
2118 unlock_user_struct(tfprog, optval_addr, 1);
2119 return -TARGET_ENOMEM;
2121 for (i = 0; i < fprog.len; i++) {
2122 filter[i].code = tswap16(tfilter[i].code);
2123 filter[i].jt = tfilter[i].jt;
2124 filter[i].jf = tfilter[i].jf;
2125 filter[i].k = tswap32(tfilter[i].k);
2127 fprog.filter = filter;
2129 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2130 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2133 unlock_user_struct(tfilter, tfprog->filter, 1);
2134 unlock_user_struct(tfprog, optval_addr, 1);
/* Interface name: bounded copy into a NUL-terminated local buffer. */
2137 case TARGET_SO_BINDTODEVICE:
2139 char *dev_ifname, *addr_ifname;
2141 if (optlen > IFNAMSIZ - 1) {
2142 optlen = IFNAMSIZ - 1;
2144 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2146 return -TARGET_EFAULT;
2148 optname = SO_BINDTODEVICE;
2149 addr_ifname = alloca(IFNAMSIZ);
2150 memcpy(addr_ifname, dev_ifname, optlen);
2151 addr_ifname[optlen] = 0;
2152 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2153 addr_ifname, optlen));
2154 unlock_user (dev_ifname, optval_addr, 0);
2157 case TARGET_SO_LINGER:
2160 struct target_linger *tlg;
2162 if (optlen != sizeof(struct target_linger)) {
2163 return -TARGET_EINVAL;
2165 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2166 return -TARGET_EFAULT;
2168 __get_user(lg.l_onoff, &tlg->l_onoff);
2169 __get_user(lg.l_linger, &tlg->l_linger);
2170 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2172 unlock_user_struct(tlg, optval_addr, 0);
/* Remaining socket options are plain ints: map name, fall through
 * to the shared get_user_u32 + setsockopt tail below. */
2175 /* Options with 'int' argument. */
2176 case TARGET_SO_DEBUG:
2179 case TARGET_SO_REUSEADDR:
2180 optname = SO_REUSEADDR;
2183 case TARGET_SO_REUSEPORT:
2184 optname = SO_REUSEPORT;
2187 case TARGET_SO_TYPE:
2190 case TARGET_SO_ERROR:
2193 case TARGET_SO_DONTROUTE:
2194 optname = SO_DONTROUTE;
2196 case TARGET_SO_BROADCAST:
2197 optname = SO_BROADCAST;
2199 case TARGET_SO_SNDBUF:
2200 optname = SO_SNDBUF;
2202 case TARGET_SO_SNDBUFFORCE:
2203 optname = SO_SNDBUFFORCE;
2205 case TARGET_SO_RCVBUF:
2206 optname = SO_RCVBUF;
2208 case TARGET_SO_RCVBUFFORCE:
2209 optname = SO_RCVBUFFORCE;
2211 case TARGET_SO_KEEPALIVE:
2212 optname = SO_KEEPALIVE;
2214 case TARGET_SO_OOBINLINE:
2215 optname = SO_OOBINLINE;
2217 case TARGET_SO_NO_CHECK:
2218 optname = SO_NO_CHECK;
2220 case TARGET_SO_PRIORITY:
2221 optname = SO_PRIORITY;
2224 case TARGET_SO_BSDCOMPAT:
2225 optname = SO_BSDCOMPAT;
2228 case TARGET_SO_PASSCRED:
2229 optname = SO_PASSCRED;
2231 case TARGET_SO_PASSSEC:
2232 optname = SO_PASSSEC;
2234 case TARGET_SO_TIMESTAMP:
2235 optname = SO_TIMESTAMP;
2237 case TARGET_SO_RCVLOWAT:
2238 optname = SO_RCVLOWAT;
2243 if (optlen < sizeof(uint32_t))
2244 return -TARGET_EINVAL;
2246 if (get_user_u32(val, optval_addr))
2247 return -TARGET_EFAULT;
2248 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2252 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2253 ret = -TARGET_ENOPROTOOPT;
/*
 * Translate a guest getsockopt(2) call: fetch the option from the host
 * socket and convert the result (and the value-result length word)
 * back into guest format.  Structured options (SO_PEERCRED, SO_LINGER)
 * are converted field by field; int-valued options share a common tail
 * that writes either a u32 or a single byte depending on the guest's
 * requested length.
 *
 * NOTE(review): this listing is an elided extraction — some case
 * labels, braces and returns between the numbered lines are not shown.
 */
2258 /* do_getsockopt() Must return target values and target errnos. */
2259 static abi_long do_getsockopt(int sockfd, int level, int optname,
2260 abi_ulong optval_addr, abi_ulong optlen)
2267 case TARGET_SOL_SOCKET:
2270 /* These don't just return a single integer */
2271 case TARGET_SO_RCVTIMEO:
2272 case TARGET_SO_SNDTIMEO:
2273 case TARGET_SO_PEERNAME:
/* SO_PEERCRED: convert the host ucred into the guest layout. */
2275 case TARGET_SO_PEERCRED: {
2278 struct target_ucred *tcr;
2280 if (get_user_u32(len, optlen)) {
2281 return -TARGET_EFAULT;
2284 return -TARGET_EINVAL;
2288 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2296 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2297 return -TARGET_EFAULT;
2299 __put_user(cr.pid, &tcr->pid);
2300 __put_user(cr.uid, &tcr->uid);
2301 __put_user(cr.gid, &tcr->gid);
2302 unlock_user_struct(tcr, optval_addr, 1);
2303 if (put_user_u32(len, optlen)) {
2304 return -TARGET_EFAULT;
/* SO_LINGER: convert the host linger struct into the guest layout. */
2308 case TARGET_SO_LINGER:
2312 struct target_linger *tlg;
2314 if (get_user_u32(len, optlen)) {
2315 return -TARGET_EFAULT;
2318 return -TARGET_EINVAL;
2322 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2330 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2331 return -TARGET_EFAULT;
2333 __put_user(lg.l_onoff, &tlg->l_onoff);
2334 __put_user(lg.l_linger, &tlg->l_linger);
2335 unlock_user_struct(tlg, optval_addr, 1);
2336 if (put_user_u32(len, optlen)) {
2337 return -TARGET_EFAULT;
/* Int-valued socket options: map name, fall through to shared tail. */
2341 /* Options with 'int' argument. */
2342 case TARGET_SO_DEBUG:
2345 case TARGET_SO_REUSEADDR:
2346 optname = SO_REUSEADDR;
2349 case TARGET_SO_REUSEPORT:
2350 optname = SO_REUSEPORT;
2353 case TARGET_SO_TYPE:
2356 case TARGET_SO_ERROR:
2359 case TARGET_SO_DONTROUTE:
2360 optname = SO_DONTROUTE;
2362 case TARGET_SO_BROADCAST:
2363 optname = SO_BROADCAST;
2365 case TARGET_SO_SNDBUF:
2366 optname = SO_SNDBUF;
2368 case TARGET_SO_RCVBUF:
2369 optname = SO_RCVBUF;
2371 case TARGET_SO_KEEPALIVE:
2372 optname = SO_KEEPALIVE;
2374 case TARGET_SO_OOBINLINE:
2375 optname = SO_OOBINLINE;
2377 case TARGET_SO_NO_CHECK:
2378 optname = SO_NO_CHECK;
2380 case TARGET_SO_PRIORITY:
2381 optname = SO_PRIORITY;
2384 case TARGET_SO_BSDCOMPAT:
2385 optname = SO_BSDCOMPAT;
2388 case TARGET_SO_PASSCRED:
2389 optname = SO_PASSCRED;
2391 case TARGET_SO_TIMESTAMP:
2392 optname = SO_TIMESTAMP;
2394 case TARGET_SO_RCVLOWAT:
2395 optname = SO_RCVLOWAT;
2397 case TARGET_SO_ACCEPTCONN:
2398 optname = SO_ACCEPTCONN;
/* Shared tail for int-valued options at socket/TCP level. */
2405 /* TCP options all take an 'int' value.  */
2407 if (get_user_u32(len, optlen))
2408 return -TARGET_EFAULT;
2410 return -TARGET_EINVAL;
2412 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2415 if (optname == SO_TYPE) {
2416 val = host_to_target_sock_type(val);
2421 if (put_user_u32(val, optval_addr))
2422 return -TARGET_EFAULT;
2424 if (put_user_u8(val, optval_addr))
2425 return -TARGET_EFAULT;
2427 if (put_user_u32(len, optlen))
2428 return -TARGET_EFAULT;
/* SOL_IP: int-valued options; a short guest buffer gets a single byte. */
2435 case IP_ROUTER_ALERT:
2439 case IP_MTU_DISCOVER:
2445 case IP_MULTICAST_TTL:
2446 case IP_MULTICAST_LOOP:
2447 if (get_user_u32(len, optlen))
2448 return -TARGET_EFAULT;
2450 return -TARGET_EINVAL;
2452 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2455 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2457 if (put_user_u32(len, optlen)
2458 || put_user_u8(val, optval_addr))
2459 return -TARGET_EFAULT;
2461 if (len > sizeof(int))
2463 if (put_user_u32(len, optlen)
2464 || put_user_u32(val, optval_addr))
2465 return -TARGET_EFAULT;
2469 ret = -TARGET_ENOPROTOOPT;
/* SOL_IPV6: int-valued options (same byte-or-u32 write-back logic). */
2475 case IPV6_MTU_DISCOVER:
2478 case IPV6_RECVPKTINFO:
2479 case IPV6_UNICAST_HOPS:
2480 case IPV6_MULTICAST_HOPS:
2481 case IPV6_MULTICAST_LOOP:
2483 case IPV6_RECVHOPLIMIT:
2484 case IPV6_2292HOPLIMIT:
2487 case IPV6_2292PKTINFO:
2488 case IPV6_RECVTCLASS:
2489 case IPV6_RECVRTHDR:
2490 case IPV6_2292RTHDR:
2491 case IPV6_RECVHOPOPTS:
2492 case IPV6_2292HOPOPTS:
2493 case IPV6_RECVDSTOPTS:
2494 case IPV6_2292DSTOPTS:
2496 #ifdef IPV6_RECVPATHMTU
2497 case IPV6_RECVPATHMTU:
2499 #ifdef IPV6_TRANSPARENT
2500 case IPV6_TRANSPARENT:
2502 #ifdef IPV6_FREEBIND
2505 #ifdef IPV6_RECVORIGDSTADDR
2506 case IPV6_RECVORIGDSTADDR:
2508 if (get_user_u32(len, optlen))
2509 return -TARGET_EFAULT;
2511 return -TARGET_EINVAL;
2513 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2516 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2518 if (put_user_u32(len, optlen)
2519 || put_user_u8(val, optval_addr))
2520 return -TARGET_EFAULT;
2522 if (len > sizeof(int))
2524 if (put_user_u32(len, optlen)
2525 || put_user_u32(val, optval_addr))
2526 return -TARGET_EFAULT;
2530 ret = -TARGET_ENOPROTOOPT;
2536 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2538 ret = -TARGET_EOPNOTSUPP;
2544 /* Convert target low/high pair representing file offset into the host
2545 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2546 * as the kernel doesn't handle them either.
2548 static void target_to_host_low_high(abi_ulong tlow,
2550 unsigned long *hlow,
2551 unsigned long *hhigh)
/* Reassemble the 64-bit offset from the guest halves.  The shift is done
 * in two half-width steps so no single shift count can reach the full
 * width of the type (which would be undefined behaviour). */
2553 uint64_t off = tlow |
2554 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2555 TARGET_LONG_BITS / 2;
/* Split back into host-sized halves, again shifting in two steps. */
2558 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
/*
 * lock_iovec: copy a guest iovec array at target_addr into a freshly
 * allocated host struct iovec array, locking each guest buffer into host
 * memory.  'type' is the VERIFY_READ/VERIFY_WRITE access mode and 'copy'
 * controls whether buffer contents are copied in.  Returns NULL on error.
 */
2561 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2562 abi_ulong count, int copy)
2564 struct target_iovec *target_vec;
2566 abi_ulong total_len, max_len;
2569 bool bad_address = false;
/* The kernel rejects iovec counts above IOV_MAX. */
2575 if (count > IOV_MAX) {
2580 vec = g_try_new0(struct iovec, count);
2586 target_vec = lock_user(VERIFY_READ, target_addr,
2587 count * sizeof(struct target_iovec), 1);
2588 if (target_vec == NULL) {
2593 /* ??? If host page size > target page size, this will result in a
2594 value larger than what we can actually support. */
2595 max_len = 0x7fffffff & TARGET_PAGE_MASK;
2598 for (i = 0; i < count; i++) {
2599 abi_ulong base = tswapal(target_vec[i].iov_base);
2600 abi_long len = tswapal(target_vec[i].iov_len);
2605 } else if (len == 0) {
2606 /* Zero length pointer is ignored. */
2607 vec[i].iov_base = 0;
2609 vec[i].iov_base = lock_user(type, base, len, copy);
2610 /* If the first buffer pointer is bad, this is a fault. But
2611 * subsequent bad buffers will result in a partial write; this
2612 * is realized by filling the vector with null pointers and
2614 if (!vec[i].iov_base) {
/* Clamp so the running total never exceeds max_len. */
2625 if (len > max_len - total_len) {
2626 len = max_len - total_len;
2629 vec[i].iov_len = len;
2633 unlock_user(target_vec, target_addr, 0);
/* Error path: unwind the buffers locked so far before failing. */
2638 if (tswapal(target_vec[i].iov_len) > 0) {
2639 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2642 unlock_user(target_vec, target_addr, 0);
/*
 * unlock_iovec: release the host iovec created by lock_iovec().  When
 * 'copy' is set the locked length is written back to guest memory
 * (i.e. this was a read-style operation).
 */
2649 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2650 abi_ulong count, int copy)
2652 struct target_iovec *target_vec;
/* Re-read the guest iovec array to recover each buffer's guest address. */
2655 target_vec = lock_user(VERIFY_READ, target_addr,
2656 count * sizeof(struct target_iovec), 1);
2658 for (i = 0; i < count; i++) {
2659 abi_ulong base = tswapal(target_vec[i].iov_base);
2660 abi_long len = tswapal(target_vec[i].iov_len);
2664 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2666 unlock_user(target_vec, target_addr, 0);
/*
 * target_to_host_sock_type: translate a guest socket type value (base
 * type plus TARGET_SOCK_CLOEXEC/TARGET_SOCK_NONBLOCK flags) in place to
 * the host's encoding.  Returns 0 or a negative target errno when a flag
 * cannot be represented on this host.
 */
2672 static inline int target_to_host_sock_type(int *type)
2675 int target_type = *type;
2677 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2678 case TARGET_SOCK_DGRAM:
2679 host_type = SOCK_DGRAM;
2681 case TARGET_SOCK_STREAM:
2682 host_type = SOCK_STREAM;
/* Other base types are assumed to share the host's numbering. */
2685 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2688 if (target_type & TARGET_SOCK_CLOEXEC) {
2689 #if defined(SOCK_CLOEXEC)
2690 host_type |= SOCK_CLOEXEC;
/* Host has no SOCK_CLOEXEC: cannot emulate, reject. */
2692 return -TARGET_EINVAL;
2695 if (target_type & TARGET_SOCK_NONBLOCK) {
2696 #if defined(SOCK_NONBLOCK)
2697 host_type |= SOCK_NONBLOCK;
/* Without SOCK_NONBLOCK, O_NONBLOCK is emulated later via fcntl();
 * if neither exists the flag cannot be honoured. */
2698 #elif !defined(O_NONBLOCK)
2699 return -TARGET_EINVAL;
2706 /* Try to emulate socket type flags after socket creation. */
2707 static int sock_flags_fixup(int fd, int target_type)
/* Only needed when the host lacks SOCK_NONBLOCK: fall back to setting
 * O_NONBLOCK on the already-created socket with fcntl(). */
2709 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2710 if (target_type & TARGET_SOCK_NONBLOCK) {
2711 int flags = fcntl(fd, F_GETFL);
2712 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2714 return -TARGET_EINVAL;
2721 /* do_socket() Must return target values and target errnos. */
2722 static abi_long do_socket(int domain, int type, int protocol)
2724 int target_type = type;
2727 ret = target_to_host_sock_type(&type);
/* Only a small whitelist of netlink protocols is emulated; everything
 * else is refused up front. */
2732 if (domain == PF_NETLINK && !(
2733 #ifdef CONFIG_RTNETLINK
2734 protocol == NETLINK_ROUTE ||
2736 protocol == NETLINK_KOBJECT_UEVENT ||
2737 protocol == NETLINK_AUDIT)) {
2738 return -EPFNOSUPPORT;
/* Packet sockets take a 16-bit protocol in network byte order. */
2741 if (domain == AF_PACKET ||
2742 (domain == AF_INET && type == SOCK_PACKET)) {
2743 protocol = tswap16(protocol);
2746 ret = get_errno(socket(domain, type, protocol));
/* Apply any flags (e.g. non-blocking) that could not be passed to
 * socket() itself, then register fd translators where the protocol
 * needs message conversion between guest and host. */
2748 ret = sock_flags_fixup(ret, target_type);
2749 if (type == SOCK_PACKET) {
2750 /* Manage an obsolete case :
2751 * if socket type is SOCK_PACKET, bind by name
2753 fd_trans_register(ret, &target_packet_trans);
2754 } else if (domain == PF_NETLINK) {
2756 #ifdef CONFIG_RTNETLINK
2758 fd_trans_register(ret, &target_netlink_route_trans);
2761 case NETLINK_KOBJECT_UEVENT:
2762 /* nothing to do: messages are strings */
2765 fd_trans_register(ret, &target_netlink_audit_trans);
/* Unreachable: protocols were validated above. */
2768 g_assert_not_reached();
2775 /* do_bind() Must return target values and target errnos. */
2776 static abi_long do_bind(int sockfd, abi_ulong target_addr,
/* Reject negative lengths before converting the guest sockaddr. */
2782 if ((int)addrlen < 0) {
2783 return -TARGET_EINVAL;
2786 addr = alloca(addrlen+1);
2788 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2792 return get_errno(bind(sockfd, addr, addrlen));
2795 /* do_connect() Must return target values and target errnos. */
2796 static abi_long do_connect(int sockfd, abi_ulong target_addr,
/* Reject negative lengths before converting the guest sockaddr. */
2802 if ((int)addrlen < 0) {
2803 return -TARGET_EINVAL;
2806 addr = alloca(addrlen+1);
2808 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
/* safe_connect is the signal-safe syscall wrapper. */
2812 return get_errno(safe_connect(sockfd, addr, addrlen));
2815 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2816 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2817 int flags, int send)
2823 abi_ulong target_vec;
/* Convert the optional msg_name/msg_namelen pair. */
2825 if (msgp->msg_name) {
2826 msg.msg_namelen = tswap32(msgp->msg_namelen);
2827 msg.msg_name = alloca(msg.msg_namelen+1);
2828 ret = target_to_host_sockaddr(fd, msg.msg_name,
2829 tswapal(msgp->msg_name),
2831 if (ret == -TARGET_EFAULT) {
2832 /* For connected sockets msg_name and msg_namelen must
2833 * be ignored, so returning EFAULT immediately is wrong.
2834 * Instead, pass a bad msg_name to the host kernel, and
2835 * let it decide whether to return EFAULT or not.
2837 msg.msg_name = (void *)-1;
2842 msg.msg_name = NULL;
2843 msg.msg_namelen = 0;
/* Control buffer: doubled to leave room for size differences between
 * guest and host cmsg layouts — presumably; confirm against
 * target_to_host_cmsg()/host_to_target_cmsg(). */
2845 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2846 msg.msg_control = alloca(msg.msg_controllen);
2847 memset(msg.msg_control, 0, msg.msg_controllen);
2849 msg.msg_flags = tswap32(msgp->msg_flags);
2851 count = tswapal(msgp->msg_iovlen);
2852 target_vec = tswapal(msgp->msg_iov);
2854 if (count > IOV_MAX) {
2855 /* sendrcvmsg returns a different errno for this condition than
2856 * readv/writev, so we must catch it here before lock_iovec() does.
2858 ret = -TARGET_EMSGSIZE;
2862 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2863 target_vec, count, send);
2865 ret = -host_to_target_errno(errno);
2868 msg.msg_iovlen = count;
/* Send path: optionally run the fd's data translator over a copy of the
 * payload, then convert control messages and call sendmsg. */
2872 if (fd_trans_target_to_host_data(fd)) {
2875 host_msg = g_malloc(msg.msg_iov->iov_len);
2876 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2877 ret = fd_trans_target_to_host_data(fd)(host_msg,
2878 msg.msg_iov->iov_len);
2880 msg.msg_iov->iov_base = host_msg;
2881 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2885 ret = target_to_host_cmsg(&msg, msgp);
2887 ret = get_errno(safe_sendmsg(fd, &msg, flags));
/* Receive path: call recvmsg, then translate payload, control messages
 * and peer address back into guest format. */
2891 ret = get_errno(safe_recvmsg(fd, &msg, flags));
2892 if (!is_error(ret)) {
2894 if (fd_trans_host_to_target_data(fd)) {
2895 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2896 MIN(msg.msg_iov->iov_len, len));
2898 ret = host_to_target_cmsg(msgp, &msg);
2900 if (!is_error(ret)) {
2901 msgp->msg_namelen = tswap32(msg.msg_namelen);
2902 msgp->msg_flags = tswap32(msg.msg_flags);
2903 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2904 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2905 msg.msg_name, msg.msg_namelen);
/* Common exit: copy back buffers on receive (!send), not on send. */
2917 unlock_iovec(vec, target_vec, count, !send);
/*
 * do_sendrecvmsg: lock the guest msghdr and delegate to
 * do_sendrecvmsg_locked().  'send' selects sendmsg (1) vs recvmsg (0).
 */
2922 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2923 int flags, int send)
2926 struct target_msghdr *msgp;
2928 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2932 return -TARGET_EFAULT;
2934 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
/* Copy the struct back to the guest only on the receive path. */
2935 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2939 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2940 * so it might not have this *mmsg-specific flag either.
2942 #ifndef MSG_WAITFORONE
2943 #define MSG_WAITFORONE 0x10000
/*
 * do_sendrecvmmsg: emulate sendmmsg/recvmmsg by looping over the guest
 * mmsghdr vector and calling do_sendrecvmsg_locked() per entry.
 */
2946 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2947 unsigned int vlen, unsigned int flags,
2950 struct target_mmsghdr *mmsgp;
2954 if (vlen > UIO_MAXIOV) {
2958 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2960 return -TARGET_EFAULT;
2963 for (i = 0; i < vlen; i++) {
2964 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2965 if (is_error(ret)) {
/* Record the per-datagram byte count in the guest's mmsghdr. */
2968 mmsgp[i].msg_len = tswap32(ret);
2969 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2970 if (flags & MSG_WAITFORONE) {
2971 flags |= MSG_DONTWAIT;
/* Only the 'i' processed entries need copying back. */
2975 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2977 /* Return number of datagrams sent if we sent any at all;
2978 * otherwise return the error.
2986 /* do_accept4() Must return target values and target errnos. */
2987 static abi_long do_accept4(int fd, abi_ulong target_addr,
2988 abi_ulong target_addrlen_addr, int flags)
2990 socklen_t addrlen, ret_addrlen;
2995 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
/* NULL guest addr means the caller doesn't want the peer address. */
2997 if (target_addr == 0) {
2998 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3001 /* linux returns EINVAL if addrlen pointer is invalid */
3002 if (get_user_u32(addrlen, target_addrlen_addr))
3003 return -TARGET_EINVAL;
3005 if ((int)addrlen < 0) {
3006 return -TARGET_EINVAL;
3009 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3010 return -TARGET_EINVAL;
3012 addr = alloca(addrlen);
3014 ret_addrlen = addrlen;
3015 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3016 if (!is_error(ret)) {
/* Copy at most the caller-supplied length back to the guest. */
3017 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3018 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3019 ret = -TARGET_EFAULT;
3025 /* do_getpeername() Must return target values and target errnos. */
3026 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3027 abi_ulong target_addrlen_addr)
3029 socklen_t addrlen, ret_addrlen;
3033 if (get_user_u32(addrlen, target_addrlen_addr))
3034 return -TARGET_EFAULT;
3036 if ((int)addrlen < 0) {
3037 return -TARGET_EINVAL;
3040 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3041 return -TARGET_EFAULT;
3043 addr = alloca(addrlen);
3045 ret_addrlen = addrlen;
3046 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3047 if (!is_error(ret)) {
/* Copy at most the caller-supplied length back to the guest. */
3048 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3049 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3050 ret = -TARGET_EFAULT;
3056 /* do_getsockname() Must return target values and target errnos. */
3057 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3058 abi_ulong target_addrlen_addr)
3060 socklen_t addrlen, ret_addrlen;
3064 if (get_user_u32(addrlen, target_addrlen_addr))
3065 return -TARGET_EFAULT;
3067 if ((int)addrlen < 0) {
3068 return -TARGET_EINVAL;
3071 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3072 return -TARGET_EFAULT;
3074 addr = alloca(addrlen);
3076 ret_addrlen = addrlen;
3077 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3078 if (!is_error(ret)) {
/* Copy at most the caller-supplied length back to the guest. */
3079 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3080 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3081 ret = -TARGET_EFAULT;
3087 /* do_socketpair() Must return target values and target errnos. */
3088 static abi_long do_socketpair(int domain, int type, int protocol,
3089 abi_ulong target_tab_addr)
/* Translate the guest socket-type flags, then write the two new fds
 * back into the guest's int[2] array. */
3094 target_to_host_sock_type(&type);
3096 ret = get_errno(socketpair(domain, type, protocol, tab));
3097 if (!is_error(ret)) {
3098 if (put_user_s32(tab[0], target_tab_addr)
3099 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3100 ret = -TARGET_EFAULT;
3105 /* do_sendto() Must return target values and target errnos. */
3106 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3107 abi_ulong target_addr, socklen_t addrlen)
3111 void *copy_msg = NULL;
3114 if ((int)addrlen < 0) {
3115 return -TARGET_EINVAL;
3118 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3120 return -TARGET_EFAULT;
/* If this fd has a data translator, run it over a private copy so the
 * guest's buffer is left untouched. */
3121 if (fd_trans_target_to_host_data(fd)) {
3122 copy_msg = host_msg;
3123 host_msg = g_malloc(len);
3124 memcpy(host_msg, copy_msg, len);
3125 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
/* With a destination address: convert it and use the 5-arg form;
 * otherwise send on the connected socket. */
3131 addr = alloca(addrlen+1);
3132 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3136 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3138 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
/* Restore the original locked pointer before unlocking (the translated
 * copy, if any, is freed separately). */
3143 host_msg = copy_msg;
3145 unlock_user(host_msg, msg, 0);
3149 /* do_recvfrom() Must return target values and target errnos. */
3150 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3151 abi_ulong target_addr,
3152 abi_ulong target_addrlen)
3154 socklen_t addrlen, ret_addrlen;
3159 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3161 return -TARGET_EFAULT;
/* Caller wants the sender's address: read and validate its length. */
3163 if (get_user_u32(addrlen, target_addrlen)) {
3164 ret = -TARGET_EFAULT;
3167 if ((int)addrlen < 0) {
3168 ret = -TARGET_EINVAL;
3171 addr = alloca(addrlen);
3172 ret_addrlen = addrlen;
3173 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3174 addr, &ret_addrlen));
3176 addr = NULL; /* To keep compiler quiet. */
3177 addrlen = 0; /* To keep compiler quiet. */
3178 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3180 if (!is_error(ret)) {
/* Post-process received data through the fd's translator, if any. */
3181 if (fd_trans_host_to_target_data(fd)) {
3183 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3184 if (is_error(trans)) {
/* Copy the sender address (clamped to the caller's length) and the
 * actual length back to the guest. */
3190 host_to_target_sockaddr(target_addr, addr,
3191 MIN(addrlen, ret_addrlen));
3192 if (put_user_u32(ret_addrlen, target_addrlen)) {
3193 ret = -TARGET_EFAULT;
/* Success: copy the received bytes back; failure: discard. */
3197 unlock_user(host_msg, msg, len);
3200 unlock_user(host_msg, msg, 0);
3205 #ifdef TARGET_NR_socketcall
3206 /* do_socketcall() must return target values and target errnos. */
3207 static abi_long do_socketcall(int num, abi_ulong vptr)
/* Per-operation argument counts; indexed by the TARGET_SYS_* number so
 * the right number of abi_longs can be fetched from guest memory. */
3209 static const unsigned nargs[] = { /* number of arguments per operation */
3210 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3211 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3212 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3213 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3214 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3215 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3216 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3217 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3218 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3219 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3220 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3221 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3222 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3223 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3224 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3225 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3226 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3227 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3228 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3229 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3231 abi_long a[6]; /* max 6 args */
3234 /* check the range of the first argument num */
3235 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3236 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3237 return -TARGET_EINVAL;
3239 /* ensure we have space for args */
3240 if (nargs[num] > ARRAY_SIZE(a)) {
3241 return -TARGET_EINVAL;
3243 /* collect the arguments in a[] according to nargs[] */
3244 for (i = 0; i < nargs[num]; ++i) {
3245 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3246 return -TARGET_EFAULT;
3249 /* now when we have the args, invoke the appropriate underlying function */
3251 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3252 return do_socket(a[0], a[1], a[2]);
3253 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3254 return do_bind(a[0], a[1], a[2]);
3255 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3256 return do_connect(a[0], a[1], a[2]);
3257 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3258 return get_errno(listen(a[0], a[1]));
3259 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3260 return do_accept4(a[0], a[1], a[2], 0);
3261 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3262 return do_getsockname(a[0], a[1], a[2]);
3263 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3264 return do_getpeername(a[0], a[1], a[2]);
3265 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3266 return do_socketpair(a[0], a[1], a[2], a[3]);
3267 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3268 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3269 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3270 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3271 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3272 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3273 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3274 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3275 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3276 return get_errno(shutdown(a[0], a[1]));
3277 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3278 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3279 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3280 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3281 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3282 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3283 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3284 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3285 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3286 return do_accept4(a[0], a[1], a[2], a[3]);
3287 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3288 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3289 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3290 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3292 gemu_log("Unsupported socketcall: %d\n", num);
3293 return -TARGET_EINVAL;
/* Fixed-size table tracking guest shared-memory attachments. */
3298 #define N_SHM_REGIONS 32
3300 static struct shm_region {
3304 } shm_regions[N_SHM_REGIONS];
3306 #ifndef TARGET_SEMID64_DS
3307 /* asm-generic version of this struct */
3308 struct target_semid64_ds
3310 struct target_ipc_perm sem_perm;
3311 abi_ulong sem_otime;
/* 32-bit ABIs pad each time field to 64 bits with an unused slot. */
3312 #if TARGET_ABI_BITS == 32
3313 abi_ulong __unused1;
3315 abi_ulong sem_ctime;
3316 #if TARGET_ABI_BITS == 32
3317 abi_ulong __unused2;
3319 abi_ulong sem_nsems;
3320 abi_ulong __unused3;
3321 abi_ulong __unused4;
/*
 * target_to_host_ipc_perm: read the sem_perm member of a guest
 * target_semid64_ds at target_addr into a host struct ipc_perm,
 * byte-swapping each field.  Returns 0 or -TARGET_EFAULT.
 */
3325 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3326 abi_ulong target_addr)
3328 struct target_ipc_perm *target_ip;
3329 struct target_semid64_ds *target_sd;
3331 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3332 return -TARGET_EFAULT;
3333 target_ip = &(target_sd->sem_perm);
3334 host_ip->__key = tswap32(target_ip->__key);
3335 host_ip->uid = tswap32(target_ip->uid);
3336 host_ip->gid = tswap32(target_ip->gid);
3337 host_ip->cuid = tswap32(target_ip->cuid);
3338 host_ip->cgid = tswap32(target_ip->cgid);
/* mode and __seq are 32-bit on some targets, 16-bit elsewhere. */
3339 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3340 host_ip->mode = tswap32(target_ip->mode);
3342 host_ip->mode = tswap16(target_ip->mode);
3344 #if defined(TARGET_PPC)
3345 host_ip->__seq = tswap32(target_ip->__seq);
3347 host_ip->__seq = tswap16(target_ip->__seq);
3349 unlock_user_struct(target_sd, target_addr, 0);
/*
 * host_to_target_ipc_perm: inverse of target_to_host_ipc_perm — write a
 * host struct ipc_perm into the sem_perm member of the guest's
 * target_semid64_ds at target_addr.  Returns 0 or -TARGET_EFAULT.
 */
3353 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3354 struct ipc_perm *host_ip)
3356 struct target_ipc_perm *target_ip;
3357 struct target_semid64_ds *target_sd;
3359 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3360 return -TARGET_EFAULT;
3361 target_ip = &(target_sd->sem_perm);
3362 target_ip->__key = tswap32(host_ip->__key);
3363 target_ip->uid = tswap32(host_ip->uid);
3364 target_ip->gid = tswap32(host_ip->gid);
3365 target_ip->cuid = tswap32(host_ip->cuid);
3366 target_ip->cgid = tswap32(host_ip->cgid);
/* mode and __seq widths vary per target; mirror of the read path. */
3367 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3368 target_ip->mode = tswap32(host_ip->mode);
3370 target_ip->mode = tswap16(host_ip->mode);
3372 #if defined(TARGET_PPC)
3373 target_ip->__seq = tswap32(host_ip->__seq);
3375 target_ip->__seq = tswap16(host_ip->__seq);
3377 unlock_user_struct(target_sd, target_addr, 1);
/*
 * target_to_host_semid_ds: convert a guest target_semid64_ds at
 * target_addr into a host struct semid_ds (permissions via
 * target_to_host_ipc_perm, plus nsems/otime/ctime).
 */
3381 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3382 abi_ulong target_addr)
3384 struct target_semid64_ds *target_sd;
3386 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3387 return -TARGET_EFAULT;
3388 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3389 return -TARGET_EFAULT;
3390 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3391 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3392 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3393 unlock_user_struct(target_sd, target_addr, 0);
/*
 * host_to_target_semid_ds: inverse conversion — write a host struct
 * semid_ds back into the guest's target_semid64_ds at target_addr.
 */
3397 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3398 struct semid_ds *host_sd)
3400 struct target_semid64_ds *target_sd;
3402 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3403 return -TARGET_EFAULT;
3404 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3405 return -TARGET_EFAULT;
3406 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3407 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3408 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3409 unlock_user_struct(target_sd, target_addr, 1);
/* Guest-layout counterpart of the host's struct seminfo (semctl IPC_INFO). */
3413 struct target_seminfo {
/*
 * host_to_target_seminfo: copy a host struct seminfo field-by-field into
 * the guest's target_seminfo at target_addr (semctl IPC_INFO/SEM_INFO).
 */
3426 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3427 struct seminfo *host_seminfo)
3429 struct target_seminfo *target_seminfo;
3430 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3431 return -TARGET_EFAULT;
3432 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3433 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3434 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3435 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3436 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3437 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3438 __put_user(host_seminfo->semume, &target_seminfo->semume);
3439 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3440 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3441 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3442 unlock_user_struct(target_seminfo, target_addr, 1);
/* Members of the host-side semun union used as the semctl() fourth
 * argument, followed by the guest-layout equivalent. */
3448 struct semid_ds *buf;
3449 unsigned short *array;
3450 struct seminfo *__buf;
3453 union target_semun {
/*
 * target_to_host_semarray: allocate a host unsigned-short array (size
 * learned from semctl IPC_STAT on the set) and fill it from the guest
 * array at target_addr.  Caller owns *host_array and must g_free() it.
 */
3460 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3461 abi_ulong target_addr)
3464 unsigned short *array;
3466 struct semid_ds semid_ds;
3469 semun.buf = &semid_ds;
/* Query the set to discover how many semaphores it holds. */
3471 ret = semctl(semid, 0, IPC_STAT, semun);
3473 return get_errno(ret);
3475 nsems = semid_ds.sem_nsems;
3477 *host_array = g_try_new(unsigned short, nsems);
3479 return -TARGET_ENOMEM;
3481 array = lock_user(VERIFY_READ, target_addr,
3482 nsems*sizeof(unsigned short), 1);
3484 g_free(*host_array);
3485 return -TARGET_EFAULT;
3488 for(i=0; i<nsems; i++) {
3489 __get_user((*host_array)[i], &array[i]);
3491 unlock_user(array, target_addr, 0);
/*
 * host_to_target_semarray: copy the host semaphore-value array back to
 * the guest array at target_addr, then free the host array (this is the
 * release counterpart of target_to_host_semarray).
 */
3496 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3497 unsigned short **host_array)
3500 unsigned short *array;
3502 struct semid_ds semid_ds;
3505 semun.buf = &semid_ds;
/* Re-query the set size rather than trusting the caller. */
3507 ret = semctl(semid, 0, IPC_STAT, semun);
3509 return get_errno(ret);
3511 nsems = semid_ds.sem_nsems;
3513 array = lock_user(VERIFY_WRITE, target_addr,
3514 nsems*sizeof(unsigned short), 0);
3516 return -TARGET_EFAULT;
3518 for(i=0; i<nsems; i++) {
3519 __put_user((*host_array)[i], &array[i]);
3521 g_free(*host_array);
3522 unlock_user(array, target_addr, 1);
/*
 * do_semctl: emulate semctl(2).  Converts the semun argument per command
 * (value, array, semid_ds buffer, or seminfo buffer) between guest and
 * host representations around the host call.
 */
3527 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3528 abi_ulong target_arg)
3530 union target_semun target_su = { .buf = target_arg };
3532 struct semid_ds dsarg;
3533 unsigned short *array = NULL;
3534 struct seminfo seminfo;
3535 abi_long ret = -TARGET_EINVAL;
3542 /* In 64 bit cross-endian situations, we will erroneously pick up
3543 * the wrong half of the union for the "val" element. To rectify
3544 * this, the entire 8-byte structure is byteswapped, followed by
3545 * a swap of the 4 byte val field. In other cases, the data is
3546 * already in proper host byte order. */
3547 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3548 target_su.buf = tswapal(target_su.buf);
3549 arg.val = tswap32(target_su.val);
3551 arg.val = target_su.val;
3553 ret = get_errno(semctl(semid, semnum, cmd, arg));
/* GETALL/SETALL-style commands: marshal the value array both ways. */
3557 err = target_to_host_semarray(semid, &array, target_su.array);
3561 ret = get_errno(semctl(semid, semnum, cmd, arg));
3562 err = host_to_target_semarray(semid, target_su.array, &array);
/* IPC_STAT/IPC_SET-style commands: marshal struct semid_ds both ways. */
3569 err = target_to_host_semid_ds(&dsarg, target_su.buf);
3573 ret = get_errno(semctl(semid, semnum, cmd, arg));
3574 err = host_to_target_semid_ds(target_su.buf, &dsarg);
/* IPC_INFO/SEM_INFO: result delivered through a seminfo buffer. */
3580 arg.__buf = &seminfo;
3581 ret = get_errno(semctl(semid, semnum, cmd, arg));
3582 err = host_to_target_seminfo(target_su.__buf, &seminfo);
/* Remaining commands take no argument. */
3590 ret = get_errno(semctl(semid, semnum, cmd, NULL));
/* Guest-layout counterpart of the host's struct sembuf (semop operation). */
3597 struct target_sembuf {
3598 unsigned short sem_num;
/*
 * target_to_host_sembuf: copy an array of nsops guest sembuf operations
 * at target_addr into the caller-provided host array, byte-swapping each
 * field.  Returns 0 or -TARGET_EFAULT.
 */
3603 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3604 abi_ulong target_addr,
3607 struct target_sembuf *target_sembuf;
3610 target_sembuf = lock_user(VERIFY_READ, target_addr,
3611 nsops*sizeof(struct target_sembuf), 1);
3613 return -TARGET_EFAULT;
3615 for(i=0; i<nsops; i++) {
3616 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3617 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3618 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3621 unlock_user(target_sembuf, target_addr, 0);
/*
 * do_semop: emulate semop(2) by converting the guest sembuf array and
 * calling semtimedop (directly, or via the ipc() multiplexer on hosts
 * that lack a dedicated syscall).
 */
3626 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3628 struct sembuf sops[nsops];
3631 if (target_to_host_sembuf(sops, ptr, nsops))
3632 return -TARGET_EFAULT;
3634 ret = -TARGET_ENOSYS;
3635 #ifdef __NR_semtimedop
3636 ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
/* Fallback path through the generic ipc() syscall. */
3639 if (ret == -TARGET_ENOSYS) {
3640 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
/* Guest-layout counterpart of the host's struct msqid_ds (msgctl). */
3646 struct target_msqid_ds
3648 struct target_ipc_perm msg_perm;
3649 abi_ulong msg_stime;
/* 32-bit ABIs pad each time field to 64 bits with an unused slot. */
3650 #if TARGET_ABI_BITS == 32
3651 abi_ulong __unused1;
3653 abi_ulong msg_rtime;
3654 #if TARGET_ABI_BITS == 32
3655 abi_ulong __unused2;
3657 abi_ulong msg_ctime;
3658 #if TARGET_ABI_BITS == 32
3659 abi_ulong __unused3;
3661 abi_ulong __msg_cbytes;
3663 abi_ulong msg_qbytes;
3664 abi_ulong msg_lspid;
3665 abi_ulong msg_lrpid;
3666 abi_ulong __unused4;
3667 abi_ulong __unused5;
/*
 * target_to_host_msqid_ds: convert a guest target_msqid_ds at
 * target_addr into a host struct msqid_ds (permissions via
 * target_to_host_ipc_perm, remaining fields byte-swapped).
 */
3670 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3671 abi_ulong target_addr)
3673 struct target_msqid_ds *target_md;
3675 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3676 return -TARGET_EFAULT;
3677 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3678 return -TARGET_EFAULT;
3679 host_md->msg_stime = tswapal(target_md->msg_stime);
3680 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3681 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3682 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3683 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3684 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3685 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3686 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3687 unlock_user_struct(target_md, target_addr, 0);
/*
 * host_to_target_msqid_ds: inverse conversion — write a host struct
 * msqid_ds back into the guest's target_msqid_ds at target_addr.
 */
3691 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3692 struct msqid_ds *host_md)
3694 struct target_msqid_ds *target_md;
3696 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3697 return -TARGET_EFAULT;
3698 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3699 return -TARGET_EFAULT;
3700 target_md->msg_stime = tswapal(host_md->msg_stime);
3701 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3702 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3703 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3704 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3705 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3706 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3707 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3708 unlock_user_struct(target_md, target_addr, 1);
/* Guest-layout counterpart of the host's struct msginfo (msgctl IPC_INFO). */
3712 struct target_msginfo {
3720 unsigned short int msgseg;
/*
 * host_to_target_msginfo: copy a host struct msginfo field-by-field into
 * the guest's target_msginfo at target_addr (msgctl IPC_INFO/MSG_INFO).
 */
3723 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3724 struct msginfo *host_msginfo)
3726 struct target_msginfo *target_msginfo;
3727 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3728 return -TARGET_EFAULT;
3729 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3730 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3731 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3732 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3733 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3734 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3735 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3736 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3737 unlock_user_struct(target_msginfo, target_addr, 1);
/*
 * do_msgctl: emulate msgctl(2), converting the msqid_ds or msginfo
 * buffer between guest and host layouts around the host call.
 */
3741 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3743 struct msqid_ds dsarg;
3744 struct msginfo msginfo;
3745 abi_long ret = -TARGET_EINVAL;
/* IPC_STAT/IPC_SET-style commands: marshal struct msqid_ds both ways. */
3753 if (target_to_host_msqid_ds(&dsarg,ptr))
3754 return -TARGET_EFAULT;
3755 ret = get_errno(msgctl(msgid, cmd, &dsarg));
3756 if (host_to_target_msqid_ds(ptr,&dsarg))
3757 return -TARGET_EFAULT;
/* Argument-less commands (e.g. IPC_RMID). */
3760 ret = get_errno(msgctl(msgid, cmd, NULL));
/* IPC_INFO/MSG_INFO return data through a msginfo buffer; the cast
 * matches the kernel interface, which overloads the msqid_ds pointer. */
3764 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3765 if (host_to_target_msginfo(ptr, &msginfo))
3766 return -TARGET_EFAULT;
/* Guest-layout counterpart of the host's struct msgbuf (msgsnd/msgrcv). */
3773 struct target_msgbuf {
/*
 * do_msgsnd: emulate msgsnd(2).  Copies the guest message (mtype +
 * msgsz bytes of mtext) into a host msgbuf and sends it, using the
 * dedicated syscall or the ipc() multiplexer as available.
 */
3778 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3779 ssize_t msgsz, int msgflg)
3781 struct target_msgbuf *target_mb;
3782 struct msgbuf *host_mb;
3786 return -TARGET_EINVAL;
3789 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3790 return -TARGET_EFAULT;
/* sizeof(long) covers the leading mtype field of struct msgbuf. */
3791 host_mb = g_try_malloc(msgsz + sizeof(long));
3793 unlock_user_struct(target_mb, msgp, 0);
3794 return -TARGET_ENOMEM;
3796 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3797 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3798 ret = -TARGET_ENOSYS;
3800 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
/* Fallback path through the generic ipc() syscall. */
3803 if (ret == -TARGET_ENOSYS) {
3804 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
3809 unlock_user_struct(target_mb, msgp, 0);
/*
 * do_msgrcv: emulate msgrcv(2).  Receives into a host msgbuf, then
 * copies mtype and the received mtext bytes back into the guest's
 * target_msgbuf at msgp.
 */
3814 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3815 ssize_t msgsz, abi_long msgtyp,
3818 struct target_msgbuf *target_mb;
3820 struct msgbuf *host_mb;
3824 return -TARGET_EINVAL;
3827 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3828 return -TARGET_EFAULT;
/* sizeof(long) covers the leading mtype field of struct msgbuf. */
3830 host_mb = g_try_malloc(msgsz + sizeof(long));
3832 ret = -TARGET_ENOMEM;
3835 ret = -TARGET_ENOSYS;
3837 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
/* Fallback path through the generic ipc() syscall. */
3840 if (ret == -TARGET_ENOSYS) {
3841 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
3842 msgflg, host_mb, msgtyp));
/* On success, 'ret' is the number of mtext bytes received; copy them
 * to the guest buffer just past the abi_ulong mtype field. */
3847 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3848 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3849 if (!target_mtext) {
3850 ret = -TARGET_EFAULT;
3853 memcpy(target_mb->mtext, host_mb->mtext, ret);
3854 unlock_user(target_mtext, target_mtext_addr, ret);
3857 target_mb->mtype = tswapal(host_mb->mtype);
3861 unlock_user_struct(target_mb, msgp, 1);
/* Copy a guest struct shmid_ds into host layout, field by field, with
 * byte-swapping handled by __get_user. Returns -TARGET_EFAULT if the
 * guest struct cannot be locked.
 */
3866 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3867 abi_ulong target_addr)
3869 struct target_shmid_ds *target_sd;
3871 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3872 return -TARGET_EFAULT;
/* shm_perm is converted by the shared ipc_perm helper. */
3873 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3874 return -TARGET_EFAULT;
3875 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3876 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3877 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3878 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3879 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3880 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3881 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3882 unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_shmid_ds: write a host struct shmid_ds back
 * to guest memory with byte-swapping via __put_user.
 */
3886 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3887 struct shmid_ds *host_sd)
3889 struct target_shmid_ds *target_sd;
3891 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3892 return -TARGET_EFAULT;
3893 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3894 return -TARGET_EFAULT;
3895 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3896 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3897 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3898 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3899 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3900 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3901 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3902 unlock_user_struct(target_sd, target_addr, 1);
/* Guest-layout shminfo (limits returned by shmctl(IPC_INFO)). */
3906 struct target_shminfo {
/* Write a host struct shminfo (system-wide SHM limits) to guest memory. */
3914 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3915 struct shminfo *host_shminfo)
3917 struct target_shminfo *target_shminfo;
3918 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3919 return -TARGET_EFAULT;
3920 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3921 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3922 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3923 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3924 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3925 unlock_user_struct(target_shminfo, target_addr, 1);
/* Guest-layout shm_info (usage statistics returned by shmctl(SHM_INFO)). */
3929 struct target_shm_info {
3934 abi_ulong swap_attempts;
3935 abi_ulong swap_successes;
/* Write a host struct shm_info (SHM usage stats) to guest memory. */
3938 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3939 struct shm_info *host_shm_info)
3941 struct target_shm_info *target_shm_info;
3942 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3943 return -TARGET_EFAULT;
3944 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3945 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3946 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3947 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3948 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3949 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3950 unlock_user_struct(target_shm_info, target_addr, 1);
/* Emulate shmctl(2): dispatch on cmd, converting the appropriate
 * structure (shmid_ds, shminfo, or shm_info) between guest and host
 * layouts around the host syscall. Case labels are elided in this view.
 */
3954 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3956 struct shmid_ds dsarg;
3957 struct shminfo shminfo;
3958 struct shm_info shm_info;
3959 abi_long ret = -TARGET_EINVAL;
/* IPC_STAT/IPC_SET: round-trip shmid_ds. */
3967 if (target_to_host_shmid_ds(&dsarg, buf))
3968 return -TARGET_EFAULT;
3969 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3970 if (host_to_target_shmid_ds(buf, &dsarg))
3971 return -TARGET_EFAULT;
/* IPC_INFO: kernel writes struct shminfo through the shmid_ds pointer. */
3974 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3975 if (host_to_target_shminfo(buf, &shminfo))
3976 return -TARGET_EFAULT;
/* SHM_INFO: kernel writes struct shm_info through the same pointer. */
3979 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3980 if (host_to_target_shm_info(buf, &shm_info))
3981 return -TARGET_EFAULT;
/* IPC_RMID and lock/unlock commands take no data argument. */
3986 ret = get_errno(shmctl(shmid, cmd, NULL));
3993 #ifndef TARGET_FORCE_SHMLBA
3994 /* For most architectures, SHMLBA is the same as the page size;
3995 * some architectures have larger values, in which case they should
3996 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3997 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3998 * and defining its own value for SHMLBA.
4000 * The kernel also permits SHMLBA to be set by the architecture to a
4001 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4002 * this means that addresses are rounded to the large size if
4003 * SHM_RND is set but addresses not aligned to that size are not rejected
4004 * as long as they are at least page-aligned. Since the only architecture
4005 * which uses this is ia64 this code doesn't provide for that oddity.
/* Default SHMLBA for targets that don't force their own: one page. */
4007 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4009 return TARGET_PAGE_SIZE;
/* Emulate shmat(2): attach a SysV shared memory segment into the guest
 * address space, honoring the target's SHMLBA alignment, then record
 * the mapping in shm_regions[] and mark the guest pages valid.
 * Returns the guest attach address, or a target errno on failure.
 */
4013 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4014 int shmid, abi_ulong shmaddr, int shmflg)
4018 struct shmid_ds shm_info;
4022 /* find out the length of the shared memory segment */
4023 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4024 if (is_error(ret)) {
4025 /* can't get length, bail out */
4029 shmlba = target_shmlba(cpu_env);
/* Enforce target SHMLBA alignment: round down with SHM_RND, else fail. */
4031 if (shmaddr & (shmlba - 1)) {
4032 if (shmflg & SHM_RND) {
4033 shmaddr &= ~(shmlba - 1);
4035 return -TARGET_EINVAL;
4038 if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4039 return -TARGET_EINVAL;
/* Caller-specified address: attach directly at the host equivalent. */
4045 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4047 abi_ulong mmap_start;
4049 /* In order to use the host shmat, we need to honor host SHMLBA. */
4050 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4052 if (mmap_start == -1) {
4054 host_raddr = (void *)-1;
/* SHM_REMAP lets us attach over the placeholder VMA just found. */
4056 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4059 if (host_raddr == (void *)-1) {
4061 return get_errno((long)host_raddr);
4063 raddr=h2g((unsigned long)host_raddr);
/* Mark the attached range readable (and writable unless SHM_RDONLY). */
4065 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4066 PAGE_VALID | PAGE_READ |
4067 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
/* Remember the region so do_shmdt can clear the page flags later. */
4069 for (i = 0; i < N_SHM_REGIONS; i++) {
4070 if (!shm_regions[i].in_use) {
4071 shm_regions[i].in_use = true;
4072 shm_regions[i].start = raddr;
4073 shm_regions[i].size = shm_info.shm_segsz;
/* Emulate shmdt(2): find the region recorded by do_shmat, clear its
 * guest page flags, and detach the host mapping.
 */
4083 static inline abi_long do_shmdt(abi_ulong shmaddr)
4090 for (i = 0; i < N_SHM_REGIONS; ++i) {
4091 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4092 shm_regions[i].in_use = false;
4093 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4097 rv = get_errno(shmdt(g2h(shmaddr)));
4104 #ifdef TARGET_NR_ipc
4105 /* ??? This only works with linear mappings. */
4106 /* do_ipc() must return target values and target errnos. */
/* Dispatcher for the multiplexed ipc(2) syscall used on some targets:
 * decodes the call/version fields and forwards to the individual
 * do_sem*/do_msg*/do_shm* helpers above. Case labels elided in view.
 */
4107 static abi_long do_ipc(CPUArchState *cpu_env,
4108 unsigned int call, abi_long first,
4109 abi_long second, abi_long third,
4110 abi_long ptr, abi_long fifth)
/* High 16 bits of 'call' carry the IPC interface version. */
4115 version = call >> 16;
4120 ret = do_semop(first, ptr, second);
4124 ret = get_errno(semget(first, second, third));
4127 case IPCOP_semctl: {
4128 /* The semun argument to semctl is passed by value, so dereference the
4131 get_user_ual(atptr, ptr);
4132 ret = do_semctl(first, second, third, atptr);
4137 ret = get_errno(msgget(first, second));
4141 ret = do_msgsnd(first, ptr, second, third);
4145 ret = do_msgctl(first, second, ptr);
/* Old-style (version 0) msgrcv packs msgp and msgtyp in a kludge
 * struct pointed to by ptr. */
4152 struct target_ipc_kludge {
4157 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4158 ret = -TARGET_EFAULT;
4162 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4164 unlock_user_struct(tmp, ptr, 0);
/* New-style msgrcv passes msgtyp directly in 'fifth'. */
4168 ret = do_msgrcv(first, ptr, second, fifth, third);
/* shmat: attach, then store the returned address through 'third'. */
4177 raddr = do_shmat(cpu_env, first, ptr, second);
4178 if (is_error(raddr))
4179 return get_errno(raddr);
4180 if (put_user_ual(raddr, third))
4181 return -TARGET_EFAULT;
4185 ret = -TARGET_EINVAL;
4190 ret = do_shmdt(ptr);
4194 /* IPC_* flag values are the same on all linux platforms */
4195 ret = get_errno(shmget(first, second, third));
4198 /* IPC_* and SHM_* command values are the same on all linux platforms */
4200 ret = do_shmctl(first, second, ptr);
4203 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4204 ret = -TARGET_ENOSYS;
4211 /* kernel structure types definitions */
/* First inclusion of syscall_types.h builds an enum of STRUCT_* tags;
 * the second builds the per-struct argtype descriptor arrays used by
 * the thunk conversion machinery. */
4213 #define STRUCT(name, ...) STRUCT_ ## name,
4214 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4216 #include "syscall_types.h"
4220 #undef STRUCT_SPECIAL
4222 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4223 #define STRUCT_SPECIAL(name)
4224 #include "syscall_types.h"
4226 #undef STRUCT_SPECIAL
/* One IOCTLEntry per translated ioctl; do_ioctl_fn is the signature of
 * the custom handlers used for ioctls the generic thunk can't convert. */
4228 typedef struct IOCTLEntry IOCTLEntry;
4230 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4231 int fd, int cmd, abi_long arg);
4235 unsigned int host_cmd;
4238 do_ioctl_fn *do_ioctl;
4239 const argtype arg_type[5];
/* Access-direction flags for IOCTLEntry.access. */
4242 #define IOC_R 0x0001
4243 #define IOC_W 0x0002
4244 #define IOC_RW (IOC_R | IOC_W)
/* Size of the fixed on-stack conversion buffer in do_ioctl(). */
4246 #define MAX_STRUCT_SIZE 4096
4248 #ifdef CONFIG_FIEMAP
4249 /* So fiemap access checks don't overflow on 32 bit systems.
4250 * This is very slightly smaller than the limit imposed by
4251 * the underlying kernel.
4253 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4254 / sizeof(struct fiemap_extent))
/* Custom handler for FS_IOC_FIEMAP: the argument is a struct fiemap
 * followed by a variable-length array of struct fiemap_extent, so the
 * generic thunk can't size it. Converts in, calls the host ioctl, and
 * converts the header plus any returned extents back out.
 */
4256 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4257 int fd, int cmd, abi_long arg)
4259 /* The parameter for this ioctl is a struct fiemap followed
4260 * by an array of struct fiemap_extent whose size is set
4261 * in fiemap->fm_extent_count. The array is filled in by the
4264 int target_size_in, target_size_out;
4266 const argtype *arg_type = ie->arg_type;
4267 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4270 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4274 assert(arg_type[0] == TYPE_PTR);
4275 assert(ie->access == IOC_RW);
4277 target_size_in = thunk_type_size(arg_type, 0);
4278 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4280 return -TARGET_EFAULT;
4282 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4283 unlock_user(argptr, arg, 0);
4284 fm = (struct fiemap *)buf_temp;
/* Bound extent count so the outbufsz computation below can't overflow. */
4285 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4286 return -TARGET_EINVAL;
4289 outbufsz = sizeof (*fm) +
4290 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4292 if (outbufsz > MAX_STRUCT_SIZE) {
4293 /* We can't fit all the extents into the fixed size buffer.
4294 * Allocate one that is large enough and use it instead.
4296 fm = g_try_malloc(outbufsz);
4298 return -TARGET_ENOMEM;
4300 memcpy(fm, buf_temp, sizeof(struct fiemap));
4303 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4304 if (!is_error(ret)) {
4305 target_size_out = target_size_in;
4306 /* An extent_count of 0 means we were only counting the extents
4307 * so there are no structs to copy
4309 if (fm->fm_extent_count != 0) {
4310 target_size_out += fm->fm_mapped_extents * extent_size;
4312 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4314 ret = -TARGET_EFAULT;
4316 /* Convert the struct fiemap */
4317 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4318 if (fm->fm_extent_count != 0) {
4319 p = argptr + target_size_in;
4320 /* ...and then all the struct fiemap_extents */
4321 for (i = 0; i < fm->fm_mapped_extents; i++) {
4322 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4327 unlock_user(argptr, arg, target_size_out);
/* Custom handler for SIOCGIFCONF: struct ifconf embeds a pointer to a
 * caller-supplied ifreq array, so the buffer and its length must be
 * translated separately from the header. A NULL ifc_buf is the "query
 * required size" form and is passed through with no buffer.
 */
4337 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4338 int fd, int cmd, abi_long arg)
4340 const argtype *arg_type = ie->arg_type;
4344 struct ifconf *host_ifconf;
4346 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4347 int target_ifreq_size;
4352 abi_long target_ifc_buf;
4356 assert(arg_type[0] == TYPE_PTR);
4357 assert(ie->access == IOC_RW);
4360 target_size = thunk_type_size(arg_type, 0);
4362 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4364 return -TARGET_EFAULT;
4365 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4366 unlock_user(argptr, arg, 0);
4368 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4369 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4370 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4372 if (target_ifc_buf != 0) {
/* Scale the guest buffer length (in target-ifreq units) to the host
 * ifreq size so the kernel sees a correctly sized buffer. */
4373 target_ifc_len = host_ifconf->ifc_len;
4374 nb_ifreq = target_ifc_len / target_ifreq_size;
4375 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4377 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4378 if (outbufsz > MAX_STRUCT_SIZE) {
4380 * We can't fit all the extents into the fixed size buffer.
4381 * Allocate one that is large enough and use it instead.
/* NOTE(review): raw malloc() here is inconsistent with the g_malloc
 * family used elsewhere in this file, and no free() is visible in
 * this view — possible leak; confirm against the full function. */
4383 host_ifconf = malloc(outbufsz);
4385 return -TARGET_ENOMEM;
4387 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4390 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4392 host_ifconf->ifc_len = host_ifc_len;
4394 host_ifc_buf = NULL;
4396 host_ifconf->ifc_buf = host_ifc_buf;
4398 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4399 if (!is_error(ret)) {
4400 /* convert host ifc_len to target ifc_len */
4402 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4403 target_ifc_len = nb_ifreq * target_ifreq_size;
4404 host_ifconf->ifc_len = target_ifc_len;
4406 /* restore target ifc_buf */
4408 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4410 /* copy struct ifconf to target user */
4412 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4414 return -TARGET_EFAULT;
4415 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4416 unlock_user(argptr, arg, target_size);
4418 if (target_ifc_buf != 0) {
4419 /* copy ifreq[] to target user */
4420 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4421 for (i = 0; i < nb_ifreq ; i++) {
4422 thunk_convert(argptr + i * target_ifreq_size,
4423 host_ifc_buf + i * sizeof(struct ifreq),
4424 ifreq_arg_type, THUNK_TARGET);
4426 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4437 #if defined(CONFIG_USBFS)
4438 #if HOST_LONG_BITS > 64
4439 #error USBDEVFS thunks do not support >64 bit hosts yet.
/* Per-URB bookkeeping: guest URB address, guest buffer address, the
 * locked host pointer for that buffer, and the host-layout URB that is
 * actually handed to the kernel. */
4442 uint64_t target_urb_adr;
4443 uint64_t target_buf_adr;
4444 char *target_buf_ptr;
4445 struct usbdevfs_urb host_urb;
/* Lazily-created hash table mapping guest URB addresses to live_urb
 * records; keys are the 64-bit target_urb_adr field. */
4448 static GHashTable *usbdevfs_urb_hashtable(void)
4450 static GHashTable *urb_hashtable;
4452 if (!urb_hashtable) {
4453 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4455 return urb_hashtable;
/* Register an in-flight URB (keyed by its leading target_urb_adr). */
4458 static void urb_hashtable_insert(struct live_urb *urb)
4460 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4461 g_hash_table_insert(urb_hashtable, urb, urb);
/* Look up the live_urb for a guest URB address; NULL if unknown. */
4464 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4466 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4467 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
/* Drop a completed/discarded URB from the table. */
4470 static void urb_hashtable_remove(struct live_urb *urb)
4472 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4473 g_hash_table_remove(urb_hashtable, urb);
/* Custom handler for USBDEVFS_REAPURB(NDELAY): the kernel returns a
 * host URB pointer; recover the owning live_urb via the known offset of
 * host_urb, write the updated URB and its data back to the guest, and
 * return the original guest URB address through 'arg'.
 */
4477 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4478 int fd, int cmd, abi_long arg)
4480 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4481 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4482 struct live_urb *lurb;
4486 uintptr_t target_urb_adr;
4489 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4491 memset(buf_temp, 0, sizeof(uint64_t));
4492 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4493 if (is_error(ret)) {
/* Kernel wrote a pointer to our embedded host_urb; back up to the
 * containing live_urb record. */
4497 memcpy(&hurb, buf_temp, sizeof(uint64_t));
4498 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4499 if (!lurb->target_urb_adr) {
4500 return -TARGET_EFAULT;
4502 urb_hashtable_remove(lurb);
/* Release the data buffer lock taken at submit time, copying any
 * received data back to guest memory. */
4503 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4504 lurb->host_urb.buffer_length);
4505 lurb->target_buf_ptr = NULL;
4507 /* restore the guest buffer pointer */
4508 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4510 /* update the guest urb struct */
4511 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4514 return -TARGET_EFAULT;
4516 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4517 unlock_user(argptr, lurb->target_urb_adr, target_size);
4519 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4520 /* write back the urb handle */
4521 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4524 return -TARGET_EFAULT;
4527 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4528 target_urb_adr = lurb->target_urb_adr;
4529 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4530 unlock_user(argptr, arg, target_size);
/* Custom handler for USBDEVFS_DISCARDURB: translate the guest URB
 * address to its live host URB and forward the discard to the kernel.
 * Cleanup of the live_urb happens when the URB is later reaped.
 */
4537 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4538 uint8_t *buf_temp __attribute__((unused)),
4539 int fd, int cmd, abi_long arg)
4541 struct live_urb *lurb;
4543 /* map target address back to host URB with metadata. */
4544 lurb = urb_hashtable_lookup(arg);
4546 return -TARGET_EFAULT;
4548 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
/* Custom handler for USBDEVFS_SUBMITURB: build a host-layout URB with
 * its own heap allocation (the kernel identifies URBs by pointer), lock
 * the guest data buffer for the transfer's direction, submit, and track
 * the URB so reap/discard can find it again.
 */
4552 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4553 int fd, int cmd, abi_long arg)
4555 const argtype *arg_type = ie->arg_type;
4560 struct live_urb *lurb;
4563 * each submitted URB needs to map to a unique ID for the
4564 * kernel, and that unique ID needs to be a pointer to
4565 * host memory. hence, we need to malloc for each URB.
4566 * isochronous transfers have a variable length struct.
4569 target_size = thunk_type_size(arg_type, THUNK_TARGET);
4571 /* construct host copy of urb and metadata */
4572 lurb = g_try_malloc0(sizeof(struct live_urb));
4574 return -TARGET_ENOMEM;
4577 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4580 return -TARGET_EFAULT;
4582 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4583 unlock_user(argptr, arg, 0);
/* Remember guest URB address and guest buffer address for reap time. */
4585 lurb->target_urb_adr = arg;
4586 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4588 /* buffer space used depends on endpoint type so lock the entire buffer */
4589 /* control type urbs should check the buffer contents for true direction */
4590 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4591 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4592 lurb->host_urb.buffer_length, 1);
4593 if (lurb->target_buf_ptr == NULL) {
4595 return -TARGET_EFAULT;
4598 /* update buffer pointer in host copy */
4599 lurb->host_urb.buffer = lurb->target_buf_ptr;
4601 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4602 if (is_error(ret)) {
4603 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4606 urb_hashtable_insert(lurb);
4611 #endif /* CONFIG_USBFS */
/* Custom handler for device-mapper ioctls (DM_*): struct dm_ioctl
 * carries a variable-size payload at data_start whose layout depends on
 * the command, so each command's payload is converted by hand in both
 * directions. Switch case labels/braces are partially elided in this view.
 */
4613 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4614 int cmd, abi_long arg)
4617 struct dm_ioctl *host_dm;
4618 abi_long guest_data;
4619 uint32_t guest_data_size;
4621 const argtype *arg_type = ie->arg_type;
4623 void *big_buf = NULL;
/* Convert the fixed dm_ioctl header from guest to host layout. */
4627 target_size = thunk_type_size(arg_type, 0);
4628 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4630 ret = -TARGET_EFAULT;
4633 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4634 unlock_user(argptr, arg, 0);
4636 /* buf_temp is too small, so fetch things into a bigger buffer */
4637 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4638 memcpy(big_buf, buf_temp, target_size);
/* guest_data points at the command payload in guest memory. */
4642 guest_data = arg + host_dm->data_start;
4643 if ((guest_data - arg) < 0) {
4644 ret = -TARGET_EINVAL;
4647 guest_data_size = host_dm->data_size - host_dm->data_start;
4648 host_data = (char*)host_dm + host_dm->data_start;
4650 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4652 ret = -TARGET_EFAULT;
/* --- inbound payload conversion, per command --- */
4656 switch (ie->host_cmd) {
4658 case DM_LIST_DEVICES:
4661 case DM_DEV_SUSPEND:
4664 case DM_TABLE_STATUS:
4665 case DM_TABLE_CLEAR:
4667 case DM_LIST_VERSIONS:
4671 case DM_DEV_SET_GEOMETRY:
4672 /* data contains only strings */
4673 memcpy(host_data, argptr, guest_data_size);
/* DM_DEV_RENAME/REMOVE-style: leading u64 needs byte-swapping. */
4676 memcpy(host_data, argptr, guest_data_size);
4677 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
/* DM_TABLE_LOAD: payload is target_count dm_target_spec records, each
 * followed by its parameter string; convert each and rebuild 'next'. */
4681 void *gspec = argptr;
4682 void *cur_data = host_data;
4683 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4684 int spec_size = thunk_type_size(arg_type, 0);
4687 for (i = 0; i < host_dm->target_count; i++) {
4688 struct dm_target_spec *spec = cur_data;
4692 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4693 slen = strlen((char*)gspec + spec_size) + 1;
4695 spec->next = sizeof(*spec) + slen;
4696 strcpy((char*)&spec[1], gspec + spec_size);
4698 cur_data += spec->next;
4703 ret = -TARGET_EINVAL;
4704 unlock_user(argptr, guest_data, 0);
4707 unlock_user(argptr, guest_data, 0);
4709 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
/* --- outbound payload conversion, per command --- */
4710 if (!is_error(ret)) {
4711 guest_data = arg + host_dm->data_start;
4712 guest_data_size = host_dm->data_size - host_dm->data_start;
4713 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4714 switch (ie->host_cmd) {
4719 case DM_DEV_SUSPEND:
4722 case DM_TABLE_CLEAR:
4724 case DM_DEV_SET_GEOMETRY:
4725 /* no return data */
4727 case DM_LIST_DEVICES:
/* Walk the variable-length dm_name_list chain, recomputing 'next'
 * offsets for the target layout and bailing out with
 * DM_BUFFER_FULL_FLAG if the guest buffer is too small. */
4729 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4730 uint32_t remaining_data = guest_data_size;
4731 void *cur_data = argptr;
4732 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4733 int nl_size = 12; /* can't use thunk_size due to alignment */
4736 uint32_t next = nl->next;
4738 nl->next = nl_size + (strlen(nl->name) + 1);
4740 if (remaining_data < nl->next) {
4741 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4744 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4745 strcpy(cur_data + nl_size, nl->name);
4746 cur_data += nl->next;
4747 remaining_data -= nl->next;
4751 nl = (void*)nl + next;
4756 case DM_TABLE_STATUS:
/* Convert returned dm_target_spec records plus their status strings. */
4758 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4759 void *cur_data = argptr;
4760 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4761 int spec_size = thunk_type_size(arg_type, 0);
4764 for (i = 0; i < host_dm->target_count; i++) {
4765 uint32_t next = spec->next;
4766 int slen = strlen((char*)&spec[1]) + 1;
4767 spec->next = (cur_data - argptr) + spec_size + slen;
4768 if (guest_data_size < spec->next) {
4769 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4772 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4773 strcpy(cur_data + spec_size, (char*)&spec[1]);
4774 cur_data = argptr + spec->next;
4775 spec = (void*)host_dm + host_dm->data_start + next;
/* DM_TABLE_DEPS-style payload: u32 count at offset 0, u64 dev array
 * at offset 8; swap each element individually. */
4781 void *hdata = (void*)host_dm + host_dm->data_start;
4782 int count = *(uint32_t*)hdata;
4783 uint64_t *hdev = hdata + 8;
4784 uint64_t *gdev = argptr + 8;
4787 *(uint32_t*)argptr = tswap32(count);
4788 for (i = 0; i < count; i++) {
4789 *gdev = tswap64(*hdev);
4795 case DM_LIST_VERSIONS:
/* Same walk-and-repack pattern as DM_LIST_DEVICES, but over
 * dm_target_versions records. */
4797 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4798 uint32_t remaining_data = guest_data_size;
4799 void *cur_data = argptr;
4800 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4801 int vers_size = thunk_type_size(arg_type, 0);
4804 uint32_t next = vers->next;
4806 vers->next = vers_size + (strlen(vers->name) + 1);
4808 if (remaining_data < vers->next) {
4809 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4812 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4813 strcpy(cur_data + vers_size, vers->name);
4814 cur_data += vers->next;
4815 remaining_data -= vers->next;
4819 vers = (void*)vers + next;
4824 unlock_user(argptr, guest_data, 0);
4825 ret = -TARGET_EINVAL;
4828 unlock_user(argptr, guest_data, guest_data_size);
/* Finally write the (possibly flag-updated) dm_ioctl header back. */
4830 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4832 ret = -TARGET_EFAULT;
4835 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4836 unlock_user(argptr, arg, target_size);
/* Custom handler for BLKPG: struct blkpg_ioctl_arg embeds a pointer to
 * a struct blkpg_partition payload, which must be fetched and converted
 * separately, then the embedded pointer redirected to the host copy.
 */
4843 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4844 int cmd, abi_long arg)
4848 const argtype *arg_type = ie->arg_type;
4849 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4852 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4853 struct blkpg_partition host_part;
4855 /* Read and convert blkpg */
4857 target_size = thunk_type_size(arg_type, 0);
4858 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4860 ret = -TARGET_EFAULT;
4863 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4864 unlock_user(argptr, arg, 0);
4866 switch (host_blkpg->op) {
4867 case BLKPG_ADD_PARTITION:
4868 case BLKPG_DEL_PARTITION:
4869 /* payload is struct blkpg_partition */
4872 /* Unknown opcode */
4873 ret = -TARGET_EINVAL;
4877 /* Read and convert blkpg->data */
4878 arg = (abi_long)(uintptr_t)host_blkpg->data;
4879 target_size = thunk_type_size(part_arg_type, 0);
4880 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4882 ret = -TARGET_EFAULT;
4885 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4886 unlock_user(argptr, arg, 0);
4888 /* Swizzle the data pointer to our local copy and call! */
4889 host_blkpg->data = &host_part;
4890 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
/* Custom handler for routing-table ioctls (SIOCADDRT/SIOCDELRT): the
 * struct rtentry contains an rt_dev string pointer that the generic
 * thunk cannot follow, so the struct is converted field by field and
 * rt_dev is locked as a host string for the duration of the call.
 */
4896 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4897 int fd, int cmd, abi_long arg)
4899 const argtype *arg_type = ie->arg_type;
4900 const StructEntry *se;
4901 const argtype *field_types;
4902 const int *dst_offsets, *src_offsets;
4905 abi_ulong *target_rt_dev_ptr = NULL;
4906 unsigned long *host_rt_dev_ptr = NULL;
4910 assert(ie->access == IOC_W);
4911 assert(*arg_type == TYPE_PTR);
4913 assert(*arg_type == TYPE_STRUCT);
4914 target_size = thunk_type_size(arg_type, 0);
4915 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4917 return -TARGET_EFAULT;
4920 assert(*arg_type == (int)STRUCT_rtentry);
4921 se = struct_entries + *arg_type++;
4922 assert(se->convert[0] == NULL);
4923 /* convert struct here to be able to catch rt_dev string */
4924 field_types = se->field_types;
4925 dst_offsets = se->field_offsets[THUNK_HOST];
4926 src_offsets = se->field_offsets[THUNK_TARGET];
4927 for (i = 0; i < se->nb_fields; i++) {
/* The rt_dev field gets special treatment: lock the guest string
 * into host memory instead of thunk-converting the pointer. */
4928 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4929 assert(*field_types == TYPE_PTRVOID);
4930 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4931 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4932 if (*target_rt_dev_ptr != 0) {
4933 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4934 tswapal(*target_rt_dev_ptr));
4935 if (!*host_rt_dev_ptr) {
4936 unlock_user(argptr, arg, 0);
4937 return -TARGET_EFAULT;
4940 *host_rt_dev_ptr = 0;
/* All other fields go through the generic field converter. */
4945 field_types = thunk_convert(buf_temp + dst_offsets[i],
4946 argptr + src_offsets[i],
4947 field_types, THUNK_HOST);
4949 unlock_user(argptr, arg, 0);
4951 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
/* Release the rt_dev string lock taken above, if any. */
4953 assert(host_rt_dev_ptr != NULL);
4954 assert(target_rt_dev_ptr != NULL);
4955 if (*host_rt_dev_ptr != 0) {
4956 unlock_user((void *)*host_rt_dev_ptr,
4957 *target_rt_dev_ptr, 0);
/* Custom handler for KDSIGACCEPT: the argument is a signal number, not
 * a pointer, so just translate it to the host numbering. */
4962 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4963 int fd, int cmd, abi_long arg)
4965 int sig = target_to_host_signal(arg);
4966 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
/* Custom handler for SIOCGSTAMP: fetch the last-packet timestamp from
 * the host, then write it back in either the old (timeval) or new
 * (64-bit timeval) guest layout depending on which cmd was used. */
4969 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
4970 int fd, int cmd, abi_long arg)
4975 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
4976 if (is_error(ret)) {
4980 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
4981 if (copy_to_user_timeval(arg, &tv)) {
4982 return -TARGET_EFAULT;
4985 if (copy_to_user_timeval64(arg, &tv)) {
4986 return -TARGET_EFAULT;
/* Nanosecond-resolution variant of the handler above: SIOCGSTAMPNS
 * returns a timespec, written back in old or 64-bit guest layout. */
4993 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
4994 int fd, int cmd, abi_long arg)
4999 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5000 if (is_error(ret)) {
5004 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5005 if (host_to_target_timespec(arg, &ts)) {
5006 return -TARGET_EFAULT;
5009 if (host_to_target_timespec64(arg, &ts)) {
5010 return -TARGET_EFAULT;
/* Custom handler for TIOCGPTPEER: the argument is an open(2)-style
 * flags word, so translate it through the fcntl flags table. */
5018 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5019 int fd, int cmd, abi_long arg)
5021 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5022 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
/* Table of all translated ioctls, populated from ioctls.h via the three
 * macros below: plain thunk-converted entries, entries with a custom
 * do_ioctl_fn handler, and entries deliberately ignored (host_cmd 0). */
5026 static IOCTLEntry ioctl_entries[] = {
5027 #define IOCTL(cmd, access, ...) \
5028 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5029 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5030 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5031 #define IOCTL_IGNORE(cmd) \
5032 { TARGET_ ## cmd, 0, #cmd },
5037 /* ??? Implement proper locking for ioctls. */
5038 /* do_ioctl() Must return target values and target errnos. */
/* Generic ioctl dispatcher: look up the target cmd in ioctl_entries,
 * delegate to a custom handler if one is registered, otherwise convert
 * the argument per arg_type/access direction via the thunk machinery.
 */
5039 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5041 const IOCTLEntry *ie;
5042 const argtype *arg_type;
5044 uint8_t buf_temp[MAX_STRUCT_SIZE];
/* Table is terminated by a zero target_cmd sentinel. */
5050 if (ie->target_cmd == 0) {
5051 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5052 return -TARGET_ENOSYS;
5054 if (ie->target_cmd == cmd)
5058 arg_type = ie->arg_type;
/* Registered custom handler takes over completely. */
5060 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5061 } else if (!ie->host_cmd) {
5062 /* Some architectures define BSD ioctls in their headers
5063 that are not implemented in Linux. */
5064 return -TARGET_ENOSYS;
5067 switch(arg_type[0]) {
/* TYPE_NULL: no argument at all. */
5070 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
/* Integer argument: pass through unchanged. */
5074 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
/* Pointer-to-struct argument: convert per the access direction. */
5078 target_size = thunk_type_size(arg_type, 0);
5079 switch(ie->access) {
/* Read-only (kernel writes): call first, then copy result out. */
5081 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5082 if (!is_error(ret)) {
5083 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5085 return -TARGET_EFAULT;
5086 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5087 unlock_user(argptr, arg, target_size);
/* Write-only (kernel reads): copy in, then call. */
5091 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5093 return -TARGET_EFAULT;
5094 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5095 unlock_user(argptr, arg, 0);
5096 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
/* Read-write: copy in, call, copy the updated struct back out. */
5100 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5102 return -TARGET_EFAULT;
5103 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5104 unlock_user(argptr, arg, 0);
5105 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5106 if (!is_error(ret)) {
5107 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5109 return -TARGET_EFAULT;
5110 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5111 unlock_user(argptr, arg, target_size);
5117 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5118 (long)cmd, arg_type[0]);
5119 ret = -TARGET_ENOSYS;
/* termios c_iflag translation table: each row maps a (target mask,
 * target bits) pair to the corresponding (host mask, host bits). */
5125 static const bitmask_transtbl iflag_tbl[] = {
5126 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5127 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5128 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5129 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5130 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5131 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5132 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5133 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5134 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5135 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5136 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5137 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5138 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5139 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
/* termios c_oflag translation table; multi-bit delay fields (NLDLY,
 * CRDLY, TABDLY, ...) get one row per possible value under the same mask. */
5143 static const bitmask_transtbl oflag_tbl[] = {
5144 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5145 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5146 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5147 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5148 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5149 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5150 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5151 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5152 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5153 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5154 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5155 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5156 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5157 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5158 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5159 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5160 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5161 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5162 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5163 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5164 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5165 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5166 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5167 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/*
 * termios c_cflag (control-mode) translation table.  CBAUD (baud rate)
 * and CSIZE (character size) are multi-bit fields: one mask, several
 * value entries.  Remaining entries are single-bit flags.
 * NOTE(review): terminating sentinel entry elided from this excerpt.
 */
5171 static const bitmask_transtbl cflag_tbl[] = {
5172 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5173 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5174 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5175 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5176 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5177 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5178 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5179 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5180 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5181 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5182 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5183 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5184 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5185 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5186 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5187 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5188 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5189 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5190 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5191 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5192 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5193 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5194 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5195 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5196 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5197 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5198 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5199 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5200 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5201 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5202 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/*
 * termios c_lflag (local-mode) translation table; all single-bit flags.
 * NOTE(review): terminating sentinel entry elided from this excerpt.
 */
5206 static const bitmask_transtbl lflag_tbl[] = {
5207 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5208 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5209 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5210 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5211 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5212 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5213 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5214 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5215 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5216 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5217 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5218 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5219 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5220 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5221 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/*
 * Convert a guest (target) struct termios to the host layout: the four
 * flag words are byte-swapped and run through the bitmask tables above,
 * and the control characters are copied slot by slot (target and host
 * use different V* index values).
 * NOTE(review): this excerpt elides interior lines -- the left-hand
 * sides of the four flag assignments (host->c_iflag = ... etc.) and the
 * braces are missing here, not in the real file.
 */
5225 static void target_to_host_termios (void *dst, const void *src)
5227 struct host_termios *host = dst;
5228 const struct target_termios *target = src;
5231 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5233 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5235 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5237 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5238 host->c_line = target->c_line;
/* clear first: host c_cc may have slots the target layout never sets */
5240 memset(host->c_cc, 0, sizeof(host->c_cc));
5241 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5242 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5243 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5244 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5245 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5246 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5247 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5248 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5249 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5250 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5251 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5252 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5253 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5254 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5255 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5256 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5257 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/*
 * Inverse of target_to_host_termios(): convert a host struct termios to
 * the guest layout, translating flag bits through the same tables in the
 * host-to-target direction and remapping control-character slots.
 * NOTE(review): excerpt elides the assignment left-hand sides
 * (target->c_iflag = ... etc.) and braces; code kept byte-identical.
 */
5260 static void host_to_target_termios (void *dst, const void *src)
5262 struct target_termios *target = dst;
5263 const struct host_termios *host = src;
5266 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5268 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5270 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5272 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5273 target->c_line = host->c_line;
/* zero-fill so target slots with no host equivalent are deterministic */
5275 memset(target->c_cc, 0, sizeof(target->c_cc));
5276 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5277 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5278 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5279 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5280 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5281 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5282 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5283 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5284 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5285 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5286 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5287 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5288 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5289 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5290 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5291 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5292 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/*
 * Thunk descriptor registering the two termios converters above so the
 * generic ioctl argument-conversion machinery can marshal struct termios.
 * Index order: [0] = host->target, [1] = target->host.
 * NOTE(review): closing "};" elided from this excerpt.
 */
5295 static const StructEntry struct_termios_def = {
5296 .convert = { host_to_target_termios, target_to_host_termios },
5297 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5298 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/*
 * Translation table for mmap(2) flags between target and host values.
 * TARGET_MAP_STACK maps to 0/0: recognized from the guest but
 * deliberately not forwarded to the host (see comment below).
 * NOTE(review): terminating sentinel entry elided from this excerpt.
 */
5301 static bitmask_transtbl mmap_flags_tbl[] = {
5302 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5303 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5304 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5305 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5306 MAP_ANONYMOUS, MAP_ANONYMOUS },
5307 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5308 MAP_GROWSDOWN, MAP_GROWSDOWN },
5309 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5310 MAP_DENYWRITE, MAP_DENYWRITE },
5311 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5312 MAP_EXECUTABLE, MAP_EXECUTABLE },
5313 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5314 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5315 MAP_NORESERVE, MAP_NORESERVE },
5316 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5317 /* MAP_STACK had been ignored by the kernel for quite some time.
5318 Recognize it for the target insofar as we do not want to pass
5319 it through to the host. */
5320 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5324 #if defined(TARGET_I386)
5326 /* NOTE: there is really one LDT for all the threads */
5327 static uint8_t *ldt_table;
/*
 * modify_ldt(READ) helper: copy the emulated LDT (ldt_table) into the
 * guest buffer at 'ptr', clamped to the table size.
 * NOTE(review): excerpt elides braces, local declarations and the
 * size-clamp/return statements between the visible lines.
 */
5329 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5336 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5337 if (size > bytecount)
5339 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5341 return -TARGET_EFAULT;
5342 /* ??? Should this be byteswapped? */
5343 memcpy(p, ldt_table, size);
5344 unlock_user(p, ptr, size);
5348 /* XXX: add locking support */
/*
 * modify_ldt(WRITE) helper: validate a target_modify_ldt_ldt_s request
 * from guest memory, lazily allocate the emulated LDT via target_mmap(),
 * encode the descriptor into the two 32-bit words of an x86 segment
 * descriptor (same bit layout as the Linux kernel's fill_ldt), and store
 * it into ldt_table.  'oldmode' selects the legacy modify_ldt semantics.
 * NOTE(review): this excerpt elides braces, several flag-check lines and
 * parts of the entry_2 bit assembly; code kept byte-identical.
 */
5349 static abi_long write_ldt(CPUX86State *env,
5350 abi_ulong ptr, unsigned long bytecount, int oldmode)
5352 struct target_modify_ldt_ldt_s ldt_info;
5353 struct target_modify_ldt_ldt_s *target_ldt_info;
5354 int seg_32bit, contents, read_exec_only, limit_in_pages;
5355 int seg_not_present, useable, lm;
5356 uint32_t *lp, entry_1, entry_2;
/* the guest must pass exactly one descriptor record */
5358 if (bytecount != sizeof(ldt_info))
5359 return -TARGET_EINVAL;
5360 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5361 return -TARGET_EFAULT;
5362 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5363 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5364 ldt_info.limit = tswap32(target_ldt_info->limit);
5365 ldt_info.flags = tswap32(target_ldt_info->flags);
5366 unlock_user_struct(target_ldt_info, ptr, 0);
5368 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5369 return -TARGET_EINVAL;
/* unpack the packed 'flags' word (same encoding as the kernel ABI) */
5370 seg_32bit = ldt_info.flags & 1;
5371 contents = (ldt_info.flags >> 1) & 3;
5372 read_exec_only = (ldt_info.flags >> 3) & 1;
5373 limit_in_pages = (ldt_info.flags >> 4) & 1;
5374 seg_not_present = (ldt_info.flags >> 5) & 1;
5375 useable = (ldt_info.flags >> 6) & 1;
5379 lm = (ldt_info.flags >> 7) & 1;
5381 if (contents == 3) {
5383 return -TARGET_EINVAL;
5384 if (seg_not_present == 0)
5385 return -TARGET_EINVAL;
5387 /* allocate the LDT */
5389 env->ldt.base = target_mmap(0,
5390 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5391 PROT_READ|PROT_WRITE,
5392 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5393 if (env->ldt.base == -1)
5394 return -TARGET_ENOMEM;
5395 memset(g2h(env->ldt.base), 0,
5396 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5397 env->ldt.limit = 0xffff;
5398 ldt_table = g2h(env->ldt.base);
5401 /* NOTE: same code as Linux kernel */
5402 /* Allow LDTs to be cleared by the user. */
5403 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5406 read_exec_only == 1 &&
5408 limit_in_pages == 0 &&
5409 seg_not_present == 1 &&
/* assemble the two descriptor words (base/limit split per x86 format) */
5417 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5418 (ldt_info.limit & 0x0ffff);
5419 entry_2 = (ldt_info.base_addr & 0xff000000) |
5420 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5421 (ldt_info.limit & 0xf0000) |
5422 ((read_exec_only ^ 1) << 9) |
5424 ((seg_not_present ^ 1) << 15) |
5426 (limit_in_pages << 23) |
5430 entry_2 |= (useable << 20);
5432 /* Install the new entry ... */
5434 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5435 lp[0] = tswap32(entry_1);
5436 lp[1] = tswap32(entry_2);
5440 /* specific and weird i386 syscalls */
/*
 * modify_ldt(2) emulation: dispatch to read_ldt() or write_ldt()
 * (oldmode=1 vs 0) according to 'func'; unknown values yield ENOSYS.
 * NOTE(review): the switch statement and its case labels are elided in
 * this excerpt -- only the per-case bodies are visible.
 */
5441 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5442 unsigned long bytecount)
5448 ret = read_ldt(ptr, bytecount);
5451 ret = write_ldt(env, ptr, bytecount, 1);
5454 ret = write_ldt(env, ptr, bytecount, 0);
5457 ret = -TARGET_ENOSYS;
5463 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * set_thread_area(2) emulation for 32-bit x86 guests: write a TLS
 * descriptor into the emulated GDT.  entry_number == -1 asks us to pick
 * a free slot in the GDT TLS range and report it back to the guest.
 * Descriptor encoding mirrors write_ldt() above.
 * NOTE(review): braces, some validation lines and part of the entry_2
 * bit assembly are elided in this excerpt; code kept byte-identical.
 */
5464 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5466 uint64_t *gdt_table = g2h(env->gdt.base);
5467 struct target_modify_ldt_ldt_s ldt_info;
5468 struct target_modify_ldt_ldt_s *target_ldt_info;
5469 int seg_32bit, contents, read_exec_only, limit_in_pages;
5470 int seg_not_present, useable, lm;
5471 uint32_t *lp, entry_1, entry_2;
/* VERIFY_WRITE: we may write the chosen entry_number back to the guest */
5474 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5475 if (!target_ldt_info)
5476 return -TARGET_EFAULT;
5477 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5478 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5479 ldt_info.limit = tswap32(target_ldt_info->limit);
5480 ldt_info.flags = tswap32(target_ldt_info->flags);
5481 if (ldt_info.entry_number == -1) {
/* find the first empty slot in the GDT TLS range */
5482 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5483 if (gdt_table[i] == 0) {
5484 ldt_info.entry_number = i;
5485 target_ldt_info->entry_number = tswap32(i);
5490 unlock_user_struct(target_ldt_info, ptr, 1);
5492 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5493 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5494 return -TARGET_EINVAL;
5495 seg_32bit = ldt_info.flags & 1;
5496 contents = (ldt_info.flags >> 1) & 3;
5497 read_exec_only = (ldt_info.flags >> 3) & 1;
5498 limit_in_pages = (ldt_info.flags >> 4) & 1;
5499 seg_not_present = (ldt_info.flags >> 5) & 1;
5500 useable = (ldt_info.flags >> 6) & 1;
5504 lm = (ldt_info.flags >> 7) & 1;
5507 if (contents == 3) {
5508 if (seg_not_present == 0)
5509 return -TARGET_EINVAL;
5512 /* NOTE: same code as Linux kernel */
5513 /* Allow LDTs to be cleared by the user. */
5514 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5515 if ((contents == 0 &&
5516 read_exec_only == 1 &&
5518 limit_in_pages == 0 &&
5519 seg_not_present == 1 &&
5527 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5528 (ldt_info.limit & 0x0ffff);
5529 entry_2 = (ldt_info.base_addr & 0xff000000) |
5530 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5531 (ldt_info.limit & 0xf0000) |
5532 ((read_exec_only ^ 1) << 9) |
5534 ((seg_not_present ^ 1) << 15) |
5536 (limit_in_pages << 23) |
5541 /* Install the new entry ... */
5543 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5544 lp[0] = tswap32(entry_1);
5545 lp[1] = tswap32(entry_2);
/*
 * get_thread_area(2) emulation: read the descriptor at the requested GDT
 * TLS slot, decode the two descriptor words back into the base/limit/
 * flags fields of a target_modify_ldt_ldt_s, and copy it to the guest.
 * This is the exact inverse of the encoding in do_set_thread_area().
 * NOTE(review): braces are elided in this excerpt; code kept identical.
 */
5549 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5551 struct target_modify_ldt_ldt_s *target_ldt_info;
5552 uint64_t *gdt_table = g2h(env->gdt.base);
5553 uint32_t base_addr, limit, flags;
5554 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5555 int seg_not_present, useable, lm;
5556 uint32_t *lp, entry_1, entry_2;
5558 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5559 if (!target_ldt_info)
5560 return -TARGET_EFAULT;
5561 idx = tswap32(target_ldt_info->entry_number);
5562 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5563 idx > TARGET_GDT_ENTRY_TLS_MAX) {
5564 unlock_user_struct(target_ldt_info, ptr, 1);
5565 return -TARGET_EINVAL;
5567 lp = (uint32_t *)(gdt_table + idx);
5568 entry_1 = tswap32(lp[0]);
5569 entry_2 = tswap32(lp[1]);
/* decode descriptor bits (note the ^1 inversions match the encoder) */
5571 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5572 contents = (entry_2 >> 10) & 3;
5573 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5574 seg_32bit = (entry_2 >> 22) & 1;
5575 limit_in_pages = (entry_2 >> 23) & 1;
5576 useable = (entry_2 >> 20) & 1;
5580 lm = (entry_2 >> 21) & 1;
5582 flags = (seg_32bit << 0) | (contents << 1) |
5583 (read_exec_only << 3) | (limit_in_pages << 4) |
5584 (seg_not_present << 5) | (useable << 6) | (lm << 7);
5585 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
5586 base_addr = (entry_1 >> 16) |
5587 (entry_2 & 0xff000000) |
5588 ((entry_2 & 0xff) << 16);
5589 target_ldt_info->base_addr = tswapal(base_addr);
5590 target_ldt_info->limit = tswap32(limit);
5591 target_ldt_info->flags = tswap32(flags);
5592 unlock_user_struct(target_ldt_info, ptr, 1);
5597 #ifndef TARGET_ABI32
/*
 * arch_prctl(2) emulation for 64-bit x86 guests: SET_GS/SET_FS loads a
 * null selector and sets the segment base directly; GET_GS/GET_FS reads
 * the base back to guest memory.  Unknown codes return EINVAL.
 * NOTE(review): the switch header, 'idx' selection else-branches and
 * braces are elided in this excerpt.
 */
5598 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5605 case TARGET_ARCH_SET_GS:
5606 case TARGET_ARCH_SET_FS:
5607 if (code == TARGET_ARCH_SET_GS)
5611 cpu_x86_load_seg(env, idx, 0);
5612 env->segs[idx].base = addr;
5614 case TARGET_ARCH_GET_GS:
5615 case TARGET_ARCH_GET_FS:
5616 if (code == TARGET_ARCH_GET_GS)
5620 val = env->segs[idx].base;
5621 if (put_user(val, addr, abi_ulong))
5622 ret = -TARGET_EFAULT;
5625 ret = -TARGET_EINVAL;
5634 #define NEW_STACK_SIZE 0x40000
/* serializes thread creation so TLS setup appears atomic to the child */
5637 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * Per-clone handshake data passed from do_fork() to the new thread.
 * NOTE(review): the struct header/typedef line and several members
 * (thread, tid, sigmask, env) are elided in this excerpt.
 */
5640 pthread_mutex_t mutex;
5641 pthread_cond_t cond;
5644 abi_ulong child_tidptr;
5645 abi_ulong parent_tidptr;
/*
 * Entry point of a CLONE_VM child thread: register with RCU/TCG, publish
 * the tid to the requested guest addresses, unblock signals, signal
 * readiness to the parent via info->cond, then wait on clone_lock until
 * the parent finishes TLS setup before entering the guest CPU loop.
 */
5649 static void *clone_func(void *arg)
5651 new_thread_info *info = arg;
5656 rcu_register_thread();
5657 tcg_register_thread();
5661 ts = (TaskState *)cpu->opaque;
5662 info->tid = sys_gettid();
5664 if (info->child_tidptr)
5665 put_user_u32(info->tid, info->child_tidptr);
5666 if (info->parent_tidptr)
5667 put_user_u32(info->tid, info->parent_tidptr);
5668 qemu_guest_random_seed_thread_part2(cpu->random_seed);
5669 /* Enable signals. */
5670 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5671 /* Signal to the parent that we're ready. */
5672 pthread_mutex_lock(&info->mutex);
5673 pthread_cond_broadcast(&info->cond);
5674 pthread_mutex_unlock(&info->mutex);
5675 /* Wait until the parent has finished initializing the tls state. */
5676 pthread_mutex_lock(&clone_lock);
5677 pthread_mutex_unlock(&clone_lock);
5683 /* do_fork() Must return host values and target errnos (unlike most
5684 do_*() functions). */
/*
 * clone(2)/fork(2)/vfork(2) emulation.  CLONE_VM requests become host
 * pthreads sharing this process (with a cond-var handshake against
 * clone_func); everything else is emulated with a host fork().  vfork is
 * deliberately downgraded to fork by clearing CLONE_VFORK|CLONE_VM.
 * NOTE(review): this excerpt elides the fork() call itself, the pid
 * bookkeeping in the parent branch, braces and several error checks.
 */
5685 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5686 abi_ulong parent_tidptr, target_ulong newtls,
5687 abi_ulong child_tidptr)
5689 CPUState *cpu = env_cpu(env);
5693 CPUArchState *new_env;
5696 flags &= ~CLONE_IGNORED_FLAGS;
5698 /* Emulate vfork() with fork() */
5699 if (flags & CLONE_VFORK)
5700 flags &= ~(CLONE_VFORK | CLONE_VM);
5702 if (flags & CLONE_VM) {
5703 TaskState *parent_ts = (TaskState *)cpu->opaque;
5704 new_thread_info info;
5705 pthread_attr_t attr;
/* thread-style clone must carry exactly the supported flag set */
5707 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5708 (flags & CLONE_INVALID_THREAD_FLAGS)) {
5709 return -TARGET_EINVAL;
5712 ts = g_new0(TaskState, 1);
5713 init_task_state(ts);
5715 /* Grab a mutex so that thread setup appears atomic. */
5716 pthread_mutex_lock(&clone_lock);
5718 /* we create a new CPU instance. */
5719 new_env = cpu_copy(env);
5720 /* Init regs that differ from the parent. */
5721 cpu_clone_regs(new_env, newsp);
5722 new_cpu = env_cpu(new_env);
5723 new_cpu->opaque = ts;
5724 ts->bprm = parent_ts->bprm;
5725 ts->info = parent_ts->info;
5726 ts->signal_mask = parent_ts->signal_mask;
5728 if (flags & CLONE_CHILD_CLEARTID) {
5729 ts->child_tidptr = child_tidptr;
5732 if (flags & CLONE_SETTLS) {
5733 cpu_set_tls (new_env, newtls);
5736 memset(&info, 0, sizeof(info));
5737 pthread_mutex_init(&info.mutex, NULL);
5738 pthread_mutex_lock(&info.mutex);
5739 pthread_cond_init(&info.cond, NULL);
5741 if (flags & CLONE_CHILD_SETTID) {
5742 info.child_tidptr = child_tidptr;
5744 if (flags & CLONE_PARENT_SETTID) {
5745 info.parent_tidptr = parent_tidptr;
5748 ret = pthread_attr_init(&attr);
5749 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5750 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5751 /* It is not safe to deliver signals until the child has finished
5752 initializing, so temporarily block all signals. */
5753 sigfillset(&sigmask);
5754 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5755 cpu->random_seed = qemu_guest_random_seed_thread_part1();
5757 /* If this is our first additional thread, we need to ensure we
5758 * generate code for parallel execution and flush old translations.
5760 if (!parallel_cpus) {
5761 parallel_cpus = true;
5765 ret = pthread_create(&info.thread, &attr, clone_func, &info);
5766 /* TODO: Free new CPU state if thread creation failed. */
5768 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5769 pthread_attr_destroy(&attr);
5771 /* Wait for the child to initialize. */
5772 pthread_cond_wait(&info.cond, &info.mutex);
5777 pthread_mutex_unlock(&info.mutex);
5778 pthread_cond_destroy(&info.cond);
5779 pthread_mutex_destroy(&info.mutex);
5780 pthread_mutex_unlock(&clone_lock);
5782 /* if no CLONE_VM, we consider it is a fork */
5783 if (flags & CLONE_INVALID_FORK_FLAGS) {
5784 return -TARGET_EINVAL;
5787 /* We can't support custom termination signals */
5788 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5789 return -TARGET_EINVAL;
5792 if (block_signals()) {
5793 return -TARGET_ERESTARTSYS;
5799 /* Child Process. */
5800 cpu_clone_regs(env, newsp);
5802 /* There is a race condition here. The parent process could
5803 theoretically read the TID in the child process before the child
5804 tid is set. This would require using either ptrace
5805 (not implemented) or having *_tidptr to point at a shared memory
5806 mapping. We can't repeat the spinlock hack used above because
5807 the child process gets its own copy of the lock. */
5808 if (flags & CLONE_CHILD_SETTID)
5809 put_user_u32(sys_gettid(), child_tidptr)
5810 if (flags & CLONE_PARENT_SETTID)
5811 put_user_u32(sys_gettid(), parent_tidptr);
5812 ts = (TaskState *)cpu->opaque;
5813 if (flags & CLONE_SETTLS)
5814 cpu_set_tls (env, newtls);
5815 if (flags & CLONE_CHILD_CLEARTID)
5816 ts->child_tidptr = child_tidptr;
5824 /* warning : doesn't handle linux specific flags... */
/*
 * Map a target fcntl(2) command number to the host's.  Unknown commands
 * yield -TARGET_EINVAL.  On ppc64 hosts the glibc F_*LK64 values are
 * re-mapped to the kernel's numbering (see comment below).
 * NOTE(review): most "ret = F_...;" assignment lines between the case
 * labels are elided in this excerpt; code kept byte-identical.
 */
5825 static int target_to_host_fcntl_cmd(int cmd)
5830 case TARGET_F_DUPFD:
5831 case TARGET_F_GETFD:
5832 case TARGET_F_SETFD:
5833 case TARGET_F_GETFL:
5834 case TARGET_F_SETFL:
5837 case TARGET_F_GETLK:
5840 case TARGET_F_SETLK:
5843 case TARGET_F_SETLKW:
5846 case TARGET_F_GETOWN:
5849 case TARGET_F_SETOWN:
5852 case TARGET_F_GETSIG:
5855 case TARGET_F_SETSIG:
5858 #if TARGET_ABI_BITS == 32
5859 case TARGET_F_GETLK64:
5862 case TARGET_F_SETLK64:
5865 case TARGET_F_SETLKW64:
5869 case TARGET_F_SETLEASE:
5872 case TARGET_F_GETLEASE:
5875 #ifdef F_DUPFD_CLOEXEC
5876 case TARGET_F_DUPFD_CLOEXEC:
5877 ret = F_DUPFD_CLOEXEC;
5880 case TARGET_F_NOTIFY:
5884 case TARGET_F_GETOWN_EX:
5889 case TARGET_F_SETOWN_EX:
5894 case TARGET_F_SETPIPE_SZ:
5897 case TARGET_F_GETPIPE_SZ:
5902 ret = -TARGET_EINVAL;
5906 #if defined(__powerpc64__)
5907 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
5908 * is not supported by kernel. The glibc fcntl call actually adjusts
5909 * them to 5, 6 and 7 before making the syscall(). Since we make the
5910 * syscall directly, adjust to what is supported by the kernel.
5912 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5913 ret -= F_GETLK64 - 5;
/*
 * X-macro list of flock l_type values shared by the two converters
 * below; each expands TRANSTBL_CONVERT once per lock type.
 * NOTE(review): the switch wrapper lines of the macro are elided in
 * this excerpt.
 */
5920 #define FLOCK_TRANSTBL \
5922 TRANSTBL_CONVERT(F_RDLCK); \
5923 TRANSTBL_CONVERT(F_WRLCK); \
5924 TRANSTBL_CONVERT(F_UNLCK); \
5925 TRANSTBL_CONVERT(F_EXLCK); \
5926 TRANSTBL_CONVERT(F_SHLCK); \
/*
 * Convert a target flock l_type to the host value via FLOCK_TRANSTBL;
 * unknown types return -TARGET_EINVAL.
 */
5929 static int target_to_host_flock(int type)
5931 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5933 #undef TRANSTBL_CONVERT
5934 return -TARGET_EINVAL;
/*
 * Convert a host flock l_type to the target value; unknown host values
 * are passed through unchanged (see comment below).
 */
5937 static int host_to_target_flock(int type)
5939 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5941 #undef TRANSTBL_CONVERT
5942 /* if we don't know how to convert the value coming
5943 * from the host we copy to the target field as-is
/*
 * Read a target 'struct flock' from guest memory into a host flock64,
 * translating l_type and byte-swapping each field via __get_user.
 * NOTE(review): the negative-l_type early return after the conversion
 * is elided in this excerpt.
 */
5948 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5949 abi_ulong target_flock_addr)
5951 struct target_flock *target_fl;
5954 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5955 return -TARGET_EFAULT;
5958 __get_user(l_type, &target_fl->l_type);
5959 l_type = target_to_host_flock(l_type);
5963 fl->l_type = l_type;
5964 __get_user(fl->l_whence, &target_fl->l_whence);
5965 __get_user(fl->l_start, &target_fl->l_start);
5966 __get_user(fl->l_len, &target_fl->l_len);
5967 __get_user(fl->l_pid, &target_fl->l_pid);
5968 unlock_user_struct(target_fl, target_flock_addr, 0);
/*
 * Write a host flock64 back to guest memory as a target 'struct flock',
 * translating l_type to the target value and swapping each field.
 */
5972 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5973 const struct flock64 *fl)
5975 struct target_flock *target_fl;
5978 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5979 return -TARGET_EFAULT;
5982 l_type = host_to_target_flock(fl->l_type);
5983 __put_user(l_type, &target_fl->l_type);
5984 __put_user(fl->l_whence, &target_fl->l_whence);
5985 __put_user(fl->l_start, &target_fl->l_start);
5986 __put_user(fl->l_len, &target_fl->l_len);
5987 __put_user(fl->l_pid, &target_fl->l_pid);
5988 unlock_user_struct(target_fl, target_flock_addr, 1);
/* Function-pointer shapes used to select the right flock64 copier
 * (regular vs ARM-OABI layout) at the fcntl64 call site. */
5992 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5993 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5995 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/*
 * ARM-OABI variant of copy_from_user_flock64: identical logic but reads
 * the old-ABI struct layout (different padding/alignment of l_start).
 */
5996 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5997 abi_ulong target_flock_addr)
5999 struct target_oabi_flock64 *target_fl;
6002 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6003 return -TARGET_EFAULT;
6006 __get_user(l_type, &target_fl->l_type);
6007 l_type = target_to_host_flock(l_type);
6011 fl->l_type = l_type;
6012 __get_user(fl->l_whence, &target_fl->l_whence);
6013 __get_user(fl->l_start, &target_fl->l_start);
6014 __get_user(fl->l_len, &target_fl->l_len);
6015 __get_user(fl->l_pid, &target_fl->l_pid);
6016 unlock_user_struct(target_fl, target_flock_addr, 0);
/*
 * ARM-OABI variant of copy_to_user_flock64: writes the old-ABI struct
 * layout back to guest memory.
 */
6020 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6021 const struct flock64 *fl)
6023 struct target_oabi_flock64 *target_fl;
6026 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6027 return -TARGET_EFAULT;
6030 l_type = host_to_target_flock(fl->l_type);
6031 __put_user(l_type, &target_fl->l_type);
6032 __put_user(fl->l_whence, &target_fl->l_whence);
6033 __put_user(fl->l_start, &target_fl->l_start);
6034 __put_user(fl->l_len, &target_fl->l_len);
6035 __put_user(fl->l_pid, &target_fl->l_pid);
6036 unlock_user_struct(target_fl, target_flock_addr, 1);
/*
 * Read a target 'struct flock64' from guest memory into a host flock64
 * (64-bit l_start/l_len variant of copy_from_user_flock above).
 */
6041 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6042 abi_ulong target_flock_addr)
6044 struct target_flock64 *target_fl;
6047 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6048 return -TARGET_EFAULT;
6051 __get_user(l_type, &target_fl->l_type);
6052 l_type = target_to_host_flock(l_type);
6056 fl->l_type = l_type;
6057 __get_user(fl->l_whence, &target_fl->l_whence);
6058 __get_user(fl->l_start, &target_fl->l_start);
6059 __get_user(fl->l_len, &target_fl->l_len);
6060 __get_user(fl->l_pid, &target_fl->l_pid);
6061 unlock_user_struct(target_fl, target_flock_addr, 0);
/*
 * Write a host flock64 back to guest memory as a target 'struct
 * flock64' (64-bit counterpart of copy_to_user_flock).
 */
6065 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6066 const struct flock64 *fl)
6068 struct target_flock64 *target_fl;
6071 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6072 return -TARGET_EFAULT;
6075 l_type = host_to_target_flock(fl->l_type);
6076 __put_user(l_type, &target_fl->l_type);
6077 __put_user(fl->l_whence, &target_fl->l_whence);
6078 __put_user(fl->l_start, &target_fl->l_start);
6079 __put_user(fl->l_len, &target_fl->l_len);
6080 __put_user(fl->l_pid, &target_fl->l_pid);
6081 unlock_user_struct(target_fl, target_flock_addr, 1);
/*
 * fcntl(2)/fcntl64(2) emulation: translate the command with
 * target_to_host_fcntl_cmd(), marshal the lock/owner structures across
 * the ABI boundary, and issue the host safe_fcntl().  GETFL/SETFL
 * additionally translate the open-flags bitmask; unrecognized commands
 * fall through to the host with the original cmd.
 * NOTE(review): the switch header, break statements, intermediate error
 * checks and braces are elided in this excerpt; code kept identical.
 */
6085 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6087 struct flock64 fl64;
6089 struct f_owner_ex fox;
6090 struct target_f_owner_ex *target_fox;
6093 int host_cmd = target_to_host_fcntl_cmd(cmd);
6095 if (host_cmd == -TARGET_EINVAL)
6099 case TARGET_F_GETLK:
6100 ret = copy_from_user_flock(&fl64, arg);
6104 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6106 ret = copy_to_user_flock(arg, &fl64);
6110 case TARGET_F_SETLK:
6111 case TARGET_F_SETLKW:
6112 ret = copy_from_user_flock(&fl64, arg);
6116 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6119 case TARGET_F_GETLK64:
6120 ret = copy_from_user_flock64(&fl64, arg);
6124 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6126 ret = copy_to_user_flock64(arg, &fl64);
6129 case TARGET_F_SETLK64:
6130 case TARGET_F_SETLKW64:
6131 ret = copy_from_user_flock64(&fl64, arg);
6135 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6138 case TARGET_F_GETFL:
6139 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6141 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6145 case TARGET_F_SETFL:
6146 ret = get_errno(safe_fcntl(fd, host_cmd,
6147 target_to_host_bitmask(arg,
6152 case TARGET_F_GETOWN_EX:
6153 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6155 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6156 return -TARGET_EFAULT;
6157 target_fox->type = tswap32(fox.type);
6158 target_fox->pid = tswap32(fox.pid);
6159 unlock_user_struct(target_fox, arg, 1);
6165 case TARGET_F_SETOWN_EX:
6166 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6167 return -TARGET_EFAULT;
6168 fox.type = tswap32(target_fox->type);
6169 fox.pid = tswap32(target_fox->pid);
6170 unlock_user_struct(target_fox, arg, 0);
6171 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
/* commands whose 'arg' is a plain integer: pass through unmodified */
6175 case TARGET_F_SETOWN:
6176 case TARGET_F_GETOWN:
6177 case TARGET_F_SETSIG:
6178 case TARGET_F_GETSIG:
6179 case TARGET_F_SETLEASE:
6180 case TARGET_F_GETLEASE:
6181 case TARGET_F_SETPIPE_SZ:
6182 case TARGET_F_GETPIPE_SZ:
6183 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6187 ret = get_errno(safe_fcntl(fd, cmd, arg));
/*
 * UID/GID width-conversion helpers.  Under USE_UID16 the target ABI has
 * 16-bit ids: high2low* clamp 32-bit host ids into 16 bits, low2high*
 * sign-extend the ABI's 16-bit -1 sentinel, and tswapid/put_user_id
 * operate on 16-bit quantities.  The !USE_UID16 variants are identity
 * functions with 32-bit stores.
 * NOTE(review): the function bodies (clamp/return expressions) and the
 * #if USE_UID16 header are elided in this excerpt.
 */
6195 static inline int high2lowuid(int uid)
6203 static inline int high2lowgid(int gid)
6211 static inline int low2highuid(int uid)
6213 if ((int16_t)uid == -1)
6219 static inline int low2highgid(int gid)
6221 if ((int16_t)gid == -1)
6226 static inline int tswapid(int id)
6231 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6233 #else /* !USE_UID16 */
6234 static inline int high2lowuid(int uid)
6238 static inline int high2lowgid(int gid)
6242 static inline int low2highuid(int uid)
6246 static inline int low2highgid(int gid)
6250 static inline int tswapid(int id)
6255 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6257 #endif /* USE_UID16 */
6259 /* We must do direct syscalls for setting UID/GID, because we want to
6260 * implement the Linux system call semantics of "change only for this thread",
6261 * not the libc/POSIX semantics of "change for all threads in process".
6262 * (See http://ewontfix.com/17/ for more details.)
6263 * We use the 32-bit version of the syscalls if present; if it is not
6264 * then either the host architecture supports 32-bit UIDs natively with
6265 * the standard syscall, or the 16-bit UID is the best we can do.
6267 #ifdef __NR_setuid32
6268 #define __NR_sys_setuid __NR_setuid32
6270 #define __NR_sys_setuid __NR_setuid
6272 #ifdef __NR_setgid32
6273 #define __NR_sys_setgid __NR_setgid32
6275 #define __NR_sys_setgid __NR_setgid
6277 #ifdef __NR_setresuid32
6278 #define __NR_sys_setresuid __NR_setresuid32
6280 #define __NR_sys_setresuid __NR_setresuid
6282 #ifdef __NR_setresgid32
6283 #define __NR_sys_setresgid __NR_setresgid32
6285 #define __NR_sys_setresgid __NR_setresgid
/* raw syscall stubs generated from the __NR_sys_* numbers chosen above */
6288 _syscall1(int, sys_setuid, uid_t, uid)
6289 _syscall1(int, sys_setgid, gid_t, gid)
6290 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6291 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/*
 * One-time initialization for syscall emulation: register all thunk
 * struct descriptors from syscall_types.h, build the reverse errno
 * table, and patch each ioctl entry whose size field is the all-ones
 * placeholder with the real thunk-computed argument size (optionally
 * cross-checking target vs host cmd values when the arches match).
 * NOTE(review): the ioctl_entry iterator setup and loop increment are
 * elided in this excerpt.
 */
6293 void syscall_init(void)
6296 const argtype *arg_type;
6300 thunk_init(STRUCT_MAX);
6302 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6303 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6304 #include "syscall_types.h"
6306 #undef STRUCT_SPECIAL
6308 /* Build target_to_host_errno_table[] table from
6309 * host_to_target_errno_table[]. */
6310 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6311 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6314 /* we patch the ioctl size if necessary. We rely on the fact that
6315 no ioctl has all the bits at '1' in the size field */
6317 while (ie->target_cmd != 0) {
6318 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6319 TARGET_IOC_SIZEMASK) {
6320 arg_type = ie->arg_type;
6321 if (arg_type[0] != TYPE_PTR) {
6322 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6327 size = thunk_type_size(arg_type, 0);
6328 ie->target_cmd = (ie->target_cmd &
6329 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6330 (size << TARGET_IOC_SIZESHIFT);
6333 /* automatic consistency check if same arch */
6334 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6335 (defined(__x86_64__) && defined(TARGET_X86_64))
6336 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6337 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6338 ie->name, ie->target_cmd, ie->host_cmd);
6345 #if TARGET_ABI_BITS == 32
/*
 * Reassemble a 64-bit value from the two 32-bit register halves a
 * 32-bit guest passes for 64-bit syscall arguments; which word is the
 * high half depends on target endianness.  The 64-bit-ABI variant
 * below is a pass-through of word0.
 * NOTE(review): braces/#else/#endif of the inner #ifdef and the
 * 64-bit body's return statement are elided in this excerpt.
 */
6346 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6348 #ifdef TARGET_WORDS_BIGENDIAN
6349 return ((uint64_t)word0 << 32) | word1;
6351 return ((uint64_t)word1 << 32) | word0;
6354 #else /* TARGET_ABI_BITS == 32 */
6355 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6359 #endif /* TARGET_ABI_BITS != 32 */
6361 #ifdef TARGET_NR_truncate64
/*
 * truncate64(2) emulation: combine the split 64-bit length from arg2/
 * arg3 (shifting the register pair when the target ABI requires
 * alignment, per regpairs_aligned) and call host truncate64().
 * NOTE(review): the register-shuffling body of the aligned branch is
 * elided in this excerpt.
 */
6362 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6367 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6371 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6375 #ifdef TARGET_NR_ftruncate64
6376 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6381 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6385 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/*
 * Copy a guest struct itimerspec at target_addr into *host_itspec,
 * converting each field from guest to host byte order.
 * Returns -TARGET_EFAULT if the guest address cannot be read.
 */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
abi_ulong target_addr)
struct target_itimerspec *target_itspec;
/* Map and validate the guest struct for reading. */
if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
return -TARGET_EFAULT;
/* tswapal() swaps an abi_long between guest and host byte order. */
host_itspec->it_interval.tv_sec =
tswapal(target_itspec->it_interval.tv_sec);
host_itspec->it_interval.tv_nsec =
tswapal(target_itspec->it_interval.tv_nsec);
host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
/* NOTE(review): copy-back flag is 1 although the struct was only read;
   harmless, but 0 would suffice — compare target_to_host_timex below. */
unlock_user_struct(target_itspec, target_addr, 1);
6409 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6410 struct itimerspec *host_its)
6412 struct target_itimerspec *target_itspec;
6414 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6415 return -TARGET_EFAULT;
6418 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6419 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6421 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6422 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6424 unlock_user_struct(target_itspec, target_addr, 0);
/*
 * Copy a guest struct timex (adjtimex/clock_adjtime argument) at
 * target_addr into *host_tx, byteswapping each field with __get_user.
 * Returns -TARGET_EFAULT if the guest address cannot be read.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
abi_long target_addr)
struct target_timex *target_tx;
if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
return -TARGET_EFAULT;
/* Field-by-field copy; __get_user handles the guest byte order. */
__get_user(host_tx->modes, &target_tx->modes);
__get_user(host_tx->offset, &target_tx->offset);
__get_user(host_tx->freq, &target_tx->freq);
__get_user(host_tx->maxerror, &target_tx->maxerror);
__get_user(host_tx->esterror, &target_tx->esterror);
__get_user(host_tx->status, &target_tx->status);
__get_user(host_tx->constant, &target_tx->constant);
__get_user(host_tx->precision, &target_tx->precision);
__get_user(host_tx->tolerance, &target_tx->tolerance);
__get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
__get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
__get_user(host_tx->tick, &target_tx->tick);
__get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
__get_user(host_tx->jitter, &target_tx->jitter);
__get_user(host_tx->shift, &target_tx->shift);
__get_user(host_tx->stabil, &target_tx->stabil);
__get_user(host_tx->jitcnt, &target_tx->jitcnt);
__get_user(host_tx->calcnt, &target_tx->calcnt);
__get_user(host_tx->errcnt, &target_tx->errcnt);
__get_user(host_tx->stbcnt, &target_tx->stbcnt);
__get_user(host_tx->tai, &target_tx->tai);
/* Read-only lock: copy-back flag 0. */
unlock_user_struct(target_tx, target_addr, 0);
/*
 * Copy *host_tx out to the guest struct timex at target_addr,
 * byteswapping each field with __put_user.  Mirror image of
 * target_to_host_timex() above.
 * Returns -TARGET_EFAULT if the guest address cannot be written.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
struct timex *host_tx)
struct target_timex *target_tx;
if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
return -TARGET_EFAULT;
__put_user(host_tx->modes, &target_tx->modes);
__put_user(host_tx->offset, &target_tx->offset);
__put_user(host_tx->freq, &target_tx->freq);
__put_user(host_tx->maxerror, &target_tx->maxerror);
__put_user(host_tx->esterror, &target_tx->esterror);
__put_user(host_tx->status, &target_tx->status);
__put_user(host_tx->constant, &target_tx->constant);
__put_user(host_tx->precision, &target_tx->precision);
__put_user(host_tx->tolerance, &target_tx->tolerance);
__put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
__put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
__put_user(host_tx->tick, &target_tx->tick);
__put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
__put_user(host_tx->jitter, &target_tx->jitter);
__put_user(host_tx->shift, &target_tx->shift);
__put_user(host_tx->stabil, &target_tx->stabil);
__put_user(host_tx->jitcnt, &target_tx->jitcnt);
__put_user(host_tx->calcnt, &target_tx->calcnt);
__put_user(host_tx->errcnt, &target_tx->errcnt);
__put_user(host_tx->stbcnt, &target_tx->stbcnt);
__put_user(host_tx->tai, &target_tx->tai);
/* Copy-back flag 1: propagate the writes to guest memory. */
unlock_user_struct(target_tx, target_addr, 1);
/*
 * Convert a guest struct sigevent (e.g. the timer_create() argument)
 * at target_addr into *host_sevp.  The guest signal number is mapped
 * to the corresponding host signal.
 * Returns -TARGET_EFAULT if the guest address cannot be read.
 */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
abi_ulong target_addr)
struct target_sigevent *target_sevp;
if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
return -TARGET_EFAULT;
/* This union is awkward on 64 bit systems because it has a 32 bit
* integer and a pointer in it; we follow the conversion approach
* used for handling sigval types in signal.c so the guest should get
* the correct value back even if we did a 64 bit byteswap and it's
* using the 32 bit integer.
*/
host_sevp->sigev_value.sival_ptr =
(void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
host_sevp->sigev_signo =
target_to_host_signal(tswap32(target_sevp->sigev_signo));
host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
/* NOTE(review): copy-back flag is 1 on a read-only lock — harmless. */
unlock_user_struct(target_sevp, target_addr, 1);
#if defined(TARGET_NR_mlockall)
/*
 * Translate guest mlockall() flag bits into host MCL_* bits.
 * Translated bit-by-bit since the numeric values may differ between
 * guest and host ABIs.
 */
static inline int target_to_host_mlockall_arg(int arg)
if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
result |= MCL_CURRENT;
if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
result |= MCL_FUTURE;
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
defined(TARGET_NR_newfstatat))
/*
 * Copy a host struct stat out to the guest's 64-bit stat layout at
 * target_addr.  ARM EABI guests use a distinct target_eabi_stat64
 * layout; all other targets use target_stat64 (or plain target_stat
 * when the target has no separate stat64).
 * Returns -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
abi_ulong target_addr,
struct stat *host_st)
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
/* ARM EABI has its own padded/aligned stat64 layout. */
if (((CPUARMState *)cpu_env)->eabi) {
struct target_eabi_stat64 *target_st;
if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
return -TARGET_EFAULT;
/* Zero first so padding and untranslated fields read as 0. */
memset(target_st, 0, sizeof(struct target_eabi_stat64));
__put_user(host_st->st_dev, &target_st->st_dev);
__put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
/* Some ABIs keep a second, truncated inode field. */
__put_user(host_st->st_ino, &target_st->__st_ino);
__put_user(host_st->st_mode, &target_st->st_mode);
__put_user(host_st->st_nlink, &target_st->st_nlink);
__put_user(host_st->st_uid, &target_st->st_uid);
__put_user(host_st->st_gid, &target_st->st_gid);
__put_user(host_st->st_rdev, &target_st->st_rdev);
__put_user(host_st->st_size, &target_st->st_size);
__put_user(host_st->st_blksize, &target_st->st_blksize);
__put_user(host_st->st_blocks, &target_st->st_blocks);
__put_user(host_st->st_atime, &target_st->target_st_atime);
__put_user(host_st->st_mtime, &target_st->target_st_mtime);
__put_user(host_st->st_ctime, &target_st->target_st_ctime);
/* Nanosecond timestamp fields exist only on sufficiently new hosts. */
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
__put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
__put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
__put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
unlock_user_struct(target_st, target_addr, 1);
/* Generic (non-ARM-EABI) path. */
#if defined(TARGET_HAS_STRUCT_STAT64)
struct target_stat64 *target_st;
struct target_stat *target_st;
if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
return -TARGET_EFAULT;
memset(target_st, 0, sizeof(*target_st));
__put_user(host_st->st_dev, &target_st->st_dev);
__put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
__put_user(host_st->st_ino, &target_st->__st_ino);
__put_user(host_st->st_mode, &target_st->st_mode);
__put_user(host_st->st_nlink, &target_st->st_nlink);
__put_user(host_st->st_uid, &target_st->st_uid);
__put_user(host_st->st_gid, &target_st->st_gid);
__put_user(host_st->st_rdev, &target_st->st_rdev);
/* XXX: better use of kernel struct */
__put_user(host_st->st_size, &target_st->st_size);
__put_user(host_st->st_blksize, &target_st->st_blksize);
__put_user(host_st->st_blocks, &target_st->st_blocks);
__put_user(host_st->st_atime, &target_st->target_st_atime);
__put_user(host_st->st_mtime, &target_st->target_st_mtime);
__put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
__put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
__put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
__put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
unlock_user_struct(target_st, target_addr, 1);
6617 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6618 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6619 abi_ulong target_addr)
6621 struct target_statx *target_stx;
6623 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
6624 return -TARGET_EFAULT;
6626 memset(target_stx, 0, sizeof(*target_stx));
6628 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6629 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6630 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6631 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6632 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6633 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6634 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6635 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6636 __put_user(host_stx->stx_size, &target_stx->stx_size);
6637 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6638 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6639 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6640 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6641 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_atime.tv_sec);
6642 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6643 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_atime.tv_sec);
6644 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6645 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_atime.tv_sec);
6646 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6647 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
6648 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6649 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6650 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6652 unlock_user_struct(target_stx, target_addr, 1);
6659 /* ??? Using host futex calls even when target atomic operations
6660 are not really atomic probably breaks things. However implementing
6661 futexes locally would make futexes shared between multiple processes
6662 tricky. However they're probably useless because guest atomic
6663 operations won't work either. */
/*
 * Emulate futex(2) for the guest by forwarding to the host futex.
 * Only ops whose arguments can be translated directly are handled;
 * anything else returns -TARGET_ENOSYS.
 */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
target_ulong uaddr2, int val3)
struct timespec ts, *pts;
/* ??? We assume FUTEX_* constants are the same on both host
#ifdef FUTEX_CMD_MASK
/* Strip modifier bits (FUTEX_PRIVATE_FLAG etc.) off the command. */
base_op = op & FUTEX_CMD_MASK;
case FUTEX_WAIT_BITSET:
/* NOTE(review): the return value of target_to_host_timespec() is not
   checked, so a faulting guest timeout pointer goes unreported. */
target_to_host_timespec(pts, timeout);
/* val is compared against guest memory, so present it in guest order. */
return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
case FUTEX_CMP_REQUEUE:
/* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
TIMEOUT parameter is interpreted as a uint32_t by the kernel.
But the prototype takes a `struct timespec *'; insert casts
to satisfy the compiler. We do not need to tswap TIMEOUT
since it's not compared to guest memory. */
pts = (struct timespec *)(uintptr_t) timeout;
return get_errno(safe_futex(g2h(uaddr), op, val, pts,
(base_op == FUTEX_CMP_REQUEUE
return -TARGET_ENOSYS;
6710 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2): read the guest-supplied handle_bytes
 * size, call the host syscall into a scratch file_handle, then copy
 * the result (with handle_bytes/handle_type byteswapped) and the
 * mount id back to the guest.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
abi_long handle, abi_long mount_id,
struct file_handle *target_fh;
struct file_handle *fh;
unsigned int size, total_size;
/* handle_bytes is the first field of the guest file_handle. */
if (get_user_s32(size, handle)) {
return -TARGET_EFAULT;
name = lock_user_string(pathname);
return -TARGET_EFAULT;
/* Guest buffer covers the header plus the opaque handle bytes. */
total_size = sizeof(struct file_handle) + size;
target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
unlock_user(name, pathname, 0);
return -TARGET_EFAULT;
/* Host-side scratch handle, zeroed, with the caller's size filled in. */
fh = g_malloc0(total_size);
fh->handle_bytes = size;
ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
unlock_user(name, pathname, 0);
/* man name_to_handle_at(2):
* Other than the use of the handle_bytes field, the caller should treat
* the file_handle structure as an opaque data type
*/
memcpy(target_fh, fh, total_size);
/* Only the two documented header fields need byteswapping. */
target_fh->handle_bytes = tswap32(fh->handle_bytes);
target_fh->handle_type = tswap32(fh->handle_type);
unlock_user(target_fh, handle, total_size);
if (put_user_s32(mid, mount_id)) {
return -TARGET_EFAULT;
6764 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): copy the guest file_handle into a
 * host copy (byteswapping handle_type), translate the open flags,
 * and call the host syscall.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
struct file_handle *target_fh;
struct file_handle *fh;
unsigned int size, total_size;
/* handle_bytes is the first field of the guest file_handle. */
if (get_user_s32(size, handle)) {
return -TARGET_EFAULT;
total_size = sizeof(struct file_handle) + size;
target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
return -TARGET_EFAULT;
/* Work on a host copy; the handle payload itself is opaque. */
fh = g_memdup(target_fh, total_size);
fh->handle_bytes = size;
fh->handle_type = tswap32(target_fh->handle_type);
ret = get_errno(open_by_handle_at(mount_fd, fh,
target_to_host_bitmask(flags, fcntl_flags_tbl)));
unlock_user(target_fh, handle, total_size);
6798 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/*
 * Emulate signalfd4(2): convert the guest sigset and flags, create the
 * host signalfd, and register a fd translator so reads of the fd get
 * their signalfd_siginfo converted back to guest layout.
 * Only O_NONBLOCK/O_CLOEXEC are valid flags; anything else is EINVAL.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
target_sigset_t *target_mask;
if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
return -TARGET_EINVAL;
if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
return -TARGET_EFAULT;
target_to_host_sigset(&host_mask, target_mask);
host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
ret = get_errno(signalfd(fd, &host_mask, host_flags));
/* Translate data read from this fd back into guest format. */
fd_trans_register(ret, &target_signalfd_trans);
unlock_user_struct(target_mask, mask, 0);
/* Map host to target signal numbers for the wait family of syscalls.
Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
/* Terminated by signal: low 7 bits hold the signal number. */
if (WIFSIGNALED(status)) {
return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
/* Stopped: the stopping signal lives in bits 8-15. */
if (WIFSTOPPED(status)) {
return (host_to_target_signal(WSTOPSIG(status)) << 8)
/*
 * Back a fake /proc/self/cmdline: write the guest's argv strings,
 * NUL-separated (each strlen+1 includes the terminator), to fd.
 */
static int open_self_cmdline(void *cpu_env, int fd)
CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
for (i = 0; i < bprm->argc; i++) {
size_t len = strlen(bprm->argv[i]) + 1;
/* Short write counts as failure. */
if (write(fd, bprm->argv[i], len) != len) {
/*
 * Back a fake /proc/self/maps: parse the host's real maps file and
 * re-emit only those ranges that correspond to valid guest memory,
 * with addresses translated from host (h2g) to guest view.
 */
static int open_self_maps(void *cpu_env, int fd)
CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
TaskState *ts = cpu->opaque;
fp = fopen("/proc/self/maps", "r");
while ((read = getline(&line, &len, fp)) != -1) {
int fields, dev_maj, dev_min, inode;
uint64_t min, max, offset;
char flag_r, flag_w, flag_x, flag_p;
char path[512] = "";
/* FIXME: "%512s" can store up to 512 chars plus the NUL terminator
   into path[512] — a one-byte overflow; the width should be 511. */
fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
" %512s", &min, &max, &flag_r, &flag_w, &flag_x,
&flag_p, &offset, &dev_maj, &dev_min, &inode, path);
/* 10 fields when the pathname is absent, 11 when present. */
if ((fields < 10) || (fields > 11)) {
if (h2g_valid(min)) {
int flags = page_get_flags(h2g(min));
/* Clamp the top end to the guest address space. */
max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
if (page_check_range(h2g(min), max - min, flags) == -1) {
/* Label the guest stack mapping like the kernel would. */
if (h2g(min) == ts->info->stack_limit) {
pstrcpy(path, sizeof(path), " [stack]");
dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
" %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
flag_x, flag_p, offset, dev_maj, dev_min, inode,
path[0] ? " " : "", path);
/*
 * Back a fake /proc/self/stat: emit 44 space-separated fields.  Only
 * pid (field 0), comm (field 1) and start_stack (field 27) carry real
 * values; every other field is reported as 0.
 */
static int open_self_stat(void *cpu_env, int fd)
CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
TaskState *ts = cpu->opaque;
abi_ulong start_stack = ts->info->start_stack;
for (i = 0; i < 44; i++) {
snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
} else if (i == 1) {
/* comm: the guest's argv[0], parenthesised like the kernel does. */
snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
} else if (i == 27) {
snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
/* for the rest, there is MasterCard */
snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
/* Short write counts as failure. */
if (write(fd, buf, len) != len) {
/*
 * Back a fake /proc/self/auxv: copy the auxiliary vector that was
 * placed on the guest stack at exec time straight out to fd.
 */
static int open_self_auxv(void *cpu_env, int fd)
CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
TaskState *ts = cpu->opaque;
abi_ulong auxv = ts->info->saved_auxv;
abi_ulong len = ts->info->auxv_len;
/*
* Auxiliary vector is stored in target process stack.
* read in whole auxv vector and copy it to file
*/
ptr = lock_user(VERIFY_READ, auxv, len, 0);
r = write(fd, ptr, len);
/* Rewind so the caller reads from the start of the fake file. */
lseek(fd, 0, SEEK_SET);
unlock_user(ptr, auxv, len);
/*
 * Return nonzero if filename names our own proc entry: matches
 * "/proc/self/<entry>" or "/proc/<our pid>/<entry>".
 */
static int is_proc_myself(const char *filename, const char *entry)
if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
filename += strlen("/proc/");
if (!strncmp(filename, "self/", strlen("self/"))) {
filename += strlen("self/");
} else if (*filename >= '1' && *filename <= '9') {
/* Numeric pid directory: accept only our own pid. */
snprintf(myself, sizeof(myself), "%d/", getpid());
if (!strncmp(filename, myself, strlen(myself))) {
filename += strlen(myself);
/* What remains after the prefix must match the entry exactly. */
if (!strcmp(filename, entry)) {
7000 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7001 defined(TARGET_SPARC) || defined(TARGET_M68K)
/* Exact-path comparison used by the whole-path /proc fakes below. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7008 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/*
 * Back a fake /proc/net/route for guests whose endianness differs
 * from the host: re-emit the host file with the 32-bit address and
 * mask columns byteswapped into guest order.
 */
static int open_net_route(void *cpu_env, int fd)
fp = fopen("/proc/net/route", "r");
/* NOTE(review): the header line's getline() result is not checked
   before being printed — 'line' could be unset on an empty file. */
read = getline(&line, &len, fp);
dprintf(fd, "%s", line);
while ((read = getline(&line, &len, fp)) != -1) {
uint32_t dest, gw, mask;
unsigned int flags, refcnt, use, metric, mtu, window, irtt;
fields = sscanf(line,
"%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
iface, &dest, &gw, &flags, &refcnt, &use, &metric,
&mask, &mtu, &window, &irtt);
/* Only dest/gw/mask are raw network byte data; swap just those. */
dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
metric, tswap32(mask), mtu, window, irtt);
7053 #if defined(TARGET_SPARC)
/* Back a minimal fake /proc/cpuinfo for SPARC guests. */
static int open_cpuinfo(void *cpu_env, int fd)
dprintf(fd, "type\t\t: sun4u\n");
7061 #if defined(TARGET_M68K)
/* Back a minimal fake /proc/hardware for m68k guests. */
static int open_hardware(void *cpu_env, int fd)
dprintf(fd, "Model:\t\tqemu-m68k\n");
/*
 * openat() emulation.  Intercepts a small set of /proc paths whose
 * contents must be faked for the guest (table below); everything else
 * goes straight to the host via safe_openat().  Faked entries are
 * materialised into an anonymous temp file whose fd is returned.
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
const char *filename;
int (*fill)(void *cpu_env, int fd);          /* writes fake contents to fd */
int (*cmp)(const char *s1, const char *s2);  /* path matcher */
const struct fake_open *fake_open;
static const struct fake_open fakes[] = {
{ "maps", open_self_maps, is_proc_myself },
{ "stat", open_self_stat, is_proc_myself },
{ "auxv", open_self_auxv, is_proc_myself },
{ "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
{ "/proc/net/route", open_net_route, is_proc },
#if defined(TARGET_SPARC)
{ "/proc/cpuinfo", open_cpuinfo, is_proc },
#if defined(TARGET_M68K)
{ "/proc/hardware", open_hardware, is_proc },
{ NULL, NULL, NULL }
/* /proc/self/exe must resolve to the guest binary, not QEMU itself. */
if (is_proc_myself(pathname, "exe")) {
int execfd = qemu_getauxval(AT_EXECFD);
return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
for (fake_open = fakes; fake_open->filename; fake_open++) {
if (fake_open->cmp(pathname, fake_open->filename)) {
if (fake_open->filename) {
char filename[PATH_MAX];
/* create temporary file to map stat to */
tmpdir = getenv("TMPDIR");
snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
fd = mkstemp(filename);
if ((r = fake_open->fill(cpu_env, fd))) {
/* Rewind so the caller reads the fake contents from the start. */
lseek(fd, 0, SEEK_SET);
return safe_openat(dirfd, path(pathname), flags, mode);
7135 #define TIMER_MAGIC 0x0caf0000
7136 #define TIMER_MAGIC_MASK 0xffff0000
/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
target_timer_t timerid = arg;
/* The upper half must carry the magic QEMU stamped on at creation. */
if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
return -TARGET_EINVAL;
/* The remaining index must fall inside the timer table. */
if (timerid >= ARRAY_SIZE(g_posix_timers)) {
return -TARGET_EINVAL;
/*
 * Convert a guest CPU affinity bitmap at target_addr into a host
 * unsigned-long bitmap, bit by bit, coping with differing word sizes
 * and byte orders between guest and host.
 * Returns -TARGET_EFAULT on a bad guest address.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
abi_ulong target_addr,
unsigned target_bits = sizeof(abi_ulong) * 8;
unsigned host_bits = sizeof(*host_mask) * 8;
abi_ulong *target_mask;
/* Caller guarantees the host buffer can hold every guest bit. */
assert(host_size >= target_size);
target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
return -TARGET_EFAULT;
memset(host_mask, 0, host_size);
for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
unsigned bit = i * target_bits;
__get_user(val, &target_mask[i]);
/* Re-set each guest bit at its position in the host word layout. */
for (j = 0; j < target_bits; j++, bit++) {
if (val & (1UL << j)) {
host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
unlock_user(target_mask, target_addr, 0);
/*
 * Convert a host CPU affinity bitmap into the guest bitmap at
 * target_addr, bit by bit.  Mirror image of target_to_host_cpu_mask().
 * Returns -TARGET_EFAULT on a bad guest address.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
abi_ulong target_addr,
unsigned target_bits = sizeof(abi_ulong) * 8;
unsigned host_bits = sizeof(*host_mask) * 8;
abi_ulong *target_mask;
/* Caller guarantees every guest bit exists in the host buffer. */
assert(host_size >= target_size);
target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
return -TARGET_EFAULT;
for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
unsigned bit = i * target_bits;
/* Gather the host bits belonging to this guest word. */
for (j = 0; j < target_bits; j++, bit++) {
if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
__put_user(val, &target_mask[i]);
unlock_user(target_mask, target_addr, target_size);
7223 /* This is an internal helper for do_syscall so that it is easier
7224 * to have a single return point, so that actions, such as logging
7225 * of syscall results, can be performed.
7226 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7228 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7229 abi_long arg2, abi_long arg3, abi_long arg4,
7230 abi_long arg5, abi_long arg6, abi_long arg7,
7233 CPUState *cpu = env_cpu(cpu_env);
7235 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7236 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7237 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7238 || defined(TARGET_NR_statx)
7241 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7242 || defined(TARGET_NR_fstatfs)
7248 case TARGET_NR_exit:
7249 /* In old applications this may be used to implement _exit(2).
7250 However in threaded applictions it is used for thread termination,
7251 and _exit_group is used for application termination.
7252 Do thread termination if we have more then one thread. */
7254 if (block_signals()) {
7255 return -TARGET_ERESTARTSYS;
7260 if (CPU_NEXT(first_cpu)) {
7263 /* Remove the CPU from the list. */
7264 QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7269 if (ts->child_tidptr) {
7270 put_user_u32(0, ts->child_tidptr);
7271 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7275 object_unref(OBJECT(cpu));
7277 rcu_unregister_thread();
7282 preexit_cleanup(cpu_env, arg1);
7284 return 0; /* avoid warning */
7285 case TARGET_NR_read:
7286 if (arg2 == 0 && arg3 == 0) {
7287 return get_errno(safe_read(arg1, 0, 0));
7289 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7290 return -TARGET_EFAULT;
7291 ret = get_errno(safe_read(arg1, p, arg3));
7293 fd_trans_host_to_target_data(arg1)) {
7294 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7296 unlock_user(p, arg2, ret);
7299 case TARGET_NR_write:
7300 if (arg2 == 0 && arg3 == 0) {
7301 return get_errno(safe_write(arg1, 0, 0));
7303 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7304 return -TARGET_EFAULT;
7305 if (fd_trans_target_to_host_data(arg1)) {
7306 void *copy = g_malloc(arg3);
7307 memcpy(copy, p, arg3);
7308 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7310 ret = get_errno(safe_write(arg1, copy, ret));
7314 ret = get_errno(safe_write(arg1, p, arg3));
7316 unlock_user(p, arg2, 0);
7319 #ifdef TARGET_NR_open
7320 case TARGET_NR_open:
7321 if (!(p = lock_user_string(arg1)))
7322 return -TARGET_EFAULT;
7323 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7324 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7326 fd_trans_unregister(ret);
7327 unlock_user(p, arg1, 0);
7330 case TARGET_NR_openat:
7331 if (!(p = lock_user_string(arg2)))
7332 return -TARGET_EFAULT;
7333 ret = get_errno(do_openat(cpu_env, arg1, p,
7334 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7336 fd_trans_unregister(ret);
7337 unlock_user(p, arg2, 0);
7339 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7340 case TARGET_NR_name_to_handle_at:
7341 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7344 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7345 case TARGET_NR_open_by_handle_at:
7346 ret = do_open_by_handle_at(arg1, arg2, arg3);
7347 fd_trans_unregister(ret);
7350 case TARGET_NR_close:
7351 fd_trans_unregister(arg1);
7352 return get_errno(close(arg1));
7355 return do_brk(arg1);
7356 #ifdef TARGET_NR_fork
7357 case TARGET_NR_fork:
7358 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7360 #ifdef TARGET_NR_waitpid
7361 case TARGET_NR_waitpid:
7364 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7365 if (!is_error(ret) && arg2 && ret
7366 && put_user_s32(host_to_target_waitstatus(status), arg2))
7367 return -TARGET_EFAULT;
7371 #ifdef TARGET_NR_waitid
7372 case TARGET_NR_waitid:
7376 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7377 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7378 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7379 return -TARGET_EFAULT;
7380 host_to_target_siginfo(p, &info);
7381 unlock_user(p, arg3, sizeof(target_siginfo_t));
7386 #ifdef TARGET_NR_creat /* not on alpha */
7387 case TARGET_NR_creat:
7388 if (!(p = lock_user_string(arg1)))
7389 return -TARGET_EFAULT;
7390 ret = get_errno(creat(p, arg2));
7391 fd_trans_unregister(ret);
7392 unlock_user(p, arg1, 0);
7395 #ifdef TARGET_NR_link
7396 case TARGET_NR_link:
7399 p = lock_user_string(arg1);
7400 p2 = lock_user_string(arg2);
7402 ret = -TARGET_EFAULT;
7404 ret = get_errno(link(p, p2));
7405 unlock_user(p2, arg2, 0);
7406 unlock_user(p, arg1, 0);
7410 #if defined(TARGET_NR_linkat)
7411 case TARGET_NR_linkat:
7415 return -TARGET_EFAULT;
7416 p = lock_user_string(arg2);
7417 p2 = lock_user_string(arg4);
7419 ret = -TARGET_EFAULT;
7421 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7422 unlock_user(p, arg2, 0);
7423 unlock_user(p2, arg4, 0);
7427 #ifdef TARGET_NR_unlink
7428 case TARGET_NR_unlink:
7429 if (!(p = lock_user_string(arg1)))
7430 return -TARGET_EFAULT;
7431 ret = get_errno(unlink(p));
7432 unlock_user(p, arg1, 0);
7435 #if defined(TARGET_NR_unlinkat)
7436 case TARGET_NR_unlinkat:
7437 if (!(p = lock_user_string(arg2)))
7438 return -TARGET_EFAULT;
7439 ret = get_errno(unlinkat(arg1, p, arg3));
7440 unlock_user(p, arg2, 0);
7443 case TARGET_NR_execve:
7445 char **argp, **envp;
7448 abi_ulong guest_argp;
7449 abi_ulong guest_envp;
7456 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7457 if (get_user_ual(addr, gp))
7458 return -TARGET_EFAULT;
7465 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7466 if (get_user_ual(addr, gp))
7467 return -TARGET_EFAULT;
7473 argp = g_new0(char *, argc + 1);
7474 envp = g_new0(char *, envc + 1);
7476 for (gp = guest_argp, q = argp; gp;
7477 gp += sizeof(abi_ulong), q++) {
7478 if (get_user_ual(addr, gp))
7482 if (!(*q = lock_user_string(addr)))
7484 total_size += strlen(*q) + 1;
7488 for (gp = guest_envp, q = envp; gp;
7489 gp += sizeof(abi_ulong), q++) {
7490 if (get_user_ual(addr, gp))
7494 if (!(*q = lock_user_string(addr)))
7496 total_size += strlen(*q) + 1;
7500 if (!(p = lock_user_string(arg1)))
7502 /* Although execve() is not an interruptible syscall it is
7503 * a special case where we must use the safe_syscall wrapper:
7504 * if we allow a signal to happen before we make the host
7505 * syscall then we will 'lose' it, because at the point of
7506 * execve the process leaves QEMU's control. So we use the
7507 * safe syscall wrapper to ensure that we either take the
7508 * signal as a guest signal, or else it does not happen
7509 * before the execve completes and makes it the other
7510 * program's problem.
7512 ret = get_errno(safe_execve(p, argp, envp));
7513 unlock_user(p, arg1, 0);
7518 ret = -TARGET_EFAULT;
7521 for (gp = guest_argp, q = argp; *q;
7522 gp += sizeof(abi_ulong), q++) {
7523 if (get_user_ual(addr, gp)
7526 unlock_user(*q, addr, 0);
7528 for (gp = guest_envp, q = envp; *q;
7529 gp += sizeof(abi_ulong), q++) {
7530 if (get_user_ual(addr, gp)
7533 unlock_user(*q, addr, 0);
7540 case TARGET_NR_chdir:
7541 if (!(p = lock_user_string(arg1)))
7542 return -TARGET_EFAULT;
7543 ret = get_errno(chdir(p));
7544 unlock_user(p, arg1, 0);
7546 #ifdef TARGET_NR_time
7547 case TARGET_NR_time:
7550 ret = get_errno(time(&host_time));
7553 && put_user_sal(host_time, arg1))
7554 return -TARGET_EFAULT;
7558 #ifdef TARGET_NR_mknod
7559 case TARGET_NR_mknod:
7560 if (!(p = lock_user_string(arg1)))
7561 return -TARGET_EFAULT;
7562 ret = get_errno(mknod(p, arg2, arg3));
7563 unlock_user(p, arg1, 0);
7566 #if defined(TARGET_NR_mknodat)
7567 case TARGET_NR_mknodat:
7568 if (!(p = lock_user_string(arg2)))
7569 return -TARGET_EFAULT;
7570 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7571 unlock_user(p, arg2, 0);
7574 #ifdef TARGET_NR_chmod
7575 case TARGET_NR_chmod:
7576 if (!(p = lock_user_string(arg1)))
7577 return -TARGET_EFAULT;
7578 ret = get_errno(chmod(p, arg2));
7579 unlock_user(p, arg1, 0);
7582 #ifdef TARGET_NR_lseek
7583 case TARGET_NR_lseek:
7584 return get_errno(lseek(arg1, arg2, arg3));
7586 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7587 /* Alpha specific */
7588 case TARGET_NR_getxpid:
7589 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7590 return get_errno(getpid());
7592 #ifdef TARGET_NR_getpid
7593 case TARGET_NR_getpid:
7594 return get_errno(getpid());
7596 case TARGET_NR_mount:
7598 /* need to look at the data field */
7602 p = lock_user_string(arg1);
7604 return -TARGET_EFAULT;
7610 p2 = lock_user_string(arg2);
7613 unlock_user(p, arg1, 0);
7615 return -TARGET_EFAULT;
7619 p3 = lock_user_string(arg3);
7622 unlock_user(p, arg1, 0);
7624 unlock_user(p2, arg2, 0);
7625 return -TARGET_EFAULT;
7631 /* FIXME - arg5 should be locked, but it isn't clear how to
7632 * do that since it's not guaranteed to be a NULL-terminated
7636 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7638 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7640 ret = get_errno(ret);
7643 unlock_user(p, arg1, 0);
7645 unlock_user(p2, arg2, 0);
7647 unlock_user(p3, arg3, 0);
7651 #ifdef TARGET_NR_umount
7652 case TARGET_NR_umount:
7653 if (!(p = lock_user_string(arg1)))
7654 return -TARGET_EFAULT;
7655 ret = get_errno(umount(p));
7656 unlock_user(p, arg1, 0);
7659 #ifdef TARGET_NR_stime /* not on alpha */
7660 case TARGET_NR_stime:
7663 if (get_user_sal(host_time, arg1))
7664 return -TARGET_EFAULT;
7665 return get_errno(stime(&host_time));
7668 #ifdef TARGET_NR_alarm /* not on alpha */
7669 case TARGET_NR_alarm:
7672 #ifdef TARGET_NR_pause /* not on alpha */
7673 case TARGET_NR_pause:
7674 if (!block_signals()) {
7675 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7677 return -TARGET_EINTR;
7679 #ifdef TARGET_NR_utime
7680 case TARGET_NR_utime:
7682 struct utimbuf tbuf, *host_tbuf;
7683 struct target_utimbuf *target_tbuf;
7685 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7686 return -TARGET_EFAULT;
7687 tbuf.actime = tswapal(target_tbuf->actime);
7688 tbuf.modtime = tswapal(target_tbuf->modtime);
7689 unlock_user_struct(target_tbuf, arg2, 0);
7694 if (!(p = lock_user_string(arg1)))
7695 return -TARGET_EFAULT;
7696 ret = get_errno(utime(p, host_tbuf));
7697 unlock_user(p, arg1, 0);
7701 #ifdef TARGET_NR_utimes
7702 case TARGET_NR_utimes:
7704 struct timeval *tvp, tv[2];
7706 if (copy_from_user_timeval(&tv[0], arg2)
7707 || copy_from_user_timeval(&tv[1],
7708 arg2 + sizeof(struct target_timeval)))
7709 return -TARGET_EFAULT;
7714 if (!(p = lock_user_string(arg1)))
7715 return -TARGET_EFAULT;
7716 ret = get_errno(utimes(p, tvp));
7717 unlock_user(p, arg1, 0);
7721 #if defined(TARGET_NR_futimesat)
7722 case TARGET_NR_futimesat:
7724 struct timeval *tvp, tv[2];
7726 if (copy_from_user_timeval(&tv[0], arg3)
7727 || copy_from_user_timeval(&tv[1],
7728 arg3 + sizeof(struct target_timeval)))
7729 return -TARGET_EFAULT;
7734 if (!(p = lock_user_string(arg2))) {
7735 return -TARGET_EFAULT;
7737 ret = get_errno(futimesat(arg1, path(p), tvp));
7738 unlock_user(p, arg2, 0);
7742 #ifdef TARGET_NR_access
7743 case TARGET_NR_access:
7744 if (!(p = lock_user_string(arg1))) {
7745 return -TARGET_EFAULT;
7747 ret = get_errno(access(path(p), arg2));
7748 unlock_user(p, arg1, 0);
7751 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7752 case TARGET_NR_faccessat:
7753 if (!(p = lock_user_string(arg2))) {
7754 return -TARGET_EFAULT;
7756 ret = get_errno(faccessat(arg1, p, arg3, 0));
7757 unlock_user(p, arg2, 0);
7760 #ifdef TARGET_NR_nice /* not on alpha */
7761 case TARGET_NR_nice:
7762 return get_errno(nice(arg1));
7764 case TARGET_NR_sync:
7767 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7768 case TARGET_NR_syncfs:
7769 return get_errno(syncfs(arg1));
7771 case TARGET_NR_kill:
7772 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7773 #ifdef TARGET_NR_rename
7774 case TARGET_NR_rename:
7777 p = lock_user_string(arg1);
7778 p2 = lock_user_string(arg2);
7780 ret = -TARGET_EFAULT;
7782 ret = get_errno(rename(p, p2));
7783 unlock_user(p2, arg2, 0);
7784 unlock_user(p, arg1, 0);
7788 #if defined(TARGET_NR_renameat)
7789 case TARGET_NR_renameat:
7792 p = lock_user_string(arg2);
7793 p2 = lock_user_string(arg4);
7795 ret = -TARGET_EFAULT;
7797 ret = get_errno(renameat(arg1, p, arg3, p2));
7798 unlock_user(p2, arg4, 0);
7799 unlock_user(p, arg2, 0);
7803 #if defined(TARGET_NR_renameat2)
7804 case TARGET_NR_renameat2:
7807 p = lock_user_string(arg2);
7808 p2 = lock_user_string(arg4);
7810 ret = -TARGET_EFAULT;
7812 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7814 unlock_user(p2, arg4, 0);
7815 unlock_user(p, arg2, 0);
7819 #ifdef TARGET_NR_mkdir
7820 case TARGET_NR_mkdir:
7821 if (!(p = lock_user_string(arg1)))
7822 return -TARGET_EFAULT;
7823 ret = get_errno(mkdir(p, arg2));
7824 unlock_user(p, arg1, 0);
7827 #if defined(TARGET_NR_mkdirat)
7828 case TARGET_NR_mkdirat:
7829 if (!(p = lock_user_string(arg2)))
7830 return -TARGET_EFAULT;
7831 ret = get_errno(mkdirat(arg1, p, arg3));
7832 unlock_user(p, arg2, 0);
7835 #ifdef TARGET_NR_rmdir
7836 case TARGET_NR_rmdir:
7837 if (!(p = lock_user_string(arg1)))
7838 return -TARGET_EFAULT;
7839 ret = get_errno(rmdir(p));
7840 unlock_user(p, arg1, 0);
7844 ret = get_errno(dup(arg1));
7846 fd_trans_dup(arg1, ret);
7849 #ifdef TARGET_NR_pipe
7850 case TARGET_NR_pipe:
7851 return do_pipe(cpu_env, arg1, 0, 0);
7853 #ifdef TARGET_NR_pipe2
7854 case TARGET_NR_pipe2:
7855 return do_pipe(cpu_env, arg1,
7856 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7858 case TARGET_NR_times:
7860 struct target_tms *tmsp;
7862 ret = get_errno(times(&tms));
7864 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7866 return -TARGET_EFAULT;
7867 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7868 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7869 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7870 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7873 ret = host_to_target_clock_t(ret);
7876 case TARGET_NR_acct:
7878 ret = get_errno(acct(NULL));
7880 if (!(p = lock_user_string(arg1))) {
7881 return -TARGET_EFAULT;
7883 ret = get_errno(acct(path(p)));
7884 unlock_user(p, arg1, 0);
7887 #ifdef TARGET_NR_umount2
7888 case TARGET_NR_umount2:
7889 if (!(p = lock_user_string(arg1)))
7890 return -TARGET_EFAULT;
7891 ret = get_errno(umount2(p, arg2));
7892 unlock_user(p, arg1, 0);
7895 case TARGET_NR_ioctl:
7896 return do_ioctl(arg1, arg2, arg3);
7897 #ifdef TARGET_NR_fcntl
7898 case TARGET_NR_fcntl:
7899 return do_fcntl(arg1, arg2, arg3);
7901 case TARGET_NR_setpgid:
7902 return get_errno(setpgid(arg1, arg2));
7903 case TARGET_NR_umask:
7904 return get_errno(umask(arg1));
7905 case TARGET_NR_chroot:
7906 if (!(p = lock_user_string(arg1)))
7907 return -TARGET_EFAULT;
7908 ret = get_errno(chroot(p));
7909 unlock_user(p, arg1, 0);
7911 #ifdef TARGET_NR_dup2
7912 case TARGET_NR_dup2:
7913 ret = get_errno(dup2(arg1, arg2));
7915 fd_trans_dup(arg1, arg2);
7919 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7920 case TARGET_NR_dup3:
7924 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7927 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7928 ret = get_errno(dup3(arg1, arg2, host_flags));
7930 fd_trans_dup(arg1, arg2);
7935 #ifdef TARGET_NR_getppid /* not on alpha */
7936 case TARGET_NR_getppid:
7937 return get_errno(getppid());
7939 #ifdef TARGET_NR_getpgrp
7940 case TARGET_NR_getpgrp:
7941 return get_errno(getpgrp());
7943 case TARGET_NR_setsid:
7944 return get_errno(setsid());
7945 #ifdef TARGET_NR_sigaction
7946 case TARGET_NR_sigaction:
7948 #if defined(TARGET_ALPHA)
7949 struct target_sigaction act, oact, *pact = 0;
7950 struct target_old_sigaction *old_act;
7952 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7953 return -TARGET_EFAULT;
7954 act._sa_handler = old_act->_sa_handler;
7955 target_siginitset(&act.sa_mask, old_act->sa_mask);
7956 act.sa_flags = old_act->sa_flags;
7957 act.sa_restorer = 0;
7958 unlock_user_struct(old_act, arg2, 0);
7961 ret = get_errno(do_sigaction(arg1, pact, &oact));
7962 if (!is_error(ret) && arg3) {
7963 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7964 return -TARGET_EFAULT;
7965 old_act->_sa_handler = oact._sa_handler;
7966 old_act->sa_mask = oact.sa_mask.sig[0];
7967 old_act->sa_flags = oact.sa_flags;
7968 unlock_user_struct(old_act, arg3, 1);
7970 #elif defined(TARGET_MIPS)
7971 struct target_sigaction act, oact, *pact, *old_act;
7974 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7975 return -TARGET_EFAULT;
7976 act._sa_handler = old_act->_sa_handler;
7977 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7978 act.sa_flags = old_act->sa_flags;
7979 unlock_user_struct(old_act, arg2, 0);
7985 ret = get_errno(do_sigaction(arg1, pact, &oact));
7987 if (!is_error(ret) && arg3) {
7988 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7989 return -TARGET_EFAULT;
7990 old_act->_sa_handler = oact._sa_handler;
7991 old_act->sa_flags = oact.sa_flags;
7992 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7993 old_act->sa_mask.sig[1] = 0;
7994 old_act->sa_mask.sig[2] = 0;
7995 old_act->sa_mask.sig[3] = 0;
7996 unlock_user_struct(old_act, arg3, 1);
7999 struct target_old_sigaction *old_act;
8000 struct target_sigaction act, oact, *pact;
8002 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8003 return -TARGET_EFAULT;
8004 act._sa_handler = old_act->_sa_handler;
8005 target_siginitset(&act.sa_mask, old_act->sa_mask);
8006 act.sa_flags = old_act->sa_flags;
8007 act.sa_restorer = old_act->sa_restorer;
8008 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8009 act.ka_restorer = 0;
8011 unlock_user_struct(old_act, arg2, 0);
8016 ret = get_errno(do_sigaction(arg1, pact, &oact));
8017 if (!is_error(ret) && arg3) {
8018 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8019 return -TARGET_EFAULT;
8020 old_act->_sa_handler = oact._sa_handler;
8021 old_act->sa_mask = oact.sa_mask.sig[0];
8022 old_act->sa_flags = oact.sa_flags;
8023 old_act->sa_restorer = oact.sa_restorer;
8024 unlock_user_struct(old_act, arg3, 1);
8030 case TARGET_NR_rt_sigaction:
8032 #if defined(TARGET_ALPHA)
8033 /* For Alpha and SPARC this is a 5 argument syscall, with
8034 * a 'restorer' parameter which must be copied into the
8035 * sa_restorer field of the sigaction struct.
8036 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8037 * and arg5 is the sigsetsize.
8038 * Alpha also has a separate rt_sigaction struct that it uses
8039 * here; SPARC uses the usual sigaction struct.
8041 struct target_rt_sigaction *rt_act;
8042 struct target_sigaction act, oact, *pact = 0;
8044 if (arg4 != sizeof(target_sigset_t)) {
8045 return -TARGET_EINVAL;
8048 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8049 return -TARGET_EFAULT;
8050 act._sa_handler = rt_act->_sa_handler;
8051 act.sa_mask = rt_act->sa_mask;
8052 act.sa_flags = rt_act->sa_flags;
8053 act.sa_restorer = arg5;
8054 unlock_user_struct(rt_act, arg2, 0);
8057 ret = get_errno(do_sigaction(arg1, pact, &oact));
8058 if (!is_error(ret) && arg3) {
8059 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8060 return -TARGET_EFAULT;
8061 rt_act->_sa_handler = oact._sa_handler;
8062 rt_act->sa_mask = oact.sa_mask;
8063 rt_act->sa_flags = oact.sa_flags;
8064 unlock_user_struct(rt_act, arg3, 1);
8068 target_ulong restorer = arg4;
8069 target_ulong sigsetsize = arg5;
8071 target_ulong sigsetsize = arg4;
8073 struct target_sigaction *act;
8074 struct target_sigaction *oact;
8076 if (sigsetsize != sizeof(target_sigset_t)) {
8077 return -TARGET_EINVAL;
8080 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8081 return -TARGET_EFAULT;
8083 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8084 act->ka_restorer = restorer;
8090 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8091 ret = -TARGET_EFAULT;
8092 goto rt_sigaction_fail;
8096 ret = get_errno(do_sigaction(arg1, act, oact));
8099 unlock_user_struct(act, arg2, 0);
8101 unlock_user_struct(oact, arg3, 1);
8105 #ifdef TARGET_NR_sgetmask /* not on alpha */
8106 case TARGET_NR_sgetmask:
8109 abi_ulong target_set;
8110 ret = do_sigprocmask(0, NULL, &cur_set);
8112 host_to_target_old_sigset(&target_set, &cur_set);
8118 #ifdef TARGET_NR_ssetmask /* not on alpha */
8119 case TARGET_NR_ssetmask:
8122 abi_ulong target_set = arg1;
8123 target_to_host_old_sigset(&set, &target_set);
8124 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8126 host_to_target_old_sigset(&target_set, &oset);
8132 #ifdef TARGET_NR_sigprocmask
8133 case TARGET_NR_sigprocmask:
8135 #if defined(TARGET_ALPHA)
8136 sigset_t set, oldset;
8141 case TARGET_SIG_BLOCK:
8144 case TARGET_SIG_UNBLOCK:
8147 case TARGET_SIG_SETMASK:
8151 return -TARGET_EINVAL;
8154 target_to_host_old_sigset(&set, &mask);
8156 ret = do_sigprocmask(how, &set, &oldset);
8157 if (!is_error(ret)) {
8158 host_to_target_old_sigset(&mask, &oldset);
8160 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8163 sigset_t set, oldset, *set_ptr;
8168 case TARGET_SIG_BLOCK:
8171 case TARGET_SIG_UNBLOCK:
8174 case TARGET_SIG_SETMASK:
8178 return -TARGET_EINVAL;
8180 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8181 return -TARGET_EFAULT;
8182 target_to_host_old_sigset(&set, p);
8183 unlock_user(p, arg2, 0);
8189 ret = do_sigprocmask(how, set_ptr, &oldset);
8190 if (!is_error(ret) && arg3) {
8191 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8192 return -TARGET_EFAULT;
8193 host_to_target_old_sigset(p, &oldset);
8194 unlock_user(p, arg3, sizeof(target_sigset_t));
8200 case TARGET_NR_rt_sigprocmask:
8203 sigset_t set, oldset, *set_ptr;
8205 if (arg4 != sizeof(target_sigset_t)) {
8206 return -TARGET_EINVAL;
8211 case TARGET_SIG_BLOCK:
8214 case TARGET_SIG_UNBLOCK:
8217 case TARGET_SIG_SETMASK:
8221 return -TARGET_EINVAL;
8223 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8224 return -TARGET_EFAULT;
8225 target_to_host_sigset(&set, p);
8226 unlock_user(p, arg2, 0);
8232 ret = do_sigprocmask(how, set_ptr, &oldset);
8233 if (!is_error(ret) && arg3) {
8234 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8235 return -TARGET_EFAULT;
8236 host_to_target_sigset(p, &oldset);
8237 unlock_user(p, arg3, sizeof(target_sigset_t));
8241 #ifdef TARGET_NR_sigpending
8242 case TARGET_NR_sigpending:
8245 ret = get_errno(sigpending(&set));
8246 if (!is_error(ret)) {
8247 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8248 return -TARGET_EFAULT;
8249 host_to_target_old_sigset(p, &set);
8250 unlock_user(p, arg1, sizeof(target_sigset_t));
8255 case TARGET_NR_rt_sigpending:
8259 /* Yes, this check is >, not != like most. We follow the kernel's
8260 * logic and it does it like this because it implements
8261 * NR_sigpending through the same code path, and in that case
8262 * the old_sigset_t is smaller in size.
8264 if (arg2 > sizeof(target_sigset_t)) {
8265 return -TARGET_EINVAL;
8268 ret = get_errno(sigpending(&set));
8269 if (!is_error(ret)) {
8270 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8271 return -TARGET_EFAULT;
8272 host_to_target_sigset(p, &set);
8273 unlock_user(p, arg1, sizeof(target_sigset_t));
8277 #ifdef TARGET_NR_sigsuspend
8278 case TARGET_NR_sigsuspend:
8280 TaskState *ts = cpu->opaque;
8281 #if defined(TARGET_ALPHA)
8282 abi_ulong mask = arg1;
8283 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8285 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8286 return -TARGET_EFAULT;
8287 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8288 unlock_user(p, arg1, 0);
8290 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8292 if (ret != -TARGET_ERESTARTSYS) {
8293 ts->in_sigsuspend = 1;
8298 case TARGET_NR_rt_sigsuspend:
8300 TaskState *ts = cpu->opaque;
8302 if (arg2 != sizeof(target_sigset_t)) {
8303 return -TARGET_EINVAL;
8305 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8306 return -TARGET_EFAULT;
8307 target_to_host_sigset(&ts->sigsuspend_mask, p);
8308 unlock_user(p, arg1, 0);
8309 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8311 if (ret != -TARGET_ERESTARTSYS) {
8312 ts->in_sigsuspend = 1;
8316 case TARGET_NR_rt_sigtimedwait:
8319 struct timespec uts, *puts;
8322 if (arg4 != sizeof(target_sigset_t)) {
8323 return -TARGET_EINVAL;
8326 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8327 return -TARGET_EFAULT;
8328 target_to_host_sigset(&set, p);
8329 unlock_user(p, arg1, 0);
8332 target_to_host_timespec(puts, arg3);
8336 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8338 if (!is_error(ret)) {
8340 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8343 return -TARGET_EFAULT;
8345 host_to_target_siginfo(p, &uinfo);
8346 unlock_user(p, arg2, sizeof(target_siginfo_t));
8348 ret = host_to_target_signal(ret);
8352 case TARGET_NR_rt_sigqueueinfo:
8356 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8358 return -TARGET_EFAULT;
8360 target_to_host_siginfo(&uinfo, p);
8361 unlock_user(p, arg3, 0);
8362 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8365 case TARGET_NR_rt_tgsigqueueinfo:
8369 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8371 return -TARGET_EFAULT;
8373 target_to_host_siginfo(&uinfo, p);
8374 unlock_user(p, arg4, 0);
8375 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8378 #ifdef TARGET_NR_sigreturn
8379 case TARGET_NR_sigreturn:
8380 if (block_signals()) {
8381 return -TARGET_ERESTARTSYS;
8383 return do_sigreturn(cpu_env);
8385 case TARGET_NR_rt_sigreturn:
8386 if (block_signals()) {
8387 return -TARGET_ERESTARTSYS;
8389 return do_rt_sigreturn(cpu_env);
8390 case TARGET_NR_sethostname:
8391 if (!(p = lock_user_string(arg1)))
8392 return -TARGET_EFAULT;
8393 ret = get_errno(sethostname(p, arg2));
8394 unlock_user(p, arg1, 0);
8396 #ifdef TARGET_NR_setrlimit
8397 case TARGET_NR_setrlimit:
8399 int resource = target_to_host_resource(arg1);
8400 struct target_rlimit *target_rlim;
8402 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8403 return -TARGET_EFAULT;
8404 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8405 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8406 unlock_user_struct(target_rlim, arg2, 0);
8408 * If we just passed through resource limit settings for memory then
8409 * they would also apply to QEMU's own allocations, and QEMU will
8410 * crash or hang or die if its allocations fail. Ideally we would
8411 * track the guest allocations in QEMU and apply the limits ourselves.
8412 * For now, just tell the guest the call succeeded but don't actually
8415 if (resource != RLIMIT_AS &&
8416 resource != RLIMIT_DATA &&
8417 resource != RLIMIT_STACK) {
8418 return get_errno(setrlimit(resource, &rlim));
8424 #ifdef TARGET_NR_getrlimit
8425 case TARGET_NR_getrlimit:
8427 int resource = target_to_host_resource(arg1);
8428 struct target_rlimit *target_rlim;
8431 ret = get_errno(getrlimit(resource, &rlim));
8432 if (!is_error(ret)) {
8433 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8434 return -TARGET_EFAULT;
8435 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8436 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8437 unlock_user_struct(target_rlim, arg2, 1);
8442 case TARGET_NR_getrusage:
8444 struct rusage rusage;
8445 ret = get_errno(getrusage(arg1, &rusage));
8446 if (!is_error(ret)) {
8447 ret = host_to_target_rusage(arg2, &rusage);
8451 case TARGET_NR_gettimeofday:
8454 ret = get_errno(gettimeofday(&tv, NULL));
8455 if (!is_error(ret)) {
8456 if (copy_to_user_timeval(arg1, &tv))
8457 return -TARGET_EFAULT;
8461 case TARGET_NR_settimeofday:
8463 struct timeval tv, *ptv = NULL;
8464 struct timezone tz, *ptz = NULL;
8467 if (copy_from_user_timeval(&tv, arg1)) {
8468 return -TARGET_EFAULT;
8474 if (copy_from_user_timezone(&tz, arg2)) {
8475 return -TARGET_EFAULT;
8480 return get_errno(settimeofday(ptv, ptz));
8482 #if defined(TARGET_NR_select)
8483 case TARGET_NR_select:
8484 #if defined(TARGET_WANT_NI_OLD_SELECT)
8485 /* some architectures used to have old_select here
8486 * but now ENOSYS it.
8488 ret = -TARGET_ENOSYS;
8489 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8490 ret = do_old_select(arg1);
8492 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8496 #ifdef TARGET_NR_pselect6
8497 case TARGET_NR_pselect6:
8499 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8500 fd_set rfds, wfds, efds;
8501 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8502 struct timespec ts, *ts_ptr;
8505 * The 6th arg is actually two args smashed together,
8506 * so we cannot use the C library.
8514 abi_ulong arg_sigset, arg_sigsize, *arg7;
8515 target_sigset_t *target_sigset;
8523 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8527 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8531 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8537 * This takes a timespec, and not a timeval, so we cannot
8538 * use the do_select() helper ...
8541 if (target_to_host_timespec(&ts, ts_addr)) {
8542 return -TARGET_EFAULT;
8549 /* Extract the two packed args for the sigset */
8552 sig.size = SIGSET_T_SIZE;
8554 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8556 return -TARGET_EFAULT;
8558 arg_sigset = tswapal(arg7[0]);
8559 arg_sigsize = tswapal(arg7[1]);
8560 unlock_user(arg7, arg6, 0);
8564 if (arg_sigsize != sizeof(*target_sigset)) {
8565 /* Like the kernel, we enforce correct size sigsets */
8566 return -TARGET_EINVAL;
8568 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8569 sizeof(*target_sigset), 1);
8570 if (!target_sigset) {
8571 return -TARGET_EFAULT;
8573 target_to_host_sigset(&set, target_sigset);
8574 unlock_user(target_sigset, arg_sigset, 0);
8582 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8585 if (!is_error(ret)) {
8586 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8587 return -TARGET_EFAULT;
8588 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8589 return -TARGET_EFAULT;
8590 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8591 return -TARGET_EFAULT;
8593 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8594 return -TARGET_EFAULT;
8599 #ifdef TARGET_NR_symlink
8600 case TARGET_NR_symlink:
8603 p = lock_user_string(arg1);
8604 p2 = lock_user_string(arg2);
8606 ret = -TARGET_EFAULT;
8608 ret = get_errno(symlink(p, p2));
8609 unlock_user(p2, arg2, 0);
8610 unlock_user(p, arg1, 0);
8614 #if defined(TARGET_NR_symlinkat)
8615 case TARGET_NR_symlinkat:
8618 p = lock_user_string(arg1);
8619 p2 = lock_user_string(arg3);
8621 ret = -TARGET_EFAULT;
8623 ret = get_errno(symlinkat(p, arg2, p2));
8624 unlock_user(p2, arg3, 0);
8625 unlock_user(p, arg1, 0);
8629 #ifdef TARGET_NR_readlink
8630 case TARGET_NR_readlink:
8633 p = lock_user_string(arg1);
8634 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8636 ret = -TARGET_EFAULT;
8638 /* Short circuit this for the magic exe check. */
8639 ret = -TARGET_EINVAL;
8640 } else if (is_proc_myself((const char *)p, "exe")) {
8641 char real[PATH_MAX], *temp;
8642 temp = realpath(exec_path, real);
8643 /* Return value is # of bytes that we wrote to the buffer. */
8645 ret = get_errno(-1);
8647 /* Don't worry about sign mismatch as earlier mapping
8648 * logic would have thrown a bad address error. */
8649 ret = MIN(strlen(real), arg3);
8650 /* We cannot NUL terminate the string. */
8651 memcpy(p2, real, ret);
8654 ret = get_errno(readlink(path(p), p2, arg3));
8656 unlock_user(p2, arg2, ret);
8657 unlock_user(p, arg1, 0);
8661 #if defined(TARGET_NR_readlinkat)
8662 case TARGET_NR_readlinkat:
8665 p = lock_user_string(arg2);
8666 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8668 ret = -TARGET_EFAULT;
8669 } else if (is_proc_myself((const char *)p, "exe")) {
8670 char real[PATH_MAX], *temp;
8671 temp = realpath(exec_path, real);
8672 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8673 snprintf((char *)p2, arg4, "%s", real);
8675 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8677 unlock_user(p2, arg3, ret);
8678 unlock_user(p, arg2, 0);
8682 #ifdef TARGET_NR_swapon
8683 case TARGET_NR_swapon:
8684 if (!(p = lock_user_string(arg1)))
8685 return -TARGET_EFAULT;
8686 ret = get_errno(swapon(p, arg2));
8687 unlock_user(p, arg1, 0);
8690 case TARGET_NR_reboot:
8691 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8692 /* arg4 must be ignored in all other cases */
8693 p = lock_user_string(arg4);
8695 return -TARGET_EFAULT;
8697 ret = get_errno(reboot(arg1, arg2, arg3, p));
8698 unlock_user(p, arg4, 0);
8700 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8703 #ifdef TARGET_NR_mmap
8704 case TARGET_NR_mmap:
8705 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8706 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8707 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8708 || defined(TARGET_S390X)
8711 abi_ulong v1, v2, v3, v4, v5, v6;
8712 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8713 return -TARGET_EFAULT;
8720 unlock_user(v, arg1, 0);
8721 ret = get_errno(target_mmap(v1, v2, v3,
8722 target_to_host_bitmask(v4, mmap_flags_tbl),
8726 ret = get_errno(target_mmap(arg1, arg2, arg3,
8727 target_to_host_bitmask(arg4, mmap_flags_tbl),
8733 #ifdef TARGET_NR_mmap2
8734 case TARGET_NR_mmap2:
8736 #define MMAP_SHIFT 12
8738 ret = target_mmap(arg1, arg2, arg3,
8739 target_to_host_bitmask(arg4, mmap_flags_tbl),
8740 arg5, arg6 << MMAP_SHIFT);
8741 return get_errno(ret);
8743 case TARGET_NR_munmap:
8744 return get_errno(target_munmap(arg1, arg2));
8745 case TARGET_NR_mprotect:
8747 TaskState *ts = cpu->opaque;
8748 /* Special hack to detect libc making the stack executable. */
8749 if ((arg3 & PROT_GROWSDOWN)
8750 && arg1 >= ts->info->stack_limit
8751 && arg1 <= ts->info->start_stack) {
8752 arg3 &= ~PROT_GROWSDOWN;
8753 arg2 = arg2 + arg1 - ts->info->stack_limit;
8754 arg1 = ts->info->stack_limit;
8757 return get_errno(target_mprotect(arg1, arg2, arg3));
8758 #ifdef TARGET_NR_mremap
8759 case TARGET_NR_mremap:
8760 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8762 /* ??? msync/mlock/munlock are broken for softmmu. */
8763 #ifdef TARGET_NR_msync
8764 case TARGET_NR_msync:
8765 return get_errno(msync(g2h(arg1), arg2, arg3));
8767 #ifdef TARGET_NR_mlock
8768 case TARGET_NR_mlock:
8769 return get_errno(mlock(g2h(arg1), arg2));
8771 #ifdef TARGET_NR_munlock
8772 case TARGET_NR_munlock:
8773 return get_errno(munlock(g2h(arg1), arg2));
8775 #ifdef TARGET_NR_mlockall
8776 case TARGET_NR_mlockall:
8777 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8779 #ifdef TARGET_NR_munlockall
8780 case TARGET_NR_munlockall:
8781 return get_errno(munlockall());
8783 #ifdef TARGET_NR_truncate
8784 case TARGET_NR_truncate:
8785 if (!(p = lock_user_string(arg1)))
8786 return -TARGET_EFAULT;
8787 ret = get_errno(truncate(p, arg2));
8788 unlock_user(p, arg1, 0);
8791 #ifdef TARGET_NR_ftruncate
8792 case TARGET_NR_ftruncate:
8793 return get_errno(ftruncate(arg1, arg2));
8795 case TARGET_NR_fchmod:
8796 return get_errno(fchmod(arg1, arg2));
8797 #if defined(TARGET_NR_fchmodat)
8798 case TARGET_NR_fchmodat:
8799 if (!(p = lock_user_string(arg2)))
8800 return -TARGET_EFAULT;
8801 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8802 unlock_user(p, arg2, 0);
8805 case TARGET_NR_getpriority:
8806 /* Note that negative values are valid for getpriority, so we must
8807 differentiate based on errno settings. */
8809 ret = getpriority(arg1, arg2);
8810 if (ret == -1 && errno != 0) {
8811 return -host_to_target_errno(errno);
8814 /* Return value is the unbiased priority. Signal no error. */
8815 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8817 /* Return value is a biased priority to avoid negative numbers. */
8821 case TARGET_NR_setpriority:
8822 return get_errno(setpriority(arg1, arg2, arg3));
8823 #ifdef TARGET_NR_statfs
8824 case TARGET_NR_statfs:
8825 if (!(p = lock_user_string(arg1))) {
8826 return -TARGET_EFAULT;
8828 ret = get_errno(statfs(path(p), &stfs));
8829 unlock_user(p, arg1, 0);
8831 if (!is_error(ret)) {
8832 struct target_statfs *target_stfs;
8834 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8835 return -TARGET_EFAULT;
8836 __put_user(stfs.f_type, &target_stfs->f_type);
8837 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8838 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8839 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8840 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8841 __put_user(stfs.f_files, &target_stfs->f_files);
8842 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8843 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8844 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8845 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8846 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8847 #ifdef _STATFS_F_FLAGS
8848 __put_user(stfs.f_flags, &target_stfs->f_flags);
8850 __put_user(0, &target_stfs->f_flags);
8852 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8853 unlock_user_struct(target_stfs, arg2, 1);
8857 #ifdef TARGET_NR_fstatfs
8858 case TARGET_NR_fstatfs:
8859 ret = get_errno(fstatfs(arg1, &stfs));
8860 goto convert_statfs;
8862 #ifdef TARGET_NR_statfs64
8863 case TARGET_NR_statfs64:
8864 if (!(p = lock_user_string(arg1))) {
8865 return -TARGET_EFAULT;
8867 ret = get_errno(statfs(path(p), &stfs));
8868 unlock_user(p, arg1, 0);
8870 if (!is_error(ret)) {
8871 struct target_statfs64 *target_stfs;
8873 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8874 return -TARGET_EFAULT;
8875 __put_user(stfs.f_type, &target_stfs->f_type);
8876 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8877 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8878 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8879 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8880 __put_user(stfs.f_files, &target_stfs->f_files);
8881 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8882 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8883 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8884 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8885 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8886 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8887 unlock_user_struct(target_stfs, arg3, 1);
8890 case TARGET_NR_fstatfs64:
8891 ret = get_errno(fstatfs(arg1, &stfs));
8892 goto convert_statfs64;
8894 #ifdef TARGET_NR_socketcall
8895 case TARGET_NR_socketcall:
8896 return do_socketcall(arg1, arg2);
8898 #ifdef TARGET_NR_accept
8899 case TARGET_NR_accept:
8900 return do_accept4(arg1, arg2, arg3, 0);
8902 #ifdef TARGET_NR_accept4
8903 case TARGET_NR_accept4:
8904 return do_accept4(arg1, arg2, arg3, arg4);
8906 #ifdef TARGET_NR_bind
8907 case TARGET_NR_bind:
8908 return do_bind(arg1, arg2, arg3);
8910 #ifdef TARGET_NR_connect
8911 case TARGET_NR_connect:
8912 return do_connect(arg1, arg2, arg3);
8914 #ifdef TARGET_NR_getpeername
8915 case TARGET_NR_getpeername:
8916 return do_getpeername(arg1, arg2, arg3);
8918 #ifdef TARGET_NR_getsockname
8919 case TARGET_NR_getsockname:
8920 return do_getsockname(arg1, arg2, arg3);
8922 #ifdef TARGET_NR_getsockopt
8923 case TARGET_NR_getsockopt:
8924 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8926 #ifdef TARGET_NR_listen
8927 case TARGET_NR_listen:
8928 return get_errno(listen(arg1, arg2));
8930 #ifdef TARGET_NR_recv
8931 case TARGET_NR_recv:
8932 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8934 #ifdef TARGET_NR_recvfrom
8935 case TARGET_NR_recvfrom:
8936 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8938 #ifdef TARGET_NR_recvmsg
8939 case TARGET_NR_recvmsg:
8940 return do_sendrecvmsg(arg1, arg2, arg3, 0);
8942 #ifdef TARGET_NR_send
8943 case TARGET_NR_send:
8944 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8946 #ifdef TARGET_NR_sendmsg
8947 case TARGET_NR_sendmsg:
8948 return do_sendrecvmsg(arg1, arg2, arg3, 1);
8950 #ifdef TARGET_NR_sendmmsg
8951 case TARGET_NR_sendmmsg:
8952 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8953 case TARGET_NR_recvmmsg:
8954 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8956 #ifdef TARGET_NR_sendto
8957 case TARGET_NR_sendto:
8958 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8960 #ifdef TARGET_NR_shutdown
8961 case TARGET_NR_shutdown:
8962 return get_errno(shutdown(arg1, arg2));
8964 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8965 case TARGET_NR_getrandom:
8966 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8968 return -TARGET_EFAULT;
8970 ret = get_errno(getrandom(p, arg2, arg3));
8971 unlock_user(p, arg1, ret);
8974 #ifdef TARGET_NR_socket
8975 case TARGET_NR_socket:
8976 return do_socket(arg1, arg2, arg3);
8978 #ifdef TARGET_NR_socketpair
8979 case TARGET_NR_socketpair:
8980 return do_socketpair(arg1, arg2, arg3, arg4);
8982 #ifdef TARGET_NR_setsockopt
8983 case TARGET_NR_setsockopt:
8984 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8986 #if defined(TARGET_NR_syslog)
8987 case TARGET_NR_syslog:
/* Inner dispatch on the syslog action code (arg1); the switch header
 * itself is elided in this view. Actions that take no buffer share one
 * sys_syslog() call with a NULL pointer; the READ* actions map the
 * guest buffer first. */
8992 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
8993 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
8994 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
8995 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
8996 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
8997 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8998 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
8999 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9000 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9001 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9002 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9003 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
/* NOTE(review): the validity check guarding this EINVAL (presumably on
 * arg2/arg3) is elided from this view — confirm against full source. */
9006 return -TARGET_EINVAL;
9011 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9013 return -TARGET_EFAULT;
9015 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9016 unlock_user(p, arg2, arg3);
/* Unknown action code. */
9020 return -TARGET_EINVAL;
/* setitimer: translate the guest struct itimerval (two target_timevals,
 * it_interval then it_value) into the host struct, call the host
 * syscall, and copy the old value back out if arg3 is non-NULL. */
9025 case TARGET_NR_setitimer:
9027 struct itimerval value, ovalue, *pvalue;
9031 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9032 || copy_from_user_timeval(&pvalue->it_value,
9033 arg2 + sizeof(struct target_timeval)))
9034 return -TARGET_EFAULT;
9038 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9039 if (!is_error(ret) && arg3) {
9040 if (copy_to_user_timeval(arg3,
9041 &ovalue.it_interval)
9042 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9044 return -TARGET_EFAULT;
/* getitimer: fetch the host value and copy both timevals back to the
 * guest buffer at arg2, again interval first then value. */
9048 case TARGET_NR_getitimer:
9050 struct itimerval value;
9052 ret = get_errno(getitimer(arg1, &value));
9053 if (!is_error(ret) && arg2) {
9054 if (copy_to_user_timeval(arg2,
9056 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9058 return -TARGET_EFAULT;
/* stat/lstat/fstat: run the host call into the local `struct stat st`,
 * then (in the shared conversion code below) marshal it field by field
 * into the guest's struct target_stat layout. */
9062 #ifdef TARGET_NR_stat
9063 case TARGET_NR_stat:
/* path() applies the QEMU sysroot prefix remapping to the pathname. */
9064 if (!(p = lock_user_string(arg1))) {
9065 return -TARGET_EFAULT;
9067 ret = get_errno(stat(path(p), &st));
9068 unlock_user(p, arg1, 0);
9071 #ifdef TARGET_NR_lstat
9072 case TARGET_NR_lstat:
9073 if (!(p = lock_user_string(arg1))) {
9074 return -TARGET_EFAULT;
9076 ret = get_errno(lstat(path(p), &st));
9077 unlock_user(p, arg1, 0);
9080 #ifdef TARGET_NR_fstat
9081 case TARGET_NR_fstat:
9083 ret = get_errno(fstat(arg1, &st));
9084 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
/* Shared host-to-target stat conversion (fallthrough target for the
 * stat/lstat cases above). __put_user handles endianness swapping. */
9087 if (!is_error(ret)) {
9088 struct target_stat *target_st;
9090 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9091 return -TARGET_EFAULT;
/* Zero first so padding and any fields we don't set are defined. */
9092 memset(target_st, 0, sizeof(*target_st));
9093 __put_user(st.st_dev, &target_st->st_dev);
9094 __put_user(st.st_ino, &target_st->st_ino);
9095 __put_user(st.st_mode, &target_st->st_mode);
9096 __put_user(st.st_uid, &target_st->st_uid);
9097 __put_user(st.st_gid, &target_st->st_gid);
9098 __put_user(st.st_nlink, &target_st->st_nlink);
9099 __put_user(st.st_rdev, &target_st->st_rdev);
9100 __put_user(st.st_size, &target_st->st_size);
9101 __put_user(st.st_blksize, &target_st->st_blksize);
9102 __put_user(st.st_blocks, &target_st->st_blocks);
9103 __put_user(st.st_atime, &target_st->target_st_atime);
9104 __put_user(st.st_mtime, &target_st->target_st_mtime);
9105 __put_user(st.st_ctime, &target_st->target_st_ctime);
/* Nanosecond timestamps only when both host (POSIX.1-2008 st_*tim
 * members) and target layout provide them. */
9106 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9107 defined(TARGET_STAT_HAVE_NSEC)
9108 __put_user(st.st_atim.tv_nsec,
9109 &target_st->target_st_atime_nsec);
9110 __put_user(st.st_mtim.tv_nsec,
9111 &target_st->target_st_mtime_nsec);
9112 __put_user(st.st_ctim.tv_nsec,
9113 &target_st->target_st_ctime_nsec);
9115 unlock_user_struct(target_st, arg2, 1);
9120 case TARGET_NR_vhangup:
9121 return get_errno(vhangup());
9122 #ifdef TARGET_NR_syscall
/* Indirect syscall: re-enter do_syscall with the number taken from
 * arg1 and the remaining arguments shifted down by one. */
9123 case TARGET_NR_syscall:
9124 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9125 arg6, arg7, arg8, 0);
/* wait4: reap a child, translating the status word and the optional
 * rusage block back into guest layouts. */
9127 case TARGET_NR_wait4:
9130 abi_long status_ptr = arg2;
9131 struct rusage rusage, *rusage_ptr;
9132 abi_ulong target_rusage = arg4;
9133 abi_long rusage_err;
9135 rusage_ptr = &rusage;
9138 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9139 if (!is_error(ret)) {
/* Only write status back when a child was actually reaped (ret != 0). */
9140 if (status_ptr && ret) {
9141 status = host_to_target_waitstatus(status);
9142 if (put_user_s32(status, status_ptr))
9143 return -TARGET_EFAULT;
9145 if (target_rusage) {
9146 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9154 #ifdef TARGET_NR_swapoff
9155 case TARGET_NR_swapoff:
9156 if (!(p = lock_user_string(arg1)))
9157 return -TARGET_EFAULT;
9158 ret = get_errno(swapoff(p));
9159 unlock_user(p, arg1, 0);
/* sysinfo: host struct sysinfo copied member-wise (with byte swap) into
 * the guest's struct target_sysinfo. */
9162 case TARGET_NR_sysinfo:
9164 struct target_sysinfo *target_value;
9165 struct sysinfo value;
9166 ret = get_errno(sysinfo(&value));
9167 if (!is_error(ret) && arg1)
9169 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9170 return -TARGET_EFAULT;
9171 __put_user(value.uptime, &target_value->uptime);
9172 __put_user(value.loads[0], &target_value->loads[0]);
9173 __put_user(value.loads[1], &target_value->loads[1]);
9174 __put_user(value.loads[2], &target_value->loads[2]);
9175 __put_user(value.totalram, &target_value->totalram);
9176 __put_user(value.freeram, &target_value->freeram);
9177 __put_user(value.sharedram, &target_value->sharedram);
9178 __put_user(value.bufferram, &target_value->bufferram);
9179 __put_user(value.totalswap, &target_value->totalswap);
9180 __put_user(value.freeswap, &target_value->freeswap);
9181 __put_user(value.procs, &target_value->procs);
9182 __put_user(value.totalhigh, &target_value->totalhigh);
9183 __put_user(value.freehigh, &target_value->freehigh);
9184 __put_user(value.mem_unit, &target_value->mem_unit);
9185 unlock_user_struct(target_value, arg1, 1);
/* System V IPC. Architectures with a multiplexed ipc() syscall go
 * through do_ipc(); the rest expose individual sem/msg/shm syscalls,
 * each handled by a dedicated helper or a direct host call. */
9189 #ifdef TARGET_NR_ipc
9191 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9193 #ifdef TARGET_NR_semget
9194 case TARGET_NR_semget:
9195 return get_errno(semget(arg1, arg2, arg3));
9197 #ifdef TARGET_NR_semop
9198 case TARGET_NR_semop:
9199 return do_semop(arg1, arg2, arg3);
9201 #ifdef TARGET_NR_semctl
9202 case TARGET_NR_semctl:
9203 return do_semctl(arg1, arg2, arg3, arg4);
9205 #ifdef TARGET_NR_msgctl
9206 case TARGET_NR_msgctl:
9207 return do_msgctl(arg1, arg2, arg3);
9209 #ifdef TARGET_NR_msgget
9210 case TARGET_NR_msgget:
9211 return get_errno(msgget(arg1, arg2));
9213 #ifdef TARGET_NR_msgrcv
9214 case TARGET_NR_msgrcv:
9215 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9217 #ifdef TARGET_NR_msgsnd
9218 case TARGET_NR_msgsnd:
9219 return do_msgsnd(arg1, arg2, arg3, arg4);
9221 #ifdef TARGET_NR_shmget
9222 case TARGET_NR_shmget:
9223 return get_errno(shmget(arg1, arg2, arg3));
9225 #ifdef TARGET_NR_shmctl
9226 case TARGET_NR_shmctl:
9227 return do_shmctl(arg1, arg2, arg3);
9229 #ifdef TARGET_NR_shmat
9230 case TARGET_NR_shmat:
9231 return do_shmat(cpu_env, arg1, arg2, arg3);
9233 #ifdef TARGET_NR_shmdt
9234 case TARGET_NR_shmdt:
9235 return do_shmdt(arg1);
9237 case TARGET_NR_fsync:
9238 return get_errno(fsync(arg1));
9239 case TARGET_NR_clone:
9240 /* Linux manages to have three different orderings for its
9241 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9242 * match the kernel's CONFIG_CLONE_* settings.
9243 * Microblaze is further special in that it uses a sixth
9244 * implicit argument to clone for the TLS pointer.
9246 #if defined(TARGET_MICROBLAZE)
9247 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9248 #elif defined(TARGET_CLONE_BACKWARDS)
9249 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9250 #elif defined(TARGET_CLONE_BACKWARDS2)
9251 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9253 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9256 #ifdef __NR_exit_group
9257 /* new thread calls */
9258 case TARGET_NR_exit_group:
/* Run guest atexit-style cleanup (gdbstub detach etc.) before the
 * process-wide exit; exit_group() does not return on success. */
9259 preexit_cleanup(cpu_env, arg1);
9260 return get_errno(exit_group(arg1));
9262 case TARGET_NR_setdomainname:
9263 if (!(p = lock_user_string(arg1)))
9264 return -TARGET_EFAULT;
9265 ret = get_errno(setdomainname(p, arg2));
9266 unlock_user(p, arg1, 0);
9268 case TARGET_NR_uname:
9269 /* no need to transcode because we use the linux syscall */
9271 struct new_utsname * buf;
9273 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9274 return -TARGET_EFAULT;
9275 ret = get_errno(sys_uname(buf));
9276 if (!is_error(ret)) {
9277 /* Overwrite the native machine name with whatever is being
9279 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9280 sizeof(buf->machine));
9281 /* Allow the user to override the reported release. */
9282 if (qemu_uname_release && *qemu_uname_release) {
9283 g_strlcpy(buf->release, qemu_uname_release,
9284 sizeof(buf->release));
9287 unlock_user_struct(buf, arg1, 1);
9291 case TARGET_NR_modify_ldt:
9292 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9293 #if !defined(TARGET_X86_64)
9294 case TARGET_NR_vm86:
9295 return do_vm86(cpu_env, arg1, arg2);
/* adjtimex: struct timex converted in both directions around the host
 * call; a successful return value is the clock state, not 0. */
9298 case TARGET_NR_adjtimex:
9300 struct timex host_buf;
9302 if (target_to_host_timex(&host_buf, arg1) != 0) {
9303 return -TARGET_EFAULT;
9305 ret = get_errno(adjtimex(&host_buf));
9306 if (!is_error(ret)) {
9307 if (host_to_target_timex(arg1, &host_buf) != 0) {
9308 return -TARGET_EFAULT;
9313 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
/* Same round-trip conversion as adjtimex but on a specific clockid. */
9314 case TARGET_NR_clock_adjtime:
9316 struct timex htx, *phtx = &htx;
9318 if (target_to_host_timex(phtx, arg2) != 0) {
9319 return -TARGET_EFAULT;
9321 ret = get_errno(clock_adjtime(arg1, phtx));
9322 if (!is_error(ret) && phtx) {
9323 if (host_to_target_timex(arg2, phtx) != 0) {
9324 return -TARGET_EFAULT;
9330 case TARGET_NR_getpgid:
9331 return get_errno(getpgid(arg1));
9332 case TARGET_NR_fchdir:
9333 return get_errno(fchdir(arg1));
9334 case TARGET_NR_personality:
9335 return get_errno(personality(arg1));
9336 #ifdef TARGET_NR__llseek /* Not on alpha */
/* _llseek: 64-bit offset split across arg2 (high) / arg3 (low); the
 * resulting position is written back through the arg4 pointer. Hosts
 * without __NR_llseek are 64-bit and can use plain lseek(). */
9337 case TARGET_NR__llseek:
9340 #if !defined(__NR_llseek)
9341 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9343 ret = get_errno(res);
9348 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9350 if ((ret == 0) && put_user_s64(res, arg4)) {
9351 return -TARGET_EFAULT;
9356 #ifdef TARGET_NR_getdents
9357 case TARGET_NR_getdents:
9358 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
/* 32-bit guest on 64-bit host: the host linux_dirent records are wider
 * than the target's, so read into a bounce buffer and repack each
 * record into the guest buffer with swapped/narrowed fields. */
9359 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9361 struct target_dirent *target_dirp;
9362 struct linux_dirent *dirp;
9363 abi_long count = arg3;
9365 dirp = g_try_malloc(count);
9367 return -TARGET_ENOMEM;
9370 ret = get_errno(sys_getdents(arg1, dirp, count));
9371 if (!is_error(ret)) {
9372 struct linux_dirent *de;
9373 struct target_dirent *tde;
9375 int reclen, treclen;
9376 int count1, tnamelen;
9380 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9381 return -TARGET_EFAULT;
/* Per-record repack: target reclen = name length + target header. */
9384 reclen = de->d_reclen;
9385 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9386 assert(tnamelen >= 0);
9387 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9388 assert(count1 + treclen <= count);
9389 tde->d_reclen = tswap16(treclen);
9390 tde->d_ino = tswapal(de->d_ino);
9391 tde->d_off = tswapal(de->d_off);
9392 memcpy(tde->d_name, de->d_name, tnamelen);
9393 de = (struct linux_dirent *)((char *)de + reclen);
9395 tde = (struct target_dirent *)((char *)tde + treclen);
9399 unlock_user(target_dirp, arg2, ret);
/* Same-width case: records can be byte-swapped in place in the guest
 * buffer, no bounce buffer needed. */
9405 struct linux_dirent *dirp;
9406 abi_long count = arg3;
9408 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9409 return -TARGET_EFAULT;
9410 ret = get_errno(sys_getdents(arg1, dirp, count));
9411 if (!is_error(ret)) {
9412 struct linux_dirent *de;
9417 reclen = de->d_reclen;
9420 de->d_reclen = tswap16(reclen);
9421 tswapls(&de->d_ino);
9422 tswapls(&de->d_off);
9423 de = (struct linux_dirent *)((char *)de + reclen);
9427 unlock_user(dirp, arg2, ret);
9431 /* Implement getdents in terms of getdents64 */
9433 struct linux_dirent64 *dirp;
9434 abi_long count = arg3;
9436 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9438 return -TARGET_EFAULT;
9440 ret = get_errno(sys_getdents64(arg1, dirp, count));
9441 if (!is_error(ret)) {
9442 /* Convert the dirent64 structs to target dirent. We do this
9443 * in-place, since we can guarantee that a target_dirent is no
9444 * larger than a dirent64; however this means we have to be
9445 * careful to read everything before writing in the new format.
9447 struct linux_dirent64 *de;
9448 struct target_dirent *tde;
9453 tde = (struct target_dirent *)dirp;
/* Snapshot the dirent64 fields before the overlapping write below. */
9455 int namelen, treclen;
9456 int reclen = de->d_reclen;
9457 uint64_t ino = de->d_ino;
9458 int64_t off = de->d_off;
9459 uint8_t type = de->d_type;
9461 namelen = strlen(de->d_name);
9462 treclen = offsetof(struct target_dirent, d_name)
9464 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9466 memmove(tde->d_name, de->d_name, namelen + 1);
9467 tde->d_ino = tswapal(ino);
9468 tde->d_off = tswapal(off);
9469 tde->d_reclen = tswap16(treclen);
9470 /* The target_dirent type is in what was formerly a padding
9471 * byte at the end of the structure:
9473 *(((char *)tde) + treclen - 1) = type;
9475 de = (struct linux_dirent64 *)((char *)de + reclen);
9476 tde = (struct target_dirent *)((char *)tde + treclen);
9482 unlock_user(dirp, arg2, ret);
9486 #endif /* TARGET_NR_getdents */
9487 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/* getdents64: record layout is identical on host and target, so only
 * multi-byte fields need swapping, done in place. */
9488 case TARGET_NR_getdents64:
9490 struct linux_dirent64 *dirp;
9491 abi_long count = arg3;
9492 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9493 return -TARGET_EFAULT;
9494 ret = get_errno(sys_getdents64(arg1, dirp, count));
9495 if (!is_error(ret)) {
9496 struct linux_dirent64 *de;
9501 reclen = de->d_reclen;
9504 de->d_reclen = tswap16(reclen);
9505 tswap64s((uint64_t *)&de->d_ino);
9506 tswap64s((uint64_t *)&de->d_off);
9507 de = (struct linux_dirent64 *)((char *)de + reclen);
9511 unlock_user(dirp, arg2, ret);
9514 #endif /* TARGET_NR_getdents64 */
9515 #if defined(TARGET_NR__newselect)
9516 case TARGET_NR__newselect:
9517 return do_select(arg1, arg2, arg3, arg4, arg5);
/* poll/ppoll share one body: copy the target pollfd array into a host
 * array, run safe_ppoll(), then copy revents back out. */
9519 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9520 # ifdef TARGET_NR_poll
9521 case TARGET_NR_poll:
9523 # ifdef TARGET_NR_ppoll
9524 case TARGET_NR_ppoll:
9527 struct target_pollfd *target_pfd;
9528 unsigned int nfds = arg2;
/* Bound nfds so the size computation below cannot overflow. */
9535 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9536 return -TARGET_EINVAL;
9539 target_pfd = lock_user(VERIFY_WRITE, arg1,
9540 sizeof(struct target_pollfd) * nfds, 1);
9542 return -TARGET_EFAULT;
9545 pfd = alloca(sizeof(struct pollfd) * nfds);
9546 for (i = 0; i < nfds; i++) {
9547 pfd[i].fd = tswap32(target_pfd[i].fd);
9548 pfd[i].events = tswap16(target_pfd[i].events);
/* ppoll variant: optional timespec timeout (arg3) and sigmask (arg4),
 * with the updated remaining time written back on return. */
9553 # ifdef TARGET_NR_ppoll
9554 case TARGET_NR_ppoll:
9556 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9557 target_sigset_t *target_set;
9558 sigset_t _set, *set = &_set;
9561 if (target_to_host_timespec(timeout_ts, arg3)) {
9562 unlock_user(target_pfd, arg1, 0);
9563 return -TARGET_EFAULT;
/* The kernel requires the sigset size argument to match exactly. */
9570 if (arg5 != sizeof(target_sigset_t)) {
9571 unlock_user(target_pfd, arg1, 0);
9572 return -TARGET_EINVAL;
9575 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9577 unlock_user(target_pfd, arg1, 0);
9578 return -TARGET_EFAULT;
9580 target_to_host_sigset(set, target_set);
9585 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9586 set, SIGSET_T_SIZE));
9588 if (!is_error(ret) && arg3) {
9589 host_to_target_timespec(arg3, timeout_ts);
9592 unlock_user(target_set, arg4, 0);
/* plain poll: millisecond timeout (arg3) converted to a timespec;
 * negative means wait forever (NULL pts). */
9597 # ifdef TARGET_NR_poll
9598 case TARGET_NR_poll:
9600 struct timespec ts, *pts;
9603 /* Convert ms to secs, ns */
9604 ts.tv_sec = arg3 / 1000;
9605 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9608 /* -ve poll() timeout means "infinite" */
9611 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9616 g_assert_not_reached();
/* Copy the revents results back to the guest pollfd array. */
9619 if (!is_error(ret)) {
9620 for(i = 0; i < nfds; i++) {
9621 target_pfd[i].revents = tswap16(pfd[i].revents);
9624 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9628 case TARGET_NR_flock:
9629 /* NOTE: the flock constant seems to be the same for every
9631 return get_errno(safe_flock(arg1, arg2));
/* readv/writev and their positional variants: lock_iovec() maps the
 * guest iovec array; on its failure the errno it left behind is
 * converted directly since no host call was made. */
9632 case TARGET_NR_readv:
9634 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9636 ret = get_errno(safe_readv(arg1, vec, arg3));
9637 unlock_iovec(vec, arg2, arg3, 1);
9639 ret = -host_to_target_errno(errno);
9643 case TARGET_NR_writev:
9645 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9647 ret = get_errno(safe_writev(arg1, vec, arg3));
9648 unlock_iovec(vec, arg2, arg3, 0);
9650 ret = -host_to_target_errno(errno);
9654 #if defined(TARGET_NR_preadv)
/* preadv/pwritev: the 64-bit offset arrives split across arg4/arg5;
 * target_to_host_low_high() resolves the per-arch ordering. */
9655 case TARGET_NR_preadv:
9657 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9659 unsigned long low, high;
9661 target_to_host_low_high(arg4, arg5, &low, &high);
9662 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9663 unlock_iovec(vec, arg2, arg3, 1);
9665 ret = -host_to_target_errno(errno);
9670 #if defined(TARGET_NR_pwritev)
9671 case TARGET_NR_pwritev:
9673 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9675 unsigned long low, high;
9677 target_to_host_low_high(arg4, arg5, &low, &high);
9678 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9679 unlock_iovec(vec, arg2, arg3, 0);
9681 ret = -host_to_target_errno(errno);
9686 case TARGET_NR_getsid:
9687 return get_errno(getsid(arg1));
9688 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9689 case TARGET_NR_fdatasync:
9690 return get_errno(fdatasync(arg1));
9692 #ifdef TARGET_NR__sysctl
9693 case TARGET_NR__sysctl:
9694 /* We don't implement this, but ENOTDIR is always a safe
9696 return -TARGET_ENOTDIR;
9698 case TARGET_NR_sched_getaffinity:
9700 unsigned int mask_size;
9701 unsigned long *mask;
9704 * sched_getaffinity needs multiples of ulong, so need to take
9705 * care of mismatches between target ulong and host ulong sizes.
/* Guest buffer size must be a whole number of abi_ulongs. */
9707 if (arg2 & (sizeof(abi_ulong) - 1)) {
9708 return -TARGET_EINVAL;
/* Round up to host ulong granularity for the kernel call. */
9710 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9712 mask = alloca(mask_size);
9713 memset(mask, 0, mask_size);
9714 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9716 if (!is_error(ret)) {
9718 /* More data returned than the caller's buffer will fit.
9719 * This only happens if sizeof(abi_long) < sizeof(long)
9720 * and the caller passed us a buffer holding an odd number
9721 * of abi_longs. If the host kernel is actually using the
9722 * extra 4 bytes then fail EINVAL; otherwise we can just
9723 * ignore them and only copy the interesting part.
9725 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9726 if (numcpus > arg2 * 8) {
9727 return -TARGET_EINVAL;
9732 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9733 return -TARGET_EFAULT;
9738 case TARGET_NR_sched_setaffinity:
9740 unsigned int mask_size;
9741 unsigned long *mask;
9744 * sched_setaffinity needs multiples of ulong, so need to take
9745 * care of mismatches between target ulong and host ulong sizes.
9747 if (arg2 & (sizeof(abi_ulong) - 1)) {
9748 return -TARGET_EINVAL;
9750 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9751 mask = alloca(mask_size);
9753 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9758 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
/* getcpu: NULL-able output pointers; only write back those requested. */
9760 case TARGET_NR_getcpu:
9763 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9764 arg2 ? &node : NULL,
9766 if (is_error(ret)) {
9769 if (arg1 && put_user_u32(cpu, arg1)) {
9770 return -TARGET_EFAULT;
9772 if (arg2 && put_user_u32(node, arg2)) {
9773 return -TARGET_EFAULT;
/* sched_setparam/getparam/setscheduler: struct sched_param holds a
 * single int priority, byte-swapped across the guest boundary. */
9777 case TARGET_NR_sched_setparam:
9779 struct sched_param *target_schp;
9780 struct sched_param schp;
9783 return -TARGET_EINVAL;
9785 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9786 return -TARGET_EFAULT;
9787 schp.sched_priority = tswap32(target_schp->sched_priority);
9788 unlock_user_struct(target_schp, arg2, 0);
9789 return get_errno(sched_setparam(arg1, &schp));
9791 case TARGET_NR_sched_getparam:
9793 struct sched_param *target_schp;
9794 struct sched_param schp;
9797 return -TARGET_EINVAL;
9799 ret = get_errno(sched_getparam(arg1, &schp));
9800 if (!is_error(ret)) {
9801 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9802 return -TARGET_EFAULT;
9803 target_schp->sched_priority = tswap32(schp.sched_priority);
9804 unlock_user_struct(target_schp, arg2, 1);
9808 case TARGET_NR_sched_setscheduler:
9810 struct sched_param *target_schp;
9811 struct sched_param schp;
9813 return -TARGET_EINVAL;
9815 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9816 return -TARGET_EFAULT;
9817 schp.sched_priority = tswap32(target_schp->sched_priority);
9818 unlock_user_struct(target_schp, arg3, 0);
9819 return get_errno(sched_setscheduler(arg1, arg2, &schp));
/* The remaining sched_* calls take/return plain integers only. */
9821 case TARGET_NR_sched_getscheduler:
9822 return get_errno(sched_getscheduler(arg1));
9823 case TARGET_NR_sched_yield:
9824 return get_errno(sched_yield());
9825 case TARGET_NR_sched_get_priority_max:
9826 return get_errno(sched_get_priority_max(arg1));
9827 case TARGET_NR_sched_get_priority_min:
9828 return get_errno(sched_get_priority_min(arg1));
9829 case TARGET_NR_sched_rr_get_interval:
9832 ret = get_errno(sched_rr_get_interval(arg1, &ts));
9833 if (!is_error(ret)) {
9834 ret = host_to_target_timespec(arg2, &ts);
/* nanosleep: on interruption (error return) the remaining time is
 * reported back through arg2 if the caller supplied it. */
9838 case TARGET_NR_nanosleep:
9840 struct timespec req, rem;
9841 target_to_host_timespec(&req, arg1);
9842 ret = get_errno(safe_nanosleep(&req, &rem));
9843 if (is_error(ret) && arg2) {
9844 host_to_target_timespec(arg2, &rem);
/* prctl: dispatch on the option (arg1). Pointer-taking options are
 * translated individually; architecture-specific options (MIPS FP
 * mode, AArch64 SVE/PAC) are emulated entirely in QEMU; everything
 * else falls through to the host prctl() at the bottom. */
9848 case TARGET_NR_prctl:
9850 case PR_GET_PDEATHSIG:
9853 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9854 if (!is_error(ret) && arg2
9855 && put_user_ual(deathsig, arg2)) {
9856 return -TARGET_EFAULT;
/* PR_GET_NAME: the kernel writes up to 16 bytes including the NUL. */
9863 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9865 return -TARGET_EFAULT;
9867 ret = get_errno(prctl(arg1, (unsigned long)name,
9869 unlock_user(name, arg2, 16);
/* PR_SET_NAME: read the 16-byte name from guest memory. */
9874 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9876 return -TARGET_EFAULT;
9878 ret = get_errno(prctl(arg1, (unsigned long)name,
9880 unlock_user(name, arg2, 0);
/* MIPS: report the emulated FP mode from CP0 Status.FR / Config5.FRE. */
9885 case TARGET_PR_GET_FP_MODE:
9887 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9889 if (env->CP0_Status & (1 << CP0St_FR)) {
9890 ret |= TARGET_PR_FP_MODE_FR;
9892 if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9893 ret |= TARGET_PR_FP_MODE_FRE;
/* MIPS: validate and apply a new FP mode, converting the FP register
 * file layout between FR=0 (paired 32-bit) and FR=1 (64-bit). */
9897 case TARGET_PR_SET_FP_MODE:
9899 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9900 bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9901 bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9902 bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9903 bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9905 const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9906 TARGET_PR_FP_MODE_FRE;
9908 /* If nothing to change, return right away, successfully. */
9909 if (old_fr == new_fr && old_fre == new_fre) {
9912 /* Check the value is valid */
9913 if (arg2 & ~known_bits) {
9914 return -TARGET_EOPNOTSUPP;
9916 /* Setting FRE without FR is not supported. */
9917 if (new_fre && !new_fr) {
9918 return -TARGET_EOPNOTSUPP;
9920 if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9921 /* FR1 is not supported */
9922 return -TARGET_EOPNOTSUPP;
9924 if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9925 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9926 /* cannot set FR=0 */
9927 return -TARGET_EOPNOTSUPP;
9929 if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9930 /* Cannot set FRE=1 */
9931 return -TARGET_EOPNOTSUPP;
/* Repack even/odd register pairs to match the new FR layout. */
9935 fpr_t *fpr = env->active_fpu.fpr;
9936 for (i = 0; i < 32 ; i += 2) {
9937 if (!old_fr && new_fr) {
9938 fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9939 } else if (old_fr && !new_fr) {
9940 fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
/* Update CP0 state and hflags so the translator sees the new mode. */
9945 env->CP0_Status |= (1 << CP0St_FR);
9946 env->hflags |= MIPS_HFLAG_F64;
9948 env->CP0_Status &= ~(1 << CP0St_FR);
9949 env->hflags &= ~MIPS_HFLAG_F64;
9952 env->CP0_Config5 |= (1 << CP0C5_FRE);
9953 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9954 env->hflags |= MIPS_HFLAG_FRE;
9957 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9958 env->hflags &= ~MIPS_HFLAG_FRE;
9964 #ifdef TARGET_AARCH64
9965 case TARGET_PR_SVE_SET_VL:
9967 * We cannot support either PR_SVE_SET_VL_ONEXEC or
9968 * PR_SVE_VL_INHERIT. Note the kernel definition
9969 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9970 * even though the current architectural maximum is VQ=16.
9972 ret = -TARGET_EINVAL;
9973 if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
9974 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9975 CPUARMState *env = cpu_env;
9976 ARMCPU *cpu = env_archcpu(env);
9977 uint32_t vq, old_vq;
9979 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9980 vq = MAX(arg2 / 16, 1);
9981 vq = MIN(vq, cpu->sve_max_vq);
9984 aarch64_sve_narrow_vq(env, vq);
9986 env->vfp.zcr_el[1] = vq - 1;
/* SVE_GET_VL: current vector length in bytes, from ZCR_EL1. */
9990 case TARGET_PR_SVE_GET_VL:
9991 ret = -TARGET_EINVAL;
9993 ARMCPU *cpu = env_archcpu(cpu_env);
9994 if (cpu_isar_feature(aa64_sve, cpu)) {
9995 ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
/* PAC_RESET_KEYS: regenerate the selected pointer-auth keys with
 * fresh guest-visible randomness; arg2 == 0 means all keys. */
9999 case TARGET_PR_PAC_RESET_KEYS:
10001 CPUARMState *env = cpu_env;
10002 ARMCPU *cpu = env_archcpu(env);
10004 if (arg3 || arg4 || arg5) {
10005 return -TARGET_EINVAL;
10007 if (cpu_isar_feature(aa64_pauth, cpu)) {
10008 int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10009 TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10010 TARGET_PR_PAC_APGAKEY);
10016 } else if (arg2 & ~all) {
10017 return -TARGET_EINVAL;
10019 if (arg2 & TARGET_PR_PAC_APIAKEY) {
10020 ret |= qemu_guest_getrandom(&env->keys.apia,
10021 sizeof(ARMPACKey), &err);
10023 if (arg2 & TARGET_PR_PAC_APIBKEY) {
10024 ret |= qemu_guest_getrandom(&env->keys.apib,
10025 sizeof(ARMPACKey), &err);
10027 if (arg2 & TARGET_PR_PAC_APDAKEY) {
10028 ret |= qemu_guest_getrandom(&env->keys.apda,
10029 sizeof(ARMPACKey), &err);
10031 if (arg2 & TARGET_PR_PAC_APDBKEY) {
10032 ret |= qemu_guest_getrandom(&env->keys.apdb,
10033 sizeof(ARMPACKey), &err);
10035 if (arg2 & TARGET_PR_PAC_APGAKEY) {
10036 ret |= qemu_guest_getrandom(&env->keys.apga,
10037 sizeof(ARMPACKey), &err);
10041 * Some unknown failure in the crypto. The best
10042 * we can do is log it and fail the syscall.
10043 * The real syscall cannot fail this way.
10045 qemu_log_mask(LOG_UNIMP,
10046 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10047 error_get_pretty(err));
10049 return -TARGET_EIO;
10054 return -TARGET_EINVAL;
10055 #endif /* AARCH64 */
10056 case PR_GET_SECCOMP:
10057 case PR_SET_SECCOMP:
10058 /* Disable seccomp to prevent the target disabling syscalls we
10060 return -TARGET_EINVAL;
10062 /* Most prctl options have no pointer arguments */
10063 return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10066 #ifdef TARGET_NR_arch_prctl
10067 case TARGET_NR_arch_prctl:
10068 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10069 return do_arch_prctl(cpu_env, arg1, arg2);
/* pread64/pwrite64: on ABIs that align 64-bit register pairs the
 * offset halves shift up one argument slot (handled by the elided
 * regpairs_aligned branch body); the two halves are then recombined
 * with target_offset64(). */
10074 #ifdef TARGET_NR_pread64
10075 case TARGET_NR_pread64:
10076 if (regpairs_aligned(cpu_env, num)) {
10080 if (arg2 == 0 && arg3 == 0) {
10081 /* Special-case NULL buffer and zero length, which should succeed */
10084 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10086 return -TARGET_EFAULT;
10089 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10090 unlock_user(p, arg2, ret);
10092 case TARGET_NR_pwrite64:
10093 if (regpairs_aligned(cpu_env, num)) {
10097 if (arg2 == 0 && arg3 == 0) {
10098 /* Special-case NULL buffer and zero length, which should succeed */
10101 p = lock_user(VERIFY_READ, arg2, arg3, 1);
10103 return -TARGET_EFAULT;
10106 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10107 unlock_user(p, arg2, 0);
10110 case TARGET_NR_getcwd:
10111 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10112 return -TARGET_EFAULT;
10113 ret = get_errno(sys_getcwd1(p, arg2));
10114 unlock_user(p, arg1, ret);
/* capget/capset share one body: translate the cap_user_header, then
 * the cap_user_data array (1 item for _LINUX_CAPABILITY_VERSION,
 * 2 items for later versions), in the direction the call requires. */
10116 case TARGET_NR_capget:
10117 case TARGET_NR_capset:
10119 struct target_user_cap_header *target_header;
10120 struct target_user_cap_data *target_data = NULL;
10121 struct __user_cap_header_struct header;
10122 struct __user_cap_data_struct data[2];
10123 struct __user_cap_data_struct *dataptr = NULL;
10124 int i, target_datalen;
10125 int data_items = 1;
10127 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10128 return -TARGET_EFAULT;
10130 header.version = tswap32(target_header->version);
10131 header.pid = tswap32(target_header->pid);
10133 if (header.version != _LINUX_CAPABILITY_VERSION) {
10134 /* Version 2 and up takes pointer to two user_data structs */
10138 target_datalen = sizeof(*target_data) * data_items;
/* capget writes to the guest data buffer; capset reads from it. */
10141 if (num == TARGET_NR_capget) {
10142 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10144 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10146 if (!target_data) {
10147 unlock_user_struct(target_header, arg1, 0);
10148 return -TARGET_EFAULT;
10151 if (num == TARGET_NR_capset) {
10152 for (i = 0; i < data_items; i++) {
10153 data[i].effective = tswap32(target_data[i].effective);
10154 data[i].permitted = tswap32(target_data[i].permitted);
10155 data[i].inheritable = tswap32(target_data[i].inheritable);
10162 if (num == TARGET_NR_capget) {
10163 ret = get_errno(capget(&header, dataptr));
10165 ret = get_errno(capset(&header, dataptr));
10168 /* The kernel always updates version for both capget and capset */
10169 target_header->version = tswap32(header.version);
10170 unlock_user_struct(target_header, arg1, 1);
10173 if (num == TARGET_NR_capget) {
10174 for (i = 0; i < data_items; i++) {
10175 target_data[i].effective = tswap32(data[i].effective);
10176 target_data[i].permitted = tswap32(data[i].permitted);
10177 target_data[i].inheritable = tswap32(data[i].inheritable);
10179 unlock_user(target_data, arg2, target_datalen);
10181 unlock_user(target_data, arg2, 0);
/* sigaltstack needs the current guest SP to validate the new stack. */
10186 case TARGET_NR_sigaltstack:
10187 return do_sigaltstack(arg1, arg2,
10188 get_sp_from_cpustate((CPUArchState *)cpu_env));
10190 #ifdef CONFIG_SENDFILE
/* sendfile/sendfile64: optional in/out offset pointer at arg3; read it
 * before the call, write the updated offset back afterwards. The two
 * variants differ only in the guest offset width (abi_long vs s64). */
10191 #ifdef TARGET_NR_sendfile
10192 case TARGET_NR_sendfile:
10194 off_t *offp = NULL;
10197 ret = get_user_sal(off, arg3);
10198 if (is_error(ret)) {
10203 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10204 if (!is_error(ret) && arg3) {
10205 abi_long ret2 = put_user_sal(off, arg3);
10206 if (is_error(ret2)) {
10213 #ifdef TARGET_NR_sendfile64
10214 case TARGET_NR_sendfile64:
10216 off_t *offp = NULL;
10219 ret = get_user_s64(off, arg3);
10220 if (is_error(ret)) {
10225 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10226 if (!is_error(ret) && arg3) {
10227 abi_long ret2 = put_user_s64(off, arg3);
10228 if (is_error(ret2)) {
10236 #ifdef TARGET_NR_vfork
/* vfork is emulated as a clone with the vfork flag set. */
10237 case TARGET_NR_vfork:
10238 return get_errno(do_fork(cpu_env,
10239 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10242 #ifdef TARGET_NR_ugetrlimit
10243 case TARGET_NR_ugetrlimit:
10245 struct rlimit rlim;
10246 int resource = target_to_host_resource(arg1);
10247 ret = get_errno(getrlimit(resource, &rlim));
10248 if (!is_error(ret)) {
10249 struct target_rlimit *target_rlim;
10250 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10251 return -TARGET_EFAULT;
10252 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10253 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10254 unlock_user_struct(target_rlim, arg2, 1);
/* truncate64/ftruncate64: 64-bit length split across registers,
 * reassembled by the target_*truncate64() helpers. */
10259 #ifdef TARGET_NR_truncate64
10260 case TARGET_NR_truncate64:
10261 if (!(p = lock_user_string(arg1)))
10262 return -TARGET_EFAULT;
10263 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10264 unlock_user(p, arg1, 0);
10267 #ifdef TARGET_NR_ftruncate64
10268 case TARGET_NR_ftruncate64:
10269 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
/* stat64 family: host stat into `st`, then host_to_target_stat64()
 * writes the guest's 64-bit stat layout. */
10271 #ifdef TARGET_NR_stat64
10272 case TARGET_NR_stat64:
10273 if (!(p = lock_user_string(arg1))) {
10274 return -TARGET_EFAULT;
10276 ret = get_errno(stat(path(p), &st));
10277 unlock_user(p, arg1, 0);
10278 if (!is_error(ret))
10279 ret = host_to_target_stat64(cpu_env, arg2, &st);
10282 #ifdef TARGET_NR_lstat64
10283 case TARGET_NR_lstat64:
10284 if (!(p = lock_user_string(arg1))) {
10285 return -TARGET_EFAULT;
10287 ret = get_errno(lstat(path(p), &st));
10288 unlock_user(p, arg1, 0);
10289 if (!is_error(ret))
10290 ret = host_to_target_stat64(cpu_env, arg2, &st);
10293 #ifdef TARGET_NR_fstat64
10294 case TARGET_NR_fstat64:
10295 ret = get_errno(fstat(arg1, &st));
10296 if (!is_error(ret))
10297 ret = host_to_target_stat64(cpu_env, arg2, &st);
10300 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10301 #ifdef TARGET_NR_fstatat64
10302 case TARGET_NR_fstatat64:
10304 #ifdef TARGET_NR_newfstatat
10305 case TARGET_NR_newfstatat:
/* fstatat64/newfstatat: same call under two historical names; path is
 * at arg2, flags at arg4, result buffer at arg3. */
10307 if (!(p = lock_user_string(arg2))) {
10308 return -TARGET_EFAULT;
10310 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10311 unlock_user(p, arg2, 0);
10312 if (!is_error(ret))
10313 ret = host_to_target_stat64(cpu_env, arg3, &st);
10316 #if defined(TARGET_NR_statx)
/* statx: use the host statx syscall when available; otherwise fall
 * back to fstatat() and synthesize a struct target_statx from the
 * classic stat fields. */
10317 case TARGET_NR_statx:
10319 struct target_statx *target_stx;
10323 p = lock_user_string(arg2);
10325 return -TARGET_EFAULT;
10327 #if defined(__NR_statx)
10330 * It is assumed that struct statx is architecture independent.
10332 struct target_statx host_stx;
10335 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10336 if (!is_error(ret)) {
10337 if (host_to_target_statx(&host_stx, arg5) != 0) {
10338 unlock_user(p, arg2, 0);
10339 return -TARGET_EFAULT;
/* Only take the fallback path when the host lacks statx (ENOSYS). */
10343 if (ret != -TARGET_ENOSYS) {
10344 unlock_user(p, arg2, 0);
10349 ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10350 unlock_user(p, arg2, 0);
10352 if (!is_error(ret)) {
10353 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10354 return -TARGET_EFAULT;
10356 memset(target_stx, 0, sizeof(*target_stx));
10357 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10358 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10359 __put_user(st.st_ino, &target_stx->stx_ino);
10360 __put_user(st.st_mode, &target_stx->stx_mode);
10361 __put_user(st.st_uid, &target_stx->stx_uid);
10362 __put_user(st.st_gid, &target_stx->stx_gid);
10363 __put_user(st.st_nlink, &target_stx->stx_nlink);
10364 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10365 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10366 __put_user(st.st_size, &target_stx->stx_size);
10367 __put_user(st.st_blksize, &target_stx->stx_blksize);
10368 __put_user(st.st_blocks, &target_stx->stx_blocks);
10369 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10370 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10371 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10372 unlock_user_struct(target_stx, arg5, 1);
10377 #ifdef TARGET_NR_lchown
10378 case TARGET_NR_lchown:
10379 if (!(p = lock_user_string(arg1)))
10380 return -TARGET_EFAULT;
10381 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10382 unlock_user(p, arg1, 0);
10385 #ifdef TARGET_NR_getuid
10386 case TARGET_NR_getuid:
10387 return get_errno(high2lowuid(getuid()));
10389 #ifdef TARGET_NR_getgid
10390 case TARGET_NR_getgid:
10391 return get_errno(high2lowgid(getgid()));
10393 #ifdef TARGET_NR_geteuid
10394 case TARGET_NR_geteuid:
10395 return get_errno(high2lowuid(geteuid()));
10397 #ifdef TARGET_NR_getegid
10398 case TARGET_NR_getegid:
10399 return get_errno(high2lowgid(getegid()));
10401 case TARGET_NR_setreuid:
10402 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10403 case TARGET_NR_setregid:
10404 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10405 case TARGET_NR_getgroups:
10407 int gidsetsize = arg1;
10408 target_id *target_grouplist;
10412 grouplist = alloca(gidsetsize * sizeof(gid_t));
10413 ret = get_errno(getgroups(gidsetsize, grouplist));
10414 if (gidsetsize == 0)
10416 if (!is_error(ret)) {
10417 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10418 if (!target_grouplist)
10419 return -TARGET_EFAULT;
10420 for(i = 0;i < ret; i++)
10421 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10422 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10426 case TARGET_NR_setgroups:
10428 int gidsetsize = arg1;
10429 target_id *target_grouplist;
10430 gid_t *grouplist = NULL;
10433 grouplist = alloca(gidsetsize * sizeof(gid_t));
10434 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10435 if (!target_grouplist) {
10436 return -TARGET_EFAULT;
10438 for (i = 0; i < gidsetsize; i++) {
10439 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10441 unlock_user(target_grouplist, arg2, 0);
10443 return get_errno(setgroups(gidsetsize, grouplist));
10445 case TARGET_NR_fchown:
10446 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10447 #if defined(TARGET_NR_fchownat)
10448 case TARGET_NR_fchownat:
10449 if (!(p = lock_user_string(arg2)))
10450 return -TARGET_EFAULT;
10451 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10452 low2highgid(arg4), arg5));
10453 unlock_user(p, arg2, 0);
10456 #ifdef TARGET_NR_setresuid
10457 case TARGET_NR_setresuid:
10458 return get_errno(sys_setresuid(low2highuid(arg1),
10460 low2highuid(arg3)));
10462 #ifdef TARGET_NR_getresuid
10463 case TARGET_NR_getresuid:
10465 uid_t ruid, euid, suid;
10466 ret = get_errno(getresuid(&ruid, &euid, &suid));
10467 if (!is_error(ret)) {
10468 if (put_user_id(high2lowuid(ruid), arg1)
10469 || put_user_id(high2lowuid(euid), arg2)
10470 || put_user_id(high2lowuid(suid), arg3))
10471 return -TARGET_EFAULT;
10476 #ifdef TARGET_NR_getresgid
10477 case TARGET_NR_setresgid:
10478 return get_errno(sys_setresgid(low2highgid(arg1),
10480 low2highgid(arg3)));
10482 #ifdef TARGET_NR_getresgid
10483 case TARGET_NR_getresgid:
10485 gid_t rgid, egid, sgid;
10486 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10487 if (!is_error(ret)) {
10488 if (put_user_id(high2lowgid(rgid), arg1)
10489 || put_user_id(high2lowgid(egid), arg2)
10490 || put_user_id(high2lowgid(sgid), arg3))
10491 return -TARGET_EFAULT;
10496 #ifdef TARGET_NR_chown
10497 case TARGET_NR_chown:
10498 if (!(p = lock_user_string(arg1)))
10499 return -TARGET_EFAULT;
10500 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10501 unlock_user(p, arg1, 0);
10504 case TARGET_NR_setuid:
10505 return get_errno(sys_setuid(low2highuid(arg1)));
10506 case TARGET_NR_setgid:
10507 return get_errno(sys_setgid(low2highgid(arg1)));
10508 case TARGET_NR_setfsuid:
10509 return get_errno(setfsuid(arg1));
10510 case TARGET_NR_setfsgid:
10511 return get_errno(setfsgid(arg1));
10513 #ifdef TARGET_NR_lchown32
10514 case TARGET_NR_lchown32:
10515 if (!(p = lock_user_string(arg1)))
10516 return -TARGET_EFAULT;
10517 ret = get_errno(lchown(p, arg2, arg3));
10518 unlock_user(p, arg1, 0);
10521 #ifdef TARGET_NR_getuid32
10522 case TARGET_NR_getuid32:
10523 return get_errno(getuid());
10526 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10527 /* Alpha specific */
10528 case TARGET_NR_getxuid:
10532 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10534 return get_errno(getuid());
10536 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10537 /* Alpha specific */
10538 case TARGET_NR_getxgid:
10542 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10544 return get_errno(getgid());
10546 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10547 /* Alpha specific */
10548 case TARGET_NR_osf_getsysinfo:
10549 ret = -TARGET_EOPNOTSUPP;
10551 case TARGET_GSI_IEEE_FP_CONTROL:
10553 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10554 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10556 swcr &= ~SWCR_STATUS_MASK;
10557 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10559 if (put_user_u64 (swcr, arg2))
10560 return -TARGET_EFAULT;
10565 /* case GSI_IEEE_STATE_AT_SIGNAL:
10566 -- Not implemented in linux kernel.
10568 -- Retrieves current unaligned access state; not much used.
10569 case GSI_PROC_TYPE:
10570 -- Retrieves implver information; surely not used.
10571 case GSI_GET_HWRPB:
10572 -- Grabs a copy of the HWRPB; surely not used.
10577 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10578 /* Alpha specific */
10579 case TARGET_NR_osf_setsysinfo:
10580 ret = -TARGET_EOPNOTSUPP;
10582 case TARGET_SSI_IEEE_FP_CONTROL:
10584 uint64_t swcr, fpcr;
10586 if (get_user_u64 (swcr, arg2)) {
10587 return -TARGET_EFAULT;
10591 * The kernel calls swcr_update_status to update the
10592 * status bits from the fpcr at every point that it
10593 * could be queried. Therefore, we store the status
10594 * bits only in FPCR.
10596 ((CPUAlphaState *)cpu_env)->swcr
10597 = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10599 fpcr = cpu_alpha_load_fpcr(cpu_env);
10600 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10601 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10602 cpu_alpha_store_fpcr(cpu_env, fpcr);
10607 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10609 uint64_t exc, fpcr, fex;
10611 if (get_user_u64(exc, arg2)) {
10612 return -TARGET_EFAULT;
10614 exc &= SWCR_STATUS_MASK;
10615 fpcr = cpu_alpha_load_fpcr(cpu_env);
10617 /* Old exceptions are not signaled. */
10618 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10620 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10621 fex &= ((CPUArchState *)cpu_env)->swcr;
10623 /* Update the hardware fpcr. */
10624 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10625 cpu_alpha_store_fpcr(cpu_env, fpcr);
10628 int si_code = TARGET_FPE_FLTUNK;
10629 target_siginfo_t info;
10631 if (fex & SWCR_TRAP_ENABLE_DNO) {
10632 si_code = TARGET_FPE_FLTUND;
10634 if (fex & SWCR_TRAP_ENABLE_INE) {
10635 si_code = TARGET_FPE_FLTRES;
10637 if (fex & SWCR_TRAP_ENABLE_UNF) {
10638 si_code = TARGET_FPE_FLTUND;
10640 if (fex & SWCR_TRAP_ENABLE_OVF) {
10641 si_code = TARGET_FPE_FLTOVF;
10643 if (fex & SWCR_TRAP_ENABLE_DZE) {
10644 si_code = TARGET_FPE_FLTDIV;
10646 if (fex & SWCR_TRAP_ENABLE_INV) {
10647 si_code = TARGET_FPE_FLTINV;
10650 info.si_signo = SIGFPE;
10652 info.si_code = si_code;
10653 info._sifields._sigfault._addr
10654 = ((CPUArchState *)cpu_env)->pc;
10655 queue_signal((CPUArchState *)cpu_env, info.si_signo,
10656 QEMU_SI_FAULT, &info);
10662 /* case SSI_NVPAIRS:
10663 -- Used with SSIN_UACPROC to enable unaligned accesses.
10664 case SSI_IEEE_STATE_AT_SIGNAL:
10665 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10666 -- Not implemented in linux kernel
10671 #ifdef TARGET_NR_osf_sigprocmask
10672 /* Alpha specific. */
10673 case TARGET_NR_osf_sigprocmask:
10677 sigset_t set, oldset;
10680 case TARGET_SIG_BLOCK:
10683 case TARGET_SIG_UNBLOCK:
10686 case TARGET_SIG_SETMASK:
10690 return -TARGET_EINVAL;
10693 target_to_host_old_sigset(&set, &mask);
10694 ret = do_sigprocmask(how, &set, &oldset);
10696 host_to_target_old_sigset(&mask, &oldset);
10703 #ifdef TARGET_NR_getgid32
10704 case TARGET_NR_getgid32:
10705 return get_errno(getgid());
10707 #ifdef TARGET_NR_geteuid32
10708 case TARGET_NR_geteuid32:
10709 return get_errno(geteuid());
10711 #ifdef TARGET_NR_getegid32
10712 case TARGET_NR_getegid32:
10713 return get_errno(getegid());
10715 #ifdef TARGET_NR_setreuid32
10716 case TARGET_NR_setreuid32:
10717 return get_errno(setreuid(arg1, arg2));
10719 #ifdef TARGET_NR_setregid32
10720 case TARGET_NR_setregid32:
10721 return get_errno(setregid(arg1, arg2));
10723 #ifdef TARGET_NR_getgroups32
10724 case TARGET_NR_getgroups32:
10726 int gidsetsize = arg1;
10727 uint32_t *target_grouplist;
10731 grouplist = alloca(gidsetsize * sizeof(gid_t));
10732 ret = get_errno(getgroups(gidsetsize, grouplist));
10733 if (gidsetsize == 0)
10735 if (!is_error(ret)) {
10736 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10737 if (!target_grouplist) {
10738 return -TARGET_EFAULT;
10740 for(i = 0;i < ret; i++)
10741 target_grouplist[i] = tswap32(grouplist[i]);
10742 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10747 #ifdef TARGET_NR_setgroups32
10748 case TARGET_NR_setgroups32:
10750 int gidsetsize = arg1;
10751 uint32_t *target_grouplist;
10755 grouplist = alloca(gidsetsize * sizeof(gid_t));
10756 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10757 if (!target_grouplist) {
10758 return -TARGET_EFAULT;
10760 for(i = 0;i < gidsetsize; i++)
10761 grouplist[i] = tswap32(target_grouplist[i]);
10762 unlock_user(target_grouplist, arg2, 0);
10763 return get_errno(setgroups(gidsetsize, grouplist));
10766 #ifdef TARGET_NR_fchown32
10767 case TARGET_NR_fchown32:
10768 return get_errno(fchown(arg1, arg2, arg3));
10770 #ifdef TARGET_NR_setresuid32
10771 case TARGET_NR_setresuid32:
10772 return get_errno(sys_setresuid(arg1, arg2, arg3));
10774 #ifdef TARGET_NR_getresuid32
10775 case TARGET_NR_getresuid32:
10777 uid_t ruid, euid, suid;
10778 ret = get_errno(getresuid(&ruid, &euid, &suid));
10779 if (!is_error(ret)) {
10780 if (put_user_u32(ruid, arg1)
10781 || put_user_u32(euid, arg2)
10782 || put_user_u32(suid, arg3))
10783 return -TARGET_EFAULT;
10788 #ifdef TARGET_NR_setresgid32
10789 case TARGET_NR_setresgid32:
10790 return get_errno(sys_setresgid(arg1, arg2, arg3));
10792 #ifdef TARGET_NR_getresgid32
10793 case TARGET_NR_getresgid32:
10795 gid_t rgid, egid, sgid;
10796 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10797 if (!is_error(ret)) {
10798 if (put_user_u32(rgid, arg1)
10799 || put_user_u32(egid, arg2)
10800 || put_user_u32(sgid, arg3))
10801 return -TARGET_EFAULT;
10806 #ifdef TARGET_NR_chown32
10807 case TARGET_NR_chown32:
10808 if (!(p = lock_user_string(arg1)))
10809 return -TARGET_EFAULT;
10810 ret = get_errno(chown(p, arg2, arg3));
10811 unlock_user(p, arg1, 0);
10814 #ifdef TARGET_NR_setuid32
10815 case TARGET_NR_setuid32:
10816 return get_errno(sys_setuid(arg1));
10818 #ifdef TARGET_NR_setgid32
10819 case TARGET_NR_setgid32:
10820 return get_errno(sys_setgid(arg1));
10822 #ifdef TARGET_NR_setfsuid32
10823 case TARGET_NR_setfsuid32:
10824 return get_errno(setfsuid(arg1));
10826 #ifdef TARGET_NR_setfsgid32
10827 case TARGET_NR_setfsgid32:
10828 return get_errno(setfsgid(arg1));
10830 #ifdef TARGET_NR_mincore
10831 case TARGET_NR_mincore:
10833 void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10835 return -TARGET_ENOMEM;
10837 p = lock_user_string(arg3);
10839 ret = -TARGET_EFAULT;
10841 ret = get_errno(mincore(a, arg2, p));
10842 unlock_user(p, arg3, ret);
10844 unlock_user(a, arg1, 0);
10848 #ifdef TARGET_NR_arm_fadvise64_64
10849 case TARGET_NR_arm_fadvise64_64:
10850 /* arm_fadvise64_64 looks like fadvise64_64 but
10851 * with different argument order: fd, advice, offset, len
10852 * rather than the usual fd, offset, len, advice.
10853 * Note that offset and len are both 64-bit so appear as
10854 * pairs of 32-bit registers.
10856 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10857 target_offset64(arg5, arg6), arg2);
10858 return -host_to_target_errno(ret);
10861 #if TARGET_ABI_BITS == 32
10863 #ifdef TARGET_NR_fadvise64_64
10864 case TARGET_NR_fadvise64_64:
10865 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10866 /* 6 args: fd, advice, offset (high, low), len (high, low) */
10874 /* 6 args: fd, offset (high, low), len (high, low), advice */
10875 if (regpairs_aligned(cpu_env, num)) {
10876 /* offset is in (3,4), len in (5,6) and advice in 7 */
10884 ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10885 target_offset64(arg4, arg5), arg6);
10886 return -host_to_target_errno(ret);
10889 #ifdef TARGET_NR_fadvise64
10890 case TARGET_NR_fadvise64:
10891 /* 5 args: fd, offset (high, low), len, advice */
10892 if (regpairs_aligned(cpu_env, num)) {
10893 /* offset is in (3,4), len in 5 and advice in 6 */
10899 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10900 return -host_to_target_errno(ret);
10903 #else /* not a 32-bit ABI */
10904 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10905 #ifdef TARGET_NR_fadvise64_64
10906 case TARGET_NR_fadvise64_64:
10908 #ifdef TARGET_NR_fadvise64
10909 case TARGET_NR_fadvise64:
10911 #ifdef TARGET_S390X
10913 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10914 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10915 case 6: arg4 = POSIX_FADV_DONTNEED; break;
10916 case 7: arg4 = POSIX_FADV_NOREUSE; break;
10920 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10922 #endif /* end of 64-bit ABI fadvise handling */
10924 #ifdef TARGET_NR_madvise
10925 case TARGET_NR_madvise:
10926 /* A straight passthrough may not be safe because qemu sometimes
10927 turns private file-backed mappings into anonymous mappings.
10928 This will break MADV_DONTNEED.
10929 This is a hint, so ignoring and returning success is ok. */
10932 #if TARGET_ABI_BITS == 32
10933 case TARGET_NR_fcntl64:
10937 from_flock64_fn *copyfrom = copy_from_user_flock64;
10938 to_flock64_fn *copyto = copy_to_user_flock64;
10941 if (!((CPUARMState *)cpu_env)->eabi) {
10942 copyfrom = copy_from_user_oabi_flock64;
10943 copyto = copy_to_user_oabi_flock64;
10947 cmd = target_to_host_fcntl_cmd(arg2);
10948 if (cmd == -TARGET_EINVAL) {
10953 case TARGET_F_GETLK64:
10954 ret = copyfrom(&fl, arg3);
10958 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10960 ret = copyto(arg3, &fl);
10964 case TARGET_F_SETLK64:
10965 case TARGET_F_SETLKW64:
10966 ret = copyfrom(&fl, arg3);
10970 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10973 ret = do_fcntl(arg1, arg2, arg3);
10979 #ifdef TARGET_NR_cacheflush
10980 case TARGET_NR_cacheflush:
10981 /* self-modifying code is handled automatically, so nothing needed */
10984 #ifdef TARGET_NR_getpagesize
10985 case TARGET_NR_getpagesize:
10986 return TARGET_PAGE_SIZE;
10988 case TARGET_NR_gettid:
10989 return get_errno(sys_gettid());
10990 #ifdef TARGET_NR_readahead
10991 case TARGET_NR_readahead:
10992 #if TARGET_ABI_BITS == 32
10993 if (regpairs_aligned(cpu_env, num)) {
10998 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11000 ret = get_errno(readahead(arg1, arg2, arg3));
11005 #ifdef TARGET_NR_setxattr
11006 case TARGET_NR_listxattr:
11007 case TARGET_NR_llistxattr:
11011 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11013 return -TARGET_EFAULT;
11016 p = lock_user_string(arg1);
11018 if (num == TARGET_NR_listxattr) {
11019 ret = get_errno(listxattr(p, b, arg3));
11021 ret = get_errno(llistxattr(p, b, arg3));
11024 ret = -TARGET_EFAULT;
11026 unlock_user(p, arg1, 0);
11027 unlock_user(b, arg2, arg3);
11030 case TARGET_NR_flistxattr:
11034 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11036 return -TARGET_EFAULT;
11039 ret = get_errno(flistxattr(arg1, b, arg3));
11040 unlock_user(b, arg2, arg3);
11043 case TARGET_NR_setxattr:
11044 case TARGET_NR_lsetxattr:
11046 void *p, *n, *v = 0;
11048 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11050 return -TARGET_EFAULT;
11053 p = lock_user_string(arg1);
11054 n = lock_user_string(arg2);
11056 if (num == TARGET_NR_setxattr) {
11057 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11059 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11062 ret = -TARGET_EFAULT;
11064 unlock_user(p, arg1, 0);
11065 unlock_user(n, arg2, 0);
11066 unlock_user(v, arg3, 0);
11069 case TARGET_NR_fsetxattr:
11073 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11075 return -TARGET_EFAULT;
11078 n = lock_user_string(arg2);
11080 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11082 ret = -TARGET_EFAULT;
11084 unlock_user(n, arg2, 0);
11085 unlock_user(v, arg3, 0);
11088 case TARGET_NR_getxattr:
11089 case TARGET_NR_lgetxattr:
11091 void *p, *n, *v = 0;
11093 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11095 return -TARGET_EFAULT;
11098 p = lock_user_string(arg1);
11099 n = lock_user_string(arg2);
11101 if (num == TARGET_NR_getxattr) {
11102 ret = get_errno(getxattr(p, n, v, arg4));
11104 ret = get_errno(lgetxattr(p, n, v, arg4));
11107 ret = -TARGET_EFAULT;
11109 unlock_user(p, arg1, 0);
11110 unlock_user(n, arg2, 0);
11111 unlock_user(v, arg3, arg4);
11114 case TARGET_NR_fgetxattr:
11118 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11120 return -TARGET_EFAULT;
11123 n = lock_user_string(arg2);
11125 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11127 ret = -TARGET_EFAULT;
11129 unlock_user(n, arg2, 0);
11130 unlock_user(v, arg3, arg4);
11133 case TARGET_NR_removexattr:
11134 case TARGET_NR_lremovexattr:
11137 p = lock_user_string(arg1);
11138 n = lock_user_string(arg2);
11140 if (num == TARGET_NR_removexattr) {
11141 ret = get_errno(removexattr(p, n));
11143 ret = get_errno(lremovexattr(p, n));
11146 ret = -TARGET_EFAULT;
11148 unlock_user(p, arg1, 0);
11149 unlock_user(n, arg2, 0);
11152 case TARGET_NR_fremovexattr:
11155 n = lock_user_string(arg2);
11157 ret = get_errno(fremovexattr(arg1, n));
11159 ret = -TARGET_EFAULT;
11161 unlock_user(n, arg2, 0);
11165 #endif /* CONFIG_ATTR */
11166 #ifdef TARGET_NR_set_thread_area
11167 case TARGET_NR_set_thread_area:
11168 #if defined(TARGET_MIPS)
11169 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11171 #elif defined(TARGET_CRIS)
11173 ret = -TARGET_EINVAL;
11175 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11179 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11180 return do_set_thread_area(cpu_env, arg1);
11181 #elif defined(TARGET_M68K)
11183 TaskState *ts = cpu->opaque;
11184 ts->tp_value = arg1;
11188 return -TARGET_ENOSYS;
11191 #ifdef TARGET_NR_get_thread_area
11192 case TARGET_NR_get_thread_area:
11193 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11194 return do_get_thread_area(cpu_env, arg1);
11195 #elif defined(TARGET_M68K)
11197 TaskState *ts = cpu->opaque;
11198 return ts->tp_value;
11201 return -TARGET_ENOSYS;
11204 #ifdef TARGET_NR_getdomainname
11205 case TARGET_NR_getdomainname:
11206 return -TARGET_ENOSYS;
11209 #ifdef TARGET_NR_clock_settime
11210 case TARGET_NR_clock_settime:
11212 struct timespec ts;
11214 ret = target_to_host_timespec(&ts, arg2);
11215 if (!is_error(ret)) {
11216 ret = get_errno(clock_settime(arg1, &ts));
11221 #ifdef TARGET_NR_clock_gettime
11222 case TARGET_NR_clock_gettime:
11224 struct timespec ts;
11225 ret = get_errno(clock_gettime(arg1, &ts));
11226 if (!is_error(ret)) {
11227 ret = host_to_target_timespec(arg2, &ts);
11232 #ifdef TARGET_NR_clock_getres
11233 case TARGET_NR_clock_getres:
11235 struct timespec ts;
11236 ret = get_errno(clock_getres(arg1, &ts));
11237 if (!is_error(ret)) {
11238 host_to_target_timespec(arg2, &ts);
11243 #ifdef TARGET_NR_clock_nanosleep
11244 case TARGET_NR_clock_nanosleep:
11246 struct timespec ts;
11247 target_to_host_timespec(&ts, arg3);
11248 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11249 &ts, arg4 ? &ts : NULL));
11251 host_to_target_timespec(arg4, &ts);
11253 #if defined(TARGET_PPC)
11254 /* clock_nanosleep is odd in that it returns positive errno values.
11255 * On PPC, CR0 bit 3 should be set in such a situation. */
11256 if (ret && ret != -TARGET_ERESTARTSYS) {
11257 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11264 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11265 case TARGET_NR_set_tid_address:
11266 return get_errno(set_tid_address((int *)g2h(arg1)));
11269 case TARGET_NR_tkill:
11270 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11272 case TARGET_NR_tgkill:
11273 return get_errno(safe_tgkill((int)arg1, (int)arg2,
11274 target_to_host_signal(arg3)));
11276 #ifdef TARGET_NR_set_robust_list
11277 case TARGET_NR_set_robust_list:
11278 case TARGET_NR_get_robust_list:
11279 /* The ABI for supporting robust futexes has userspace pass
11280 * the kernel a pointer to a linked list which is updated by
11281 * userspace after the syscall; the list is walked by the kernel
11282 * when the thread exits. Since the linked list in QEMU guest
11283 * memory isn't a valid linked list for the host and we have
11284 * no way to reliably intercept the thread-death event, we can't
11285 * support these. Silently return ENOSYS so that guest userspace
11286 * falls back to a non-robust futex implementation (which should
11287 * be OK except in the corner case of the guest crashing while
11288 * holding a mutex that is shared with another process via
11291 return -TARGET_ENOSYS;
11294 #if defined(TARGET_NR_utimensat)
11295 case TARGET_NR_utimensat:
11297 struct timespec *tsp, ts[2];
11301 target_to_host_timespec(ts, arg3);
11302 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11306 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11308 if (!(p = lock_user_string(arg2))) {
11309 return -TARGET_EFAULT;
11311 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11312 unlock_user(p, arg2, 0);
11317 case TARGET_NR_futex:
11318 return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11319 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11320 case TARGET_NR_inotify_init:
11321 ret = get_errno(sys_inotify_init());
11323 fd_trans_register(ret, &target_inotify_trans);
11327 #ifdef CONFIG_INOTIFY1
11328 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11329 case TARGET_NR_inotify_init1:
11330 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11331 fcntl_flags_tbl)));
11333 fd_trans_register(ret, &target_inotify_trans);
11338 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11339 case TARGET_NR_inotify_add_watch:
11340 p = lock_user_string(arg2);
11341 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11342 unlock_user(p, arg2, 0);
11345 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11346 case TARGET_NR_inotify_rm_watch:
11347 return get_errno(sys_inotify_rm_watch(arg1, arg2));
11350 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11351 case TARGET_NR_mq_open:
11353 struct mq_attr posix_mq_attr;
11354 struct mq_attr *pposix_mq_attr;
11357 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11358 pposix_mq_attr = NULL;
11360 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11361 return -TARGET_EFAULT;
11363 pposix_mq_attr = &posix_mq_attr;
11365 p = lock_user_string(arg1 - 1);
11367 return -TARGET_EFAULT;
11369 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11370 unlock_user (p, arg1, 0);
11374 case TARGET_NR_mq_unlink:
11375 p = lock_user_string(arg1 - 1);
11377 return -TARGET_EFAULT;
11379 ret = get_errno(mq_unlink(p));
11380 unlock_user (p, arg1, 0);
11383 case TARGET_NR_mq_timedsend:
11385 struct timespec ts;
11387 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11389 target_to_host_timespec(&ts, arg5);
11390 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11391 host_to_target_timespec(arg5, &ts);
11393 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11395 unlock_user (p, arg2, arg3);
11399 case TARGET_NR_mq_timedreceive:
11401 struct timespec ts;
11404 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11406 target_to_host_timespec(&ts, arg5);
11407 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11409 host_to_target_timespec(arg5, &ts);
11411 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11414 unlock_user (p, arg2, arg3);
11416 put_user_u32(prio, arg4);
11420 /* Not implemented for now... */
11421 /* case TARGET_NR_mq_notify: */
11424 case TARGET_NR_mq_getsetattr:
11426 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11429 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11430 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11431 &posix_mq_attr_out));
11432 } else if (arg3 != 0) {
11433 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11435 if (ret == 0 && arg3 != 0) {
11436 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11442 #ifdef CONFIG_SPLICE
11443 #ifdef TARGET_NR_tee
11444 case TARGET_NR_tee:
11446 ret = get_errno(tee(arg1,arg2,arg3,arg4));
11450 #ifdef TARGET_NR_splice
11451 case TARGET_NR_splice:
11453 loff_t loff_in, loff_out;
11454 loff_t *ploff_in = NULL, *ploff_out = NULL;
11456 if (get_user_u64(loff_in, arg2)) {
11457 return -TARGET_EFAULT;
11459 ploff_in = &loff_in;
11462 if (get_user_u64(loff_out, arg4)) {
11463 return -TARGET_EFAULT;
11465 ploff_out = &loff_out;
11467 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11469 if (put_user_u64(loff_in, arg2)) {
11470 return -TARGET_EFAULT;
11474 if (put_user_u64(loff_out, arg4)) {
11475 return -TARGET_EFAULT;
11481 #ifdef TARGET_NR_vmsplice
11482 case TARGET_NR_vmsplice:
11484 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11486 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11487 unlock_iovec(vec, arg2, arg3, 0);
11489 ret = -host_to_target_errno(errno);
11494 #endif /* CONFIG_SPLICE */
11495 #ifdef CONFIG_EVENTFD
11496 #if defined(TARGET_NR_eventfd)
11497 case TARGET_NR_eventfd:
11498 ret = get_errno(eventfd(arg1, 0));
11500 fd_trans_register(ret, &target_eventfd_trans);
11504 #if defined(TARGET_NR_eventfd2)
11505 case TARGET_NR_eventfd2:
11507 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11508 if (arg2 & TARGET_O_NONBLOCK) {
11509 host_flags |= O_NONBLOCK;
11511 if (arg2 & TARGET_O_CLOEXEC) {
11512 host_flags |= O_CLOEXEC;
11514 ret = get_errno(eventfd(arg1, host_flags));
11516 fd_trans_register(ret, &target_eventfd_trans);
11521 #endif /* CONFIG_EVENTFD */
11522 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11523 case TARGET_NR_fallocate:
11524 #if TARGET_ABI_BITS == 32
11525 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11526 target_offset64(arg5, arg6)));
11528 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11532 #if defined(CONFIG_SYNC_FILE_RANGE)
11533 #if defined(TARGET_NR_sync_file_range)
11534 case TARGET_NR_sync_file_range:
11535 #if TARGET_ABI_BITS == 32
11536 #if defined(TARGET_MIPS)
11537 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11538 target_offset64(arg5, arg6), arg7));
11540 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11541 target_offset64(arg4, arg5), arg6));
11542 #endif /* !TARGET_MIPS */
11544 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11548 #if defined(TARGET_NR_sync_file_range2)
11549 case TARGET_NR_sync_file_range2:
11550 /* This is like sync_file_range but the arguments are reordered */
11551 #if TARGET_ABI_BITS == 32
11552 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11553 target_offset64(arg5, arg6), arg2));
11555 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11560 #if defined(TARGET_NR_signalfd4)
11561 case TARGET_NR_signalfd4:
11562 return do_signalfd4(arg1, arg2, arg4);
11564 #if defined(TARGET_NR_signalfd)
11565 case TARGET_NR_signalfd:
11566 return do_signalfd4(arg1, arg2, 0);
11568 #if defined(CONFIG_EPOLL)
11569 #if defined(TARGET_NR_epoll_create)
11570 case TARGET_NR_epoll_create:
11571 return get_errno(epoll_create(arg1));
11573 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11574 case TARGET_NR_epoll_create1:
11575 return get_errno(epoll_create1(arg1));
11577 #if defined(TARGET_NR_epoll_ctl)
11578 case TARGET_NR_epoll_ctl:
11580 struct epoll_event ep;
11581 struct epoll_event *epp = 0;
11583 struct target_epoll_event *target_ep;
11584 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11585 return -TARGET_EFAULT;
11587 ep.events = tswap32(target_ep->events);
11588 /* The epoll_data_t union is just opaque data to the kernel,
11589 * so we transfer all 64 bits across and need not worry what
11590 * actual data type it is.
11592 ep.data.u64 = tswap64(target_ep->data.u64);
11593 unlock_user_struct(target_ep, arg4, 0);
11596 return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11600 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11601 #if defined(TARGET_NR_epoll_wait)
11602 case TARGET_NR_epoll_wait:
11604 #if defined(TARGET_NR_epoll_pwait)
11605 case TARGET_NR_epoll_pwait:
11608 struct target_epoll_event *target_ep;
11609 struct epoll_event *ep;
11611 int maxevents = arg3;
11612 int timeout = arg4;
11614 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11615 return -TARGET_EINVAL;
11618 target_ep = lock_user(VERIFY_WRITE, arg2,
11619 maxevents * sizeof(struct target_epoll_event), 1);
11621 return -TARGET_EFAULT;
11624 ep = g_try_new(struct epoll_event, maxevents);
11626 unlock_user(target_ep, arg2, 0);
11627 return -TARGET_ENOMEM;
11631 #if defined(TARGET_NR_epoll_pwait)
11632 case TARGET_NR_epoll_pwait:
11634 target_sigset_t *target_set;
11635 sigset_t _set, *set = &_set;
11638 if (arg6 != sizeof(target_sigset_t)) {
11639 ret = -TARGET_EINVAL;
11643 target_set = lock_user(VERIFY_READ, arg5,
11644 sizeof(target_sigset_t), 1);
11646 ret = -TARGET_EFAULT;
11649 target_to_host_sigset(set, target_set);
11650 unlock_user(target_set, arg5, 0);
11655 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11656 set, SIGSET_T_SIZE));
11660 #if defined(TARGET_NR_epoll_wait)
11661 case TARGET_NR_epoll_wait:
11662 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11667 ret = -TARGET_ENOSYS;
11669 if (!is_error(ret)) {
11671 for (i = 0; i < ret; i++) {
11672 target_ep[i].events = tswap32(ep[i].events);
11673 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11675 unlock_user(target_ep, arg2,
11676 ret * sizeof(struct target_epoll_event));
11678 unlock_user(target_ep, arg2, 0);
11685 #ifdef TARGET_NR_prlimit64
11686 case TARGET_NR_prlimit64:
11688 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11689 struct target_rlimit64 *target_rnew, *target_rold;
11690 struct host_rlimit64 rnew, rold, *rnewp = 0;
11691 int resource = target_to_host_resource(arg2);
11693 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11694 return -TARGET_EFAULT;
11696 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11697 rnew.rlim_max = tswap64(target_rnew->rlim_max);
11698 unlock_user_struct(target_rnew, arg3, 0);
11702 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11703 if (!is_error(ret) && arg4) {
11704 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11705 return -TARGET_EFAULT;
11707 target_rold->rlim_cur = tswap64(rold.rlim_cur);
11708 target_rold->rlim_max = tswap64(rold.rlim_max);
11709 unlock_user_struct(target_rold, arg4, 1);
11714 #ifdef TARGET_NR_gethostname
11715 case TARGET_NR_gethostname:
11717 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11719 ret = get_errno(gethostname(name, arg2));
11720 unlock_user(name, arg1, arg2);
11722 ret = -TARGET_EFAULT;
11727 #ifdef TARGET_NR_atomic_cmpxchg_32
11728 case TARGET_NR_atomic_cmpxchg_32:
11730 /* should use start_exclusive from main.c */
11731 abi_ulong mem_value;
11732 if (get_user_u32(mem_value, arg6)) {
11733 target_siginfo_t info;
11734 info.si_signo = SIGSEGV;
11736 info.si_code = TARGET_SEGV_MAPERR;
11737 info._sifields._sigfault._addr = arg6;
11738 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11739 QEMU_SI_FAULT, &info);
11743 if (mem_value == arg2)
11744 put_user_u32(arg1, arg6);
11748 #ifdef TARGET_NR_atomic_barrier
11749 case TARGET_NR_atomic_barrier:
11750 /* Like the kernel implementation and the
11751 qemu arm barrier, no-op this? */
11755 #ifdef TARGET_NR_timer_create
11756 case TARGET_NR_timer_create:
11758 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11760 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11763 int timer_index = next_free_host_timer();
11765 if (timer_index < 0) {
11766 ret = -TARGET_EAGAIN;
11768 timer_t *phtimer = g_posix_timers + timer_index;
11771 phost_sevp = &host_sevp;
11772 ret = target_to_host_sigevent(phost_sevp, arg2);
11778 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11782 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11783 return -TARGET_EFAULT;
11791 #ifdef TARGET_NR_timer_settime
11792 case TARGET_NR_timer_settime:
11794 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11795 * struct itimerspec * old_value */
11796 target_timer_t timerid = get_timer_id(arg1);
11800 } else if (arg3 == 0) {
11801 ret = -TARGET_EINVAL;
11803 timer_t htimer = g_posix_timers[timerid];
11804 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11806 if (target_to_host_itimerspec(&hspec_new, arg3)) {
11807 return -TARGET_EFAULT;
11810 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11811 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11812 return -TARGET_EFAULT;
11819 #ifdef TARGET_NR_timer_gettime
11820 case TARGET_NR_timer_gettime:
11822 /* args: timer_t timerid, struct itimerspec *curr_value */
11823 target_timer_t timerid = get_timer_id(arg1);
11827 } else if (!arg2) {
11828 ret = -TARGET_EFAULT;
11830 timer_t htimer = g_posix_timers[timerid];
11831 struct itimerspec hspec;
11832 ret = get_errno(timer_gettime(htimer, &hspec));
11834 if (host_to_target_itimerspec(arg2, &hspec)) {
11835 ret = -TARGET_EFAULT;
11842 #ifdef TARGET_NR_timer_getoverrun
11843 case TARGET_NR_timer_getoverrun:
11845 /* args: timer_t timerid */
11846 target_timer_t timerid = get_timer_id(arg1);
11851 timer_t htimer = g_posix_timers[timerid];
11852 ret = get_errno(timer_getoverrun(htimer));
11858 #ifdef TARGET_NR_timer_delete
11859 case TARGET_NR_timer_delete:
11861 /* args: timer_t timerid */
11862 target_timer_t timerid = get_timer_id(arg1);
11867 timer_t htimer = g_posix_timers[timerid];
11868 ret = get_errno(timer_delete(htimer));
11869 g_posix_timers[timerid] = 0;
11875 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11876 case TARGET_NR_timerfd_create:
11877 return get_errno(timerfd_create(arg1,
11878 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11881 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11882 case TARGET_NR_timerfd_gettime:
11884 struct itimerspec its_curr;
11886 ret = get_errno(timerfd_gettime(arg1, &its_curr));
11888 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11889 return -TARGET_EFAULT;
11895 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11896 case TARGET_NR_timerfd_settime:
11898 struct itimerspec its_new, its_old, *p_new;
11901 if (target_to_host_itimerspec(&its_new, arg3)) {
11902 return -TARGET_EFAULT;
11909 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11911 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11912 return -TARGET_EFAULT;
11918 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11919 case TARGET_NR_ioprio_get:
11920 return get_errno(ioprio_get(arg1, arg2));
11923 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11924 case TARGET_NR_ioprio_set:
11925 return get_errno(ioprio_set(arg1, arg2, arg3));
11928 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11929 case TARGET_NR_setns:
11930 return get_errno(setns(arg1, arg2));
11932 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11933 case TARGET_NR_unshare:
11934 return get_errno(unshare(arg1));
11936 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11937 case TARGET_NR_kcmp:
11938 return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11940 #ifdef TARGET_NR_swapcontext
11941 case TARGET_NR_swapcontext:
11942 /* PowerPC specific. */
11943 return do_swapcontext(cpu_env, arg1, arg2, arg3);
11945 #ifdef TARGET_NR_memfd_create
11946 case TARGET_NR_memfd_create:
11947 p = lock_user_string(arg1);
11949 return -TARGET_EFAULT;
11951 ret = get_errno(memfd_create(p, arg2));
11952 fd_trans_unregister(ret);
11953 unlock_user(p, arg1, 0);
11958 qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11959 return -TARGET_ENOSYS;
11964 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11965 abi_long arg2, abi_long arg3, abi_long arg4,
11966 abi_long arg5, abi_long arg6, abi_long arg7,
11969 CPUState *cpu = env_cpu(cpu_env);
11972 #ifdef DEBUG_ERESTARTSYS
11973 /* Debug-only code for exercising the syscall-restart code paths
11974 * in the per-architecture cpu main loops: restart every syscall
11975 * the guest makes once before letting it through.
11981 return -TARGET_ERESTARTSYS;
11986 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11987 arg5, arg6, arg7, arg8);
11989 if (unlikely(do_strace)) {
11990 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11991 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11992 arg5, arg6, arg7, arg8);
11993 print_syscall_ret(num, ret);
11995 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11996 arg5, arg6, arg7, arg8);
11999 trace_guest_user_syscall_ret(cpu, num, ret);