/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <sys/mount.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <linux/capability.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#include <sys/timerfd.h>
#include <sys/eventfd.h>
#include <sys/epoll.h>
#include "qemu/xattr.h"
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include "linux_loop.h"
#include "qemu/guest-random.h"
#include "user/syscall-trace.h"
#include "qapi/error.h"
#include "fd-trans.h"
#define CLONE_IO 0x80000000 /* Clone io context */

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))
/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
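/* Illustrative note (not in the original source): glibc's pthread_create()
 * typically issues clone() with
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID
 * which contains all of CLONE_THREAD_FLAGS and nothing outside
 * CLONE_OPTIONAL_THREAD_FLAGS/CLONE_IGNORED_FLAGS, so
 * (flags & CLONE_INVALID_THREAD_FLAGS) == 0 and the pthread_create() path
 * is taken; a plain fork() passes only SIGCHLD (within CSIGNAL), so
 * (flags & CLONE_INVALID_FORK_FLAGS) == 0 and the fork path is taken.
 */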
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)              \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                    \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5)                                               \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)   \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);              \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5,type6,arg6)                                    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,   \
                  type6 arg6)                                               \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);        \
}
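/* Illustrative expansion (not in the original source): a use such as
 *     _syscall1(int, exit_group, int, error_code)
 * becomes
 *     static int exit_group(int error_code)
 *     { return syscall(__NR_exit_group, error_code); }
 * For wrappers named sys_xxx, the token pasting produces __NR_sys_xxx,
 * which is why the aliases below map each __NR_sys_* name back onto the
 * real host syscall number.
 */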
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx
#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif
#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif
#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
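/* Illustrative note (not in the original source): _llseek() splits the
 * 64-bit offset across two ulongs, so seeking to offset 0x123456789 means
 * hi = 0x1 and lo = 0x23456789; the host writes the resulting 64-bit file
 * position back through 'res'.
 */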
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif
/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
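/* Illustrative note (not in the original source): each row above is
 * { target_mask, target_bits, host_mask, host_bits }, so a guest open()
 * flag word of TARGET_O_CREAT | TARGET_O_NONBLOCK translates, row by row,
 * into the host's O_CREAT | O_NONBLOCK even when the two ABIs assign
 * different numeric values to those bits.
 */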
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;
    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
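/* Illustrative example (not in the original source): on 32-bit ARM EABI the
 * kernel passes 64-bit syscall arguments in even/odd register pairs, so
 * pread64(fd, buf, count, offset) carries the offset in r4/r5 with a padding
 * word in r3; when regpairs_aligned() returns 1 the syscall dispatcher knows
 * to skip that padding slot before reassembling the 64-bit value.
 */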
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN]            = TARGET_EAGAIN,
    [EIDRM]             = TARGET_EIDRM,
    [ECHRNG]            = TARGET_ECHRNG,
    [EL2NSYNC]          = TARGET_EL2NSYNC,
    [EL3HLT]            = TARGET_EL3HLT,
    [EL3RST]            = TARGET_EL3RST,
    [ELNRNG]            = TARGET_ELNRNG,
    [EUNATCH]           = TARGET_EUNATCH,
    [ENOCSI]            = TARGET_ENOCSI,
    [EL2HLT]            = TARGET_EL2HLT,
    [EDEADLK]           = TARGET_EDEADLK,
    [ENOLCK]            = TARGET_ENOLCK,
    [EBADE]             = TARGET_EBADE,
    [EBADR]             = TARGET_EBADR,
    [EXFULL]            = TARGET_EXFULL,
    [ENOANO]            = TARGET_ENOANO,
    [EBADRQC]           = TARGET_EBADRQC,
    [EBADSLT]           = TARGET_EBADSLT,
    [EBFONT]            = TARGET_EBFONT,
    [ENOSTR]            = TARGET_ENOSTR,
    [ENODATA]           = TARGET_ENODATA,
    [ETIME]             = TARGET_ETIME,
    [ENOSR]             = TARGET_ENOSR,
    [ENONET]            = TARGET_ENONET,
    [ENOPKG]            = TARGET_ENOPKG,
    [EREMOTE]           = TARGET_EREMOTE,
    [ENOLINK]           = TARGET_ENOLINK,
    [EADV]              = TARGET_EADV,
    [ESRMNT]            = TARGET_ESRMNT,
    [ECOMM]             = TARGET_ECOMM,
    [EPROTO]            = TARGET_EPROTO,
    [EDOTDOT]           = TARGET_EDOTDOT,
    [EMULTIHOP]         = TARGET_EMULTIHOP,
    [EBADMSG]           = TARGET_EBADMSG,
    [ENAMETOOLONG]      = TARGET_ENAMETOOLONG,
    [EOVERFLOW]         = TARGET_EOVERFLOW,
    [ENOTUNIQ]          = TARGET_ENOTUNIQ,
    [EBADFD]            = TARGET_EBADFD,
    [EREMCHG]           = TARGET_EREMCHG,
    [ELIBACC]           = TARGET_ELIBACC,
    [ELIBBAD]           = TARGET_ELIBBAD,
    [ELIBSCN]           = TARGET_ELIBSCN,
    [ELIBMAX]           = TARGET_ELIBMAX,
    [ELIBEXEC]          = TARGET_ELIBEXEC,
    [EILSEQ]            = TARGET_EILSEQ,
    [ENOSYS]            = TARGET_ENOSYS,
    [ELOOP]             = TARGET_ELOOP,
    [ERESTART]          = TARGET_ERESTART,
    [ESTRPIPE]          = TARGET_ESTRPIPE,
    [ENOTEMPTY]         = TARGET_ENOTEMPTY,
    [EUSERS]            = TARGET_EUSERS,
    [ENOTSOCK]          = TARGET_ENOTSOCK,
    [EDESTADDRREQ]      = TARGET_EDESTADDRREQ,
    [EMSGSIZE]          = TARGET_EMSGSIZE,
    [EPROTOTYPE]        = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]       = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]   = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]   = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]        = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]      = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]      = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]        = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]     = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]          = TARGET_ENETDOWN,
    [ENETUNREACH]       = TARGET_ENETUNREACH,
    [ENETRESET]         = TARGET_ENETRESET,
    [ECONNABORTED]      = TARGET_ECONNABORTED,
    [ECONNRESET]        = TARGET_ECONNRESET,
    [ENOBUFS]           = TARGET_ENOBUFS,
    [EISCONN]           = TARGET_EISCONN,
    [ENOTCONN]          = TARGET_ENOTCONN,
    [EUCLEAN]           = TARGET_EUCLEAN,
    [ENOTNAM]           = TARGET_ENOTNAM,
    [ENAVAIL]           = TARGET_ENAVAIL,
    [EISNAM]            = TARGET_EISNAM,
    [EREMOTEIO]         = TARGET_EREMOTEIO,
    [EDQUOT]            = TARGET_EDQUOT,
    [ESHUTDOWN]         = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]      = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]         = TARGET_ETIMEDOUT,
    [ECONNREFUSED]      = TARGET_ECONNREFUSED,
    [EHOSTDOWN]         = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]      = TARGET_EHOSTUNREACH,
    [EALREADY]          = TARGET_EALREADY,
    [EINPROGRESS]       = TARGET_EINPROGRESS,
    [ESTALE]            = TARGET_ESTALE,
    [ECANCELED]         = TARGET_ECANCELED,
    [ENOMEDIUM]         = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]       = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]            = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]       = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]       = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]      = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]        = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]   = TARGET_ENOTRECOVERABLE,
#endif
#ifdef ENOMSG
    [ENOMSG]            = TARGET_ENOMSG,
#endif
#ifdef ERFKILL
    [ERFKILL]           = TARGET_ERFKILL,
#endif
#ifdef EHWPOISON
    [EHWPOISON]         = TARGET_EHWPOISON,
#endif
};
static inline int host_to_target_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        host_to_target_errno_table[err]) {
        return host_to_target_errno_table[err];
    }
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1) {
        return -host_to_target_errno(errno);
    }
    return ret;
}
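/* Worked example (not in the original source): if the host syscall fails
 * with errno == EDQUOT, get_errno() returns -TARGET_EDQUOT via the table
 * above; for a MIPS guest that value (1133) differs from the typical host
 * value (122), which is exactly why the table exists. Errnos the table
 * leaves at 0 are assumed to share the host's numbering and pass through
 * unchanged.
 */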
const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
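/* Illustrative expansion (not in the original source): these mirror the
 * _syscallN() macros above but route through safe_syscall(), QEMU's
 * race-free syscall entry defined elsewhere in the per-host support code.
 * For example,
 *     safe_syscall2(int, kill, pid_t, pid, int, sig)
 * defines
 *     static int safe_kill(pid_t pid, int sig)
 *     { return safe_syscall(__NR_kill, pid, sig); }
 * so a guest signal arriving around syscall entry is reported (per the
 * safe_syscall contract) as a TARGET_ERESTARTSYS failure instead of
 * being lost while blocked in the host kernel.
 */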
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
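/* Illustrative usage (not in the original source): a record-lock request
 * through this wrapper looks like
 *     struct flock64 fl = { .l_type = F_RDLCK, .l_whence = SEEK_SET };
 *     safe_fcntl(fd, F_GETLK64, &fl);
 * i.e. always the 64-bit constants and struct, so the offset width is the
 * same no matter which branch of the #ifdef above was taken.
 */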
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif
#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
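/* Worked example (not in the original source): for n == 64 with
 * TARGET_ABI_BITS == 32, nw == DIV_ROUND_UP(64, 32) == 2, so the guest
 * fd_set is handled as two abi_ulongs and host fd bits 0..63 are packed
 * bit by bit into those two words, independent of the host's own fd_set
 * layout.
 */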
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
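/* Worked example (not in the original source): a 100 Hz host reporting
 * 250 ticks of CPU time (2.5 s) to a target that expects 1024 Hz ticks
 * converts as (250 * 1024) / 100 = 2560 target ticks; the int64_t cast
 * keeps the intermediate product from overflowing a 32-bit long.
 */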
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
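/* Worked example (not in the original source): with a 32-bit guest ABI,
 * a host limit of 5 GiB does not fit in abi_ulong; the
 * "rlim != (abi_long)rlim" test fires and the guest simply sees
 * TARGET_RLIM_INFINITY. The reverse conversion above clamps the same way.
 */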
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
            (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender,
                                        sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender,
                                        sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* do_setsockopt() must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;
        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take a u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        default:
            goto unimplemented;
        }
        break;
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            char *alg_key = g_malloc(optlen);

            if (!alg_key) {
                return -TARGET_ENOMEM;
            }
            if (copy_from_user(alg_key, optval_addr, optlen)) {
                g_free(alg_key);
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            g_free(alg_key);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
2071 case TARGET_SOL_SOCKET:
2073 case TARGET_SO_RCVTIMEO:
2077 optname = SO_RCVTIMEO;
2080 if (optlen != sizeof(struct target_timeval)) {
2081 return -TARGET_EINVAL;
2084 if (copy_from_user_timeval(&tv, optval_addr)) {
2085 return -TARGET_EFAULT;
2088 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2092 case TARGET_SO_SNDTIMEO:
2093 optname = SO_SNDTIMEO;
2095 case TARGET_SO_ATTACH_FILTER:
2097 struct target_sock_fprog *tfprog;
2098 struct target_sock_filter *tfilter;
2099 struct sock_fprog fprog;
2100 struct sock_filter *filter;
2103 if (optlen != sizeof(*tfprog)) {
2104 return -TARGET_EINVAL;
2106 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2107 return -TARGET_EFAULT;
2109 if (!lock_user_struct(VERIFY_READ, tfilter,
2110 tswapal(tfprog->filter), 0)) {
2111 unlock_user_struct(tfprog, optval_addr, 1);
2112 return -TARGET_EFAULT;
2115 fprog.len = tswap16(tfprog->len);
2116 filter = g_try_new(struct sock_filter, fprog.len);
2117 if (filter == NULL) {
2118 unlock_user_struct(tfilter, tfprog->filter, 1);
2119 unlock_user_struct(tfprog, optval_addr, 1);
2120 return -TARGET_ENOMEM;
2122 for (i = 0; i < fprog.len; i++) {
2123 filter[i].code = tswap16(tfilter[i].code);
2124 filter[i].jt = tfilter[i].jt;
2125 filter[i].jf = tfilter[i].jf;
2126 filter[i].k = tswap32(tfilter[i].k);
2128 fprog.filter = filter;
2130 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2131 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2134 unlock_user_struct(tfilter, tfprog->filter, 1);
2135 unlock_user_struct(tfprog, optval_addr, 1);
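/* SO_BINDTODEVICE takes an interface name; copy it out of guest
 * memory and NUL-terminate it before passing it to the host.
 */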
2138 case TARGET_SO_BINDTODEVICE:
2140 char *dev_ifname, *addr_ifname;
2142 if (optlen > IFNAMSIZ - 1) {
2143 optlen = IFNAMSIZ - 1;
2145 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
if (!dev_ifname) {
    return -TARGET_EFAULT;
}
2149 optname = SO_BINDTODEVICE;
2150 addr_ifname = alloca(IFNAMSIZ);
2151 memcpy(addr_ifname, dev_ifname, optlen);
2152 addr_ifname[optlen] = 0;
2153 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2154 addr_ifname, optlen));
unlock_user(dev_ifname, optval_addr, 0);
2158 case TARGET_SO_LINGER:
2161 struct target_linger *tlg;
2163 if (optlen != sizeof(struct target_linger)) {
2164 return -TARGET_EINVAL;
2166 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2167 return -TARGET_EFAULT;
2169 __get_user(lg.l_onoff, &tlg->l_onoff);
2170 __get_user(lg.l_linger, &tlg->l_linger);
ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                           &lg, sizeof(lg)));
2173 unlock_user_struct(tlg, optval_addr, 0);
2176 /* Options with 'int' argument. */
2177 case TARGET_SO_DEBUG:
2180 case TARGET_SO_REUSEADDR:
2181 optname = SO_REUSEADDR;
2184 case TARGET_SO_REUSEPORT:
2185 optname = SO_REUSEPORT;
2188 case TARGET_SO_TYPE:
2191 case TARGET_SO_ERROR:
2194 case TARGET_SO_DONTROUTE:
2195 optname = SO_DONTROUTE;
2197 case TARGET_SO_BROADCAST:
2198 optname = SO_BROADCAST;
2200 case TARGET_SO_SNDBUF:
2201 optname = SO_SNDBUF;
2203 case TARGET_SO_SNDBUFFORCE:
2204 optname = SO_SNDBUFFORCE;
2206 case TARGET_SO_RCVBUF:
2207 optname = SO_RCVBUF;
2209 case TARGET_SO_RCVBUFFORCE:
2210 optname = SO_RCVBUFFORCE;
2212 case TARGET_SO_KEEPALIVE:
2213 optname = SO_KEEPALIVE;
2215 case TARGET_SO_OOBINLINE:
2216 optname = SO_OOBINLINE;
2218 case TARGET_SO_NO_CHECK:
2219 optname = SO_NO_CHECK;
2221 case TARGET_SO_PRIORITY:
2222 optname = SO_PRIORITY;
2225 case TARGET_SO_BSDCOMPAT:
2226 optname = SO_BSDCOMPAT;
2229 case TARGET_SO_PASSCRED:
2230 optname = SO_PASSCRED;
2232 case TARGET_SO_PASSSEC:
2233 optname = SO_PASSSEC;
2235 case TARGET_SO_TIMESTAMP:
2236 optname = SO_TIMESTAMP;
2238 case TARGET_SO_RCVLOWAT:
2239 optname = SO_RCVLOWAT;
2244 if (optlen < sizeof(uint32_t))
2245 return -TARGET_EINVAL;
2247 if (get_user_u32(val, optval_addr))
2248 return -TARGET_EFAULT;
2249 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2253 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2254 ret = -TARGET_ENOPROTOOPT;
2259 /* do_getsockopt() Must return target values and target errnos. */
2260 static abi_long do_getsockopt(int sockfd, int level, int optname,
2261 abi_ulong optval_addr, abi_ulong optlen)
2268 case TARGET_SOL_SOCKET:
2271 /* These don't just return a single integer */
2272 case TARGET_SO_RCVTIMEO:
2273 case TARGET_SO_SNDTIMEO:
2274 case TARGET_SO_PEERNAME:
2276 case TARGET_SO_PEERCRED: {
2279 struct target_ucred *tcr;
2281 if (get_user_u32(len, optlen)) {
2282 return -TARGET_EFAULT;
if (len < 0) {
    return -TARGET_EINVAL;
}
ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                           &cr, &crlen));
2297 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2298 return -TARGET_EFAULT;
2300 __put_user(cr.pid, &tcr->pid);
2301 __put_user(cr.uid, &tcr->uid);
2302 __put_user(cr.gid, &tcr->gid);
2303 unlock_user_struct(tcr, optval_addr, 1);
2304 if (put_user_u32(len, optlen)) {
2305 return -TARGET_EFAULT;
2309 case TARGET_SO_LINGER:
2313 struct target_linger *tlg;
2315 if (get_user_u32(len, optlen)) {
2316 return -TARGET_EFAULT;
if (len < 0) {
    return -TARGET_EINVAL;
}
ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                           &lg, &lglen));
2331 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2332 return -TARGET_EFAULT;
2334 __put_user(lg.l_onoff, &tlg->l_onoff);
2335 __put_user(lg.l_linger, &tlg->l_linger);
2336 unlock_user_struct(tlg, optval_addr, 1);
2337 if (put_user_u32(len, optlen)) {
2338 return -TARGET_EFAULT;
2342 /* Options with 'int' argument. */
2343 case TARGET_SO_DEBUG:
2346 case TARGET_SO_REUSEADDR:
2347 optname = SO_REUSEADDR;
2350 case TARGET_SO_REUSEPORT:
2351 optname = SO_REUSEPORT;
2354 case TARGET_SO_TYPE:
2357 case TARGET_SO_ERROR:
2360 case TARGET_SO_DONTROUTE:
2361 optname = SO_DONTROUTE;
2363 case TARGET_SO_BROADCAST:
2364 optname = SO_BROADCAST;
2366 case TARGET_SO_SNDBUF:
2367 optname = SO_SNDBUF;
2369 case TARGET_SO_RCVBUF:
2370 optname = SO_RCVBUF;
2372 case TARGET_SO_KEEPALIVE:
2373 optname = SO_KEEPALIVE;
2375 case TARGET_SO_OOBINLINE:
2376 optname = SO_OOBINLINE;
2378 case TARGET_SO_NO_CHECK:
2379 optname = SO_NO_CHECK;
2381 case TARGET_SO_PRIORITY:
2382 optname = SO_PRIORITY;
2385 case TARGET_SO_BSDCOMPAT:
2386 optname = SO_BSDCOMPAT;
2389 case TARGET_SO_PASSCRED:
2390 optname = SO_PASSCRED;
2392 case TARGET_SO_TIMESTAMP:
2393 optname = SO_TIMESTAMP;
2395 case TARGET_SO_RCVLOWAT:
2396 optname = SO_RCVLOWAT;
2398 case TARGET_SO_ACCEPTCONN:
2399 optname = SO_ACCEPTCONN;
2406 /* TCP options all take an 'int' value. */
2408 if (get_user_u32(len, optlen))
2409 return -TARGET_EFAULT;
if (len < 0)
    return -TARGET_EINVAL;
2413 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2416 if (optname == SO_TYPE) {
2417 val = host_to_target_sock_type(val);
2422 if (put_user_u32(val, optval_addr))
2423 return -TARGET_EFAULT;
2425 if (put_user_u8(val, optval_addr))
2426 return -TARGET_EFAULT;
2428 if (put_user_u32(len, optlen))
2429 return -TARGET_EFAULT;
2436 case IP_ROUTER_ALERT:
2440 case IP_MTU_DISCOVER:
2446 case IP_MULTICAST_TTL:
2447 case IP_MULTICAST_LOOP:
2448 if (get_user_u32(len, optlen))
2449 return -TARGET_EFAULT;
if (len < 0)
    return -TARGET_EINVAL;
2453 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2456 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2458 if (put_user_u32(len, optlen)
2459 || put_user_u8(val, optval_addr))
2460 return -TARGET_EFAULT;
if (len > sizeof(int))
    len = sizeof(int);
2464 if (put_user_u32(len, optlen)
2465 || put_user_u32(val, optval_addr))
2466 return -TARGET_EFAULT;
2470 ret = -TARGET_ENOPROTOOPT;
2476 case IPV6_MTU_DISCOVER:
2479 case IPV6_RECVPKTINFO:
2480 case IPV6_UNICAST_HOPS:
2481 case IPV6_MULTICAST_HOPS:
2482 case IPV6_MULTICAST_LOOP:
2484 case IPV6_RECVHOPLIMIT:
2485 case IPV6_2292HOPLIMIT:
2488 case IPV6_2292PKTINFO:
2489 case IPV6_RECVTCLASS:
2490 case IPV6_RECVRTHDR:
2491 case IPV6_2292RTHDR:
2492 case IPV6_RECVHOPOPTS:
2493 case IPV6_2292HOPOPTS:
2494 case IPV6_RECVDSTOPTS:
2495 case IPV6_2292DSTOPTS:
2497 #ifdef IPV6_RECVPATHMTU
2498 case IPV6_RECVPATHMTU:
2500 #ifdef IPV6_TRANSPARENT
2501 case IPV6_TRANSPARENT:
2503 #ifdef IPV6_FREEBIND
2506 #ifdef IPV6_RECVORIGDSTADDR
2507 case IPV6_RECVORIGDSTADDR:
2509 if (get_user_u32(len, optlen))
2510 return -TARGET_EFAULT;
if (len < 0)
    return -TARGET_EINVAL;
2514 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2517 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2519 if (put_user_u32(len, optlen)
2520 || put_user_u8(val, optval_addr))
2521 return -TARGET_EFAULT;
if (len > sizeof(int))
    len = sizeof(int);
2525 if (put_user_u32(len, optlen)
2526 || put_user_u32(val, optval_addr))
2527 return -TARGET_EFAULT;
2531 ret = -TARGET_ENOPROTOOPT;
gemu_log("getsockopt level=%d optname=%d not yet supported\n",
         level, optname);
2539 ret = -TARGET_EOPNOTSUPP;
2545 /* Convert target low/high pair representing file offset into the host
2546 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2547 * as the kernel doesn't handle them either.
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
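/* Worked example (32-bit target, 64-bit host): tlow=0x89abcdef and
 * thigh=0x01234567 combine into off=0x0123456789abcdef; the 64-bit
 * host then gets *hlow=0x0123456789abcdef and *hhigh=0, since the
 * whole offset fits in the low word. The double shift avoids an
 * undefined full-width shift when TARGET_LONG_BITS == 64.
 */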
2562 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2563 abi_ulong count, int copy)
2565 struct target_iovec *target_vec;
2567 abi_ulong total_len, max_len;
2570 bool bad_address = false;
2576 if (count > IOV_MAX) {
2581 vec = g_try_new0(struct iovec, count);
2587 target_vec = lock_user(VERIFY_READ, target_addr,
2588 count * sizeof(struct target_iovec), 1);
2589 if (target_vec == NULL) {
2594 /* ??? If host page size > target page size, this will result in a
2595 value larger than what we can actually support. */
2596 max_len = 0x7fffffff & TARGET_PAGE_MASK;
2599 for (i = 0; i < count; i++) {
2600 abi_ulong base = tswapal(target_vec[i].iov_base);
2601 abi_long len = tswapal(target_vec[i].iov_len);
2606 } else if (len == 0) {
2607 /* Zero length pointer is ignored. */
2608 vec[i].iov_base = 0;
2610 vec[i].iov_base = lock_user(type, base, len, copy);
/* If the first buffer pointer is bad, this is a fault. But
 * subsequent bad buffers will result in a partial write; this
 * is realized by filling the vector with null pointers and
 * zero lengths. */
2615 if (!vec[i].iov_base) {
2626 if (len > max_len - total_len) {
2627 len = max_len - total_len;
2630 vec[i].iov_len = len;
2634 unlock_user(target_vec, target_addr, 0);
2639 if (tswapal(target_vec[i].iov_len) > 0) {
2640 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2643 unlock_user(target_vec, target_addr, 0);
2650 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2651 abi_ulong count, int copy)
2653 struct target_iovec *target_vec;
2656 target_vec = lock_user(VERIFY_READ, target_addr,
2657 count * sizeof(struct target_iovec), 1);
2659 for (i = 0; i < count; i++) {
2660 abi_ulong base = tswapal(target_vec[i].iov_base);
2661 abi_long len = tswapal(target_vec[i].iov_len);
2665 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2667 unlock_user(target_vec, target_addr, 0);
2673 static inline int target_to_host_sock_type(int *type)
2676 int target_type = *type;
2678 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2679 case TARGET_SOCK_DGRAM:
2680 host_type = SOCK_DGRAM;
2682 case TARGET_SOCK_STREAM:
2683 host_type = SOCK_STREAM;
2686 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2689 if (target_type & TARGET_SOCK_CLOEXEC) {
2690 #if defined(SOCK_CLOEXEC)
2691 host_type |= SOCK_CLOEXEC;
#else
    return -TARGET_EINVAL;
#endif
}
2696 if (target_type & TARGET_SOCK_NONBLOCK) {
2697 #if defined(SOCK_NONBLOCK)
2698 host_type |= SOCK_NONBLOCK;
2699 #elif !defined(O_NONBLOCK)
    return -TARGET_EINVAL;
#endif
}
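/* When the host has O_NONBLOCK but not SOCK_NONBLOCK, the flag is
 * instead emulated after socket creation by sock_flags_fixup().
 */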
2707 /* Try to emulate socket type flags after socket creation. */
2708 static int sock_flags_fixup(int fd, int target_type)
2710 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2711 if (target_type & TARGET_SOCK_NONBLOCK) {
2712 int flags = fcntl(fd, F_GETFL);
2713 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2715 return -TARGET_EINVAL;
2722 /* do_socket() Must return target values and target errnos. */
2723 static abi_long do_socket(int domain, int type, int protocol)
2725 int target_type = type;
2728 ret = target_to_host_sock_type(&type);
2733 if (domain == PF_NETLINK && !(
2734 #ifdef CONFIG_RTNETLINK
2735 protocol == NETLINK_ROUTE ||
2737 protocol == NETLINK_KOBJECT_UEVENT ||
2738 protocol == NETLINK_AUDIT)) {
2739 return -EPFNOSUPPORT;
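/* Packet sockets carry a 16-bit protocol number in network byte
 * order; tswap16() adjusts the guest's representation for a
 * cross-endian host (and is a no-op otherwise).
 */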
2742 if (domain == AF_PACKET ||
2743 (domain == AF_INET && type == SOCK_PACKET)) {
2744 protocol = tswap16(protocol);
2747 ret = get_errno(socket(domain, type, protocol));
2749 ret = sock_flags_fixup(ret, target_type);
2750 if (type == SOCK_PACKET) {
/* Handle an obsolete case:
 * if the socket type is SOCK_PACKET, bind by name
 */
2754 fd_trans_register(ret, &target_packet_trans);
2755 } else if (domain == PF_NETLINK) {
2757 #ifdef CONFIG_RTNETLINK
2759 fd_trans_register(ret, &target_netlink_route_trans);
2762 case NETLINK_KOBJECT_UEVENT:
2763 /* nothing to do: messages are strings */
2766 fd_trans_register(ret, &target_netlink_audit_trans);
2769 g_assert_not_reached();
2776 /* do_bind() Must return target values and target errnos. */
2777 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2783 if ((int)addrlen < 0) {
2784 return -TARGET_EINVAL;
2787 addr = alloca(addrlen+1);
2789 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2793 return get_errno(bind(sockfd, addr, addrlen));
2796 /* do_connect() Must return target values and target errnos. */
2797 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2803 if ((int)addrlen < 0) {
2804 return -TARGET_EINVAL;
2807 addr = alloca(addrlen+1);
2809 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2813 return get_errno(safe_connect(sockfd, addr, addrlen));
2816 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2817 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2818 int flags, int send)
2824 abi_ulong target_vec;
2826 if (msgp->msg_name) {
2827 msg.msg_namelen = tswap32(msgp->msg_namelen);
2828 msg.msg_name = alloca(msg.msg_namelen+1);
2829 ret = target_to_host_sockaddr(fd, msg.msg_name,
2830 tswapal(msgp->msg_name),
2832 if (ret == -TARGET_EFAULT) {
2833 /* For connected sockets msg_name and msg_namelen must
2834 * be ignored, so returning EFAULT immediately is wrong.
2835 * Instead, pass a bad msg_name to the host kernel, and
2836 * let it decide whether to return EFAULT or not.
2838 msg.msg_name = (void *)-1;
2843 msg.msg_name = NULL;
2844 msg.msg_namelen = 0;
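/* Host control messages can be larger than the guest's (e.g. a
 * 64-bit host with a 32-bit guest), so allocate twice the guest's
 * control length.
 */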
2846 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2847 msg.msg_control = alloca(msg.msg_controllen);
2848 memset(msg.msg_control, 0, msg.msg_controllen);
2850 msg.msg_flags = tswap32(msgp->msg_flags);
2852 count = tswapal(msgp->msg_iovlen);
2853 target_vec = tswapal(msgp->msg_iov);
2855 if (count > IOV_MAX) {
/* sendmsg/recvmsg return a different errno for this condition than
 * readv/writev, so we must catch it here before lock_iovec() does.
 */
2859 ret = -TARGET_EMSGSIZE;
2863 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2864 target_vec, count, send);
2866 ret = -host_to_target_errno(errno);
2869 msg.msg_iovlen = count;
2873 if (fd_trans_target_to_host_data(fd)) {
2876 host_msg = g_malloc(msg.msg_iov->iov_len);
2877 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2878 ret = fd_trans_target_to_host_data(fd)(host_msg,
2879 msg.msg_iov->iov_len);
2881 msg.msg_iov->iov_base = host_msg;
2882 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2886 ret = target_to_host_cmsg(&msg, msgp);
2888 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2892 ret = get_errno(safe_recvmsg(fd, &msg, flags));
2893 if (!is_error(ret)) {
2895 if (fd_trans_host_to_target_data(fd)) {
2896 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2897 MIN(msg.msg_iov->iov_len, len));
2899 ret = host_to_target_cmsg(msgp, &msg);
2901 if (!is_error(ret)) {
2902 msgp->msg_namelen = tswap32(msg.msg_namelen);
2903 msgp->msg_flags = tswap32(msg.msg_flags);
2904 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2905 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2906 msg.msg_name, msg.msg_namelen);
2918 unlock_iovec(vec, target_vec, count, !send);
2923 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2924 int flags, int send)
2927 struct target_msghdr *msgp;
if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                      msgp, target_msg, send ? 1 : 0)) {
    return -TARGET_EFAULT;
}
2935 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2936 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2940 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2941 * so it might not have this *mmsg-specific flag either.
2943 #ifndef MSG_WAITFORONE
2944 #define MSG_WAITFORONE 0x10000
2947 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2948 unsigned int vlen, unsigned int flags,
2951 struct target_mmsghdr *mmsgp;
2955 if (vlen > UIO_MAXIOV) {
2959 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
if (mmsgp == NULL) {
    return -TARGET_EFAULT;
}
2964 for (i = 0; i < vlen; i++) {
2965 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2966 if (is_error(ret)) {
2969 mmsgp[i].msg_len = tswap32(ret);
2970 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2971 if (flags & MSG_WAITFORONE) {
2972 flags |= MSG_DONTWAIT;
2976 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2978 /* Return number of datagrams sent if we sent any at all;
2979 * otherwise return the error.
2987 /* do_accept4() Must return target values and target errnos. */
2988 static abi_long do_accept4(int fd, abi_ulong target_addr,
2989 abi_ulong target_addrlen_addr, int flags)
2991 socklen_t addrlen, ret_addrlen;
2996 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2998 if (target_addr == 0) {
2999 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3002 /* linux returns EINVAL if addrlen pointer is invalid */
3003 if (get_user_u32(addrlen, target_addrlen_addr))
3004 return -TARGET_EINVAL;
3006 if ((int)addrlen < 0) {
3007 return -TARGET_EINVAL;
3010 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3011 return -TARGET_EINVAL;
3013 addr = alloca(addrlen);
3015 ret_addrlen = addrlen;
3016 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3017 if (!is_error(ret)) {
3018 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3019 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3020 ret = -TARGET_EFAULT;
3026 /* do_getpeername() Must return target values and target errnos. */
3027 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3028 abi_ulong target_addrlen_addr)
3030 socklen_t addrlen, ret_addrlen;
3034 if (get_user_u32(addrlen, target_addrlen_addr))
3035 return -TARGET_EFAULT;
3037 if ((int)addrlen < 0) {
3038 return -TARGET_EINVAL;
3041 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3042 return -TARGET_EFAULT;
3044 addr = alloca(addrlen);
3046 ret_addrlen = addrlen;
3047 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3048 if (!is_error(ret)) {
3049 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3050 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3051 ret = -TARGET_EFAULT;
3057 /* do_getsockname() Must return target values and target errnos. */
3058 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3059 abi_ulong target_addrlen_addr)
3061 socklen_t addrlen, ret_addrlen;
3065 if (get_user_u32(addrlen, target_addrlen_addr))
3066 return -TARGET_EFAULT;
3068 if ((int)addrlen < 0) {
3069 return -TARGET_EINVAL;
3072 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3073 return -TARGET_EFAULT;
3075 addr = alloca(addrlen);
3077 ret_addrlen = addrlen;
3078 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3079 if (!is_error(ret)) {
3080 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3081 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3082 ret = -TARGET_EFAULT;
3088 /* do_socketpair() Must return target values and target errnos. */
3089 static abi_long do_socketpair(int domain, int type, int protocol,
3090 abi_ulong target_tab_addr)
3095 target_to_host_sock_type(&type);
3097 ret = get_errno(socketpair(domain, type, protocol, tab));
3098 if (!is_error(ret)) {
3099 if (put_user_s32(tab[0], target_tab_addr)
3100 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3101 ret = -TARGET_EFAULT;
3106 /* do_sendto() Must return target values and target errnos. */
3107 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3108 abi_ulong target_addr, socklen_t addrlen)
3112 void *copy_msg = NULL;
3115 if ((int)addrlen < 0) {
3116 return -TARGET_EINVAL;
3119 host_msg = lock_user(VERIFY_READ, msg, len, 1);
if (!host_msg)
    return -TARGET_EFAULT;
3122 if (fd_trans_target_to_host_data(fd)) {
3123 copy_msg = host_msg;
3124 host_msg = g_malloc(len);
3125 memcpy(host_msg, copy_msg, len);
3126 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3132 addr = alloca(addrlen+1);
3133 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3137 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3139 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3144 host_msg = copy_msg;
3146 unlock_user(host_msg, msg, 0);
3150 /* do_recvfrom() Must return target values and target errnos. */
3151 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3152 abi_ulong target_addr,
3153 abi_ulong target_addrlen)
3155 socklen_t addrlen, ret_addrlen;
3160 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
if (!host_msg)
    return -TARGET_EFAULT;
3164 if (get_user_u32(addrlen, target_addrlen)) {
3165 ret = -TARGET_EFAULT;
3168 if ((int)addrlen < 0) {
3169 ret = -TARGET_EINVAL;
3172 addr = alloca(addrlen);
3173 ret_addrlen = addrlen;
3174 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3175 addr, &ret_addrlen));
3177 addr = NULL; /* To keep compiler quiet. */
3178 addrlen = 0; /* To keep compiler quiet. */
3179 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3181 if (!is_error(ret)) {
3182 if (fd_trans_host_to_target_data(fd)) {
3184 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3185 if (is_error(trans)) {
3191 host_to_target_sockaddr(target_addr, addr,
3192 MIN(addrlen, ret_addrlen));
3193 if (put_user_u32(ret_addrlen, target_addrlen)) {
3194 ret = -TARGET_EFAULT;
3198 unlock_user(host_msg, msg, len);
3201 unlock_user(host_msg, msg, 0);
3206 #ifdef TARGET_NR_socketcall
3207 /* do_socketcall() must return target values and target errnos. */
3208 static abi_long do_socketcall(int num, abi_ulong vptr)
3210 static const unsigned nargs[] = { /* number of arguments per operation */
3211 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3212 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3213 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3214 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3215 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3216 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3217 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3218 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3219 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3220 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3221 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3222 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3223 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3224 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3225 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3226 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3227 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3228 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3229 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3230 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3232 abi_long a[6]; /* max 6 args */
3235 /* check the range of the first argument num */
3236 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3237 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3238 return -TARGET_EINVAL;
3240 /* ensure we have space for args */
3241 if (nargs[num] > ARRAY_SIZE(a)) {
3242 return -TARGET_EINVAL;
3244 /* collect the arguments in a[] according to nargs[] */
3245 for (i = 0; i < nargs[num]; ++i) {
3246 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3247 return -TARGET_EFAULT;
3250 /* now when we have the args, invoke the appropriate underlying function */
3252 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3253 return do_socket(a[0], a[1], a[2]);
3254 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3255 return do_bind(a[0], a[1], a[2]);
3256 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3257 return do_connect(a[0], a[1], a[2]);
3258 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3259 return get_errno(listen(a[0], a[1]));
3260 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3261 return do_accept4(a[0], a[1], a[2], 0);
3262 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3263 return do_getsockname(a[0], a[1], a[2]);
3264 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3265 return do_getpeername(a[0], a[1], a[2]);
3266 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3267 return do_socketpair(a[0], a[1], a[2], a[3]);
3268 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3269 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3270 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3271 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3272 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3273 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3274 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3275 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3276 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3277 return get_errno(shutdown(a[0], a[1]));
3278 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3279 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3280 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3281 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3282 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3283 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3284 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3285 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3286 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3287 return do_accept4(a[0], a[1], a[2], a[3]);
3288 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3289 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3290 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3291 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3293 gemu_log("Unsupported socketcall: %d\n", num);
3294 return -TARGET_EINVAL;
3299 #define N_SHM_REGIONS 32
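/* Book-keeping for guest shmat() mappings, so that do_shmdt() can
 * find a segment's size and clear its page flags on detach.
 */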
3301 static struct shm_region {
3305 } shm_regions[N_SHM_REGIONS];
3307 #ifndef TARGET_SEMID64_DS
3308 /* asm-generic version of this struct */
3309 struct target_semid64_ds
3311 struct target_ipc_perm sem_perm;
3312 abi_ulong sem_otime;
3313 #if TARGET_ABI_BITS == 32
3314 abi_ulong __unused1;
3316 abi_ulong sem_ctime;
3317 #if TARGET_ABI_BITS == 32
3318 abi_ulong __unused2;
3320 abi_ulong sem_nsems;
3321 abi_ulong __unused3;
3322 abi_ulong __unused4;
3326 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3327 abi_ulong target_addr)
3329 struct target_ipc_perm *target_ip;
3330 struct target_semid64_ds *target_sd;
3332 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3333 return -TARGET_EFAULT;
3334 target_ip = &(target_sd->sem_perm);
3335 host_ip->__key = tswap32(target_ip->__key);
3336 host_ip->uid = tswap32(target_ip->uid);
3337 host_ip->gid = tswap32(target_ip->gid);
3338 host_ip->cuid = tswap32(target_ip->cuid);
3339 host_ip->cgid = tswap32(target_ip->cgid);
3340 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3341 host_ip->mode = tswap32(target_ip->mode);
3343 host_ip->mode = tswap16(target_ip->mode);
3345 #if defined(TARGET_PPC)
3346 host_ip->__seq = tswap32(target_ip->__seq);
3348 host_ip->__seq = tswap16(target_ip->__seq);
3350 unlock_user_struct(target_sd, target_addr, 0);
3354 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3355 struct ipc_perm *host_ip)
3357 struct target_ipc_perm *target_ip;
3358 struct target_semid64_ds *target_sd;
3360 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3361 return -TARGET_EFAULT;
3362 target_ip = &(target_sd->sem_perm);
3363 target_ip->__key = tswap32(host_ip->__key);
3364 target_ip->uid = tswap32(host_ip->uid);
3365 target_ip->gid = tswap32(host_ip->gid);
3366 target_ip->cuid = tswap32(host_ip->cuid);
3367 target_ip->cgid = tswap32(host_ip->cgid);
3368 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3369 target_ip->mode = tswap32(host_ip->mode);
3371 target_ip->mode = tswap16(host_ip->mode);
3373 #if defined(TARGET_PPC)
3374 target_ip->__seq = tswap32(host_ip->__seq);
3376 target_ip->__seq = tswap16(host_ip->__seq);
3378 unlock_user_struct(target_sd, target_addr, 1);
3382 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3383 abi_ulong target_addr)
3385 struct target_semid64_ds *target_sd;
3387 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3388 return -TARGET_EFAULT;
3389 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3390 return -TARGET_EFAULT;
3391 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3392 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3393 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3394 unlock_user_struct(target_sd, target_addr, 0);
3398 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3399 struct semid_ds *host_sd)
3401 struct target_semid64_ds *target_sd;
3403 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3404 return -TARGET_EFAULT;
3405 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3406 return -TARGET_EFAULT;
3407 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3408 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3409 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3410 unlock_user_struct(target_sd, target_addr, 1);
3414 struct target_seminfo {
3427 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3428 struct seminfo *host_seminfo)
3430 struct target_seminfo *target_seminfo;
3431 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3432 return -TARGET_EFAULT;
3433 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3434 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3435 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3436 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3437 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3438 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3439 __put_user(host_seminfo->semume, &target_seminfo->semume);
3440 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3441 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3442 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3443 unlock_user_struct(target_seminfo, target_addr, 1);
3449 struct semid_ds *buf;
3450 unsigned short *array;
3451 struct seminfo *__buf;
3454 union target_semun {
3461 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3462 abi_ulong target_addr)
3465 unsigned short *array;
3467 struct semid_ds semid_ds;
3470 semun.buf = &semid_ds;
3472 ret = semctl(semid, 0, IPC_STAT, semun);
3474 return get_errno(ret);
3476 nsems = semid_ds.sem_nsems;
3478 *host_array = g_try_new(unsigned short, nsems);
3480 return -TARGET_ENOMEM;
3482 array = lock_user(VERIFY_READ, target_addr,
3483 nsems*sizeof(unsigned short), 1);
3485 g_free(*host_array);
3486 return -TARGET_EFAULT;
for (i = 0; i < nsems; i++) {
3490 __get_user((*host_array)[i], &array[i]);
3492 unlock_user(array, target_addr, 0);
3497 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3498 unsigned short **host_array)
3501 unsigned short *array;
3503 struct semid_ds semid_ds;
3506 semun.buf = &semid_ds;
3508 ret = semctl(semid, 0, IPC_STAT, semun);
3510 return get_errno(ret);
3512 nsems = semid_ds.sem_nsems;
3514 array = lock_user(VERIFY_WRITE, target_addr,
3515 nsems*sizeof(unsigned short), 0);
3517 return -TARGET_EFAULT;
for (i = 0; i < nsems; i++) {
3520 __put_user((*host_array)[i], &array[i]);
3522 g_free(*host_array);
3523 unlock_user(array, target_addr, 1);
3528 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3529 abi_ulong target_arg)
3531 union target_semun target_su = { .buf = target_arg };
3533 struct semid_ds dsarg;
3534 unsigned short *array = NULL;
3535 struct seminfo seminfo;
3536 abi_long ret = -TARGET_EINVAL;
3543 /* In 64 bit cross-endian situations, we will erroneously pick up
3544 * the wrong half of the union for the "val" element. To rectify
3545 * this, the entire 8-byte structure is byteswapped, followed by
3546 * a swap of the 4 byte val field. In other cases, the data is
3547 * already in proper host byte order. */
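/* Example: a 32-bit "val" stored by a cross-endian guest lands in
 * the wrong half of the 8-byte union after the argument's 64-bit
 * swap; tswapal() restores the original layout and tswap32() then
 * fixes the byte order of the 4-byte field itself.
 */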
3548 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3549 target_su.buf = tswapal(target_su.buf);
3550 arg.val = tswap32(target_su.val);
3552 arg.val = target_su.val;
3554 ret = get_errno(semctl(semid, semnum, cmd, arg));
3558 err = target_to_host_semarray(semid, &array, target_su.array);
3562 ret = get_errno(semctl(semid, semnum, cmd, arg));
3563 err = host_to_target_semarray(semid, target_su.array, &array);
3570 err = target_to_host_semid_ds(&dsarg, target_su.buf);
3574 ret = get_errno(semctl(semid, semnum, cmd, arg));
3575 err = host_to_target_semid_ds(target_su.buf, &dsarg);
3581 arg.__buf = &seminfo;
3582 ret = get_errno(semctl(semid, semnum, cmd, arg));
3583 err = host_to_target_seminfo(target_su.__buf, &seminfo);
3591 ret = get_errno(semctl(semid, semnum, cmd, NULL));
3598 struct target_sembuf {
3599 unsigned short sem_num;
3604 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3605 abi_ulong target_addr,
3608 struct target_sembuf *target_sembuf;
3611 target_sembuf = lock_user(VERIFY_READ, target_addr,
3612 nsops*sizeof(struct target_sembuf), 1);
3614 return -TARGET_EFAULT;
for (i = 0; i < nsops; i++) {
3617 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3618 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3619 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3622 unlock_user(target_sembuf, target_addr, 0);
3627 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3629 struct sembuf sops[nsops];
3632 if (target_to_host_sembuf(sops, ptr, nsops))
3633 return -TARGET_EFAULT;
3635 ret = -TARGET_ENOSYS;
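/* Prefer the direct semtimedop syscall where the host provides it;
 * otherwise fall back to the multiplexed ipc syscall below.
 */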
3636 #ifdef __NR_semtimedop
3637 ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3640 if (ret == -TARGET_ENOSYS) {
3641 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3647 struct target_msqid_ds
3649 struct target_ipc_perm msg_perm;
3650 abi_ulong msg_stime;
3651 #if TARGET_ABI_BITS == 32
3652 abi_ulong __unused1;
3654 abi_ulong msg_rtime;
3655 #if TARGET_ABI_BITS == 32
3656 abi_ulong __unused2;
3658 abi_ulong msg_ctime;
3659 #if TARGET_ABI_BITS == 32
3660 abi_ulong __unused3;
3662 abi_ulong __msg_cbytes;
3664 abi_ulong msg_qbytes;
3665 abi_ulong msg_lspid;
3666 abi_ulong msg_lrpid;
3667 abi_ulong __unused4;
3668 abi_ulong __unused5;
3671 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3672 abi_ulong target_addr)
3674 struct target_msqid_ds *target_md;
3676 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3677 return -TARGET_EFAULT;
3678 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3679 return -TARGET_EFAULT;
3680 host_md->msg_stime = tswapal(target_md->msg_stime);
3681 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3682 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3683 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3684 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3685 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3686 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3687 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3688 unlock_user_struct(target_md, target_addr, 0);
3692 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3693 struct msqid_ds *host_md)
3695 struct target_msqid_ds *target_md;
3697 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3698 return -TARGET_EFAULT;
3699 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3700 return -TARGET_EFAULT;
3701 target_md->msg_stime = tswapal(host_md->msg_stime);
3702 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3703 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3704 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3705 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3706 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3707 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3708 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3709 unlock_user_struct(target_md, target_addr, 1);
3713 struct target_msginfo {
3721 unsigned short int msgseg;
3724 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3725 struct msginfo *host_msginfo)
3727 struct target_msginfo *target_msginfo;
3728 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3729 return -TARGET_EFAULT;
3730 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3731 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3732 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3733 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3734 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3735 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3736 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3737 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3738 unlock_user_struct(target_msginfo, target_addr, 1);
3742 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3744 struct msqid_ds dsarg;
3745 struct msginfo msginfo;
3746 abi_long ret = -TARGET_EINVAL;
3754 if (target_to_host_msqid_ds(&dsarg,ptr))
3755 return -TARGET_EFAULT;
3756 ret = get_errno(msgctl(msgid, cmd, &dsarg));
3757 if (host_to_target_msqid_ds(ptr,&dsarg))
3758 return -TARGET_EFAULT;
3761 ret = get_errno(msgctl(msgid, cmd, NULL));
3765 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3766 if (host_to_target_msginfo(ptr, &msginfo))
3767 return -TARGET_EFAULT;
3774 struct target_msgbuf {
3779 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3780 ssize_t msgsz, int msgflg)
3782 struct target_msgbuf *target_mb;
3783 struct msgbuf *host_mb;
if (msgsz < 0) {
    return -TARGET_EINVAL;
}
3790 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3791 return -TARGET_EFAULT;
3792 host_mb = g_try_malloc(msgsz + sizeof(long));
3794 unlock_user_struct(target_mb, msgp, 0);
3795 return -TARGET_ENOMEM;
3797 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3798 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3799 ret = -TARGET_ENOSYS;
3801 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3804 if (ret == -TARGET_ENOSYS) {
3805 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
3810 unlock_user_struct(target_mb, msgp, 0);
3815 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3816 ssize_t msgsz, abi_long msgtyp,
3819 struct target_msgbuf *target_mb;
3821 struct msgbuf *host_mb;
if (msgsz < 0) {
    return -TARGET_EINVAL;
}
3828 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3829 return -TARGET_EFAULT;
3831 host_mb = g_try_malloc(msgsz + sizeof(long));
3833 ret = -TARGET_ENOMEM;
3836 ret = -TARGET_ENOSYS;
3838 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3841 if (ret == -TARGET_ENOSYS) {
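/* IPCOP_CALL(1, IPCOP_msgrcv) selects the version-1 calling
 * convention, which passes msgp and msgtyp directly rather than
 * through a struct ipc_kludge (see the version-0 path in do_ipc()).
 */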
3842 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
3843 msgflg, host_mb, msgtyp));
3848 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3849 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3850 if (!target_mtext) {
3851 ret = -TARGET_EFAULT;
3854 memcpy(target_mb->mtext, host_mb->mtext, ret);
3855 unlock_user(target_mtext, target_mtext_addr, ret);
3858 target_mb->mtype = tswapal(host_mb->mtype);
3862 unlock_user_struct(target_mb, msgp, 1);
3867 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3868 abi_ulong target_addr)
3870 struct target_shmid_ds *target_sd;
3872 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3873 return -TARGET_EFAULT;
3874 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3875 return -TARGET_EFAULT;
3876 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3877 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3878 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3879 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3880 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3881 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3882 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3883 unlock_user_struct(target_sd, target_addr, 0);
3887 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3888 struct shmid_ds *host_sd)
3890 struct target_shmid_ds *target_sd;
3892 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3893 return -TARGET_EFAULT;
3894 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3895 return -TARGET_EFAULT;
3896 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3897 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3898 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3899 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3900 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3901 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3902 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3903 unlock_user_struct(target_sd, target_addr, 1);
3907 struct target_shminfo {
3915 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3916 struct shminfo *host_shminfo)
3918 struct target_shminfo *target_shminfo;
3919 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3920 return -TARGET_EFAULT;
3921 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3922 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3923 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3924 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3925 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3926 unlock_user_struct(target_shminfo, target_addr, 1);
3930 struct target_shm_info {
3935 abi_ulong swap_attempts;
3936 abi_ulong swap_successes;
3939 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3940 struct shm_info *host_shm_info)
3942 struct target_shm_info *target_shm_info;
3943 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3944 return -TARGET_EFAULT;
3945 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3946 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3947 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3948 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3949 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3950 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3951 unlock_user_struct(target_shm_info, target_addr, 1);
3955 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3957 struct shmid_ds dsarg;
3958 struct shminfo shminfo;
3959 struct shm_info shm_info;
3960 abi_long ret = -TARGET_EINVAL;
3968 if (target_to_host_shmid_ds(&dsarg, buf))
3969 return -TARGET_EFAULT;
3970 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3971 if (host_to_target_shmid_ds(buf, &dsarg))
3972 return -TARGET_EFAULT;
3975 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3976 if (host_to_target_shminfo(buf, &shminfo))
3977 return -TARGET_EFAULT;
3980 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3981 if (host_to_target_shm_info(buf, &shm_info))
3982 return -TARGET_EFAULT;
3987 ret = get_errno(shmctl(shmid, cmd, NULL));
3994 #ifndef TARGET_FORCE_SHMLBA
3995 /* For most architectures, SHMLBA is the same as the page size;
3996 * some architectures have larger values, in which case they should
3997 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3998 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3999 * and defining its own value for SHMLBA.
4001 * The kernel also permits SHMLBA to be set by the architecture to a
4002 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4003 * this means that addresses are rounded to the large size if
4004 * SHM_RND is set but addresses not aligned to that size are not rejected
4005 * as long as they are at least page-aligned. Since the only architecture
* which uses this is ia64, this code doesn't provide for that oddity.
4008 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4010 return TARGET_PAGE_SIZE;
4014 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4015 int shmid, abi_ulong shmaddr, int shmflg)
4019 struct shmid_ds shm_info;
4023 /* find out the length of the shared memory segment */
4024 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4025 if (is_error(ret)) {
4026 /* can't get length, bail out */
4030 shmlba = target_shmlba(cpu_env);
4032 if (shmaddr & (shmlba - 1)) {
4033 if (shmflg & SHM_RND) {
4034 shmaddr &= ~(shmlba - 1);
4036 return -TARGET_EINVAL;
4039 if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4040 return -TARGET_EINVAL;
4046 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4048 abi_ulong mmap_start;
4050 /* In order to use the host shmat, we need to honor host SHMLBA. */
4051 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4053 if (mmap_start == -1) {
4055 host_raddr = (void *)-1;
4057 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4060 if (host_raddr == (void *)-1) {
4062 return get_errno((long)host_raddr);
raddr = h2g((unsigned long)host_raddr);
4066 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4067 PAGE_VALID | PAGE_READ |
4068 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
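/* Record the mapping so do_shmdt() can later recover its size and
 * clear the page flags.
 */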
4070 for (i = 0; i < N_SHM_REGIONS; i++) {
4071 if (!shm_regions[i].in_use) {
4072 shm_regions[i].in_use = true;
4073 shm_regions[i].start = raddr;
4074 shm_regions[i].size = shm_info.shm_segsz;
4084 static inline abi_long do_shmdt(abi_ulong shmaddr)
4091 for (i = 0; i < N_SHM_REGIONS; ++i) {
4092 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4093 shm_regions[i].in_use = false;
4094 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4098 rv = get_errno(shmdt(g2h(shmaddr)));
4105 #ifdef TARGET_NR_ipc
4106 /* ??? This only works with linear mappings. */
4107 /* do_ipc() must return target values and target errnos. */
4108 static abi_long do_ipc(CPUArchState *cpu_env,
4109 unsigned int call, abi_long first,
4110 abi_long second, abi_long third,
4111 abi_long ptr, abi_long fifth)
4116 version = call >> 16;
4121 ret = do_semop(first, ptr, second);
4125 ret = get_errno(semget(first, second, third));
4128 case IPCOP_semctl: {
4129 /* The semun argument to semctl is passed by value, so dereference the
4132 get_user_ual(atptr, ptr);
4133 ret = do_semctl(first, second, third, atptr);
4138 ret = get_errno(msgget(first, second));
4142 ret = do_msgsnd(first, ptr, second, third);
4146 ret = do_msgctl(first, second, ptr);
4153 struct target_ipc_kludge {
4158 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4159 ret = -TARGET_EFAULT;
4163 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4165 unlock_user_struct(tmp, ptr, 0);
4169 ret = do_msgrcv(first, ptr, second, fifth, third);
4178 raddr = do_shmat(cpu_env, first, ptr, second);
4179 if (is_error(raddr))
4180 return get_errno(raddr);
4181 if (put_user_ual(raddr, third))
4182 return -TARGET_EFAULT;
4186 ret = -TARGET_EINVAL;
4191 ret = do_shmdt(ptr);
4195 /* IPC_* flag values are the same on all linux platforms */
4196 ret = get_errno(shmget(first, second, third));
4199 /* IPC_* and SHM_* command values are the same on all linux platforms */
4201 ret = do_shmctl(first, second, ptr);
4204 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4205 ret = -TARGET_ENOSYS;
4212 /* kernel structure types definitions */
4214 #define STRUCT(name, ...) STRUCT_ ## name,
4215 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4217 #include "syscall_types.h"
4221 #undef STRUCT_SPECIAL
4223 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4224 #define STRUCT_SPECIAL(name)
4225 #include "syscall_types.h"
4227 #undef STRUCT_SPECIAL
4229 typedef struct IOCTLEntry IOCTLEntry;
4231 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4232 int fd, int cmd, abi_long arg);
4236 unsigned int host_cmd;
4239 do_ioctl_fn *do_ioctl;
4240 const argtype arg_type[5];
4243 #define IOC_R 0x0001
4244 #define IOC_W 0x0002
4245 #define IOC_RW (IOC_R | IOC_W)
4247 #define MAX_STRUCT_SIZE 4096
4249 #ifdef CONFIG_FIEMAP
4250 /* So fiemap access checks don't overflow on 32 bit systems.
4251 * This is very slightly smaller than the limit imposed by
4252 * the underlying kernel.
4254 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4255 / sizeof(struct fiemap_extent))
4257 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4258 int fd, int cmd, abi_long arg)
4260 /* The parameter for this ioctl is a struct fiemap followed
4261 * by an array of struct fiemap_extent whose size is set
 * in fiemap->fm_extent_count. The array is filled in by the
 * kernel.
 */
4265 int target_size_in, target_size_out;
4267 const argtype *arg_type = ie->arg_type;
4268 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4271 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4275 assert(arg_type[0] == TYPE_PTR);
4276 assert(ie->access == IOC_RW);
4278 target_size_in = thunk_type_size(arg_type, 0);
4279 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4281 return -TARGET_EFAULT;
4283 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4284 unlock_user(argptr, arg, 0);
4285 fm = (struct fiemap *)buf_temp;
4286 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4287 return -TARGET_EINVAL;
4290 outbufsz = sizeof (*fm) +
4291 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4293 if (outbufsz > MAX_STRUCT_SIZE) {
4294 /* We can't fit all the extents into the fixed size buffer.
4295 * Allocate one that is large enough and use it instead.
4297 fm = g_try_malloc(outbufsz);
4299 return -TARGET_ENOMEM;
4301 memcpy(fm, buf_temp, sizeof(struct fiemap));
4304 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4305 if (!is_error(ret)) {
4306 target_size_out = target_size_in;
4307 /* An extent_count of 0 means we were only counting the extents
4308 * so there are no structs to copy
4310 if (fm->fm_extent_count != 0) {
4311 target_size_out += fm->fm_mapped_extents * extent_size;
4313 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4315 ret = -TARGET_EFAULT;
4317 /* Convert the struct fiemap */
4318 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4319 if (fm->fm_extent_count != 0) {
4320 p = argptr + target_size_in;
4321 /* ...and then all the struct fiemap_extents */
4322 for (i = 0; i < fm->fm_mapped_extents; i++) {
4323 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4328 unlock_user(argptr, arg, target_size_out);
4338 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4339 int fd, int cmd, abi_long arg)
4341 const argtype *arg_type = ie->arg_type;
4345 struct ifconf *host_ifconf;
4347 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4348 int target_ifreq_size;
4353 abi_long target_ifc_buf;
4357 assert(arg_type[0] == TYPE_PTR);
4358 assert(ie->access == IOC_RW);
4361 target_size = thunk_type_size(arg_type, 0);
4363 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4365 return -TARGET_EFAULT;
4366 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4367 unlock_user(argptr, arg, 0);
4369 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4370 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4371 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4373 if (target_ifc_buf != 0) {
4374 target_ifc_len = host_ifconf->ifc_len;
4375 nb_ifreq = target_ifc_len / target_ifreq_size;
4376 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4378 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4379 if (outbufsz > MAX_STRUCT_SIZE) {
* We can't fit all the ifreq entries into the fixed-size buffer.
* Allocate one that is large enough and use it instead.
4384 host_ifconf = malloc(outbufsz);
4386 return -TARGET_ENOMEM;
4388 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4391 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4393 host_ifconf->ifc_len = host_ifc_len;
4395 host_ifc_buf = NULL;
4397 host_ifconf->ifc_buf = host_ifc_buf;
4399 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4400 if (!is_error(ret)) {
4401 /* convert host ifc_len to target ifc_len */
4403 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4404 target_ifc_len = nb_ifreq * target_ifreq_size;
4405 host_ifconf->ifc_len = target_ifc_len;
4407 /* restore target ifc_buf */
4409 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4411 /* copy struct ifconf to target user */
4413 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4415 return -TARGET_EFAULT;
4416 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4417 unlock_user(argptr, arg, target_size);
4419 if (target_ifc_buf != 0) {
4420 /* copy ifreq[] to target user */
4421 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4422 for (i = 0; i < nb_ifreq ; i++) {
4423 thunk_convert(argptr + i * target_ifreq_size,
4424 host_ifc_buf + i * sizeof(struct ifreq),
4425 ifreq_arg_type, THUNK_TARGET);
4427 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4438 #if defined(CONFIG_USBFS)
4439 #if HOST_LONG_BITS > 64
4440 #error USBDEVFS thunks do not support >64 bit hosts yet.
4443 uint64_t target_urb_adr;
4444 uint64_t target_buf_adr;
4445 char *target_buf_ptr;
4446 struct usbdevfs_urb host_urb;
4449 static GHashTable *usbdevfs_urb_hashtable(void)
4451 static GHashTable *urb_hashtable;
4453 if (!urb_hashtable) {
4454 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4456 return urb_hashtable;
4459 static void urb_hashtable_insert(struct live_urb *urb)
4461 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4462 g_hash_table_insert(urb_hashtable, urb, urb);
4465 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4467 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4468 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4471 static void urb_hashtable_remove(struct live_urb *urb)
4473 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4474 g_hash_table_remove(urb_hashtable, urb);
4478 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4479 int fd, int cmd, abi_long arg)
4481 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4482 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4483 struct live_urb *lurb;
4487 uintptr_t target_urb_adr;
4490 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4492 memset(buf_temp, 0, sizeof(uint64_t));
4493 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4494 if (is_error(ret)) {
4498 memcpy(&hurb, buf_temp, sizeof(uint64_t));
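/* The kernel handed back a pointer to the host_urb member embedded
 * in our live_urb; recover the enclosing struct (container-of idiom).
 */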
4499 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4500 if (!lurb->target_urb_adr) {
4501 return -TARGET_EFAULT;
4503 urb_hashtable_remove(lurb);
4504 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4505 lurb->host_urb.buffer_length);
4506 lurb->target_buf_ptr = NULL;
4508 /* restore the guest buffer pointer */
4509 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4511 /* update the guest urb struct */
4512 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4515 return -TARGET_EFAULT;
4517 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4518 unlock_user(argptr, lurb->target_urb_adr, target_size);
4520 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4521 /* write back the urb handle */
4522 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4525 return -TARGET_EFAULT;
4528 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4529 target_urb_adr = lurb->target_urb_adr;
4530 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4531 unlock_user(argptr, arg, target_size);
4538 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4539 uint8_t *buf_temp __attribute__((unused)),
4540 int fd, int cmd, abi_long arg)
4542 struct live_urb *lurb;
4544 /* map target address back to host URB with metadata. */
4545 lurb = urb_hashtable_lookup(arg);
4547 return -TARGET_EFAULT;
4549 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4553 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4554 int fd, int cmd, abi_long arg)
4556 const argtype *arg_type = ie->arg_type;
4561 struct live_urb *lurb;
* Each submitted URB needs to map to a unique ID for the
* kernel, and that unique ID needs to be a pointer to
* host memory.  Hence, we need to malloc for each URB.
* Isochronous transfers have a variable length struct.
4570 target_size = thunk_type_size(arg_type, THUNK_TARGET);
4572 /* construct host copy of urb and metadata */
4573 lurb = g_try_malloc0(sizeof(struct live_urb));
4575 return -TARGET_ENOMEM;
4578 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4581 return -TARGET_EFAULT;
4583 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4584 unlock_user(argptr, arg, 0);
4586 lurb->target_urb_adr = arg;
4587 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4589     /* buffer space used depends on endpoint type, so lock the entire buffer */
4590     /* control-type URBs should check the buffer contents for the true direction */
4591 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4592 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4593 lurb->host_urb.buffer_length, 1);
4594 if (lurb->target_buf_ptr == NULL) {
4596 return -TARGET_EFAULT;
4599 /* update buffer pointer in host copy */
4600 lurb->host_urb.buffer = lurb->target_buf_ptr;
4602 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4603 if (is_error(ret)) {
4604 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4607 urb_hashtable_insert(lurb);
4612 #endif /* CONFIG_USBFS */
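/*
 * Illustrative sketch of the URB lifecycle that the three handlers above
 * implement (hypothetical helper, not part of the real flow).  The guest
 * URB address is captured at SUBMITURB time and doubles as the hash key:
 * because target_urb_adr is the first member of struct live_urb, a
 * pointer to the struct is also a pointer to a 64-bit key, which is what
 * g_int64_hash()/g_int64_equal() expect.
 */
#if 0
static void usbdevfs_lifecycle_example(uint64_t guest_urb_adr)
{
    struct live_urb *lurb = g_try_malloc0(sizeof(struct live_urb));

    /* submit: remember the guest address and index the record by it */
    lurb->target_urb_adr = guest_urb_adr;
    urb_hashtable_insert(lurb);

    /* reap/discard: resolve the guest address back to our record */
    struct live_urb *found = urb_hashtable_lookup(guest_urb_adr);
    g_assert(found == lurb);

    urb_hashtable_remove(found);
    g_free(found);
}
#endif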
4614 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4615 int cmd, abi_long arg)
4618 struct dm_ioctl *host_dm;
4619 abi_long guest_data;
4620 uint32_t guest_data_size;
4622 const argtype *arg_type = ie->arg_type;
4624 void *big_buf = NULL;
4628 target_size = thunk_type_size(arg_type, 0);
4629 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4631 ret = -TARGET_EFAULT;
4634 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4635 unlock_user(argptr, arg, 0);
4637 /* buf_temp is too small, so fetch things into a bigger buffer */
4638 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4639 memcpy(big_buf, buf_temp, target_size);
4643 guest_data = arg + host_dm->data_start;
4644 if ((guest_data - arg) < 0) {
4645 ret = -TARGET_EINVAL;
4648 guest_data_size = host_dm->data_size - host_dm->data_start;
4649 host_data = (char*)host_dm + host_dm->data_start;
4651 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4653 ret = -TARGET_EFAULT;
4657 switch (ie->host_cmd) {
4659 case DM_LIST_DEVICES:
4662 case DM_DEV_SUSPEND:
4665 case DM_TABLE_STATUS:
4666 case DM_TABLE_CLEAR:
4668 case DM_LIST_VERSIONS:
4672 case DM_DEV_SET_GEOMETRY:
4673 /* data contains only strings */
4674 memcpy(host_data, argptr, guest_data_size);
4677 memcpy(host_data, argptr, guest_data_size);
4678 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4682 void *gspec = argptr;
4683 void *cur_data = host_data;
4684 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4685 int spec_size = thunk_type_size(arg_type, 0);
4688 for (i = 0; i < host_dm->target_count; i++) {
4689 struct dm_target_spec *spec = cur_data;
4693 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4694 slen = strlen((char*)gspec + spec_size) + 1;
4696 spec->next = sizeof(*spec) + slen;
4697 strcpy((char*)&spec[1], gspec + spec_size);
4699 cur_data += spec->next;
4704 ret = -TARGET_EINVAL;
4705 unlock_user(argptr, guest_data, 0);
4708 unlock_user(argptr, guest_data, 0);
4710 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4711 if (!is_error(ret)) {
4712 guest_data = arg + host_dm->data_start;
4713 guest_data_size = host_dm->data_size - host_dm->data_start;
4714 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4715 switch (ie->host_cmd) {
4720 case DM_DEV_SUSPEND:
4723 case DM_TABLE_CLEAR:
4725 case DM_DEV_SET_GEOMETRY:
4726 /* no return data */
4728 case DM_LIST_DEVICES:
4730 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4731 uint32_t remaining_data = guest_data_size;
4732 void *cur_data = argptr;
4733 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4734 int nl_size = 12; /* can't use thunk_size due to alignment */
4737 uint32_t next = nl->next;
4739 nl->next = nl_size + (strlen(nl->name) + 1);
4741 if (remaining_data < nl->next) {
4742 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4745 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4746 strcpy(cur_data + nl_size, nl->name);
4747 cur_data += nl->next;
4748 remaining_data -= nl->next;
4752 nl = (void*)nl + next;
4757 case DM_TABLE_STATUS:
4759 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4760 void *cur_data = argptr;
4761 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4762 int spec_size = thunk_type_size(arg_type, 0);
4765 for (i = 0; i < host_dm->target_count; i++) {
4766 uint32_t next = spec->next;
4767 int slen = strlen((char*)&spec[1]) + 1;
4768 spec->next = (cur_data - argptr) + spec_size + slen;
4769 if (guest_data_size < spec->next) {
4770 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4773 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4774 strcpy(cur_data + spec_size, (char*)&spec[1]);
4775 cur_data = argptr + spec->next;
4776 spec = (void*)host_dm + host_dm->data_start + next;
4782 void *hdata = (void*)host_dm + host_dm->data_start;
4783 int count = *(uint32_t*)hdata;
4784 uint64_t *hdev = hdata + 8;
4785 uint64_t *gdev = argptr + 8;
4788 *(uint32_t*)argptr = tswap32(count);
4789 for (i = 0; i < count; i++) {
4790 *gdev = tswap64(*hdev);
4796 case DM_LIST_VERSIONS:
4798 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4799 uint32_t remaining_data = guest_data_size;
4800 void *cur_data = argptr;
4801 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4802 int vers_size = thunk_type_size(arg_type, 0);
4805 uint32_t next = vers->next;
4807 vers->next = vers_size + (strlen(vers->name) + 1);
4809 if (remaining_data < vers->next) {
4810 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4813 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4814 strcpy(cur_data + vers_size, vers->name);
4815 cur_data += vers->next;
4816 remaining_data -= vers->next;
4820 vers = (void*)vers + next;
4825 unlock_user(argptr, guest_data, 0);
4826 ret = -TARGET_EINVAL;
4829 unlock_user(argptr, guest_data, guest_data_size);
4831 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4833 ret = -TARGET_EFAULT;
4836 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4837 unlock_user(argptr, arg, target_size);
4844 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4845 int cmd, abi_long arg)
4849 const argtype *arg_type = ie->arg_type;
4850 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4853 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4854 struct blkpg_partition host_part;
4856 /* Read and convert blkpg */
4858 target_size = thunk_type_size(arg_type, 0);
4859 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4861 ret = -TARGET_EFAULT;
4864 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4865 unlock_user(argptr, arg, 0);
4867 switch (host_blkpg->op) {
4868 case BLKPG_ADD_PARTITION:
4869 case BLKPG_DEL_PARTITION:
4870 /* payload is struct blkpg_partition */
4873 /* Unknown opcode */
4874 ret = -TARGET_EINVAL;
4878 /* Read and convert blkpg->data */
4879 arg = (abi_long)(uintptr_t)host_blkpg->data;
4880 target_size = thunk_type_size(part_arg_type, 0);
4881 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4883 ret = -TARGET_EFAULT;
4886 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4887 unlock_user(argptr, arg, 0);
4889 /* Swizzle the data pointer to our local copy and call! */
4890 host_blkpg->data = &host_part;
4891 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4897 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4898 int fd, int cmd, abi_long arg)
4900 const argtype *arg_type = ie->arg_type;
4901 const StructEntry *se;
4902 const argtype *field_types;
4903 const int *dst_offsets, *src_offsets;
4906 abi_ulong *target_rt_dev_ptr = NULL;
4907 unsigned long *host_rt_dev_ptr = NULL;
4911 assert(ie->access == IOC_W);
4912 assert(*arg_type == TYPE_PTR);
4914 assert(*arg_type == TYPE_STRUCT);
4915 target_size = thunk_type_size(arg_type, 0);
4916 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4918 return -TARGET_EFAULT;
4921 assert(*arg_type == (int)STRUCT_rtentry);
4922 se = struct_entries + *arg_type++;
4923 assert(se->convert[0] == NULL);
4924 /* convert struct here to be able to catch rt_dev string */
4925 field_types = se->field_types;
4926 dst_offsets = se->field_offsets[THUNK_HOST];
4927 src_offsets = se->field_offsets[THUNK_TARGET];
4928 for (i = 0; i < se->nb_fields; i++) {
4929 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4930 assert(*field_types == TYPE_PTRVOID);
4931 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4932 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4933 if (*target_rt_dev_ptr != 0) {
4934 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4935 tswapal(*target_rt_dev_ptr));
4936 if (!*host_rt_dev_ptr) {
4937 unlock_user(argptr, arg, 0);
4938 return -TARGET_EFAULT;
4941 *host_rt_dev_ptr = 0;
4946 field_types = thunk_convert(buf_temp + dst_offsets[i],
4947 argptr + src_offsets[i],
4948 field_types, THUNK_HOST);
4950 unlock_user(argptr, arg, 0);
4952 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4954 assert(host_rt_dev_ptr != NULL);
4955 assert(target_rt_dev_ptr != NULL);
4956 if (*host_rt_dev_ptr != 0) {
4957 unlock_user((void *)*host_rt_dev_ptr,
4958 *target_rt_dev_ptr, 0);
4963 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4964 int fd, int cmd, abi_long arg)
4966 int sig = target_to_host_signal(arg);
4967 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4970 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
4971 int fd, int cmd, abi_long arg)
4976 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
4977 if (is_error(ret)) {
4981 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
4982 if (copy_to_user_timeval(arg, &tv)) {
4983 return -TARGET_EFAULT;
4986 if (copy_to_user_timeval64(arg, &tv)) {
4987 return -TARGET_EFAULT;
4994 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
4995 int fd, int cmd, abi_long arg)
5000 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5001 if (is_error(ret)) {
5005 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5006 if (host_to_target_timespec(arg, &ts)) {
5007 return -TARGET_EFAULT;
5010 if (host_to_target_timespec64(arg, &ts)) {
5011 return -TARGET_EFAULT;
5019 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5020 int fd, int cmd, abi_long arg)
5022 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5023 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5027 static IOCTLEntry ioctl_entries[] = {
5028 #define IOCTL(cmd, access, ...) \
5029 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5030 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5031 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5032 #define IOCTL_IGNORE(cmd) \
5033 { TARGET_ ## cmd, 0, #cmd },
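/*
 * For illustration (hypothetical example row; the real rows are generated
 * from ioctls.h): an entry such as
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 * would expand to
 *     { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *       { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 * i.e. target command, host command, printable name, access mode, no
 * custom handler, and the thunk argument type used for conversion.
 */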
5038 /* ??? Implement proper locking for ioctls. */
5039 /* do_ioctl() must return target values and target errnos. */
5040 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5042 const IOCTLEntry *ie;
5043 const argtype *arg_type;
5045 uint8_t buf_temp[MAX_STRUCT_SIZE];
5051 if (ie->target_cmd == 0) {
5052 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5053 return -TARGET_ENOSYS;
5055 if (ie->target_cmd == cmd)
5059 arg_type = ie->arg_type;
5061 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5062 } else if (!ie->host_cmd) {
5063 /* Some architectures define BSD ioctls in their headers
5064 that are not implemented in Linux. */
5065 return -TARGET_ENOSYS;
5068 switch(arg_type[0]) {
5071 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5075 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5079 target_size = thunk_type_size(arg_type, 0);
5080 switch(ie->access) {
5082 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5083 if (!is_error(ret)) {
5084 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5086 return -TARGET_EFAULT;
5087 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5088 unlock_user(argptr, arg, target_size);
5092 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5094 return -TARGET_EFAULT;
5095 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5096 unlock_user(argptr, arg, 0);
5097 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5101 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5103 return -TARGET_EFAULT;
5104 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5105 unlock_user(argptr, arg, 0);
5106 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5107 if (!is_error(ret)) {
5108 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5110 return -TARGET_EFAULT;
5111 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5112 unlock_user(argptr, arg, target_size);
5118 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5119 (long)cmd, arg_type[0]);
5120 ret = -TARGET_ENOSYS;
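/*
 * A minimal sketch of the IOC_RW path above (hypothetical helper; error
 * handling elided, and we assume arg_type has already been advanced past
 * the leading TYPE_PTR, as do_ioctl() does before sizing the struct):
 */
#if 0
static abi_long ioc_rw_example(int fd, const IOCTLEntry *ie, abi_long arg)
{
    uint8_t buf[MAX_STRUCT_SIZE];
    const argtype *t = ie->arg_type + 1;      /* skip TYPE_PTR */
    int size = thunk_type_size(t, 0);

    void *p = lock_user(VERIFY_READ, arg, size, 1);
    thunk_convert(buf, p, t, THUNK_HOST);     /* guest layout -> host */
    unlock_user(p, arg, 0);

    abi_long ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf));

    p = lock_user(VERIFY_WRITE, arg, size, 0);
    thunk_convert(p, buf, t, THUNK_TARGET);   /* host layout -> guest */
    unlock_user(p, arg, size);
    return ret;
}
#endif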
5126 static const bitmask_transtbl iflag_tbl[] = {
5127 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5128 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5129 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5130 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5131 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5132 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5133 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5134 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5135 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5136 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5137 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5138 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5139 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5140 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5144 static const bitmask_transtbl oflag_tbl[] = {
5145 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5146 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5147 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5148 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5149 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5150 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5151 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5152 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5153 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5154 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5155 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5156 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5157 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5158 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5159 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5160 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5161 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5162 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5163 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5164 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5165 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5166 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5167 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5168 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5172 static const bitmask_transtbl cflag_tbl[] = {
5173 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5174 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5175 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5176 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5177 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5178 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5179 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5180 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5181 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5182 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5183 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5184 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5185 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5186 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5187 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5188 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5189 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5190 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5191 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5192 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5193 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5194 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5195 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5196 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5197 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5198 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5199 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5200 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5201 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5202 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5203 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5207 static const bitmask_transtbl lflag_tbl[] = {
5208 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5209 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5210 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5211 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5212 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5213 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5214 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5215 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5216 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5217 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5218 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5219 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5220 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5221 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5222 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5226 static void target_to_host_termios (void *dst, const void *src)
5228 struct host_termios *host = dst;
5229 const struct target_termios *target = src;
5232 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5234 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5236 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5238 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5239 host->c_line = target->c_line;
5241 memset(host->c_cc, 0, sizeof(host->c_cc));
5242 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5243 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5244 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5245 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5246 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5247 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5248 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5249 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5250 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5251 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5252 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5253 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5254 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5255 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5256 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5257 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5258 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5261 static void host_to_target_termios (void *dst, const void *src)
5263 struct target_termios *target = dst;
5264 const struct host_termios *host = src;
5267 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5269 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5271 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5273 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5274 target->c_line = host->c_line;
5276 memset(target->c_cc, 0, sizeof(target->c_cc));
5277 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5278 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5279 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5280 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5281 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5282 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5283 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5284 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5285 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5286 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5287 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5288 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5289 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5290 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5291 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5292 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5293 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5296 static const StructEntry struct_termios_def = {
5297 .convert = { host_to_target_termios, target_to_host_termios },
5298 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5299 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
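/*
 * Because struct_termios_def supplies .convert functions, thunk_convert()
 * dispatches to the two routines above instead of doing generic
 * field-by-field conversion.  The registration happens in syscall_init();
 * the STRUCT_SPECIAL(termios) entry in syscall_types.h expands to:
 */
#if 0
thunk_register_struct_direct(STRUCT_termios, "termios", &struct_termios_def);
#endif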
5302 static bitmask_transtbl mmap_flags_tbl[] = {
5303 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5304 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5305 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5306 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5307 MAP_ANONYMOUS, MAP_ANONYMOUS },
5308 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5309 MAP_GROWSDOWN, MAP_GROWSDOWN },
5310 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5311 MAP_DENYWRITE, MAP_DENYWRITE },
5312 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5313 MAP_EXECUTABLE, MAP_EXECUTABLE },
5314 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5315 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5316 MAP_NORESERVE, MAP_NORESERVE },
5317 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5318     /* MAP_STACK has been ignored by the kernel for quite some time.
5319        Recognize it for the target insofar as we do not want to pass
5320        it through to the host. */
5321 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
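/*
 * Each row is { target_mask, target_bits, host_mask, host_bits }: when
 * (flags & target_mask) == target_bits, the host result gains host_bits
 * (and symmetrically for the reverse direction).  Sketch of the intended
 * use (hypothetical helper; the real call site is the mmap handling):
 */
#if 0
static int mmap_flags_example(abi_ulong target_flags)
{
    /* e.g. TARGET_MAP_PRIVATE | TARGET_MAP_ANONYMOUS becomes
     * MAP_PRIVATE | MAP_ANONYMOUS, while TARGET_MAP_STACK maps to 0 */
    return target_to_host_bitmask(target_flags, mmap_flags_tbl);
}
#endif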
5325 #if defined(TARGET_I386)
5327 /* NOTE: there is really only one LDT, shared by all the threads */
5328 static uint8_t *ldt_table;
5330 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5337 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5338 if (size > bytecount)
5340 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5342 return -TARGET_EFAULT;
5343     /* ??? Should this be byteswapped? */
5344 memcpy(p, ldt_table, size);
5345 unlock_user(p, ptr, size);
5349 /* XXX: add locking support */
5350 static abi_long write_ldt(CPUX86State *env,
5351 abi_ulong ptr, unsigned long bytecount, int oldmode)
5353 struct target_modify_ldt_ldt_s ldt_info;
5354 struct target_modify_ldt_ldt_s *target_ldt_info;
5355 int seg_32bit, contents, read_exec_only, limit_in_pages;
5356 int seg_not_present, useable, lm;
5357 uint32_t *lp, entry_1, entry_2;
5359 if (bytecount != sizeof(ldt_info))
5360 return -TARGET_EINVAL;
5361 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5362 return -TARGET_EFAULT;
5363 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5364 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5365 ldt_info.limit = tswap32(target_ldt_info->limit);
5366 ldt_info.flags = tswap32(target_ldt_info->flags);
5367 unlock_user_struct(target_ldt_info, ptr, 0);
5369 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5370 return -TARGET_EINVAL;
5371 seg_32bit = ldt_info.flags & 1;
5372 contents = (ldt_info.flags >> 1) & 3;
5373 read_exec_only = (ldt_info.flags >> 3) & 1;
5374 limit_in_pages = (ldt_info.flags >> 4) & 1;
5375 seg_not_present = (ldt_info.flags >> 5) & 1;
5376 useable = (ldt_info.flags >> 6) & 1;
5380 lm = (ldt_info.flags >> 7) & 1;
5382 if (contents == 3) {
5384 return -TARGET_EINVAL;
5385 if (seg_not_present == 0)
5386 return -TARGET_EINVAL;
5388 /* allocate the LDT */
5390 env->ldt.base = target_mmap(0,
5391 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5392 PROT_READ|PROT_WRITE,
5393 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5394 if (env->ldt.base == -1)
5395 return -TARGET_ENOMEM;
5396 memset(g2h(env->ldt.base), 0,
5397 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5398 env->ldt.limit = 0xffff;
5399 ldt_table = g2h(env->ldt.base);
5402 /* NOTE: same code as Linux kernel */
5403 /* Allow LDTs to be cleared by the user. */
5404 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5407 read_exec_only == 1 &&
5409 limit_in_pages == 0 &&
5410 seg_not_present == 1 &&
5418 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5419 (ldt_info.limit & 0x0ffff);
5420 entry_2 = (ldt_info.base_addr & 0xff000000) |
5421 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5422 (ldt_info.limit & 0xf0000) |
5423 ((read_exec_only ^ 1) << 9) |
5425 ((seg_not_present ^ 1) << 15) |
5427 (limit_in_pages << 23) |
5431 entry_2 |= (useable << 20);
5433 /* Install the new entry ... */
5435 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5436 lp[0] = tswap32(entry_1);
5437 lp[1] = tswap32(entry_2);
5441 /* specific and weird i386 syscalls */
5442 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5443 unsigned long bytecount)
5449 ret = read_ldt(ptr, bytecount);
5452 ret = write_ldt(env, ptr, bytecount, 1);
5455 ret = write_ldt(env, ptr, bytecount, 0);
5458 ret = -TARGET_ENOSYS;
5464 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5465 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5467 uint64_t *gdt_table = g2h(env->gdt.base);
5468 struct target_modify_ldt_ldt_s ldt_info;
5469 struct target_modify_ldt_ldt_s *target_ldt_info;
5470 int seg_32bit, contents, read_exec_only, limit_in_pages;
5471 int seg_not_present, useable, lm;
5472 uint32_t *lp, entry_1, entry_2;
5475 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5476 if (!target_ldt_info)
5477 return -TARGET_EFAULT;
5478 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5479 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5480 ldt_info.limit = tswap32(target_ldt_info->limit);
5481 ldt_info.flags = tswap32(target_ldt_info->flags);
5482 if (ldt_info.entry_number == -1) {
5483 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5484 if (gdt_table[i] == 0) {
5485 ldt_info.entry_number = i;
5486 target_ldt_info->entry_number = tswap32(i);
5491 unlock_user_struct(target_ldt_info, ptr, 1);
5493 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5494 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5495 return -TARGET_EINVAL;
5496 seg_32bit = ldt_info.flags & 1;
5497 contents = (ldt_info.flags >> 1) & 3;
5498 read_exec_only = (ldt_info.flags >> 3) & 1;
5499 limit_in_pages = (ldt_info.flags >> 4) & 1;
5500 seg_not_present = (ldt_info.flags >> 5) & 1;
5501 useable = (ldt_info.flags >> 6) & 1;
5505 lm = (ldt_info.flags >> 7) & 1;
5508 if (contents == 3) {
5509 if (seg_not_present == 0)
5510 return -TARGET_EINVAL;
5513 /* NOTE: same code as Linux kernel */
5514 /* Allow LDTs to be cleared by the user. */
5515 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5516 if ((contents == 0 &&
5517 read_exec_only == 1 &&
5519 limit_in_pages == 0 &&
5520 seg_not_present == 1 &&
5528 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5529 (ldt_info.limit & 0x0ffff);
5530 entry_2 = (ldt_info.base_addr & 0xff000000) |
5531 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5532 (ldt_info.limit & 0xf0000) |
5533 ((read_exec_only ^ 1) << 9) |
5535 ((seg_not_present ^ 1) << 15) |
5537 (limit_in_pages << 23) |
5542 /* Install the new entry ... */
5544 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5545 lp[0] = tswap32(entry_1);
5546 lp[1] = tswap32(entry_2);
5550 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5552 struct target_modify_ldt_ldt_s *target_ldt_info;
5553 uint64_t *gdt_table = g2h(env->gdt.base);
5554 uint32_t base_addr, limit, flags;
5555 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5556 int seg_not_present, useable, lm;
5557 uint32_t *lp, entry_1, entry_2;
5559 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5560 if (!target_ldt_info)
5561 return -TARGET_EFAULT;
5562 idx = tswap32(target_ldt_info->entry_number);
5563 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5564 idx > TARGET_GDT_ENTRY_TLS_MAX) {
5565 unlock_user_struct(target_ldt_info, ptr, 1);
5566 return -TARGET_EINVAL;
5568 lp = (uint32_t *)(gdt_table + idx);
5569 entry_1 = tswap32(lp[0]);
5570 entry_2 = tswap32(lp[1]);
5572 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5573 contents = (entry_2 >> 10) & 3;
5574 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5575 seg_32bit = (entry_2 >> 22) & 1;
5576 limit_in_pages = (entry_2 >> 23) & 1;
5577 useable = (entry_2 >> 20) & 1;
5581 lm = (entry_2 >> 21) & 1;
5583 flags = (seg_32bit << 0) | (contents << 1) |
5584 (read_exec_only << 3) | (limit_in_pages << 4) |
5585 (seg_not_present << 5) | (useable << 6) | (lm << 7);
5586 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
5587 base_addr = (entry_1 >> 16) |
5588 (entry_2 & 0xff000000) |
5589 ((entry_2 & 0xff) << 16);
5590 target_ldt_info->base_addr = tswapal(base_addr);
5591 target_ldt_info->limit = tswap32(limit);
5592 target_ldt_info->flags = tswap32(flags);
5593 unlock_user_struct(target_ldt_info, ptr, 1);
5596 #endif /* TARGET_I386 && TARGET_ABI32 */
5598 #ifndef TARGET_ABI32
5599 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5606 case TARGET_ARCH_SET_GS:
5607 case TARGET_ARCH_SET_FS:
5608 if (code == TARGET_ARCH_SET_GS)
5612 cpu_x86_load_seg(env, idx, 0);
5613 env->segs[idx].base = addr;
5615 case TARGET_ARCH_GET_GS:
5616 case TARGET_ARCH_GET_FS:
5617 if (code == TARGET_ARCH_GET_GS)
5621 val = env->segs[idx].base;
5622 if (put_user(val, addr, abi_ulong))
5623 ret = -TARGET_EFAULT;
5626 ret = -TARGET_EINVAL;
5633 #endif /* defined(TARGET_I386) */
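/*
 * For reference, the ldt_info.flags layout decoded bit by bit in
 * write_ldt()/do_set_thread_area()/do_get_thread_area() above, shown as a
 * documentation-only bitfield (a sketch mirroring Linux's struct
 * user_desc; not used by the code):
 */
#if 0
struct ldt_flags_example {
    unsigned seg_32bit       : 1;   /* bit 0 */
    unsigned contents        : 2;   /* bits 1-2 */
    unsigned read_exec_only  : 1;   /* bit 3 */
    unsigned limit_in_pages  : 1;   /* bit 4 */
    unsigned seg_not_present : 1;   /* bit 5 */
    unsigned useable         : 1;   /* bit 6 */
    unsigned lm              : 1;   /* bit 7: 64-bit code segment */
};
#endif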
5635 #define NEW_STACK_SIZE 0x40000
5638 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5641 pthread_mutex_t mutex;
5642 pthread_cond_t cond;
5645 abi_ulong child_tidptr;
5646 abi_ulong parent_tidptr;
5650 static void *clone_func(void *arg)
5652 new_thread_info *info = arg;
5657 rcu_register_thread();
5658 tcg_register_thread();
5662 ts = (TaskState *)cpu->opaque;
5663 info->tid = sys_gettid();
5665 if (info->child_tidptr)
5666 put_user_u32(info->tid, info->child_tidptr);
5667 if (info->parent_tidptr)
5668 put_user_u32(info->tid, info->parent_tidptr);
5669 qemu_guest_random_seed_thread_part2(cpu->random_seed);
5670 /* Enable signals. */
5671 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5672 /* Signal to the parent that we're ready. */
5673 pthread_mutex_lock(&info->mutex);
5674 pthread_cond_broadcast(&info->cond);
5675 pthread_mutex_unlock(&info->mutex);
5676 /* Wait until the parent has finished initializing the tls state. */
5677 pthread_mutex_lock(&clone_lock);
5678 pthread_mutex_unlock(&clone_lock);
5684 /* do_fork() must return host values and target errnos (unlike most
5685    do_*() functions). */
5686 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5687 abi_ulong parent_tidptr, target_ulong newtls,
5688 abi_ulong child_tidptr)
5690 CPUState *cpu = env_cpu(env);
5694 CPUArchState *new_env;
5697 flags &= ~CLONE_IGNORED_FLAGS;
5699 /* Emulate vfork() with fork() */
5700 if (flags & CLONE_VFORK)
5701 flags &= ~(CLONE_VFORK | CLONE_VM);
5703 if (flags & CLONE_VM) {
5704 TaskState *parent_ts = (TaskState *)cpu->opaque;
5705 new_thread_info info;
5706 pthread_attr_t attr;
5708 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5709 (flags & CLONE_INVALID_THREAD_FLAGS)) {
5710 return -TARGET_EINVAL;
5713 ts = g_new0(TaskState, 1);
5714 init_task_state(ts);
5716 /* Grab a mutex so that thread setup appears atomic. */
5717 pthread_mutex_lock(&clone_lock);
5719 /* we create a new CPU instance. */
5720 new_env = cpu_copy(env);
5721 /* Init regs that differ from the parent. */
5722 cpu_clone_regs(new_env, newsp);
5723 new_cpu = env_cpu(new_env);
5724 new_cpu->opaque = ts;
5725 ts->bprm = parent_ts->bprm;
5726 ts->info = parent_ts->info;
5727 ts->signal_mask = parent_ts->signal_mask;
5729 if (flags & CLONE_CHILD_CLEARTID) {
5730 ts->child_tidptr = child_tidptr;
5733 if (flags & CLONE_SETTLS) {
5734 cpu_set_tls (new_env, newtls);
5737 memset(&info, 0, sizeof(info));
5738 pthread_mutex_init(&info.mutex, NULL);
5739 pthread_mutex_lock(&info.mutex);
5740 pthread_cond_init(&info.cond, NULL);
5742 if (flags & CLONE_CHILD_SETTID) {
5743 info.child_tidptr = child_tidptr;
5745 if (flags & CLONE_PARENT_SETTID) {
5746 info.parent_tidptr = parent_tidptr;
5749 ret = pthread_attr_init(&attr);
5750 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5751 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5752 /* It is not safe to deliver signals until the child has finished
5753 initializing, so temporarily block all signals. */
5754 sigfillset(&sigmask);
5755 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5756 cpu->random_seed = qemu_guest_random_seed_thread_part1();
5758 /* If this is our first additional thread, we need to ensure we
5759 * generate code for parallel execution and flush old translations.
5761 if (!parallel_cpus) {
5762 parallel_cpus = true;
5766 ret = pthread_create(&info.thread, &attr, clone_func, &info);
5767 /* TODO: Free new CPU state if thread creation failed. */
5769 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5770 pthread_attr_destroy(&attr);
5772 /* Wait for the child to initialize. */
5773 pthread_cond_wait(&info.cond, &info.mutex);
5778 pthread_mutex_unlock(&info.mutex);
5779 pthread_cond_destroy(&info.cond);
5780 pthread_mutex_destroy(&info.mutex);
5781 pthread_mutex_unlock(&clone_lock);
5783     /* if there is no CLONE_VM, we consider it a fork */
5784 if (flags & CLONE_INVALID_FORK_FLAGS) {
5785 return -TARGET_EINVAL;
5788 /* We can't support custom termination signals */
5789 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5790 return -TARGET_EINVAL;
5793 if (block_signals()) {
5794 return -TARGET_ERESTARTSYS;
5800 /* Child Process. */
5801 cpu_clone_regs(env, newsp);
5803         /* There is a race condition here.  The parent process could
5804            theoretically read the TID in the child process before the child
5805            tid is set.  Avoiding this would require using either ptrace
5806            (not implemented) or having *_tidptr point at a shared memory
5807            mapping.  We can't repeat the spinlock hack used above because
5808            the child process gets its own copy of the lock. */
5809 if (flags & CLONE_CHILD_SETTID)
5810 put_user_u32(sys_gettid(), child_tidptr);
5811 if (flags & CLONE_PARENT_SETTID)
5812 put_user_u32(sys_gettid(), parent_tidptr);
5813 ts = (TaskState *)cpu->opaque;
5814 if (flags & CLONE_SETTLS)
5815 cpu_set_tls (env, newtls);
5816 if (flags & CLONE_CHILD_CLEARTID)
5817 ts->child_tidptr = child_tidptr;
5825 /* warning: doesn't handle Linux-specific flags... */
5826 static int target_to_host_fcntl_cmd(int cmd)
5831 case TARGET_F_DUPFD:
5832 case TARGET_F_GETFD:
5833 case TARGET_F_SETFD:
5834 case TARGET_F_GETFL:
5835 case TARGET_F_SETFL:
5838 case TARGET_F_GETLK:
5841 case TARGET_F_SETLK:
5844 case TARGET_F_SETLKW:
5847 case TARGET_F_GETOWN:
5850 case TARGET_F_SETOWN:
5853 case TARGET_F_GETSIG:
5856 case TARGET_F_SETSIG:
5859 #if TARGET_ABI_BITS == 32
5860 case TARGET_F_GETLK64:
5863 case TARGET_F_SETLK64:
5866 case TARGET_F_SETLKW64:
5870 case TARGET_F_SETLEASE:
5873 case TARGET_F_GETLEASE:
5876 #ifdef F_DUPFD_CLOEXEC
5877 case TARGET_F_DUPFD_CLOEXEC:
5878 ret = F_DUPFD_CLOEXEC;
5881 case TARGET_F_NOTIFY:
5885 case TARGET_F_GETOWN_EX:
5890 case TARGET_F_SETOWN_EX:
5895 case TARGET_F_SETPIPE_SZ:
5898 case TARGET_F_GETPIPE_SZ:
5903 ret = -TARGET_EINVAL;
5907 #if defined(__powerpc64__)
5908 /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and 14,
5909  * which are not supported by the kernel. The glibc fcntl wrapper actually
5910  * adjusts them to 5, 6 and 7 before making the syscall(). Since we make the
5911  * syscall directly, adjust to what the kernel supports.
5913 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5914 ret -= F_GETLK64 - 5;
5921 #define FLOCK_TRANSTBL \
5923 TRANSTBL_CONVERT(F_RDLCK); \
5924 TRANSTBL_CONVERT(F_WRLCK); \
5925 TRANSTBL_CONVERT(F_UNLCK); \
5926 TRANSTBL_CONVERT(F_EXLCK); \
5927 TRANSTBL_CONVERT(F_SHLCK); \
5930 static int target_to_host_flock(int type)
5932 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5934 #undef TRANSTBL_CONVERT
5935 return -TARGET_EINVAL;
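/*
 * Illustration of the X-macro above: with that TRANSTBL_CONVERT
 * definition, FLOCK_TRANSTBL expands inside the switch to
 *     case TARGET_F_RDLCK: return F_RDLCK;
 *     case TARGET_F_WRLCK: return F_WRLCK;
 *     ...
 * and the reverse definition below flips each case around.
 */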
5938 static int host_to_target_flock(int type)
5940 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5942 #undef TRANSTBL_CONVERT
5943     /* if we don't know how to convert the value coming
5944      * from the host, we copy it to the target field as-is
5949 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5950 abi_ulong target_flock_addr)
5952 struct target_flock *target_fl;
5955 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5956 return -TARGET_EFAULT;
5959 __get_user(l_type, &target_fl->l_type);
5960 l_type = target_to_host_flock(l_type);
5964 fl->l_type = l_type;
5965 __get_user(fl->l_whence, &target_fl->l_whence);
5966 __get_user(fl->l_start, &target_fl->l_start);
5967 __get_user(fl->l_len, &target_fl->l_len);
5968 __get_user(fl->l_pid, &target_fl->l_pid);
5969 unlock_user_struct(target_fl, target_flock_addr, 0);
5973 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5974 const struct flock64 *fl)
5976 struct target_flock *target_fl;
5979 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5980 return -TARGET_EFAULT;
5983 l_type = host_to_target_flock(fl->l_type);
5984 __put_user(l_type, &target_fl->l_type);
5985 __put_user(fl->l_whence, &target_fl->l_whence);
5986 __put_user(fl->l_start, &target_fl->l_start);
5987 __put_user(fl->l_len, &target_fl->l_len);
5988 __put_user(fl->l_pid, &target_fl->l_pid);
5989 unlock_user_struct(target_fl, target_flock_addr, 1);
5993 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5994 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5996 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5997 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5998 abi_ulong target_flock_addr)
6000 struct target_oabi_flock64 *target_fl;
6003 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6004 return -TARGET_EFAULT;
6007 __get_user(l_type, &target_fl->l_type);
6008 l_type = target_to_host_flock(l_type);
6012 fl->l_type = l_type;
6013 __get_user(fl->l_whence, &target_fl->l_whence);
6014 __get_user(fl->l_start, &target_fl->l_start);
6015 __get_user(fl->l_len, &target_fl->l_len);
6016 __get_user(fl->l_pid, &target_fl->l_pid);
6017 unlock_user_struct(target_fl, target_flock_addr, 0);
6021 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6022 const struct flock64 *fl)
6024 struct target_oabi_flock64 *target_fl;
6027 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6028 return -TARGET_EFAULT;
6031 l_type = host_to_target_flock(fl->l_type);
6032 __put_user(l_type, &target_fl->l_type);
6033 __put_user(fl->l_whence, &target_fl->l_whence);
6034 __put_user(fl->l_start, &target_fl->l_start);
6035 __put_user(fl->l_len, &target_fl->l_len);
6036 __put_user(fl->l_pid, &target_fl->l_pid);
6037 unlock_user_struct(target_fl, target_flock_addr, 1);
6042 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6043 abi_ulong target_flock_addr)
6045 struct target_flock64 *target_fl;
6048 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6049 return -TARGET_EFAULT;
6052 __get_user(l_type, &target_fl->l_type);
6053 l_type = target_to_host_flock(l_type);
6057 fl->l_type = l_type;
6058 __get_user(fl->l_whence, &target_fl->l_whence);
6059 __get_user(fl->l_start, &target_fl->l_start);
6060 __get_user(fl->l_len, &target_fl->l_len);
6061 __get_user(fl->l_pid, &target_fl->l_pid);
6062 unlock_user_struct(target_fl, target_flock_addr, 0);
6066 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6067 const struct flock64 *fl)
6069 struct target_flock64 *target_fl;
6072 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6073 return -TARGET_EFAULT;
6076 l_type = host_to_target_flock(fl->l_type);
6077 __put_user(l_type, &target_fl->l_type);
6078 __put_user(fl->l_whence, &target_fl->l_whence);
6079 __put_user(fl->l_start, &target_fl->l_start);
6080 __put_user(fl->l_len, &target_fl->l_len);
6081 __put_user(fl->l_pid, &target_fl->l_pid);
6082 unlock_user_struct(target_fl, target_flock_addr, 1);
6086 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6088 struct flock64 fl64;
6090 struct f_owner_ex fox;
6091 struct target_f_owner_ex *target_fox;
6094 int host_cmd = target_to_host_fcntl_cmd(cmd);
6096 if (host_cmd == -TARGET_EINVAL)
6100 case TARGET_F_GETLK:
6101 ret = copy_from_user_flock(&fl64, arg);
6105 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6107 ret = copy_to_user_flock(arg, &fl64);
6111 case TARGET_F_SETLK:
6112 case TARGET_F_SETLKW:
6113 ret = copy_from_user_flock(&fl64, arg);
6117 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6120 case TARGET_F_GETLK64:
6121 ret = copy_from_user_flock64(&fl64, arg);
6125 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6127 ret = copy_to_user_flock64(arg, &fl64);
6130 case TARGET_F_SETLK64:
6131 case TARGET_F_SETLKW64:
6132 ret = copy_from_user_flock64(&fl64, arg);
6136 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6139 case TARGET_F_GETFL:
6140 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6142 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6146 case TARGET_F_SETFL:
6147 ret = get_errno(safe_fcntl(fd, host_cmd,
6148 target_to_host_bitmask(arg,
6153 case TARGET_F_GETOWN_EX:
6154 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6156 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6157 return -TARGET_EFAULT;
6158 target_fox->type = tswap32(fox.type);
6159 target_fox->pid = tswap32(fox.pid);
6160 unlock_user_struct(target_fox, arg, 1);
6166 case TARGET_F_SETOWN_EX:
6167 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6168 return -TARGET_EFAULT;
6169 fox.type = tswap32(target_fox->type);
6170 fox.pid = tswap32(target_fox->pid);
6171 unlock_user_struct(target_fox, arg, 0);
6172 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6176 case TARGET_F_SETOWN:
6177 case TARGET_F_GETOWN:
6178 case TARGET_F_SETSIG:
6179 case TARGET_F_GETSIG:
6180 case TARGET_F_SETLEASE:
6181 case TARGET_F_GETLEASE:
6182 case TARGET_F_SETPIPE_SZ:
6183 case TARGET_F_GETPIPE_SZ:
6184 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6188 ret = get_errno(safe_fcntl(fd, cmd, arg));
6196 static inline int high2lowuid(int uid)
6204 static inline int high2lowgid(int gid)
6212 static inline int low2highuid(int uid)
6214 if ((int16_t)uid == -1)
6220 static inline int low2highgid(int gid)
6222 if ((int16_t)gid == -1)
6227 static inline int tswapid(int id)
6232 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6234 #else /* !USE_UID16 */
6235 static inline int high2lowuid(int uid)
6239 static inline int high2lowgid(int gid)
6243 static inline int low2highuid(int uid)
6247 static inline int low2highgid(int gid)
6251 static inline int tswapid(int id)
6256 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6258 #endif /* USE_UID16 */
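/*
 * Worked example for the USE_UID16 conversions (hypothetical helper,
 * assuming the usual clamping behaviour of the functions above): the
 * 16-bit value -1 must survive the round trip as -1 rather than 65535,
 * and host IDs that do not fit in 16 bits must still come back as
 * something representable.
 */
#if 0
static void uid16_example(void)
{
    g_assert(low2highuid(0xffff) == -1);      /* (int16_t)-1 passes through */
    g_assert(high2lowuid(70000) <= 0xffff);   /* clamped to fit 16 bits */
}
#endif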
6260 /* We must do direct syscalls for setting UID/GID, because we want to
6261 * implement the Linux system call semantics of "change only for this thread",
6262 * not the libc/POSIX semantics of "change for all threads in process".
6263 * (See http://ewontfix.com/17/ for more details.)
6264 * We use the 32-bit version of the syscalls if present; if it is not
6265 * then either the host architecture supports 32-bit UIDs natively with
6266 * the standard syscall, or the 16-bit UID is the best we can do.
6268 #ifdef __NR_setuid32
6269 #define __NR_sys_setuid __NR_setuid32
6271 #define __NR_sys_setuid __NR_setuid
6273 #ifdef __NR_setgid32
6274 #define __NR_sys_setgid __NR_setgid32
6276 #define __NR_sys_setgid __NR_setgid
6278 #ifdef __NR_setresuid32
6279 #define __NR_sys_setresuid __NR_setresuid32
6281 #define __NR_sys_setresuid __NR_setresuid
6283 #ifdef __NR_setresgid32
6284 #define __NR_sys_setresgid __NR_setresgid32
6286 #define __NR_sys_setresgid __NR_setresgid
6289 _syscall1(int, sys_setuid, uid_t, uid)
6290 _syscall1(int, sys_setgid, gid_t, gid)
6291 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6292 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
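/*
 * These wrappers are used by the TARGET_NR_setuid & friends cases in
 * do_syscall(); a sketch of such a call site (hypothetical helper):
 */
#if 0
static abi_long setuid_example(abi_ulong target_uid)
{
    /* affects only the calling thread, per kernel (not POSIX) semantics */
    return get_errno(sys_setuid(low2highuid(target_uid)));
}
#endif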
6294 void syscall_init(void)
6297 const argtype *arg_type;
6301 thunk_init(STRUCT_MAX);
6303 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6304 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6305 #include "syscall_types.h"
6307 #undef STRUCT_SPECIAL
6309     /* Build the target_to_host_errno_table[] from
6310      * host_to_target_errno_table[]. */
6311 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6312 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6315     /* We patch the ioctl size if necessary. We rely on the fact that
6316        no ioctl has all bits set in the size field */
6318 while (ie->target_cmd != 0) {
6319 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6320 TARGET_IOC_SIZEMASK) {
6321 arg_type = ie->arg_type;
6322 if (arg_type[0] != TYPE_PTR) {
6323 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6328 size = thunk_type_size(arg_type, 0);
6329 ie->target_cmd = (ie->target_cmd &
6330 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6331 (size << TARGET_IOC_SIZESHIFT);
6334 /* automatic consistency check if same arch */
6335 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6336 (defined(__x86_64__) && defined(TARGET_X86_64))
6337 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6338 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6339 ie->name, ie->target_cmd, ie->host_cmd);
6346 #if TARGET_ABI_BITS == 32
6347 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6349 #ifdef TARGET_WORDS_BIGENDIAN
6350 return ((uint64_t)word0 << 32) | word1;
6352 return ((uint64_t)word1 << 32) | word0;
6355 #else /* TARGET_ABI_BITS == 32 */
6356 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6360 #endif /* TARGET_ABI_BITS != 32 */
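/*
 * Worked example: a 32-bit guest passing the 64-bit offset
 * 0x0000000100000002 in two registers.  A big-endian guest supplies
 * word0 = 0x00000001 (high half) and word1 = 0x00000002 (low half);
 * a little-endian guest supplies them the other way around.  Either way,
 * target_offset64(word0, word1) yields 0x0000000100000002ULL.
 */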
6362 #ifdef TARGET_NR_truncate64
6363 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6368 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6372 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6376 #ifdef TARGET_NR_ftruncate64
6377 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6382 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6386 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6390 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6391 abi_ulong target_addr)
6393 struct target_itimerspec *target_itspec;
6395 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6396 return -TARGET_EFAULT;
6399 host_itspec->it_interval.tv_sec =
6400 tswapal(target_itspec->it_interval.tv_sec);
6401 host_itspec->it_interval.tv_nsec =
6402 tswapal(target_itspec->it_interval.tv_nsec);
6403 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6404 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6406 unlock_user_struct(target_itspec, target_addr, 1);
6410 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6411 struct itimerspec *host_its)
6413 struct target_itimerspec *target_itspec;
6415 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6416 return -TARGET_EFAULT;
6419 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6420 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6422 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6423 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6425 unlock_user_struct(target_itspec, target_addr, 0);
6429 static inline abi_long target_to_host_timex(struct timex *host_tx,
6430 abi_long target_addr)
6432 struct target_timex *target_tx;
6434 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6435 return -TARGET_EFAULT;
6438 __get_user(host_tx->modes, &target_tx->modes);
6439 __get_user(host_tx->offset, &target_tx->offset);
6440 __get_user(host_tx->freq, &target_tx->freq);
6441 __get_user(host_tx->maxerror, &target_tx->maxerror);
6442 __get_user(host_tx->esterror, &target_tx->esterror);
6443 __get_user(host_tx->status, &target_tx->status);
6444 __get_user(host_tx->constant, &target_tx->constant);
6445 __get_user(host_tx->precision, &target_tx->precision);
6446 __get_user(host_tx->tolerance, &target_tx->tolerance);
6447 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6448 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6449 __get_user(host_tx->tick, &target_tx->tick);
6450 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6451 __get_user(host_tx->jitter, &target_tx->jitter);
6452 __get_user(host_tx->shift, &target_tx->shift);
6453 __get_user(host_tx->stabil, &target_tx->stabil);
6454 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6455 __get_user(host_tx->calcnt, &target_tx->calcnt);
6456 __get_user(host_tx->errcnt, &target_tx->errcnt);
6457 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6458 __get_user(host_tx->tai, &target_tx->tai);
6460 unlock_user_struct(target_tx, target_addr, 0);
6464 static inline abi_long host_to_target_timex(abi_long target_addr,
6465 struct timex *host_tx)
6467 struct target_timex *target_tx;
6469 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6470 return -TARGET_EFAULT;
6473 __put_user(host_tx->modes, &target_tx->modes);
6474 __put_user(host_tx->offset, &target_tx->offset);
6475 __put_user(host_tx->freq, &target_tx->freq);
6476 __put_user(host_tx->maxerror, &target_tx->maxerror);
6477 __put_user(host_tx->esterror, &target_tx->esterror);
6478 __put_user(host_tx->status, &target_tx->status);
6479 __put_user(host_tx->constant, &target_tx->constant);
6480 __put_user(host_tx->precision, &target_tx->precision);
6481 __put_user(host_tx->tolerance, &target_tx->tolerance);
6482 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6483 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6484 __put_user(host_tx->tick, &target_tx->tick);
6485 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6486 __put_user(host_tx->jitter, &target_tx->jitter);
6487 __put_user(host_tx->shift, &target_tx->shift);
6488 __put_user(host_tx->stabil, &target_tx->stabil);
6489 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6490 __put_user(host_tx->calcnt, &target_tx->calcnt);
6491 __put_user(host_tx->errcnt, &target_tx->errcnt);
6492 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6493 __put_user(host_tx->tai, &target_tx->tai);
6495 unlock_user_struct(target_tx, target_addr, 1);
6500 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6501 abi_ulong target_addr)
6503 struct target_sigevent *target_sevp;
6505 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6506 return -TARGET_EFAULT;
6509 /* This union is awkward on 64 bit systems because it has a 32 bit
6510 * integer and a pointer in it; we follow the conversion approach
6511 * used for handling sigval types in signal.c so the guest should get
6512 * the correct value back even if we did a 64 bit byteswap and it's
6513 * using the 32 bit integer.
6515 host_sevp->sigev_value.sival_ptr =
6516 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6517 host_sevp->sigev_signo =
6518 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6519 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6520 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6522 unlock_user_struct(target_sevp, target_addr, 1);
6526 #if defined(TARGET_NR_mlockall)
6527 static inline int target_to_host_mlockall_arg(int arg)
6531 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6532 result |= MCL_CURRENT;
6534 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6535 result |= MCL_FUTURE;
6541 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
6542 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
6543 defined(TARGET_NR_newfstatat))
6544 static inline abi_long host_to_target_stat64(void *cpu_env,
6545 abi_ulong target_addr,
6546 struct stat *host_st)
6548 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6549 if (((CPUARMState *)cpu_env)->eabi) {
6550 struct target_eabi_stat64 *target_st;
6552 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6553 return -TARGET_EFAULT;
6554 memset(target_st, 0, sizeof(struct target_eabi_stat64));
6555 __put_user(host_st->st_dev, &target_st->st_dev);
6556 __put_user(host_st->st_ino, &target_st->st_ino);
6557 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6558 __put_user(host_st->st_ino, &target_st->__st_ino);
6560 __put_user(host_st->st_mode, &target_st->st_mode);
6561 __put_user(host_st->st_nlink, &target_st->st_nlink);
6562 __put_user(host_st->st_uid, &target_st->st_uid);
6563 __put_user(host_st->st_gid, &target_st->st_gid);
6564 __put_user(host_st->st_rdev, &target_st->st_rdev);
6565 __put_user(host_st->st_size, &target_st->st_size);
6566 __put_user(host_st->st_blksize, &target_st->st_blksize);
6567 __put_user(host_st->st_blocks, &target_st->st_blocks);
6568 __put_user(host_st->st_atime, &target_st->target_st_atime);
6569 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6570 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6571 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6572 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6573 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6574 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6576 unlock_user_struct(target_st, target_addr, 1);
6580 #if defined(TARGET_HAS_STRUCT_STAT64)
6581 struct target_stat64 *target_st;
6583 struct target_stat *target_st;
6586 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6587 return -TARGET_EFAULT;
6588 memset(target_st, 0, sizeof(*target_st));
6589 __put_user(host_st->st_dev, &target_st->st_dev);
6590 __put_user(host_st->st_ino, &target_st->st_ino);
6591 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6592 __put_user(host_st->st_ino, &target_st->__st_ino);
6594 __put_user(host_st->st_mode, &target_st->st_mode);
6595 __put_user(host_st->st_nlink, &target_st->st_nlink);
6596 __put_user(host_st->st_uid, &target_st->st_uid);
6597 __put_user(host_st->st_gid, &target_st->st_gid);
6598 __put_user(host_st->st_rdev, &target_st->st_rdev);
6599 /* XXX: better use of kernel struct */
6600 __put_user(host_st->st_size, &target_st->st_size);
6601 __put_user(host_st->st_blksize, &target_st->st_blksize);
6602 __put_user(host_st->st_blocks, &target_st->st_blocks);
6603 __put_user(host_st->st_atime, &target_st->target_st_atime);
6604 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6605 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6606 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6607 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6608 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6609 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6611 unlock_user_struct(target_st, target_addr, 1);
6618 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6619 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6620 abi_ulong target_addr)
6622 struct target_statx *target_stx;
6624 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
6625 return -TARGET_EFAULT;
6627 memset(target_stx, 0, sizeof(*target_stx));
6629 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6630 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6631 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6632 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6633 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6634 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6635 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6636 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6637 __put_user(host_stx->stx_size, &target_stx->stx_size);
6638 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6639 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6640 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6641 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6642     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
6643     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
6644     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
6645     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
6646     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
6647     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
6648 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
6649 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6650 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6651 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6653 unlock_user_struct(target_stx, target_addr, 1);
6660 /* ??? Using host futex calls even when target atomic operations
6661    are not really atomic probably breaks things. However, implementing
6662    futexes locally would make futexes shared between multiple processes
6663    tricky; in any case local futexes would probably be useless, since
6664    guest atomic operations would not work either. */
6665 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6666 target_ulong uaddr2, int val3)
6668 struct timespec ts, *pts;
6671 /* ??? We assume FUTEX_* constants are the same on both host
6672    and target. */
6673 #ifdef FUTEX_CMD_MASK
6674 base_op = op & FUTEX_CMD_MASK;
6680 case FUTEX_WAIT_BITSET:
6683 target_to_host_timespec(pts, timeout);
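/* Note: the kernel compares the 32-bit value at uaddr against val,
 * and uaddr points into guest memory (guest byte order), so val is
 * swapped to guest order before being passed to the host futex. */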
6687 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6690 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6692 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6694 case FUTEX_CMP_REQUEUE:
6696 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6697 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6698 But the prototype takes a `struct timespec *'; insert casts
6699 to satisfy the compiler. We do not need to tswap TIMEOUT
6700 since it's not compared to guest memory. */
6701 pts = (struct timespec *)(uintptr_t) timeout;
6702 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6704 (base_op == FUTEX_CMP_REQUEUE
6708 return -TARGET_ENOSYS;
6711 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6712 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6713 abi_long handle, abi_long mount_id,
6716 struct file_handle *target_fh;
6717 struct file_handle *fh;
6721 unsigned int size, total_size;
6723 if (get_user_s32(size, handle)) {
6724 return -TARGET_EFAULT;
6727 name = lock_user_string(pathname);
6729 return -TARGET_EFAULT;
6732 total_size = sizeof(struct file_handle) + size;
6733 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6735 unlock_user(name, pathname, 0);
6736 return -TARGET_EFAULT;
6739 fh = g_malloc0(total_size);
6740 fh->handle_bytes = size;
6742 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6743 unlock_user(name, pathname, 0);
6745 /* man name_to_handle_at(2):
6746 * Other than the use of the handle_bytes field, the caller should treat
6747 * the file_handle structure as an opaque data type
6750 memcpy(target_fh, fh, total_size);
6751 target_fh->handle_bytes = tswap32(fh->handle_bytes);
6752 target_fh->handle_type = tswap32(fh->handle_type);
6754 unlock_user(target_fh, handle, total_size);
6756 if (put_user_s32(mid, mount_id)) {
6757 return -TARGET_EFAULT;
6765 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6766 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6769 struct file_handle *target_fh;
6770 struct file_handle *fh;
6771 unsigned int size, total_size;
6774 if (get_user_s32(size, handle)) {
6775 return -TARGET_EFAULT;
6778 total_size = sizeof(struct file_handle) + size;
6779 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6781 return -TARGET_EFAULT;
6784 fh = g_memdup(target_fh, total_size);
6785 fh->handle_bytes = size;
6786 fh->handle_type = tswap32(target_fh->handle_type);
6788 ret = get_errno(open_by_handle_at(mount_fd, fh,
6789 target_to_host_bitmask(flags, fcntl_flags_tbl)));
6793 unlock_user(target_fh, handle, total_size);
6799 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6801 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6804 target_sigset_t *target_mask;
6808 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6809 return -TARGET_EINVAL;
6811 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6812 return -TARGET_EFAULT;
6815 target_to_host_sigset(&host_mask, target_mask);
6817 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6819 ret = get_errno(signalfd(fd, &host_mask, host_flags));
6821 fd_trans_register(ret, &target_signalfd_trans);
6824 unlock_user_struct(target_mask, mask, 0);
6830 /* Map host to target signal numbers for the wait family of syscalls.
6831 Assume all other status bits are the same. */
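/* (E.g. for a child terminated by a signal, only the signal number
 * in the low bits of the status word needs translating; the other
 * status bits are assumed to be laid out identically.) */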
6832 int host_to_target_waitstatus(int status)
6834 if (WIFSIGNALED(status)) {
6835 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6837 if (WIFSTOPPED(status)) {
6838 return (host_to_target_signal(WSTOPSIG(status)) << 8)
6844 static int open_self_cmdline(void *cpu_env, int fd)
6846 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6847 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6850 for (i = 0; i < bprm->argc; i++) {
6851 size_t len = strlen(bprm->argv[i]) + 1;
6853 if (write(fd, bprm->argv[i], len) != len) {
6861 static int open_self_maps(void *cpu_env, int fd)
6863 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6864 TaskState *ts = cpu->opaque;
6870 fp = fopen("/proc/self/maps", "r");
6875 while ((read = getline(&line, &len, fp)) != -1) {
6876 int fields, dev_maj, dev_min, inode;
6877 uint64_t min, max, offset;
6878 char flag_r, flag_w, flag_x, flag_p;
6879 char path[512] = "";
6880 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6881 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6882 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6884 if ((fields < 10) || (fields > 11)) {
6887 if (h2g_valid(min)) {
6888 int flags = page_get_flags(h2g(min));
6889 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6890 if (page_check_range(h2g(min), max - min, flags) == -1) {
6893 if (h2g(min) == ts->info->stack_limit) {
6894 pstrcpy(path, sizeof(path), " [stack]");
6896 dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6897 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6898 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6899 flag_x, flag_p, offset, dev_maj, dev_min, inode,
6900 path[0] ? " " : "", path);
6910 static int open_self_stat(void *cpu_env, int fd)
6912 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6913 TaskState *ts = cpu->opaque;
6914 abi_ulong start_stack = ts->info->start_stack;
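/* Synthesize the 44 space-separated fields of /proc/self/stat:
 * the pid, the command name and the stack start are filled in from
 * the task state; every other field is faked as 0. */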
6917 for (i = 0; i < 44; i++) {
6925 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6926 } else if (i == 1) {
6928 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6929 } else if (i == 27) {
6932 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6934 /* for the rest, there is MasterCard */
6935 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6939 if (write(fd, buf, len) != len) {
6947 static int open_self_auxv(void *cpu_env, int fd)
6949 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6950 TaskState *ts = cpu->opaque;
6951 abi_ulong auxv = ts->info->saved_auxv;
6952 abi_ulong len = ts->info->auxv_len;
6956  * The auxiliary vector is stored on the target process stack;
6957  * read the whole vector in and copy it out to the file.
6959 ptr = lock_user(VERIFY_READ, auxv, len, 0);
6963 r = write(fd, ptr, len);
6970 lseek(fd, 0, SEEK_SET);
6971 unlock_user(ptr, auxv, len);
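/* Return nonzero if FILENAME names the given /proc entry for this
 * process, i.e. "/proc/self/<entry>" or "/proc/<pid>/<entry>" where
 * <pid> is our own pid. */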
6977 static int is_proc_myself(const char *filename, const char *entry)
6979 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6980 filename += strlen("/proc/");
6981 if (!strncmp(filename, "self/", strlen("self/"))) {
6982 filename += strlen("self/");
6983 } else if (*filename >= '1' && *filename <= '9') {
6985 snprintf(myself, sizeof(myself), "%d/", getpid());
6986 if (!strncmp(filename, myself, strlen(myself))) {
6987 filename += strlen(myself);
6994 if (!strcmp(filename, entry)) {
7001 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7002 defined(TARGET_SPARC) || defined(TARGET_M68K)
7003 static int is_proc(const char *filename, const char *entry)
7005 return strcmp(filename, entry) == 0;
7009 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
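/* When host and target endianness differ, /proc/net/route cannot be
 * passed through unchanged: the address columns are raw 32-bit values
 * in host byte order. Re-emit each entry with those fields
 * byte-swapped for the guest; the header line passes through as-is. */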
7010 static int open_net_route(void *cpu_env, int fd)
7017 fp = fopen("/proc/net/route", "r");
7024 read = getline(&line, &len, fp);
7025 dprintf(fd, "%s", line);
7029 while ((read = getline(&line, &len, fp)) != -1) {
7031 uint32_t dest, gw, mask;
7032 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7035 fields = sscanf(line,
7036 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7037 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7038 &mask, &mtu, &window, &irtt);
7042 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7043 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7044 metric, tswap32(mask), mtu, window, irtt);
7054 #if defined(TARGET_SPARC)
7055 static int open_cpuinfo(void *cpu_env, int fd)
7057 dprintf(fd, "type\t\t: sun4u\n");
7062 #if defined(TARGET_M68K)
7063 static int open_hardware(void *cpu_env, int fd)
7065 dprintf(fd, "Model:\t\tqemu-m68k\n");
7070 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7073 const char *filename;
7074 int (*fill)(void *cpu_env, int fd);
7075 int (*cmp)(const char *s1, const char *s2);
7077 const struct fake_open *fake_open;
7078 static const struct fake_open fakes[] = {
7079 { "maps", open_self_maps, is_proc_myself },
7080 { "stat", open_self_stat, is_proc_myself },
7081 { "auxv", open_self_auxv, is_proc_myself },
7082 { "cmdline", open_self_cmdline, is_proc_myself },
7083 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7084 { "/proc/net/route", open_net_route, is_proc },
7086 #if defined(TARGET_SPARC)
7087 { "/proc/cpuinfo", open_cpuinfo, is_proc },
7089 #if defined(TARGET_M68K)
7090 { "/proc/hardware", open_hardware, is_proc },
7092 { NULL, NULL, NULL }
7095 if (is_proc_myself(pathname, "exe")) {
7096 int execfd = qemu_getauxval(AT_EXECFD);
7097 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
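/* Otherwise, if the path names one of the /proc entries we emulate,
 * synthesize its contents into a temporary file and return a
 * descriptor for that file instead of opening the real one. */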
7100 for (fake_open = fakes; fake_open->filename; fake_open++) {
7101 if (fake_open->cmp(pathname, fake_open->filename)) {
7106 if (fake_open->filename) {
7108 char filename[PATH_MAX];
7111 /* create temporary file to hold the synthesized contents */
7112 tmpdir = getenv("TMPDIR");
7115 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7116 fd = mkstemp(filename);
7122 if ((r = fake_open->fill(cpu_env, fd))) {
7128 lseek(fd, 0, SEEK_SET);
7133 return safe_openat(dirfd, path(pathname), flags, mode);
7136 #define TIMER_MAGIC 0x0caf0000
7137 #define TIMER_MAGIC_MASK 0xffff0000
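/* Guest-visible timer IDs consist of a 16-bit index into
 * g_posix_timers[] tagged with TIMER_MAGIC in the upper bits, so
 * e.g. index 3 would appear to the guest as 0x0caf0003. */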
7139 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7140 static target_timer_t get_timer_id(abi_long arg)
7142 target_timer_t timerid = arg;
7144 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7145 return -TARGET_EINVAL;
7150 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7151 return -TARGET_EINVAL;
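/* Expand a guest CPU affinity mask, an array of abi_ulong words in
 * guest byte order, into the host's unsigned long bitmap: e.g. for a
 * 32-bit guest on a 64-bit host, two guest words fill each host word.
 * The host buffer must be at least as large as the guest's
 * (asserted below). */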
7157 static int target_to_host_cpu_mask(unsigned long *host_mask,
7159 abi_ulong target_addr,
7162 unsigned target_bits = sizeof(abi_ulong) * 8;
7163 unsigned host_bits = sizeof(*host_mask) * 8;
7164 abi_ulong *target_mask;
7167 assert(host_size >= target_size);
7169 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7171 return -TARGET_EFAULT;
7173 memset(host_mask, 0, host_size);
7175 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7176 unsigned bit = i * target_bits;
7179 __get_user(val, &target_mask[i]);
7180 for (j = 0; j < target_bits; j++, bit++) {
7181 if (val & (1UL << j)) {
7182 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7187 unlock_user(target_mask, target_addr, 0);
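/* Inverse of target_to_host_cpu_mask(): compress the host bitmap
 * back into guest-sized words in guest byte order. */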
7191 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7193 abi_ulong target_addr,
7196 unsigned target_bits = sizeof(abi_ulong) * 8;
7197 unsigned host_bits = sizeof(*host_mask) * 8;
7198 abi_ulong *target_mask;
7201 assert(host_size >= target_size);
7203 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7205 return -TARGET_EFAULT;
7208 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7209 unsigned bit = i * target_bits;
7212 for (j = 0; j < target_bits; j++, bit++) {
7213 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7217 __put_user(val, &target_mask[i]);
7220 unlock_user(target_mask, target_addr, target_size);
7224 /* This is an internal helper for do_syscall so that it is easier
7225  * to have a single return point, allowing actions such as logging
7226  * of syscall results to be performed in one place.
7227 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7229 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7230 abi_long arg2, abi_long arg3, abi_long arg4,
7231 abi_long arg5, abi_long arg6, abi_long arg7,
7234 CPUState *cpu = env_cpu(cpu_env);
7236 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7237 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7238 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7239 || defined(TARGET_NR_statx)
7242 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7243 || defined(TARGET_NR_fstatfs)
7249 case TARGET_NR_exit:
7250 /* In old applications this may be used to implement _exit(2).
7251    However in threaded applications it is used for thread termination,
7252    and _exit_group is used for application termination.
7253    Do thread termination if we have more than one thread. */
7255 if (block_signals()) {
7256 return -TARGET_ERESTARTSYS;
7261 if (CPU_NEXT(first_cpu)) {
7264 /* Remove the CPU from the list. */
7265 QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7270 if (ts->child_tidptr) {
7271 put_user_u32(0, ts->child_tidptr);
7272 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7276 object_unref(OBJECT(cpu));
7278 rcu_unregister_thread();
7283 preexit_cleanup(cpu_env, arg1);
7285 return 0; /* avoid warning */
7286 case TARGET_NR_read:
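/* Pass a zero-length read with a NULL buffer straight through:
 * lock_user() would fail on it, but the host read() still needs
 * to run so that errors such as EBADF are reported. */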
7287 if (arg2 == 0 && arg3 == 0) {
7288 return get_errno(safe_read(arg1, 0, 0));
7290 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7291 return -TARGET_EFAULT;
7292 ret = get_errno(safe_read(arg1, p, arg3));
7294 fd_trans_host_to_target_data(arg1)) {
7295 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7297 unlock_user(p, arg2, ret);
7300 case TARGET_NR_write:
7301 if (arg2 == 0 && arg3 == 0) {
7302 return get_errno(safe_write(arg1, 0, 0));
7304 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7305 return -TARGET_EFAULT;
7306 if (fd_trans_target_to_host_data(arg1)) {
7307 void *copy = g_malloc(arg3);
7308 memcpy(copy, p, arg3);
7309 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7311 ret = get_errno(safe_write(arg1, copy, ret));
7315 ret = get_errno(safe_write(arg1, p, arg3));
7317 unlock_user(p, arg2, 0);
7320 #ifdef TARGET_NR_open
7321 case TARGET_NR_open:
7322 if (!(p = lock_user_string(arg1)))
7323 return -TARGET_EFAULT;
7324 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7325 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7327 fd_trans_unregister(ret);
7328 unlock_user(p, arg1, 0);
7331 case TARGET_NR_openat:
7332 if (!(p = lock_user_string(arg2)))
7333 return -TARGET_EFAULT;
7334 ret = get_errno(do_openat(cpu_env, arg1, p,
7335 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7337 fd_trans_unregister(ret);
7338 unlock_user(p, arg2, 0);
7340 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7341 case TARGET_NR_name_to_handle_at:
7342 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7345 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7346 case TARGET_NR_open_by_handle_at:
7347 ret = do_open_by_handle_at(arg1, arg2, arg3);
7348 fd_trans_unregister(ret);
7351 case TARGET_NR_close:
7352 fd_trans_unregister(arg1);
7353 return get_errno(close(arg1));
7356 return do_brk(arg1);
7357 #ifdef TARGET_NR_fork
7358 case TARGET_NR_fork:
7359 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7361 #ifdef TARGET_NR_waitpid
7362 case TARGET_NR_waitpid:
7365 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7366 if (!is_error(ret) && arg2 && ret
7367 && put_user_s32(host_to_target_waitstatus(status), arg2))
7368 return -TARGET_EFAULT;
7372 #ifdef TARGET_NR_waitid
7373 case TARGET_NR_waitid:
7377 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7378 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7379 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7380 return -TARGET_EFAULT;
7381 host_to_target_siginfo(p, &info);
7382 unlock_user(p, arg3, sizeof(target_siginfo_t));
7387 #ifdef TARGET_NR_creat /* not on alpha */
7388 case TARGET_NR_creat:
7389 if (!(p = lock_user_string(arg1)))
7390 return -TARGET_EFAULT;
7391 ret = get_errno(creat(p, arg2));
7392 fd_trans_unregister(ret);
7393 unlock_user(p, arg1, 0);
7396 #ifdef TARGET_NR_link
7397 case TARGET_NR_link:
7400 p = lock_user_string(arg1);
7401 p2 = lock_user_string(arg2);
7403 ret = -TARGET_EFAULT;
7405 ret = get_errno(link(p, p2));
7406 unlock_user(p2, arg2, 0);
7407 unlock_user(p, arg1, 0);
7411 #if defined(TARGET_NR_linkat)
7412 case TARGET_NR_linkat:
7416 return -TARGET_EFAULT;
7417 p = lock_user_string(arg2);
7418 p2 = lock_user_string(arg4);
7420 ret = -TARGET_EFAULT;
7422 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7423 unlock_user(p, arg2, 0);
7424 unlock_user(p2, arg4, 0);
7428 #ifdef TARGET_NR_unlink
7429 case TARGET_NR_unlink:
7430 if (!(p = lock_user_string(arg1)))
7431 return -TARGET_EFAULT;
7432 ret = get_errno(unlink(p));
7433 unlock_user(p, arg1, 0);
7436 #if defined(TARGET_NR_unlinkat)
7437 case TARGET_NR_unlinkat:
7438 if (!(p = lock_user_string(arg2)))
7439 return -TARGET_EFAULT;
7440 ret = get_errno(unlinkat(arg1, p, arg3));
7441 unlock_user(p, arg2, 0);
7444 case TARGET_NR_execve:
7446 char **argp, **envp;
7449 abi_ulong guest_argp;
7450 abi_ulong guest_envp;
7457 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7458 if (get_user_ual(addr, gp))
7459 return -TARGET_EFAULT;
7466 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7467 if (get_user_ual(addr, gp))
7468 return -TARGET_EFAULT;
7474 argp = g_new0(char *, argc + 1);
7475 envp = g_new0(char *, envc + 1);
7477 for (gp = guest_argp, q = argp; gp;
7478 gp += sizeof(abi_ulong), q++) {
7479 if (get_user_ual(addr, gp))
7483 if (!(*q = lock_user_string(addr)))
7485 total_size += strlen(*q) + 1;
7489 for (gp = guest_envp, q = envp; gp;
7490 gp += sizeof(abi_ulong), q++) {
7491 if (get_user_ual(addr, gp))
7495 if (!(*q = lock_user_string(addr)))
7497 total_size += strlen(*q) + 1;
7501 if (!(p = lock_user_string(arg1)))
7503 /* Although execve() is not an interruptible syscall it is
7504 * a special case where we must use the safe_syscall wrapper:
7505 * if we allow a signal to happen before we make the host
7506 * syscall then we will 'lose' it, because at the point of
7507 * execve the process leaves QEMU's control. So we use the
7508 * safe syscall wrapper to ensure that we either take the
7509 * signal as a guest signal, or else it does not happen
7510 * before the execve completes and makes it the other
7511 * program's problem.
7513 ret = get_errno(safe_execve(p, argp, envp));
7514 unlock_user(p, arg1, 0);
7519 ret = -TARGET_EFAULT;
7522 for (gp = guest_argp, q = argp; *q;
7523 gp += sizeof(abi_ulong), q++) {
7524 if (get_user_ual(addr, gp)
7527 unlock_user(*q, addr, 0);
7529 for (gp = guest_envp, q = envp; *q;
7530 gp += sizeof(abi_ulong), q++) {
7531 if (get_user_ual(addr, gp)
7534 unlock_user(*q, addr, 0);
7541 case TARGET_NR_chdir:
7542 if (!(p = lock_user_string(arg1)))
7543 return -TARGET_EFAULT;
7544 ret = get_errno(chdir(p));
7545 unlock_user(p, arg1, 0);
7547 #ifdef TARGET_NR_time
7548 case TARGET_NR_time:
7551 ret = get_errno(time(&host_time));
7554 && put_user_sal(host_time, arg1))
7555 return -TARGET_EFAULT;
7559 #ifdef TARGET_NR_mknod
7560 case TARGET_NR_mknod:
7561 if (!(p = lock_user_string(arg1)))
7562 return -TARGET_EFAULT;
7563 ret = get_errno(mknod(p, arg2, arg3));
7564 unlock_user(p, arg1, 0);
7567 #if defined(TARGET_NR_mknodat)
7568 case TARGET_NR_mknodat:
7569 if (!(p = lock_user_string(arg2)))
7570 return -TARGET_EFAULT;
7571 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7572 unlock_user(p, arg2, 0);
7575 #ifdef TARGET_NR_chmod
7576 case TARGET_NR_chmod:
7577 if (!(p = lock_user_string(arg1)))
7578 return -TARGET_EFAULT;
7579 ret = get_errno(chmod(p, arg2));
7580 unlock_user(p, arg1, 0);
7583 #ifdef TARGET_NR_lseek
7584 case TARGET_NR_lseek:
7585 return get_errno(lseek(arg1, arg2, arg3));
7587 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7588 /* Alpha specific */
7589 case TARGET_NR_getxpid:
7590 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7591 return get_errno(getpid());
7593 #ifdef TARGET_NR_getpid
7594 case TARGET_NR_getpid:
7595 return get_errno(getpid());
7597 case TARGET_NR_mount:
7599 /* need to look at the data field */
7603 p = lock_user_string(arg1);
7605 return -TARGET_EFAULT;
7611 p2 = lock_user_string(arg2);
7614 unlock_user(p, arg1, 0);
7616 return -TARGET_EFAULT;
7620 p3 = lock_user_string(arg3);
7623 unlock_user(p, arg1, 0);
7625 unlock_user(p2, arg2, 0);
7626 return -TARGET_EFAULT;
7632 /* FIXME - arg5 should be locked, but it isn't clear how to
7633  * do that since it's not guaranteed to be a NULL-terminated
7634  * type. */
7637 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7639 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7641 ret = get_errno(ret);
7644 unlock_user(p, arg1, 0);
7646 unlock_user(p2, arg2, 0);
7648 unlock_user(p3, arg3, 0);
7652 #ifdef TARGET_NR_umount
7653 case TARGET_NR_umount:
7654 if (!(p = lock_user_string(arg1)))
7655 return -TARGET_EFAULT;
7656 ret = get_errno(umount(p));
7657 unlock_user(p, arg1, 0);
7660 #ifdef TARGET_NR_stime /* not on alpha */
7661 case TARGET_NR_stime:
7664 if (get_user_sal(host_time, arg1))
7665 return -TARGET_EFAULT;
7666 return get_errno(stime(&host_time));
7669 #ifdef TARGET_NR_alarm /* not on alpha */
7670 case TARGET_NR_alarm:
7673 #ifdef TARGET_NR_pause /* not on alpha */
7674 case TARGET_NR_pause:
7675 if (!block_signals()) {
7676 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7678 return -TARGET_EINTR;
7680 #ifdef TARGET_NR_utime
7681 case TARGET_NR_utime:
7683 struct utimbuf tbuf, *host_tbuf;
7684 struct target_utimbuf *target_tbuf;
7686 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7687 return -TARGET_EFAULT;
7688 tbuf.actime = tswapal(target_tbuf->actime);
7689 tbuf.modtime = tswapal(target_tbuf->modtime);
7690 unlock_user_struct(target_tbuf, arg2, 0);
7695 if (!(p = lock_user_string(arg1)))
7696 return -TARGET_EFAULT;
7697 ret = get_errno(utime(p, host_tbuf));
7698 unlock_user(p, arg1, 0);
7702 #ifdef TARGET_NR_utimes
7703 case TARGET_NR_utimes:
7705 struct timeval *tvp, tv[2];
7707 if (copy_from_user_timeval(&tv[0], arg2)
7708 || copy_from_user_timeval(&tv[1],
7709 arg2 + sizeof(struct target_timeval)))
7710 return -TARGET_EFAULT;
7715 if (!(p = lock_user_string(arg1)))
7716 return -TARGET_EFAULT;
7717 ret = get_errno(utimes(p, tvp));
7718 unlock_user(p, arg1, 0);
7722 #if defined(TARGET_NR_futimesat)
7723 case TARGET_NR_futimesat:
7725 struct timeval *tvp, tv[2];
7727 if (copy_from_user_timeval(&tv[0], arg3)
7728 || copy_from_user_timeval(&tv[1],
7729 arg3 + sizeof(struct target_timeval)))
7730 return -TARGET_EFAULT;
7735 if (!(p = lock_user_string(arg2))) {
7736 return -TARGET_EFAULT;
7738 ret = get_errno(futimesat(arg1, path(p), tvp));
7739 unlock_user(p, arg2, 0);
7743 #ifdef TARGET_NR_access
7744 case TARGET_NR_access:
7745 if (!(p = lock_user_string(arg1))) {
7746 return -TARGET_EFAULT;
7748 ret = get_errno(access(path(p), arg2));
7749 unlock_user(p, arg1, 0);
7752 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7753 case TARGET_NR_faccessat:
7754 if (!(p = lock_user_string(arg2))) {
7755 return -TARGET_EFAULT;
7757 ret = get_errno(faccessat(arg1, p, arg3, 0));
7758 unlock_user(p, arg2, 0);
7761 #ifdef TARGET_NR_nice /* not on alpha */
7762 case TARGET_NR_nice:
7763 return get_errno(nice(arg1));
7765 case TARGET_NR_sync:
7768 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7769 case TARGET_NR_syncfs:
7770 return get_errno(syncfs(arg1));
7772 case TARGET_NR_kill:
7773 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7774 #ifdef TARGET_NR_rename
7775 case TARGET_NR_rename:
7778 p = lock_user_string(arg1);
7779 p2 = lock_user_string(arg2);
7781 ret = -TARGET_EFAULT;
7783 ret = get_errno(rename(p, p2));
7784 unlock_user(p2, arg2, 0);
7785 unlock_user(p, arg1, 0);
7789 #if defined(TARGET_NR_renameat)
7790 case TARGET_NR_renameat:
7793 p = lock_user_string(arg2);
7794 p2 = lock_user_string(arg4);
7796 ret = -TARGET_EFAULT;
7798 ret = get_errno(renameat(arg1, p, arg3, p2));
7799 unlock_user(p2, arg4, 0);
7800 unlock_user(p, arg2, 0);
7804 #if defined(TARGET_NR_renameat2)
7805 case TARGET_NR_renameat2:
7808 p = lock_user_string(arg2);
7809 p2 = lock_user_string(arg4);
7811 ret = -TARGET_EFAULT;
7813 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7815 unlock_user(p2, arg4, 0);
7816 unlock_user(p, arg2, 0);
7820 #ifdef TARGET_NR_mkdir
7821 case TARGET_NR_mkdir:
7822 if (!(p = lock_user_string(arg1)))
7823 return -TARGET_EFAULT;
7824 ret = get_errno(mkdir(p, arg2));
7825 unlock_user(p, arg1, 0);
7828 #if defined(TARGET_NR_mkdirat)
7829 case TARGET_NR_mkdirat:
7830 if (!(p = lock_user_string(arg2)))
7831 return -TARGET_EFAULT;
7832 ret = get_errno(mkdirat(arg1, p, arg3));
7833 unlock_user(p, arg2, 0);
7836 #ifdef TARGET_NR_rmdir
7837 case TARGET_NR_rmdir:
7838 if (!(p = lock_user_string(arg1)))
7839 return -TARGET_EFAULT;
7840 ret = get_errno(rmdir(p));
7841 unlock_user(p, arg1, 0);
7845 ret = get_errno(dup(arg1));
7847 fd_trans_dup(arg1, ret);
7850 #ifdef TARGET_NR_pipe
7851 case TARGET_NR_pipe:
7852 return do_pipe(cpu_env, arg1, 0, 0);
7854 #ifdef TARGET_NR_pipe2
7855 case TARGET_NR_pipe2:
7856 return do_pipe(cpu_env, arg1,
7857 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7859 case TARGET_NR_times:
7861 struct target_tms *tmsp;
7863 ret = get_errno(times(&tms));
7865 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7867 return -TARGET_EFAULT;
7868 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7869 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7870 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7871 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7874 ret = host_to_target_clock_t(ret);
7877 case TARGET_NR_acct:
7879 ret = get_errno(acct(NULL));
7881 if (!(p = lock_user_string(arg1))) {
7882 return -TARGET_EFAULT;
7884 ret = get_errno(acct(path(p)));
7885 unlock_user(p, arg1, 0);
7888 #ifdef TARGET_NR_umount2
7889 case TARGET_NR_umount2:
7890 if (!(p = lock_user_string(arg1)))
7891 return -TARGET_EFAULT;
7892 ret = get_errno(umount2(p, arg2));
7893 unlock_user(p, arg1, 0);
7896 case TARGET_NR_ioctl:
7897 return do_ioctl(arg1, arg2, arg3);
7898 #ifdef TARGET_NR_fcntl
7899 case TARGET_NR_fcntl:
7900 return do_fcntl(arg1, arg2, arg3);
7902 case TARGET_NR_setpgid:
7903 return get_errno(setpgid(arg1, arg2));
7904 case TARGET_NR_umask:
7905 return get_errno(umask(arg1));
7906 case TARGET_NR_chroot:
7907 if (!(p = lock_user_string(arg1)))
7908 return -TARGET_EFAULT;
7909 ret = get_errno(chroot(p));
7910 unlock_user(p, arg1, 0);
7912 #ifdef TARGET_NR_dup2
7913 case TARGET_NR_dup2:
7914 ret = get_errno(dup2(arg1, arg2));
7916 fd_trans_dup(arg1, arg2);
7920 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7921 case TARGET_NR_dup3:
7925 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7928 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7929 ret = get_errno(dup3(arg1, arg2, host_flags));
7931 fd_trans_dup(arg1, arg2);
7936 #ifdef TARGET_NR_getppid /* not on alpha */
7937 case TARGET_NR_getppid:
7938 return get_errno(getppid());
7940 #ifdef TARGET_NR_getpgrp
7941 case TARGET_NR_getpgrp:
7942 return get_errno(getpgrp());
7944 case TARGET_NR_setsid:
7945 return get_errno(setsid());
7946 #ifdef TARGET_NR_sigaction
7947 case TARGET_NR_sigaction:
7949 #if defined(TARGET_ALPHA)
7950 struct target_sigaction act, oact, *pact = 0;
7951 struct target_old_sigaction *old_act;
7953 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7954 return -TARGET_EFAULT;
7955 act._sa_handler = old_act->_sa_handler;
7956 target_siginitset(&act.sa_mask, old_act->sa_mask);
7957 act.sa_flags = old_act->sa_flags;
7958 act.sa_restorer = 0;
7959 unlock_user_struct(old_act, arg2, 0);
7962 ret = get_errno(do_sigaction(arg1, pact, &oact));
7963 if (!is_error(ret) && arg3) {
7964 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7965 return -TARGET_EFAULT;
7966 old_act->_sa_handler = oact._sa_handler;
7967 old_act->sa_mask = oact.sa_mask.sig[0];
7968 old_act->sa_flags = oact.sa_flags;
7969 unlock_user_struct(old_act, arg3, 1);
7971 #elif defined(TARGET_MIPS)
7972 struct target_sigaction act, oact, *pact, *old_act;
7975 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7976 return -TARGET_EFAULT;
7977 act._sa_handler = old_act->_sa_handler;
7978 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7979 act.sa_flags = old_act->sa_flags;
7980 unlock_user_struct(old_act, arg2, 0);
7986 ret = get_errno(do_sigaction(arg1, pact, &oact));
7988 if (!is_error(ret) && arg3) {
7989 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7990 return -TARGET_EFAULT;
7991 old_act->_sa_handler = oact._sa_handler;
7992 old_act->sa_flags = oact.sa_flags;
7993 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7994 old_act->sa_mask.sig[1] = 0;
7995 old_act->sa_mask.sig[2] = 0;
7996 old_act->sa_mask.sig[3] = 0;
7997 unlock_user_struct(old_act, arg3, 1);
8000 struct target_old_sigaction *old_act;
8001 struct target_sigaction act, oact, *pact;
8003 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8004 return -TARGET_EFAULT;
8005 act._sa_handler = old_act->_sa_handler;
8006 target_siginitset(&act.sa_mask, old_act->sa_mask);
8007 act.sa_flags = old_act->sa_flags;
8008 act.sa_restorer = old_act->sa_restorer;
8009 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8010 act.ka_restorer = 0;
8012 unlock_user_struct(old_act, arg2, 0);
8017 ret = get_errno(do_sigaction(arg1, pact, &oact));
8018 if (!is_error(ret) && arg3) {
8019 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8020 return -TARGET_EFAULT;
8021 old_act->_sa_handler = oact._sa_handler;
8022 old_act->sa_mask = oact.sa_mask.sig[0];
8023 old_act->sa_flags = oact.sa_flags;
8024 old_act->sa_restorer = oact.sa_restorer;
8025 unlock_user_struct(old_act, arg3, 1);
8031 case TARGET_NR_rt_sigaction:
8033 #if defined(TARGET_ALPHA)
8034 /* For Alpha and SPARC this is a 5 argument syscall, with
8035 * a 'restorer' parameter which must be copied into the
8036 * sa_restorer field of the sigaction struct.
8037 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8038 * and arg5 is the sigsetsize.
8039 * Alpha also has a separate rt_sigaction struct that it uses
8040 * here; SPARC uses the usual sigaction struct.
8042 struct target_rt_sigaction *rt_act;
8043 struct target_sigaction act, oact, *pact = 0;
8045 if (arg4 != sizeof(target_sigset_t)) {
8046 return -TARGET_EINVAL;
8049 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8050 return -TARGET_EFAULT;
8051 act._sa_handler = rt_act->_sa_handler;
8052 act.sa_mask = rt_act->sa_mask;
8053 act.sa_flags = rt_act->sa_flags;
8054 act.sa_restorer = arg5;
8055 unlock_user_struct(rt_act, arg2, 0);
8058 ret = get_errno(do_sigaction(arg1, pact, &oact));
8059 if (!is_error(ret) && arg3) {
8060 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8061 return -TARGET_EFAULT;
8062 rt_act->_sa_handler = oact._sa_handler;
8063 rt_act->sa_mask = oact.sa_mask;
8064 rt_act->sa_flags = oact.sa_flags;
8065 unlock_user_struct(rt_act, arg3, 1);
8069 target_ulong restorer = arg4;
8070 target_ulong sigsetsize = arg5;
8072 target_ulong sigsetsize = arg4;
8074 struct target_sigaction *act;
8075 struct target_sigaction *oact;
8077 if (sigsetsize != sizeof(target_sigset_t)) {
8078 return -TARGET_EINVAL;
8081 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8082 return -TARGET_EFAULT;
8084 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8085 act->ka_restorer = restorer;
8091 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8092 ret = -TARGET_EFAULT;
8093 goto rt_sigaction_fail;
8097 ret = get_errno(do_sigaction(arg1, act, oact));
8100 unlock_user_struct(act, arg2, 0);
8102 unlock_user_struct(oact, arg3, 1);
8106 #ifdef TARGET_NR_sgetmask /* not on alpha */
8107 case TARGET_NR_sgetmask:
8110 abi_ulong target_set;
8111 ret = do_sigprocmask(0, NULL, &cur_set);
8113 host_to_target_old_sigset(&target_set, &cur_set);
8119 #ifdef TARGET_NR_ssetmask /* not on alpha */
8120 case TARGET_NR_ssetmask:
8123 abi_ulong target_set = arg1;
8124 target_to_host_old_sigset(&set, &target_set);
8125 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8127 host_to_target_old_sigset(&target_set, &oset);
8133 #ifdef TARGET_NR_sigprocmask
8134 case TARGET_NR_sigprocmask:
8136 #if defined(TARGET_ALPHA)
8137 sigset_t set, oldset;
8142 case TARGET_SIG_BLOCK:
8145 case TARGET_SIG_UNBLOCK:
8148 case TARGET_SIG_SETMASK:
8152 return -TARGET_EINVAL;
8155 target_to_host_old_sigset(&set, &mask);
8157 ret = do_sigprocmask(how, &set, &oldset);
8158 if (!is_error(ret)) {
8159 host_to_target_old_sigset(&mask, &oldset);
8161 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8164 sigset_t set, oldset, *set_ptr;
8169 case TARGET_SIG_BLOCK:
8172 case TARGET_SIG_UNBLOCK:
8175 case TARGET_SIG_SETMASK:
8179 return -TARGET_EINVAL;
8181 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8182 return -TARGET_EFAULT;
8183 target_to_host_old_sigset(&set, p);
8184 unlock_user(p, arg2, 0);
8190 ret = do_sigprocmask(how, set_ptr, &oldset);
8191 if (!is_error(ret) && arg3) {
8192 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8193 return -TARGET_EFAULT;
8194 host_to_target_old_sigset(p, &oldset);
8195 unlock_user(p, arg3, sizeof(target_sigset_t));
8201 case TARGET_NR_rt_sigprocmask:
8204 sigset_t set, oldset, *set_ptr;
8206 if (arg4 != sizeof(target_sigset_t)) {
8207 return -TARGET_EINVAL;
8212 case TARGET_SIG_BLOCK:
8215 case TARGET_SIG_UNBLOCK:
8218 case TARGET_SIG_SETMASK:
8222 return -TARGET_EINVAL;
8224 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8225 return -TARGET_EFAULT;
8226 target_to_host_sigset(&set, p);
8227 unlock_user(p, arg2, 0);
8233 ret = do_sigprocmask(how, set_ptr, &oldset);
8234 if (!is_error(ret) && arg3) {
8235 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8236 return -TARGET_EFAULT;
8237 host_to_target_sigset(p, &oldset);
8238 unlock_user(p, arg3, sizeof(target_sigset_t));
8242 #ifdef TARGET_NR_sigpending
8243 case TARGET_NR_sigpending:
8246 ret = get_errno(sigpending(&set));
8247 if (!is_error(ret)) {
8248 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8249 return -TARGET_EFAULT;
8250 host_to_target_old_sigset(p, &set);
8251 unlock_user(p, arg1, sizeof(target_sigset_t));
8256 case TARGET_NR_rt_sigpending:
8260 /* Yes, this check is >, not != like most. We follow the kernel's
8261 * logic and it does it like this because it implements
8262 * NR_sigpending through the same code path, and in that case
8263 * the old_sigset_t is smaller in size.
8265 if (arg2 > sizeof(target_sigset_t)) {
8266 return -TARGET_EINVAL;
8269 ret = get_errno(sigpending(&set));
8270 if (!is_error(ret)) {
8271 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8272 return -TARGET_EFAULT;
8273 host_to_target_sigset(p, &set);
8274 unlock_user(p, arg1, sizeof(target_sigset_t));
8278 #ifdef TARGET_NR_sigsuspend
8279 case TARGET_NR_sigsuspend:
8281 TaskState *ts = cpu->opaque;
8282 #if defined(TARGET_ALPHA)
8283 abi_ulong mask = arg1;
8284 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8286 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8287 return -TARGET_EFAULT;
8288 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8289 unlock_user(p, arg1, 0);
8291 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8293 if (ret != -TARGET_ERESTARTSYS) {
8294 ts->in_sigsuspend = 1;
8299 case TARGET_NR_rt_sigsuspend:
8301 TaskState *ts = cpu->opaque;
8303 if (arg2 != sizeof(target_sigset_t)) {
8304 return -TARGET_EINVAL;
8306 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8307 return -TARGET_EFAULT;
8308 target_to_host_sigset(&ts->sigsuspend_mask, p);
8309 unlock_user(p, arg1, 0);
8310 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8312 if (ret != -TARGET_ERESTARTSYS) {
8313 ts->in_sigsuspend = 1;
8317 case TARGET_NR_rt_sigtimedwait:
8320 struct timespec uts, *puts;
8323 if (arg4 != sizeof(target_sigset_t)) {
8324 return -TARGET_EINVAL;
8327 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8328 return -TARGET_EFAULT;
8329 target_to_host_sigset(&set, p);
8330 unlock_user(p, arg1, 0);
8333 target_to_host_timespec(puts, arg3);
8337 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8339 if (!is_error(ret)) {
8341 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8344 return -TARGET_EFAULT;
8346 host_to_target_siginfo(p, &uinfo);
8347 unlock_user(p, arg2, sizeof(target_siginfo_t));
8349 ret = host_to_target_signal(ret);
8353 case TARGET_NR_rt_sigqueueinfo:
8357 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8359 return -TARGET_EFAULT;
8361 target_to_host_siginfo(&uinfo, p);
8362 unlock_user(p, arg3, 0);
8363 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8366 case TARGET_NR_rt_tgsigqueueinfo:
8370 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8372 return -TARGET_EFAULT;
8374 target_to_host_siginfo(&uinfo, p);
8375 unlock_user(p, arg4, 0);
8376 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8379 #ifdef TARGET_NR_sigreturn
8380 case TARGET_NR_sigreturn:
8381 if (block_signals()) {
8382 return -TARGET_ERESTARTSYS;
8384 return do_sigreturn(cpu_env);
8386 case TARGET_NR_rt_sigreturn:
8387 if (block_signals()) {
8388 return -TARGET_ERESTARTSYS;
8390 return do_rt_sigreturn(cpu_env);
8391 case TARGET_NR_sethostname:
8392 if (!(p = lock_user_string(arg1)))
8393 return -TARGET_EFAULT;
8394 ret = get_errno(sethostname(p, arg2));
8395 unlock_user(p, arg1, 0);
8397 #ifdef TARGET_NR_setrlimit
8398 case TARGET_NR_setrlimit:
8400 int resource = target_to_host_resource(arg1);
8401 struct target_rlimit *target_rlim;
8403 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8404 return -TARGET_EFAULT;
8405 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8406 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8407 unlock_user_struct(target_rlim, arg2, 0);
8409 * If we just passed through resource limit settings for memory then
8410 * they would also apply to QEMU's own allocations, and QEMU will
8411 * crash or hang or die if its allocations fail. Ideally we would
8412 * track the guest allocations in QEMU and apply the limits ourselves.
8413 * For now, just tell the guest the call succeeded but don't actually
8416 if (resource != RLIMIT_AS &&
8417 resource != RLIMIT_DATA &&
8418 resource != RLIMIT_STACK) {
8419 return get_errno(setrlimit(resource, &rlim));
8425 #ifdef TARGET_NR_getrlimit
8426 case TARGET_NR_getrlimit:
8428 int resource = target_to_host_resource(arg1);
8429 struct target_rlimit *target_rlim;
8432 ret = get_errno(getrlimit(resource, &rlim));
8433 if (!is_error(ret)) {
8434 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8435 return -TARGET_EFAULT;
8436 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8437 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8438 unlock_user_struct(target_rlim, arg2, 1);
8443 case TARGET_NR_getrusage:
8445 struct rusage rusage;
8446 ret = get_errno(getrusage(arg1, &rusage));
8447 if (!is_error(ret)) {
8448 ret = host_to_target_rusage(arg2, &rusage);
8452 case TARGET_NR_gettimeofday:
8455 ret = get_errno(gettimeofday(&tv, NULL));
8456 if (!is_error(ret)) {
8457 if (copy_to_user_timeval(arg1, &tv))
8458 return -TARGET_EFAULT;
8462 case TARGET_NR_settimeofday:
8464 struct timeval tv, *ptv = NULL;
8465 struct timezone tz, *ptz = NULL;
8468 if (copy_from_user_timeval(&tv, arg1)) {
8469 return -TARGET_EFAULT;
8475 if (copy_from_user_timezone(&tz, arg2)) {
8476 return -TARGET_EFAULT;
8481 return get_errno(settimeofday(ptv, ptz));
8483 #if defined(TARGET_NR_select)
8484 case TARGET_NR_select:
8485 #if defined(TARGET_WANT_NI_OLD_SELECT)
8486 /* some architectures used to have old_select here
8487  * but now return ENOSYS for it.
8489 ret = -TARGET_ENOSYS;
8490 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8491 ret = do_old_select(arg1);
8493 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8497 #ifdef TARGET_NR_pselect6
8498 case TARGET_NR_pselect6:
8500 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8501 fd_set rfds, wfds, efds;
8502 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8503 struct timespec ts, *ts_ptr;
8506 * The 6th arg is actually two args smashed together,
8507 * so we cannot use the C library.
8515 abi_ulong arg_sigset, arg_sigsize, *arg7;
8516 target_sigset_t *target_sigset;
8524 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8528 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8532 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8538 * This takes a timespec, and not a timeval, so we cannot
8539 * use the do_select() helper ...
8542 if (target_to_host_timespec(&ts, ts_addr)) {
8543 return -TARGET_EFAULT;
8550 /* Extract the two packed args for the sigset */
8553 sig.size = SIGSET_T_SIZE;
8555 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8557 return -TARGET_EFAULT;
8559 arg_sigset = tswapal(arg7[0]);
8560 arg_sigsize = tswapal(arg7[1]);
8561 unlock_user(arg7, arg6, 0);
8565 if (arg_sigsize != sizeof(*target_sigset)) {
8566 /* Like the kernel, we enforce correct size sigsets */
8567 return -TARGET_EINVAL;
8569 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8570 sizeof(*target_sigset), 1);
8571 if (!target_sigset) {
8572 return -TARGET_EFAULT;
8574 target_to_host_sigset(&set, target_sigset);
8575 unlock_user(target_sigset, arg_sigset, 0);
8583 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8586 if (!is_error(ret)) {
8587 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8588 return -TARGET_EFAULT;
8589 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8590 return -TARGET_EFAULT;
8591 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8592 return -TARGET_EFAULT;
8594 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8595 return -TARGET_EFAULT;
8600 #ifdef TARGET_NR_symlink
8601 case TARGET_NR_symlink:
8604 p = lock_user_string(arg1);
8605 p2 = lock_user_string(arg2);
8607 ret = -TARGET_EFAULT;
8609 ret = get_errno(symlink(p, p2));
8610 unlock_user(p2, arg2, 0);
8611 unlock_user(p, arg1, 0);
8615 #if defined(TARGET_NR_symlinkat)
8616 case TARGET_NR_symlinkat:
8619 p = lock_user_string(arg1);
8620 p2 = lock_user_string(arg3);
8622 ret = -TARGET_EFAULT;
8624 ret = get_errno(symlinkat(p, arg2, p2));
8625 unlock_user(p2, arg3, 0);
8626 unlock_user(p, arg1, 0);
8630 #ifdef TARGET_NR_readlink
8631 case TARGET_NR_readlink:
8634 p = lock_user_string(arg1);
8635 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8637 ret = -TARGET_EFAULT;
8639 /* Short circuit this for the magic exe check. */
8640 ret = -TARGET_EINVAL;
8641 } else if (is_proc_myself((const char *)p, "exe")) {
8642 char real[PATH_MAX], *temp;
8643 temp = realpath(exec_path, real);
8644 /* Return value is # of bytes that we wrote to the buffer. */
8646 ret = get_errno(-1);
8648 /* Don't worry about sign mismatch as earlier mapping
8649 * logic would have thrown a bad address error. */
8650 ret = MIN(strlen(real), arg3);
8651 /* We cannot NUL terminate the string. */
8652 memcpy(p2, real, ret);
8655 ret = get_errno(readlink(path(p), p2, arg3));
8657 unlock_user(p2, arg2, ret);
8658 unlock_user(p, arg1, 0);
8662 #if defined(TARGET_NR_readlinkat)
8663 case TARGET_NR_readlinkat:
8666 p = lock_user_string(arg2);
8667 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8669 ret = -TARGET_EFAULT;
8670 } else if (is_proc_myself((const char *)p, "exe")) {
8671 char real[PATH_MAX], *temp;
8672 temp = realpath(exec_path, real);
8673 ret = temp == NULL ? get_errno(-1) : strlen(real);
8674 snprintf((char *)p2, arg4, "%s", real);
8676 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8678 unlock_user(p2, arg3, ret);
8679 unlock_user(p, arg2, 0);
8683 #ifdef TARGET_NR_swapon
8684 case TARGET_NR_swapon:
8685 if (!(p = lock_user_string(arg1)))
8686 return -TARGET_EFAULT;
8687 ret = get_errno(swapon(p, arg2));
8688 unlock_user(p, arg1, 0);
8691 case TARGET_NR_reboot:
8692 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8693 /* arg4 must be ignored in all other cases */
8694 p = lock_user_string(arg4);
8696 return -TARGET_EFAULT;
8698 ret = get_errno(reboot(arg1, arg2, arg3, p));
8699 unlock_user(p, arg4, 0);
8701 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8704 #ifdef TARGET_NR_mmap
8705 case TARGET_NR_mmap:
8706 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8707 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8708 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8709 || defined(TARGET_S390X)
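/* On these targets the old mmap syscall takes a single pointer to an
 * array of six arguments in guest memory rather than passing the
 * arguments in registers. */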
8712 abi_ulong v1, v2, v3, v4, v5, v6;
8713 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8714 return -TARGET_EFAULT;
8721 unlock_user(v, arg1, 0);
8722 ret = get_errno(target_mmap(v1, v2, v3,
8723 target_to_host_bitmask(v4, mmap_flags_tbl),
8727 ret = get_errno(target_mmap(arg1, arg2, arg3,
8728 target_to_host_bitmask(arg4, mmap_flags_tbl),
8734 #ifdef TARGET_NR_mmap2
8735 case TARGET_NR_mmap2:
8737 #define MMAP_SHIFT 12
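/* mmap2 expresses the file offset in 4096-byte units by default,
 * so scale it up to a byte offset for target_mmap(). */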
8739 ret = target_mmap(arg1, arg2, arg3,
8740 target_to_host_bitmask(arg4, mmap_flags_tbl),
8741 arg5, arg6 << MMAP_SHIFT);
8742 return get_errno(ret);
8744 case TARGET_NR_munmap:
8745 return get_errno(target_munmap(arg1, arg2));
8746 case TARGET_NR_mprotect:
8748 TaskState *ts = cpu->opaque;
8749 /* Special hack to detect libc making the stack executable. */
8750 if ((arg3 & PROT_GROWSDOWN)
8751 && arg1 >= ts->info->stack_limit
8752 && arg1 <= ts->info->start_stack) {
8753 arg3 &= ~PROT_GROWSDOWN;
8754 arg2 = arg2 + arg1 - ts->info->stack_limit;
8755 arg1 = ts->info->stack_limit;
8758 return get_errno(target_mprotect(arg1, arg2, arg3));
8759 #ifdef TARGET_NR_mremap
8760 case TARGET_NR_mremap:
8761 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8763 /* ??? msync/mlock/munlock are broken for softmmu. */
8764 #ifdef TARGET_NR_msync
8765 case TARGET_NR_msync:
8766 return get_errno(msync(g2h(arg1), arg2, arg3));
8768 #ifdef TARGET_NR_mlock
8769 case TARGET_NR_mlock:
8770 return get_errno(mlock(g2h(arg1), arg2));
8772 #ifdef TARGET_NR_munlock
8773 case TARGET_NR_munlock:
8774 return get_errno(munlock(g2h(arg1), arg2));
8776 #ifdef TARGET_NR_mlockall
8777 case TARGET_NR_mlockall:
8778 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8780 #ifdef TARGET_NR_munlockall
8781 case TARGET_NR_munlockall:
8782 return get_errno(munlockall());
8784 #ifdef TARGET_NR_truncate
8785 case TARGET_NR_truncate:
8786 if (!(p = lock_user_string(arg1)))
8787 return -TARGET_EFAULT;
8788 ret = get_errno(truncate(p, arg2));
8789 unlock_user(p, arg1, 0);
8792 #ifdef TARGET_NR_ftruncate
8793 case TARGET_NR_ftruncate:
8794 return get_errno(ftruncate(arg1, arg2));
8796 case TARGET_NR_fchmod:
8797 return get_errno(fchmod(arg1, arg2));
8798 #if defined(TARGET_NR_fchmodat)
8799 case TARGET_NR_fchmodat:
8800 if (!(p = lock_user_string(arg2)))
8801 return -TARGET_EFAULT;
8802 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8803 unlock_user(p, arg2, 0);
8806 case TARGET_NR_getpriority:
8807 /* Note that negative values are valid for getpriority, so we must
8808 differentiate based on errno settings. */
8810 ret = getpriority(arg1, arg2);
8811 if (ret == -1 && errno != 0) {
8812 return -host_to_target_errno(errno);
8815 /* Return value is the unbiased priority. Signal no error. */
8816 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8818 /* Return value is a biased priority to avoid negative numbers. */
8822 case TARGET_NR_setpriority:
8823 return get_errno(setpriority(arg1, arg2, arg3));
8824 #ifdef TARGET_NR_statfs
8825 case TARGET_NR_statfs:
8826 if (!(p = lock_user_string(arg1))) {
8827 return -TARGET_EFAULT;
8829 ret = get_errno(statfs(path(p), &stfs));
8830 unlock_user(p, arg1, 0);
8832 if (!is_error(ret)) {
8833 struct target_statfs *target_stfs;
8835 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8836 return -TARGET_EFAULT;
8837 __put_user(stfs.f_type, &target_stfs->f_type);
8838 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8839 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8840 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8841 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8842 __put_user(stfs.f_files, &target_stfs->f_files);
8843 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8844 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8845 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8846 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8847 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8848 #ifdef _STATFS_F_FLAGS
8849 __put_user(stfs.f_flags, &target_stfs->f_flags);
8851 __put_user(0, &target_stfs->f_flags);
8853 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8854 unlock_user_struct(target_stfs, arg2, 1);
8858 #ifdef TARGET_NR_fstatfs
8859 case TARGET_NR_fstatfs:
8860 ret = get_errno(fstatfs(arg1, &stfs));
8861 goto convert_statfs;
8863 #ifdef TARGET_NR_statfs64
8864 case TARGET_NR_statfs64:
8865 if (!(p = lock_user_string(arg1))) {
8866 return -TARGET_EFAULT;
8868 ret = get_errno(statfs(path(p), &stfs));
8869 unlock_user(p, arg1, 0);
8871 if (!is_error(ret)) {
8872 struct target_statfs64 *target_stfs;
8874 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8875 return -TARGET_EFAULT;
8876 __put_user(stfs.f_type, &target_stfs->f_type);
8877 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8878 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8879 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8880 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8881 __put_user(stfs.f_files, &target_stfs->f_files);
8882 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8883 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8884 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8885 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8886 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8887 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8888 unlock_user_struct(target_stfs, arg3, 1);
8891 case TARGET_NR_fstatfs64:
8892 ret = get_errno(fstatfs(arg1, &stfs));
8893 goto convert_statfs64;
8895 #ifdef TARGET_NR_socketcall
8896 case TARGET_NR_socketcall:
8897 return do_socketcall(arg1, arg2);
8899 #ifdef TARGET_NR_accept
8900 case TARGET_NR_accept:
8901 return do_accept4(arg1, arg2, arg3, 0);
8903 #ifdef TARGET_NR_accept4
8904 case TARGET_NR_accept4:
8905 return do_accept4(arg1, arg2, arg3, arg4);
8907 #ifdef TARGET_NR_bind
8908 case TARGET_NR_bind:
8909 return do_bind(arg1, arg2, arg3);
8911 #ifdef TARGET_NR_connect
8912 case TARGET_NR_connect:
8913 return do_connect(arg1, arg2, arg3);
8915 #ifdef TARGET_NR_getpeername
8916 case TARGET_NR_getpeername:
8917 return do_getpeername(arg1, arg2, arg3);
8919 #ifdef TARGET_NR_getsockname
8920 case TARGET_NR_getsockname:
8921 return do_getsockname(arg1, arg2, arg3);
8923 #ifdef TARGET_NR_getsockopt
8924 case TARGET_NR_getsockopt:
8925 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8927 #ifdef TARGET_NR_listen
8928 case TARGET_NR_listen:
8929 return get_errno(listen(arg1, arg2));
8931 #ifdef TARGET_NR_recv
8932 case TARGET_NR_recv:
8933 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8935 #ifdef TARGET_NR_recvfrom
8936 case TARGET_NR_recvfrom:
8937 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8939 #ifdef TARGET_NR_recvmsg
8940 case TARGET_NR_recvmsg:
8941 return do_sendrecvmsg(arg1, arg2, arg3, 0);
8943 #ifdef TARGET_NR_send
8944 case TARGET_NR_send:
8945 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8947 #ifdef TARGET_NR_sendmsg
8948 case TARGET_NR_sendmsg:
8949 return do_sendrecvmsg(arg1, arg2, arg3, 1);
8951 #ifdef TARGET_NR_sendmmsg
8952 case TARGET_NR_sendmmsg:
8953 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8954 case TARGET_NR_recvmmsg:
8955 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8957 #ifdef TARGET_NR_sendto
8958 case TARGET_NR_sendto:
8959 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8961 #ifdef TARGET_NR_shutdown
8962 case TARGET_NR_shutdown:
8963 return get_errno(shutdown(arg1, arg2));
8965 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8966 case TARGET_NR_getrandom:
8967 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8969 return -TARGET_EFAULT;
8971 ret = get_errno(getrandom(p, arg2, arg3));
8972 unlock_user(p, arg1, ret);
8975 #ifdef TARGET_NR_socket
8976 case TARGET_NR_socket:
8977 return do_socket(arg1, arg2, arg3);
8979 #ifdef TARGET_NR_socketpair
8980 case TARGET_NR_socketpair:
8981 return do_socketpair(arg1, arg2, arg3, arg4);
8983 #ifdef TARGET_NR_setsockopt
8984 case TARGET_NR_setsockopt:
8985 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8987 #if defined(TARGET_NR_syslog)
8988 case TARGET_NR_syslog:
8993 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
8994 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
8995 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
8996 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
8997 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
8998 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8999 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9000 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9001 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9002 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9003 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9004 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9007 return -TARGET_EINVAL;
9012 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9014 return -TARGET_EFAULT;
9016 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9017 unlock_user(p, arg2, arg3);
9021 return -TARGET_EINVAL;
9026 case TARGET_NR_setitimer:
9028 struct itimerval value, ovalue, *pvalue;
9032 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9033 || copy_from_user_timeval(&pvalue->it_value,
9034 arg2 + sizeof(struct target_timeval)))
9035 return -TARGET_EFAULT;
9039 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9040 if (!is_error(ret) && arg3) {
9041 if (copy_to_user_timeval(arg3,
9042 &ovalue.it_interval)
9043 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9045 return -TARGET_EFAULT;
9049 case TARGET_NR_getitimer:
9051 struct itimerval value;
9053 ret = get_errno(getitimer(arg1, &value));
9054 if (!is_error(ret) && arg2) {
9055 if (copy_to_user_timeval(arg2,
9057 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9059 return -TARGET_EFAULT;
9063 #ifdef TARGET_NR_stat
9064 case TARGET_NR_stat:
9065 if (!(p = lock_user_string(arg1))) {
9066 return -TARGET_EFAULT;
9068 ret = get_errno(stat(path(p), &st));
9069 unlock_user(p, arg1, 0);
9072 #ifdef TARGET_NR_lstat
9073 case TARGET_NR_lstat:
9074 if (!(p = lock_user_string(arg1))) {
9075 return -TARGET_EFAULT;
9077 ret = get_errno(lstat(path(p), &st));
9078 unlock_user(p, arg1, 0);
9081 #ifdef TARGET_NR_fstat
9082 case TARGET_NR_fstat:
9084 ret = get_errno(fstat(arg1, &st));
9085 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9088 if (!is_error(ret)) {
9089 struct target_stat *target_st;
9091 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9092 return -TARGET_EFAULT;
9093 memset(target_st, 0, sizeof(*target_st));
9094 __put_user(st.st_dev, &target_st->st_dev);
9095 __put_user(st.st_ino, &target_st->st_ino);
9096 __put_user(st.st_mode, &target_st->st_mode);
9097 __put_user(st.st_uid, &target_st->st_uid);
9098 __put_user(st.st_gid, &target_st->st_gid);
9099 __put_user(st.st_nlink, &target_st->st_nlink);
9100 __put_user(st.st_rdev, &target_st->st_rdev);
9101 __put_user(st.st_size, &target_st->st_size);
9102 __put_user(st.st_blksize, &target_st->st_blksize);
9103 __put_user(st.st_blocks, &target_st->st_blocks);
9104 __put_user(st.st_atime, &target_st->target_st_atime);
9105 __put_user(st.st_mtime, &target_st->target_st_mtime);
9106 __put_user(st.st_ctime, &target_st->target_st_ctime);
9107 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9108 defined(TARGET_STAT_HAVE_NSEC)
9109 __put_user(st.st_atim.tv_nsec,
9110 &target_st->target_st_atime_nsec);
9111 __put_user(st.st_mtim.tv_nsec,
9112 &target_st->target_st_mtime_nsec);
9113 __put_user(st.st_ctim.tv_nsec,
9114 &target_st->target_st_ctime_nsec);
9116 unlock_user_struct(target_st, arg2, 1);
9121 case TARGET_NR_vhangup:
9122 return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
9155 #ifdef TARGET_NR_swapoff
9156 case TARGET_NR_swapoff:
9157 if (!(p = lock_user_string(arg1)))
9158 return -TARGET_EFAULT;
9159 ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
9163 case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
9166 struct sysinfo value;
9167 ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
9170 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9171 return -TARGET_EFAULT;
9172 __put_user(value.uptime, &target_value->uptime);
9173 __put_user(value.loads[0], &target_value->loads[0]);
9174 __put_user(value.loads[1], &target_value->loads[1]);
9175 __put_user(value.loads[2], &target_value->loads[2]);
9176 __put_user(value.totalram, &target_value->totalram);
9177 __put_user(value.freeram, &target_value->freeram);
9178 __put_user(value.sharedram, &target_value->sharedram);
9179 __put_user(value.bufferram, &target_value->bufferram);
9180 __put_user(value.totalswap, &target_value->totalswap);
9181 __put_user(value.freeswap, &target_value->freeswap);
9182 __put_user(value.procs, &target_value->procs);
9183 __put_user(value.totalhigh, &target_value->totalhigh);
9184 __put_user(value.freehigh, &target_value->freehigh);
9185 __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        return do_semop(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
9238 case TARGET_NR_fsync:
9239 return get_errno(fsync(arg1));
9240 case TARGET_NR_clone:
9241 /* Linux manages to have three different orderings for its
9242 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9243 * match the kernel's CONFIG_CLONE_* settings.
9244 * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
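        /*
         * Sketch of the guest argument orders handled below (do_fork()
         * takes: flags, newsp, parent_tidptr, newtls, child_tidptr):
         *   default:    flags, newsp, parent_tidptr, child_tidptr, tls
         *   BACKWARDS:  flags, newsp, parent_tidptr, tls, child_tidptr
         *   BACKWARDS2: newsp and flags swapped relative to the default.
         * This mapping is inferred from the do_fork() calls themselves,
         * not from separate target documentation.
         */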
9247 #if defined(TARGET_MICROBLAZE)
9248 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9249 #elif defined(TARGET_CLONE_BACKWARDS)
9250 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
9257 #ifdef __NR_exit_group
9258 /* new thread calls */
9259 case TARGET_NR_exit_group:
9260 preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
9263 case TARGET_NR_setdomainname:
9264 if (!(p = lock_user_string(arg1)))
9265 return -TARGET_EFAULT;
9266 ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
9269 case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;
9274 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9275 return -TARGET_EFAULT;
9276 ret = get_errno(sys_uname(buf));
9277 if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever
                 * is being used for emulation. */
9280 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9281 sizeof(buf->machine));
9282 /* Allow the user to override the reported release. */
9283 if (qemu_uname_release && *qemu_uname_release) {
9284 g_strlcpy(buf->release, qemu_uname_release,
9285 sizeof(buf->release));
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
9314 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
9331 case TARGET_NR_getpgid:
9332 return get_errno(getpgid(arg1));
9333 case TARGET_NR_fchdir:
9334 return get_errno(fchdir(arg1));
9335 case TARGET_NR_personality:
9336 return get_errno(personality(arg1));
9337 #ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
9357 #ifdef TARGET_NR_getdents
9358 case TARGET_NR_getdents:
9359 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9360 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        {
            struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;

            dirp = g_try_malloc(count);
            if (!dirp) {
                return -TARGET_ENOMEM;
            }

            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
                int count1, tnamelen;

                count1 = 0;
                de = dirp;
                if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                    return -TARGET_EFAULT;
                tde = target_dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    count1 += treclen;
                }
                ret = count1;
                unlock_user(target_dirp, arg2, ret);
            }
            g_free(dirp);
        }
#else
        {
9406 struct linux_dirent *dirp;
9407 abi_long count = arg3;
9409 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9410 return -TARGET_EFAULT;
9411 ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswapls(&de->d_ino);
                    tswapls(&de->d_off);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
#else
9432 /* Implement getdents in terms of getdents64 */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;

            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
            if (!dirp) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                /* Convert the dirent64 structs to target dirent. We do this
                 * in-place, since we can guarantee that a target_dirent is no
                 * larger than a dirent64; however this means we have to be
                 * careful to read everything before writing in the new format.
                 */
                struct linux_dirent64 *de;
                struct target_dirent *tde;
                int len = ret;
                int tlen = 0;

                de = dirp;
                tde = (struct target_dirent *)dirp;
                while (len > 0) {
                    int namelen, treclen;
                    int reclen = de->d_reclen;
                    uint64_t ino = de->d_ino;
                    int64_t off = de->d_off;
                    uint8_t type = de->d_type;

                    namelen = strlen(de->d_name);
                    treclen = offsetof(struct target_dirent, d_name)
                        + namelen + 2;
                    treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));

                    memmove(tde->d_name, de->d_name, namelen + 1);
                    tde->d_ino = tswapal(ino);
                    tde->d_off = tswapal(off);
                    tde->d_reclen = tswap16(treclen);
                    /* The target_dirent type is in what was formerly a
                     * padding byte at the end of the structure:
                     */
                    *(((char *)tde) + treclen - 1) = type;

                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    len -= reclen;
                    tlen += treclen;
                }
                ret = tlen;
            }
            unlock_user(dirp, arg2, ret);
        }
        return ret;
9487 #endif /* TARGET_NR_getdents */
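/*
 * Why the in-place getdents conversion above is safe (a sketch): the source
 * comment guarantees a target_dirent record is never larger than the
 * corresponding linux_dirent64 record, so treclen <= reclen on every
 * iteration; the write cursor 'tde' therefore never overtakes the read
 * cursor 'de', and each record's fields are read into locals before any
 * bytes are overwritten.
 */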
9488 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9489 case TARGET_NR_getdents64:
9491 struct linux_dirent64 *dirp;
9492 abi_long count = arg3;
9493 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9494 return -TARGET_EFAULT;
9495 ret = get_errno(sys_getdents64(arg1, dirp, count));
9496 if (!is_error(ret)) {
9497 struct linux_dirent64 *de;
9502 reclen = de->d_reclen;
9505 de->d_reclen = tswap16(reclen);
9506 tswap64s((uint64_t *)&de->d_ino);
9507 tswap64s((uint64_t *)&de->d_off);
9508 de = (struct linux_dirent64 *)((char *)de + reclen);
9512 unlock_user(dirp, arg2, ret);
9515 #endif /* TARGET_NR_getdents64 */
9516 #if defined(TARGET_NR__newselect)
9517 case TARGET_NR__newselect:
9518 return do_select(arg1, arg2, arg3, arg4, arg5);
9520 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9521 # ifdef TARGET_NR_poll
9522 case TARGET_NR_poll:
9524 # ifdef TARGET_NR_ppoll
9525 case TARGET_NR_ppoll:
# endif
    {
        struct target_pollfd *target_pfd;
        unsigned int nfds = arg2;
        struct pollfd *pfd;
        unsigned int i;

        pfd = NULL;
        target_pfd = NULL;
        if (nfds) {
            if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
                return -TARGET_EINVAL;
            }

            target_pfd = lock_user(VERIFY_WRITE, arg1,
                                   sizeof(struct target_pollfd) * nfds, 1);
            if (!target_pfd) {
                return -TARGET_EFAULT;
            }

            pfd = alloca(sizeof(struct pollfd) * nfds);
            for (i = 0; i < nfds; i++) {
                pfd[i].fd = tswap32(target_pfd[i].fd);
                pfd[i].events = tswap16(target_pfd[i].events);
            }
        }

        switch (num) {
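        /*
         * Note (a sketch, not author commentary): fd and events are
         * converted element-by-element because each field needs
         * byte-swapping for cross-endian guests; revents is written back
         * once the host call completes below. The nfds bound checked above
         * also keeps the size multiplication from overflowing an int.
         */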
9554 # ifdef TARGET_NR_ppoll
        case TARGET_NR_ppoll:
        {
            struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg3) {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                timeout_ts = NULL;
            }

            if (arg4) {
                if (arg5 != sizeof(target_sigset_t)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EINVAL;
                }

                target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
                target_to_host_sigset(set, target_set);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                       set, SIGSET_T_SIZE));

            if (!is_error(ret) && arg3) {
                host_to_target_timespec(arg3, timeout_ts);
            }
            if (arg4) {
                unlock_user(target_set, arg4, 0);
            }
            break;
        }
# endif
9598 # ifdef TARGET_NR_poll
        case TARGET_NR_poll:
        {
            struct timespec ts, *pts;

            if (arg3 >= 0) {
                /* Convert ms to secs, ns */
                pts = &ts;
                ts.tv_sec = arg3 / 1000;
                ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            } else {
                /* -ve poll() timeout means "infinite" */
                pts = NULL;
            }
            ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
            break;
        }
# endif
        default:
            g_assert_not_reached();
        }
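        /*
         * Worked example for the poll case above: a guest timeout of
         * 1500 ms becomes { tv_sec = 1, tv_nsec = 500000000 }, while a
         * negative timeout selects pts == NULL, i.e. block indefinitely.
         */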
9620 if (!is_error(ret)) {
9621 for(i = 0; i < nfds; i++) {
9622 target_pfd[i].revents = tswap16(pfd[i].revents);
            }
        }
        unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    }
    return ret;
9629 case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
9632 return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
9655 #if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
9671 #if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
9687 case TARGET_NR_getsid:
9688 return get_errno(getsid(arg1));
9689 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9690 case TARGET_NR_fdatasync:
9691 return get_errno(fdatasync(arg1));
#ifdef TARGET_NR__sysctl
    case TARGET_NR__sysctl:
        /* We don't implement this, but ENOTDIR is always a safe
           return value. */
        return -TARGET_ENOTDIR;
#endif
9699 case TARGET_NR_sched_getaffinity:
9701 unsigned int mask_size;
9702 unsigned long *mask;
        /*
         * sched_getaffinity needs multiples of ulong, so need to take
         * care of mismatches between target ulong and host ulong sizes.
         */
9708 if (arg2 & (sizeof(abi_ulong) - 1)) {
9709 return -TARGET_EINVAL;
9711 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
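        /*
         * Example (assuming a 64-bit host): a 32-bit guest passing
         * arg2 == 12 gets mask_size rounded up to 16 here, so the host
         * syscall always sees a whole number of unsigned longs.
         */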
9713 mask = alloca(mask_size);
9714 memset(mask, 0, mask_size);
9715 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
        if (!is_error(ret)) {
            if (ret > arg2) {
                /* More data returned than the caller's buffer will fit.
                 * This only happens if sizeof(abi_long) < sizeof(long)
                 * and the caller passed us a buffer holding an odd number
                 * of abi_longs. If the host kernel is actually using the
                 * extra 4 bytes then fail EINVAL; otherwise we can just
                 * ignore them and only copy the interesting part.
                 */
                int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                if (numcpus > arg2 * 8) {
                    return -TARGET_EINVAL;
                }
                ret = arg2;
            }

            if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
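    /*
     * Worked example for the sched_getaffinity case above: arg2 == 12
     * bytes can describe 96 CPUs (arg2 * 8); if sysconf() reports more
     * configured CPUs than that, the host kernel really did use the
     * rounded-up tail of the buffer and EINVAL is the only safe answer.
     */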
9739 case TARGET_NR_sched_setaffinity:
9741 unsigned int mask_size;
9742 unsigned long *mask;
        /*
         * sched_setaffinity needs multiples of ulong, so need to take
         * care of mismatches between target ulong and host ulong sizes.
         */
9748 if (arg2 & (sizeof(abi_ulong) - 1)) {
9749 return -TARGET_EINVAL;
9751 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9752 mask = alloca(mask_size);
        ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
        if (ret) {
            return ret;
        }

        return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
    }
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    return -TARGET_EFAULT;
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
9822 case TARGET_NR_sched_getscheduler:
9823 return get_errno(sched_getscheduler(arg1));
9824 case TARGET_NR_sched_yield:
9825 return get_errno(sched_yield());
9826 case TARGET_NR_sched_get_priority_max:
9827 return get_errno(sched_get_priority_max(arg1));
9828 case TARGET_NR_sched_get_priority_min:
9829 return get_errno(sched_get_priority_min(arg1));
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        return ret;
    case TARGET_NR_prctl:
        switch (arg1) {
        case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
            if (!is_error(ret) && arg2
                && put_user_ual(deathsig, arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
#ifdef PR_GET_NAME
        case PR_GET_NAME:
        {
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
        case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
#endif /* PR_GET_NAME */
#ifdef TARGET_MIPS
        case TARGET_PR_GET_FP_MODE:
        {
            CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
            ret = 0;
            if (env->CP0_Status & (1 << CP0St_FR)) {
                ret |= TARGET_PR_FP_MODE_FR;
            }
            if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
                ret |= TARGET_PR_FP_MODE_FRE;
            }
            return ret;
        }
        case TARGET_PR_SET_FP_MODE:
        {
            CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
            bool old_fr = env->CP0_Status & (1 << CP0St_FR);
            bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
            bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
            bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;

            const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
                                            TARGET_PR_FP_MODE_FRE;

            /* If nothing to change, return right away, successfully. */
            if (old_fr == new_fr && old_fre == new_fre) {
                return 0;
            }
            /* Check the value is valid */
            if (arg2 & ~known_bits) {
                return -TARGET_EOPNOTSUPP;
            }
            /* Setting FRE without FR is not supported. */
            if (new_fre && !new_fr) {
                return -TARGET_EOPNOTSUPP;
            }
            if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
                /* FR1 is not supported */
                return -TARGET_EOPNOTSUPP;
            }
            if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
                && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
                /* cannot set FR=0 */
                return -TARGET_EOPNOTSUPP;
            }
            if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
                /* Cannot set FRE=1 */
                return -TARGET_EOPNOTSUPP;
            }

            int i;
            fpr_t *fpr = env->active_fpu.fpr;
            for (i = 0; i < 32 ; i += 2) {
                if (!old_fr && new_fr) {
                    fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
                } else if (old_fr && !new_fr) {
                    fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
                }
            }

            if (new_fr) {
                env->CP0_Status |= (1 << CP0St_FR);
                env->hflags |= MIPS_HFLAG_F64;
            } else {
                env->CP0_Status &= ~(1 << CP0St_FR);
                env->hflags &= ~MIPS_HFLAG_F64;
            }
            if (new_fre) {
                env->CP0_Config5 |= (1 << CP0C5_FRE);
                if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
                    env->hflags |= MIPS_HFLAG_FRE;
                }
            } else {
                env->CP0_Config5 &= ~(1 << CP0C5_FRE);
                env->hflags &= ~MIPS_HFLAG_FRE;
            }

            return 0;
        }
#endif /* MIPS */
9965 #ifdef TARGET_AARCH64
9966 case TARGET_PR_SVE_SET_VL:
            /*
             * We cannot support either PR_SVE_SET_VL_ONEXEC or
             * PR_SVE_VL_INHERIT. Note the kernel definition
             * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
             * even though the current architectural maximum is VQ=16.
             */
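            /*
             * For reference (a sketch of the encoding used below): arg2 is
             * the vector length VL in bytes, while VQ counts 128-bit
             * quadwords, so VQ = VL / 16 and zcr_el[1] stores VQ - 1.
             */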
9973 ret = -TARGET_EINVAL;
9974 if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
9975 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9976 CPUARMState *env = cpu_env;
9977 ARMCPU *cpu = env_archcpu(env);
9978 uint32_t vq, old_vq;
9980 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9981 vq = MAX(arg2 / 16, 1);
                vq = MIN(vq, cpu->sve_max_vq);

                if (vq < old_vq) {
                    aarch64_sve_narrow_vq(env, vq);
                }
                env->vfp.zcr_el[1] = vq - 1;
                arm_rebuild_hflags(env);
                ret = vq * 16;
            }
            return ret;
        case TARGET_PR_SVE_GET_VL:
            ret = -TARGET_EINVAL;
            {
                ARMCPU *cpu = env_archcpu(cpu_env);
                if (cpu_isar_feature(aa64_sve, cpu)) {
                    ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
                }
            }
            return ret;
        case TARGET_PR_PAC_RESET_KEYS:
            {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                if (arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                if (cpu_isar_feature(aa64_pauth, cpu)) {
                    int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
                               TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
                               TARGET_PR_PAC_APGAKEY);
                    int ret = 0;
                    Error *err = NULL;

                    if (arg2 == 0) {
                        arg2 = all;
                    } else if (arg2 & ~all) {
                        return -TARGET_EINVAL;
                    }
                    if (arg2 & TARGET_PR_PAC_APIAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apia,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APIBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apib,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apda,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apdb,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APGAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apga,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (ret != 0) {
                        /*
                         * Some unknown failure in the crypto. The best
                         * we can do is log it and fail the syscall.
                         * The real syscall cannot fail this way.
                         */
                        qemu_log_mask(LOG_UNIMP,
                                      "PR_PAC_RESET_KEYS: Crypto failure: %s",
                                      error_get_pretty(err));
                        error_free(err);
                        return -TARGET_EIO;
                    }
                    return 0;
                }
            }
            return -TARGET_EINVAL;
10057 #endif /* AARCH64 */
10058 case PR_GET_SECCOMP:
10059 case PR_SET_SECCOMP:
        /* Disable seccomp to prevent the target disabling syscalls we
         * need. */
        return -TARGET_EINVAL;
    default:
        /* Most prctl options have no pointer arguments */
        return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
    }
    break;
10068 #ifdef TARGET_NR_arch_prctl
10069 case TARGET_NR_arch_prctl:
10070 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
        return do_arch_prctl(cpu_env, arg1, arg2);
#else
        return -TARGET_EINVAL;
#endif
#endif
10076 #ifdef TARGET_NR_pread64
10077 case TARGET_NR_pread64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        return ret;
10094 case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
10112 case TARGET_NR_getcwd:
10113 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10114 return -TARGET_EFAULT;
10115 ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        return ret;
10118 case TARGET_NR_capget:
10119 case TARGET_NR_capset:
    {
        struct target_user_cap_header *target_header;
10122 struct target_user_cap_data *target_data = NULL;
10123 struct __user_cap_header_struct header;
10124 struct __user_cap_data_struct data[2];
10125 struct __user_cap_data_struct *dataptr = NULL;
10126 int i, target_datalen;
10127 int data_items = 1;
10129 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
10132 header.version = tswap32(target_header->version);
10133 header.pid = tswap32(target_header->pid);
10135 if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }
10140 target_datalen = sizeof(*target_data) * data_items;
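        /*
         * Background (not from this file, an assumption for illustration):
         * _LINUX_CAPABILITY_VERSION (v1) carries one 32-bit capability set
         * per header, so data_items == 1; v2/v3 headers split 64 bits of
         * capabilities across two user_cap_data structs, hence the
         * data_items == 2 case above.
         */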
        if (arg2) {
            if (num == TARGET_NR_capget) {
10144 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10146 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10148 if (!target_data) {
10149 unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }
10153 if (num == TARGET_NR_capset) {
10154 for (i = 0; i < data_items; i++) {
10155 data[i].effective = tswap32(target_data[i].effective);
10156 data[i].permitted = tswap32(target_data[i].permitted);
10157 data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
10165 ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }
10170 /* The kernel always updates version for both capget and capset */
10171 target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
10188 case TARGET_NR_sigaltstack:
10189 return do_sigaltstack(arg1, arg2,
10190 get_sp_from_cpustate((CPUArchState *)cpu_env));
10192 #ifdef CONFIG_SENDFILE
10193 #ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
10215 #ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
10238 #ifdef TARGET_NR_vfork
10239 case TARGET_NR_vfork:
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
10244 #ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        return ret;
    }
#endif
10261 #ifdef TARGET_NR_truncate64
10262 case TARGET_NR_truncate64:
10263 if (!(p = lock_user_string(arg1)))
10264 return -TARGET_EFAULT;
10265 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
10269 #ifdef TARGET_NR_ftruncate64
10270 case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
10273 #ifdef TARGET_NR_stat64
10274 case TARGET_NR_stat64:
10275 if (!(p = lock_user_string(arg1))) {
10276 return -TARGET_EFAULT;
10278 ret = get_errno(stat(path(p), &st));
10279 unlock_user(p, arg1, 0);
10280 if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
10284 #ifdef TARGET_NR_lstat64
10285 case TARGET_NR_lstat64:
10286 if (!(p = lock_user_string(arg1))) {
10287 return -TARGET_EFAULT;
10289 ret = get_errno(lstat(path(p), &st));
10290 unlock_user(p, arg1, 0);
10291 if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
10295 #ifdef TARGET_NR_fstat64
10296 case TARGET_NR_fstat64:
10297 ret = get_errno(fstat(arg1, &st));
10298 if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
10302 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10303 #ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
10318 #if defined(TARGET_NR_statx)
10319 case TARGET_NR_statx:
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);
10354 if (!is_error(ret)) {
10355 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10356 return -TARGET_EFAULT;
10358 memset(target_stx, 0, sizeof(*target_stx));
10359 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10360 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10361 __put_user(st.st_ino, &target_stx->stx_ino);
10362 __put_user(st.st_mode, &target_stx->stx_mode);
10363 __put_user(st.st_uid, &target_stx->stx_uid);
10364 __put_user(st.st_gid, &target_stx->stx_gid);
10365 __put_user(st.st_nlink, &target_stx->stx_nlink);
10366 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10367 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10368 __put_user(st.st_size, &target_stx->stx_size);
10369 __put_user(st.st_blksize, &target_stx->stx_blksize);
10370 __put_user(st.st_blocks, &target_stx->stx_blocks);
10371 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10372 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10373 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
10379 #ifdef TARGET_NR_lchown
10380 case TARGET_NR_lchown:
10381 if (!(p = lock_user_string(arg1)))
10382 return -TARGET_EFAULT;
10383 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
10387 #ifdef TARGET_NR_getuid
10388 case TARGET_NR_getuid:
10389 return get_errno(high2lowuid(getuid()));
10391 #ifdef TARGET_NR_getgid
10392 case TARGET_NR_getgid:
10393 return get_errno(high2lowgid(getgid()));
10395 #ifdef TARGET_NR_geteuid
10396 case TARGET_NR_geteuid:
10397 return get_errno(high2lowuid(geteuid()));
10399 #ifdef TARGET_NR_getegid
10400 case TARGET_NR_getegid:
10401 return get_errno(high2lowgid(getegid()));
10403 case TARGET_NR_setreuid:
10404 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10405 case TARGET_NR_setregid:
10406 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist)
                    return -TARGET_EFAULT;
                for(i = 0;i < ret; i++)
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
            }
        }
        return ret;
    case TARGET_NR_setgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist = NULL;
            int i;
            if (gidsetsize) {
                grouplist = alloca(gidsetsize * sizeof(gid_t));
                target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
10447 case TARGET_NR_fchown:
10448 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10449 #if defined(TARGET_NR_fchownat)
10450 case TARGET_NR_fchownat:
10451 if (!(p = lock_user_string(arg2)))
10452 return -TARGET_EFAULT;
10453 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10454 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
10458 #ifdef TARGET_NR_setresuid
10459 case TARGET_NR_setresuid:
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
10464 #ifdef TARGET_NR_getresuid
10465 case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
10468 ret = get_errno(getresuid(&ruid, &euid, &suid));
10469 if (!is_error(ret)) {
10470 if (put_user_id(high2lowuid(ruid), arg1)
10471 || put_user_id(high2lowuid(euid), arg2)
10472 || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
10478 #ifdef TARGET_NR_getresgid
10479 case TARGET_NR_setresgid:
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
10484 #ifdef TARGET_NR_getresgid
10485 case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
10488 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10489 if (!is_error(ret)) {
10490 if (put_user_id(high2lowgid(rgid), arg1)
10491 || put_user_id(high2lowgid(egid), arg2)
10492 || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
10498 #ifdef TARGET_NR_chown
10499 case TARGET_NR_chown:
10500 if (!(p = lock_user_string(arg1)))
10501 return -TARGET_EFAULT;
10502 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
10506 case TARGET_NR_setuid:
10507 return get_errno(sys_setuid(low2highuid(arg1)));
10508 case TARGET_NR_setgid:
10509 return get_errno(sys_setgid(low2highgid(arg1)));
10510 case TARGET_NR_setfsuid:
10511 return get_errno(setfsuid(arg1));
10512 case TARGET_NR_setfsgid:
10513 return get_errno(setfsgid(arg1));
10515 #ifdef TARGET_NR_lchown32
10516 case TARGET_NR_lchown32:
10517 if (!(p = lock_user_string(arg1)))
10518 return -TARGET_EFAULT;
10519 ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
10523 #ifdef TARGET_NR_getuid32
10524 case TARGET_NR_getuid32:
10525 return get_errno(getuid());
10528 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10529 /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid=geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
        }
        return get_errno(getuid());
#endif
10538 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10539 /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            uid_t egid;
            egid=getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
        }
        return get_errno(getgid());
#endif
10548 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10549 /* Alpha specific */
10550 case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;

                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                    return -TARGET_EFAULT;
                ret = 0;
            }
            break;
10567 /* case GSI_IEEE_STATE_AT_SIGNAL:
           -- Not implemented in linux kernel.
           case GSI_UACPROC:
           -- Retrieves current unaligned access state; not much used.
           case GSI_PROC_TYPE:
           -- Retrieves implver information; surely not used.
           case GSI_GET_HWRPB:
           -- Grabs a copy of the HWRPB; surely not used.
        */
        }
        break;
10579 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10580 /* Alpha specific */
10581 case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried. Therefore, we store the status
                 * bits only in FPCR.
                 */
                ((CPUAlphaState *)cpu_env)->swcr
                    = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;
          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled. */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= ((CPUArchState *)cpu_env)->swcr;

                /* Update the hardware fpcr. */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;
10664 /* case SSI_NVPAIRS:
10665 -- Used with SSIN_UACPROC to enable unaligned accesses.
10666 case SSI_IEEE_STATE_AT_SIGNAL:
10667 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        break;
10673 #ifdef TARGET_NR_osf_sigprocmask
10674 /* Alpha specific. */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
10705 #ifdef TARGET_NR_getgid32
10706 case TARGET_NR_getgid32:
10707 return get_errno(getgid());
10709 #ifdef TARGET_NR_geteuid32
10710 case TARGET_NR_geteuid32:
10711 return get_errno(geteuid());
10713 #ifdef TARGET_NR_getegid32
10714 case TARGET_NR_getegid32:
10715 return get_errno(getegid());
10717 #ifdef TARGET_NR_setreuid32
10718 case TARGET_NR_setreuid32:
10719 return get_errno(setreuid(arg1, arg2));
10721 #ifdef TARGET_NR_setregid32
10722 case TARGET_NR_setregid32:
10723 return get_errno(setregid(arg1, arg2));
10725 #ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for(i = 0;i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
10749 #ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            for(i = 0;i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
10768 #ifdef TARGET_NR_fchown32
10769 case TARGET_NR_fchown32:
10770 return get_errno(fchown(arg1, arg2, arg3));
10772 #ifdef TARGET_NR_setresuid32
10773 case TARGET_NR_setresuid32:
10774 return get_errno(sys_setresuid(arg1, arg2, arg3));
10776 #ifdef TARGET_NR_getresuid32
10777 case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
10780 ret = get_errno(getresuid(&ruid, &euid, &suid));
10781 if (!is_error(ret)) {
10782 if (put_user_u32(ruid, arg1)
10783 || put_user_u32(euid, arg2)
10784 || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
10790 #ifdef TARGET_NR_setresgid32
10791 case TARGET_NR_setresgid32:
10792 return get_errno(sys_setresgid(arg1, arg2, arg3));
10794 #ifdef TARGET_NR_getresgid32
10795 case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
10798 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10799 if (!is_error(ret)) {
10800 if (put_user_u32(rgid, arg1)
10801 || put_user_u32(egid, arg2)
10802 || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
10808 #ifdef TARGET_NR_chown32
10809 case TARGET_NR_chown32:
10810 if (!(p = lock_user_string(arg1)))
10811 return -TARGET_EFAULT;
10812 ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
10816 #ifdef TARGET_NR_setuid32
10817 case TARGET_NR_setuid32:
10818 return get_errno(sys_setuid(arg1));
10820 #ifdef TARGET_NR_setgid32
10821 case TARGET_NR_setgid32:
10822 return get_errno(sys_setgid(arg1));
10824 #ifdef TARGET_NR_setfsuid32
10825 case TARGET_NR_setfsuid32:
10826 return get_errno(setfsuid(arg1));
10828 #ifdef TARGET_NR_setfsgid32
10829 case TARGET_NR_setfsgid32:
10830 return get_errno(setfsgid(arg1));
10832 #ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
10850 #ifdef TARGET_NR_arm_fadvise64_64
10851 case TARGET_NR_arm_fadvise64_64:
10852 /* arm_fadvise64_64 looks like fadvise64_64 but
10853 * with different argument order: fd, advice, offset, len
10854 * rather than the usual fd, offset, len, advice.
10855 * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
10858 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10859 target_offset64(arg5, arg6), arg2);
10860 return -host_to_target_errno(ret);
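    /*
     * Sketch (an illustrative assumption, not target documentation): with
     * 32-bit registers holding (fd, advice, off_lo, off_hi, len_lo,
     * len_hi), the target_offset64() calls above reassemble each 64-bit
     * value from its register pair -- which half is the high word depends
     * on the target's endianness -- before the host posix_fadvise() call.
     */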
10863 #if TARGET_ABI_BITS == 32
10865 #ifdef TARGET_NR_fadvise64_64
10866 case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif
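    /*
     * Sketch of why the shuffle above is needed: ABIs that require 64-bit
     * values in aligned register pairs insert one padding register after
     * the fd, so every subsequent argument arrives one slot later than the
     * C prototype suggests; regpairs_aligned() detects exactly that case.
     */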
10891 #ifdef TARGET_NR_fadvise64
10892 case TARGET_NR_fadvise64:
10893 /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif
10905 #else /* not a 32-bit ABI */
10906 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10907 #ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10924 #endif /* end of 64-bit ABI fadvise handling */
10926 #ifdef TARGET_NR_madvise
10927 case TARGET_NR_madvise:
10928 /* A straight passthrough may not be safe because qemu sometimes
10929 turns private file-backed mappings into anonymous mappings.
10930 This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok. */
        return 0;
10934 #if TARGET_ABI_BITS == 32
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
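    /*
     * Design note (a sketch): only the locking commands go through the
     * copyfrom/copyto translation above because struct flock64 layouts
     * differ between guest and host ABIs (and between ARM OABI and EABI);
     * every other fcntl64 command funnels through the common do_fcntl()
     * path unchanged.
     */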
10981 #ifdef TARGET_NR_cacheflush
10982 case TARGET_NR_cacheflush:
10983 /* self-modifying code is handled automatically, so nothing needed */
10986 #ifdef TARGET_NR_getpagesize
10987 case TARGET_NR_getpagesize:
10988 return TARGET_PAGE_SIZE;
10990 case TARGET_NR_gettid:
10991 return get_errno(sys_gettid());
10992 #ifdef TARGET_NR_readahead
10993 case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
11007 #ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        break;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        break;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        break;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        break;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        break;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        break;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        break;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        break;
11167 #endif /* CONFIG_ATTR */
11168 #ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
        return 0;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            ts->tp_value = arg1;
            return 0;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
11193 #ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
11206 #ifdef TARGET_NR_getdomainname
11207 case TARGET_NR_getdomainname:
11208 return -TARGET_ENOSYS;
11211 #ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
11223 #ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
11234 #ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
11245 #ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        if (arg4)
            host_to_target_timespec(arg4, &ts);

#if defined(TARGET_PPC)
        /* clock_nanosleep is odd in that it returns positive errno values.
         * On PPC, CR0 bit 3 should be set in such a situation. */
        if (ret && ret != -TARGET_ERESTARTSYS) {
            ((CPUPPCState *)cpu_env)->crf[0] |= 1;
        }
#endif
        return ret;
    }
#endif
11266 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11267 case TARGET_NR_set_tid_address:
        return get_errno(set_tid_address((int *)g2h(arg1)));
#endif
11271 case TARGET_NR_tkill:
11272 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11274 case TARGET_NR_tgkill:
11275 return get_errno(safe_tgkill((int)arg1, (int)arg2,
11276 target_to_host_signal(arg3)));
11278 #ifdef TARGET_NR_set_robust_list
11279 case TARGET_NR_set_robust_list:
11280 case TARGET_NR_get_robust_list:
11281 /* The ABI for supporting robust futexes has userspace pass
11282 * the kernel a pointer to a linked list which is updated by
11283 * userspace after the syscall; the list is walked by the kernel
11284 * when the thread exits. Since the linked list in QEMU guest
11285 * memory isn't a valid linked list for the host and we have
11286 * no way to reliably intercept the thread-death event, we can't
11287 * support these. Silently return ENOSYS so that guest userspace
11288 * falls back to a non-robust futex implementation (which should
11289 * be OK except in the corner case of the guest crashing while
11290 * holding a mutex that is shared with another process via
11293 return -TARGET_ENOSYS;
11296 #if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                target_to_host_timespec(ts, arg3);
                target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
11319 case TARGET_NR_futex:
11320 return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11321 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11322 case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
11329 #ifdef CONFIG_INOTIFY1
11330 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#endif
11340 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11341 case TARGET_NR_inotify_add_watch:
11342 p = lock_user_string(arg2);
11343 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11344 unlock_user(p, arg2, 0);
11347 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11348 case TARGET_NR_inotify_rm_watch:
11349 return get_errno(sys_inotify_rm_watch(arg1, arg2));
11352 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        return ret;
    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        return ret;
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user (p, arg2, arg3);
        }
        return ret;
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user (p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        return ret;
11422 /* Not implemented for now... */
11423 /* case TARGET_NR_mq_notify: */
    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                           &posix_mq_attr_out));
            } else if (arg3 != 0) {
                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
            }
            if (ret == 0 && arg3 != 0) {
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
        }
        return ret;
#endif
11444 #ifdef CONFIG_SPLICE
11445 #ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        return ret;
#endif
11452 #ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
11483 #ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
11496 #endif /* CONFIG_SPLICE */
11497 #ifdef CONFIG_EVENTFD
11498 #if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
11506 #if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
11523 #endif /* CONFIG_EVENTFD */
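
    /* On 32-bit ABIs a 64-bit file offset arrives split across two
     * syscall arguments; target_offset64() reassembles the pair in the
     * order dictated by the guest's endianness (e.g. a big-endian guest
     * passes the high word first). */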
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
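
    /* Note that some 32-bit ABIs require 64-bit arguments to sit in
     * aligned register pairs, which inserts a padding argument and shifts
     * the arguments that follow; that is presumably why the MIPS variant
     * of sync_file_range below takes its offsets from arg3 onwards. */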
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif /* CONFIG_SYNC_FILE_RANGE */

#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        return do_signalfd4(arg1, arg2, 0);
#endif
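
    /* Epoll: the create calls need no translation beyond errno handling;
     * only the struct epoll_event data passed to ctl/wait has to be
     * converted between guest and host layouts. */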
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(arg1));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
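
    /* epoll_wait and epoll_pwait share one implementation: both funnel
     * into safe_epoll_pwait(), with epoll_wait passing a NULL sigset, and
     * the inner switch on the original syscall number selects the
     * variant. */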
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }
                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif /* CONFIG_EPOLL */
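
    /* prlimit64 uses the same fixed-width rlimit64 layout on every target,
     * so converting the structure is just a tswap64() of the two fields in
     * each direction. */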
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
         * qemu arm barrier, no-op this? */
        return 0;
#endif
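
    /* POSIX timers are tracked in the g_posix_timers[] host-handle table.
     * The value handed back to the guest is the table index tagged with
     * TIMER_MAGIC in the upper bits; get_timer_id() checks the tag and
     * strips it again on every subsequent timer_* call. */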
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif
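
    /* The timerfd cases reuse the itimerspec converters from the POSIX
     * timer code above; timerfd_create additionally maps the guest flag
     * bits through fcntl_flags_tbl, which works because TFD_CLOEXEC and
     * TFD_NONBLOCK share their values with the corresponding O_* flags. */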
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        return get_errno(timerfd_create(arg1,
                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif
#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific. */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
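
    /* memfd_create only takes a name string and flag bits; the
     * fd_trans_unregister() call clears any stale translator that might
     * still be attached to the recycled fd number. */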
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
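
/* do_syscall() is the public entry point: it wraps do_syscall1() with the
 * tracing hooks (record_syscall_start()/record_syscall_return() plus the
 * optional -strace logging) so that the dispatcher itself stays free of
 * instrumentation. */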
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -TARGET_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(do_strace)) {
        print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
        ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                          arg5, arg6, arg7, arg8);
        print_syscall_ret(num, ret);
    } else {
        ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                          arg5, arg6, arg7, arg8);
    }

    record_syscall_return(cpu, num, ret);
    return ret;
}