/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#include "qemu-common.h"
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"
#include "fd-trans.h"
#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)
/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))
/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
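
/* Illustrative sketch (not part of the original file): the masks above
 * are meant to be used roughly like this, where "flags" is the guest's
 * clone flags argument:
 *
 *     if (flags & CLONE_VM) {
 *         // pthread_create()-like: all thread flags must be present,
 *         // and nothing outside the supported thread set.
 *         if ((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS ||
 *             (flags & CLONE_INVALID_THREAD_FLAGS)) {
 *             return -TARGET_EINVAL;
 *         }
 *     } else {
 *         // fork()-like: nothing outside the supported fork set.
 *         if (flags & CLONE_INVALID_FORK_FLAGS) {
 *             return -TARGET_EINVAL;
 *         }
 *     }
 */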
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS
//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH         _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT        _IOR('r', 2, struct linux_dirent [2])
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)              \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                    \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5)                                               \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)   \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);              \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5,type6,arg6)                                    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,   \
                  type6 arg6)                                               \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);        \
}
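
/* For illustration: a use such as
 *
 *     _syscall2(int, tkill, int, tid, int, sig)
 *
 * expands to
 *
 *     static int tkill (int tid, int sig)
 *     {
 *         return syscall(__NR_tkill, tid, sig);
 *     }
 *
 * i.e. a direct host syscall wrapper that bypasses any libc wrapper.
 */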
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif
#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
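
/* Worked example (illustrative): each row above is
 * { target_mask, target_bits, host_mask, host_bits }. A translation
 * helper tests (value & target_mask) == target_bits and, on a match,
 * ORs host_bits into the result. So a guest open() flag word with
 * TARGET_O_NONBLOCK set picks up the host's O_NONBLOCK value even when
 * the two constants differ numerically between architectures.
 */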
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN]		= TARGET_EAGAIN,
    [EIDRM]		= TARGET_EIDRM,
    [ECHRNG]		= TARGET_ECHRNG,
    [EL2NSYNC]		= TARGET_EL2NSYNC,
    [EL3HLT]		= TARGET_EL3HLT,
    [EL3RST]		= TARGET_EL3RST,
    [ELNRNG]		= TARGET_ELNRNG,
    [EUNATCH]		= TARGET_EUNATCH,
    [ENOCSI]		= TARGET_ENOCSI,
    [EL2HLT]		= TARGET_EL2HLT,
    [EDEADLK]		= TARGET_EDEADLK,
    [ENOLCK]		= TARGET_ENOLCK,
    [EBADE]		= TARGET_EBADE,
    [EBADR]		= TARGET_EBADR,
    [EXFULL]		= TARGET_EXFULL,
    [ENOANO]		= TARGET_ENOANO,
    [EBADRQC]		= TARGET_EBADRQC,
    [EBADSLT]		= TARGET_EBADSLT,
    [EBFONT]		= TARGET_EBFONT,
    [ENOSTR]		= TARGET_ENOSTR,
    [ENODATA]		= TARGET_ENODATA,
    [ETIME]		= TARGET_ETIME,
    [ENOSR]		= TARGET_ENOSR,
    [ENONET]		= TARGET_ENONET,
    [ENOPKG]		= TARGET_ENOPKG,
    [EREMOTE]		= TARGET_EREMOTE,
    [ENOLINK]		= TARGET_ENOLINK,
    [EADV]		= TARGET_EADV,
    [ESRMNT]		= TARGET_ESRMNT,
    [ECOMM]		= TARGET_ECOMM,
    [EPROTO]		= TARGET_EPROTO,
    [EDOTDOT]		= TARGET_EDOTDOT,
    [EMULTIHOP]		= TARGET_EMULTIHOP,
    [EBADMSG]		= TARGET_EBADMSG,
    [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
    [EOVERFLOW]		= TARGET_EOVERFLOW,
    [ENOTUNIQ]		= TARGET_ENOTUNIQ,
    [EBADFD]		= TARGET_EBADFD,
    [EREMCHG]		= TARGET_EREMCHG,
    [ELIBACC]		= TARGET_ELIBACC,
    [ELIBBAD]		= TARGET_ELIBBAD,
    [ELIBSCN]		= TARGET_ELIBSCN,
    [ELIBMAX]		= TARGET_ELIBMAX,
    [ELIBEXEC]		= TARGET_ELIBEXEC,
    [EILSEQ]		= TARGET_EILSEQ,
    [ENOSYS]		= TARGET_ENOSYS,
    [ELOOP]		= TARGET_ELOOP,
    [ERESTART]		= TARGET_ERESTART,
    [ESTRPIPE]		= TARGET_ESTRPIPE,
    [ENOTEMPTY]		= TARGET_ENOTEMPTY,
    [EUSERS]		= TARGET_EUSERS,
    [ENOTSOCK]		= TARGET_ENOTSOCK,
    [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
    [EMSGSIZE]		= TARGET_EMSGSIZE,
    [EPROTOTYPE]	= TARGET_EPROTOTYPE,
    [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
    [EADDRINUSE]	= TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
    [ENETDOWN]		= TARGET_ENETDOWN,
    [ENETUNREACH]	= TARGET_ENETUNREACH,
    [ENETRESET]		= TARGET_ENETRESET,
    [ECONNABORTED]	= TARGET_ECONNABORTED,
    [ECONNRESET]	= TARGET_ECONNRESET,
    [ENOBUFS]		= TARGET_ENOBUFS,
    [EISCONN]		= TARGET_EISCONN,
    [ENOTCONN]		= TARGET_ENOTCONN,
    [EUCLEAN]		= TARGET_EUCLEAN,
    [ENOTNAM]		= TARGET_ENOTNAM,
    [ENAVAIL]		= TARGET_ENAVAIL,
    [EISNAM]		= TARGET_EISNAM,
    [EREMOTEIO]		= TARGET_EREMOTEIO,
    [EDQUOT]            = TARGET_EDQUOT,
    [ESHUTDOWN]		= TARGET_ESHUTDOWN,
    [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
    [ETIMEDOUT]		= TARGET_ETIMEDOUT,
    [ECONNREFUSED]	= TARGET_ECONNREFUSED,
    [EHOSTDOWN]		= TARGET_EHOSTDOWN,
    [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
    [EALREADY]		= TARGET_EALREADY,
    [EINPROGRESS]	= TARGET_EINPROGRESS,
    [ESTALE]		= TARGET_ESTALE,
    [ECANCELED]		= TARGET_ECANCELED,
    [ENOMEDIUM]		= TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]		= TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]	= TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]	= TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]	= TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
#endif
#ifdef ENOMSG
    [ENOMSG]            = TARGET_ENOMSG,
#endif
#ifdef ERFKILL
    [ERFKILL]           = TARGET_ERFKILL,
#endif
#ifdef EHWPOISON
    [EHWPOISON]         = TARGET_EHWPOISON,
#endif
};
static inline int host_to_target_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        host_to_target_errno_table[err]) {
        return host_to_target_errno_table[err];
    }
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1) {
        return -host_to_target_errno(errno);
    } else {
        return ret;
    }
}
const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
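
/* For illustration: the declaration below,
 *
 *     safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
 *
 * expands to
 *
 *     static ssize_t safe_read(int fd, void *buff, size_t count)
 *     {
 *         return safe_syscall(__NR_read, fd, buff, count);
 *     }
 *
 * so that blocking syscalls are funnelled through safe_syscall(), the
 * mechanism QEMU uses to make them safely interruptible and restartable
 * when a guest signal arrives.
 */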
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#else
/* This host kernel architecture uses a single ipc syscall; fake up
 * wrappers for the sub-operations to hide this implementation detail.
 * Annoyingly we can't include linux/ipc.h to get the constant definitions
 * for the call parameter because some structs in there conflict with the
 * sys/ipc.h ones. So we just define them here, and rely on them being
 * the same for all host architectures.
 */
#define Q_SEMTIMEDOP 4
#define Q_MSGSND 11
#define Q_MSGRCV 12
#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))

safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}
static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
                           const struct timespec *timeout)
{
    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
                    (long)timeout);
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
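
/* Illustrative use, following the rules above (assumes the host defines
 * F_GETLK64 and that fl64 is a struct flock64 describing the region):
 *
 *     ret = safe_fcntl(fd, F_GETLK64, &fl64);
 *
 * rather than F_GETLK with a plain struct flock.
 */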
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
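
/* Worked example (illustrative, 4 KiB host pages): if target_brk is
 * 0x10100 then brk_page is 0x11000. A guest brk(0x13000) therefore takes
 * the mapping path with new_alloc_size = HOST_PAGE_ALIGN(0x2000) = 0x2000,
 * i.e. two further pages requested at brk_page.
 */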
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
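
/* Example (illustrative): with n = 100 and a 32-bit TARGET_ABI_BITS,
 * nw = DIV_ROUND_UP(100, 32) = 4 abi_ulongs are locked and scanned; guest
 * bit k (word k / 32, bit k % 32) becomes FD_SET(k, fds) on the host.
 */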
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
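
/* Example (illustrative): for an Alpha guest (TARGET_HZ 1024) on an x86
 * host (HOST_HZ 100), 200 host ticks convert to (200 * 1024) / 100 = 2048
 * target ticks, computed in 64 bits to avoid overflow.
 */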
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
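
/* Example (illustrative): for a MIPS guest, the code above returns
 * host_pipe[0] as the syscall result and stores host_pipe[1] in the
 * guest's third result register (active_tc.gpr[3]), matching the kernel's
 * historical two-register pipe() convention on that port.
 */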
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
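
/* Example (illustrative) of the sun_path fixup above: a guest that binds
 * "/tmp/sock" but passes len = offsetof(sockaddr_un, sun_path) + 9 (no
 * trailing NUL) has cp[len-1] == 'k' and cp[len] == 0, so len is bumped
 * by one to include the terminator before the copy.
 */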
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
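
/* Example (illustrative) of the truncation handling above: an SCM_RIGHTS
 * message carrying 4 host fds has len = 16, but if the guest control
 * buffer only leaves room for two ints, tgt_len is clamped, numfds
 * becomes tgt_len / sizeof(int) = 2, and MSG_CTRUNC is reported to the
 * guest.
 */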
/* do_setsockopt() must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* these take a u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
            struct timeval tv;

            optname = SO_RCVTIMEO;

set_timeout:
            if (optlen != sizeof(struct target_timeval)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user_timeval(&tv, optval_addr)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                            &tv, sizeof(tv)));
            return ret;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
            struct target_sock_fprog *tfprog;
            struct target_sock_filter *tfilter;
            struct sock_fprog fprog;
            struct sock_filter *filter;
            int i;

            if (optlen != sizeof(*tfprog)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;
            }

            fprog.len = tswap16(tfprog->len);
            filter = g_try_new(struct sock_filter, fprog.len);
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            }
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
                filter[i].jt = tfilter[i].jt;
                filter[i].jf = tfilter[i].jf;
                filter[i].k = tswap32(tfilter[i].k);
            }
            fprog.filter = filter;

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                            SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
            g_free(filter);

            unlock_user_struct(tfilter, tfprog->filter, 1);
            unlock_user_struct(tfprog, optval_addr, 1);
            return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
            char *dev_ifname, *addr_ifname;

            if (optlen > IFNAMSIZ - 1) {
                optlen = IFNAMSIZ - 1;
            }
            dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!dev_ifname) {
                return -TARGET_EFAULT;
            }
            optname = SO_BINDTODEVICE;
            addr_ifname = alloca(IFNAMSIZ);
            memcpy(addr_ifname, dev_ifname, optlen);
            addr_ifname[optlen] = 0;
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       addr_ifname, optlen));
            unlock_user (dev_ifname, optval_addr, 0);
            return ret;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            struct target_linger *tlg;

            if (optlen != sizeof(struct target_linger)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(lg.l_onoff, &tlg->l_onoff);
            __get_user(lg.l_linger, &tlg->l_linger);
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                            &lg, sizeof(lg)));
            unlock_user_struct(tlg, optval_addr, 0);
            return ret;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            break;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_SNDBUFFORCE:
            optname = SO_SNDBUFFORCE;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_RCVBUFFORCE:
            optname = SO_RCVBUFFORCE;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_PASSSEC:
            optname = SO_PASSSEC;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
2140 /* do_getsockopt() must return target values and target errnos. */
2141 static abi_long do_getsockopt(int sockfd, int level, int optname,
2142 abi_ulong optval_addr, abi_ulong optlen)
2149 case TARGET_SOL_SOCKET:
2152 /* These don't just return a single integer */
2153 case TARGET_SO_RCVTIMEO:
2154 case TARGET_SO_SNDTIMEO:
2155 case TARGET_SO_PEERNAME:
2157 case TARGET_SO_PEERCRED: {
2160 struct target_ucred *tcr;
2162 if (get_user_u32(len, optlen)) {
2163 return -TARGET_EFAULT;
2166 return -TARGET_EINVAL;
2170 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2178 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2179 return -TARGET_EFAULT;
2181 __put_user(cr.pid, &tcr->pid);
2182 __put_user(cr.uid, &tcr->uid);
2183 __put_user(cr.gid, &tcr->gid);
2184 unlock_user_struct(tcr, optval_addr, 1);
2185 if (put_user_u32(len, optlen)) {
2186 return -TARGET_EFAULT;
2190 case TARGET_SO_LINGER:
2194 struct target_linger *tlg;
2196 if (get_user_u32(len, optlen)) {
2197 return -TARGET_EFAULT;
2200 return -TARGET_EINVAL;
2204 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2212 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2213 return -TARGET_EFAULT;
2215 __put_user(lg.l_onoff, &tlg->l_onoff);
2216 __put_user(lg.l_linger, &tlg->l_linger);
2217 unlock_user_struct(tlg, optval_addr, 1);
2218 if (put_user_u32(len, optlen)) {
2219 return -TARGET_EFAULT;
2223 /* Options with 'int' argument. */
2224 case TARGET_SO_DEBUG:
2227 case TARGET_SO_REUSEADDR:
2228 optname = SO_REUSEADDR;
2231 case TARGET_SO_REUSEPORT:
2232 optname = SO_REUSEPORT;
2235 case TARGET_SO_TYPE:
2238 case TARGET_SO_ERROR:
2241 case TARGET_SO_DONTROUTE:
2242 optname = SO_DONTROUTE;
2244 case TARGET_SO_BROADCAST:
2245 optname = SO_BROADCAST;
2247 case TARGET_SO_SNDBUF:
2248 optname = SO_SNDBUF;
2250 case TARGET_SO_RCVBUF:
2251 optname = SO_RCVBUF;
2253 case TARGET_SO_KEEPALIVE:
2254 optname = SO_KEEPALIVE;
2256 case TARGET_SO_OOBINLINE:
2257 optname = SO_OOBINLINE;
2259 case TARGET_SO_NO_CHECK:
2260 optname = SO_NO_CHECK;
2262 case TARGET_SO_PRIORITY:
2263 optname = SO_PRIORITY;
2266 case TARGET_SO_BSDCOMPAT:
2267 optname = SO_BSDCOMPAT;
2270 case TARGET_SO_PASSCRED:
2271 optname = SO_PASSCRED;
2273 case TARGET_SO_TIMESTAMP:
2274 optname = SO_TIMESTAMP;
2276 case TARGET_SO_RCVLOWAT:
2277 optname = SO_RCVLOWAT;
2279 case TARGET_SO_ACCEPTCONN:
2280 optname = SO_ACCEPTCONN;
2287 /* TCP options all take an 'int' value. */
2289 if (get_user_u32(len, optlen))
2290 return -TARGET_EFAULT;
2292 return -TARGET_EINVAL;
2294 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
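/* SO_TYPE reports the host's SOCK_* encoding; convert it back for
 * targets whose socket type numbering differs from the host's.
 */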
2297 if (optname == SO_TYPE) {
2298 val = host_to_target_sock_type(val);
2303 if (put_user_u32(val, optval_addr))
2304 return -TARGET_EFAULT;
2306 if (put_user_u8(val, optval_addr))
2307 return -TARGET_EFAULT;
2309 if (put_user_u32(len, optlen))
2310 return -TARGET_EFAULT;
2317 case IP_ROUTER_ALERT:
2321 case IP_MTU_DISCOVER:
2327 case IP_MULTICAST_TTL:
2328 case IP_MULTICAST_LOOP:
2329 if (get_user_u32(len, optlen))
2330 return -TARGET_EFAULT;
2332 return -TARGET_EINVAL;
2334 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2337 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2339 if (put_user_u32(len, optlen)
2340 || put_user_u8(val, optval_addr))
2341 return -TARGET_EFAULT;
2343 if (len > sizeof(int))
2345 if (put_user_u32(len, optlen)
2346 || put_user_u32(val, optval_addr))
2347 return -TARGET_EFAULT;
2351 ret = -TARGET_ENOPROTOOPT;
2357 case IPV6_MTU_DISCOVER:
2360 case IPV6_RECVPKTINFO:
2361 case IPV6_UNICAST_HOPS:
2362 case IPV6_MULTICAST_HOPS:
2363 case IPV6_MULTICAST_LOOP:
2365 case IPV6_RECVHOPLIMIT:
2366 case IPV6_2292HOPLIMIT:
2368 if (get_user_u32(len, optlen))
2369 return -TARGET_EFAULT;
2371 return -TARGET_EINVAL;
2373 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2376 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2378 if (put_user_u32(len, optlen)
2379 || put_user_u8(val, optval_addr))
2380 return -TARGET_EFAULT;
2382 if (len > sizeof(int))
2384 if (put_user_u32(len, optlen)
2385 || put_user_u32(val, optval_addr))
2386 return -TARGET_EFAULT;
2390 ret = -TARGET_ENOPROTOOPT;
2396 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2398 ret = -TARGET_EOPNOTSUPP;
2404 /* Convert target low/high pair representing file offset into the host
2405 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2406 * as the kernel doesn't handle them either.
2408 static void target_to_host_low_high(abi_ulong tlow,
2410 unsigned long *hlow,
2411 unsigned long *hhigh)
2413 uint64_t off = tlow |
2414 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2415 TARGET_LONG_BITS / 2;
2418 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
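/* Note the shifts above are done in two halves: a single shift by the
 * full TARGET_LONG_BITS (or HOST_LONG_BITS) would be undefined
 * behaviour in C whenever the count equals the type width, i.e. on
 * 64-bit targets or hosts. Splitting the shift keeps it well defined
 * and naturally zeroes the high part when the whole offset already
 * fits in 'off'.
 */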
2421 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2422 abi_ulong count, int copy)
2424 struct target_iovec *target_vec;
2426 abi_ulong total_len, max_len;
2429 bool bad_address = false;
2435 if (count > IOV_MAX) {
2440 vec = g_try_new0(struct iovec, count);
2446 target_vec = lock_user(VERIFY_READ, target_addr,
2447 count * sizeof(struct target_iovec), 1);
2448 if (target_vec == NULL) {
2453 /* ??? If host page size > target page size, this will result in a
2454 value larger than what we can actually support. */
2455 max_len = 0x7fffffff & TARGET_PAGE_MASK;
2458 for (i = 0; i < count; i++) {
2459 abi_ulong base = tswapal(target_vec[i].iov_base);
2460 abi_long len = tswapal(target_vec[i].iov_len);
2465 } else if (len == 0) {
2466 /* Zero length pointer is ignored. */
2467 vec[i].iov_base = 0;
2469 vec[i].iov_base = lock_user(type, base, len, copy);
2470 /* If the first buffer pointer is bad, this is a fault. But
2471 * subsequent bad buffers will result in a partial write; this
2472 * is realized by filling the vector with null pointers and
* zero lengths. */
2474 if (!vec[i].iov_base) {
2485 if (len > max_len - total_len) {
2486 len = max_len - total_len;
2489 vec[i].iov_len = len;
2493 unlock_user(target_vec, target_addr, 0);
2498 if (tswapal(target_vec[i].iov_len) > 0) {
2499 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2502 unlock_user(target_vec, target_addr, 0);
2509 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2510 abi_ulong count, int copy)
2512 struct target_iovec *target_vec;
2515 target_vec = lock_user(VERIFY_READ, target_addr,
2516 count * sizeof(struct target_iovec), 1);
2518 for (i = 0; i < count; i++) {
2519 abi_ulong base = tswapal(target_vec[i].iov_base);
2520 abi_long len = tswapal(target_vec[i].iov_len);
2524 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2526 unlock_user(target_vec, target_addr, 0);
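/* Sketch of the typical pairing (cf. do_sendrecvmsg_locked() below):
 *
 *   vec = lock_iovec(VERIFY_READ, target_vec, count, 1);
 *   msg.msg_iov = vec;
 *   ret = get_errno(safe_sendmsg(fd, &msg, flags));
 *   unlock_iovec(vec, target_vec, count, 0);
 *
 * with copy = 0 on unlock for a send (nothing to write back) and
 * copy = 1 for a receive.
 */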
2532 static inline int target_to_host_sock_type(int *type)
2535 int target_type = *type;
2537 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2538 case TARGET_SOCK_DGRAM:
2539 host_type = SOCK_DGRAM;
2541 case TARGET_SOCK_STREAM:
2542 host_type = SOCK_STREAM;
2545 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2548 if (target_type & TARGET_SOCK_CLOEXEC) {
2549 #if defined(SOCK_CLOEXEC)
2550 host_type |= SOCK_CLOEXEC;
2552 return -TARGET_EINVAL;
2555 if (target_type & TARGET_SOCK_NONBLOCK) {
2556 #if defined(SOCK_NONBLOCK)
2557 host_type |= SOCK_NONBLOCK;
2558 #elif !defined(O_NONBLOCK)
2559 return -TARGET_EINVAL;
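/* If SOCK_NONBLOCK is missing but O_NONBLOCK exists, we deliberately
 * fall through here: sock_flags_fixup() below emulates the flag with
 * fcntl(F_SETFL) after the socket has been created.
 */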
2566 /* Try to emulate socket type flags after socket creation. */
2567 static int sock_flags_fixup(int fd, int target_type)
2569 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2570 if (target_type & TARGET_SOCK_NONBLOCK) {
2571 int flags = fcntl(fd, F_GETFL);
2572 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2574 return -TARGET_EINVAL;
2581 /* do_socket() must return target values and target errnos. */
2582 static abi_long do_socket(int domain, int type, int protocol)
2584 int target_type = type;
2587 ret = target_to_host_sock_type(&type);
2592 if (domain == PF_NETLINK && !(
2593 #ifdef CONFIG_RTNETLINK
2594 protocol == NETLINK_ROUTE ||
2596 protocol == NETLINK_KOBJECT_UEVENT ||
2597 protocol == NETLINK_AUDIT)) {
2598 return -EPFNOSUPPORT;
2601 if (domain == AF_PACKET ||
2602 (domain == AF_INET && type == SOCK_PACKET)) {
2603 protocol = tswap16(protocol);
2606 ret = get_errno(socket(domain, type, protocol));
2608 ret = sock_flags_fixup(ret, target_type);
2609 if (type == SOCK_PACKET) {
2610 /* Manage an obsolete case:
2611 * if socket type is SOCK_PACKET, bind by name
2613 fd_trans_register(ret, &target_packet_trans);
2614 } else if (domain == PF_NETLINK) {
2616 #ifdef CONFIG_RTNETLINK
2618 fd_trans_register(ret, &target_netlink_route_trans);
2621 case NETLINK_KOBJECT_UEVENT:
2622 /* nothing to do: messages are strings */
2625 fd_trans_register(ret, &target_netlink_audit_trans);
2628 g_assert_not_reached();
2635 /* do_bind() must return target values and target errnos. */
2636 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2642 if ((int)addrlen < 0) {
2643 return -TARGET_EINVAL;
2646 addr = alloca(addrlen+1);
2648 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2652 return get_errno(bind(sockfd, addr, addrlen));
2655 /* do_connect() must return target values and target errnos. */
2656 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2662 if ((int)addrlen < 0) {
2663 return -TARGET_EINVAL;
2666 addr = alloca(addrlen+1);
2668 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2672 return get_errno(safe_connect(sockfd, addr, addrlen));
2675 /* do_sendrecvmsg_locked() must return target values and target errnos. */
2676 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2677 int flags, int send)
2683 abi_ulong target_vec;
2685 if (msgp->msg_name) {
2686 msg.msg_namelen = tswap32(msgp->msg_namelen);
2687 msg.msg_name = alloca(msg.msg_namelen+1);
2688 ret = target_to_host_sockaddr(fd, msg.msg_name,
2689 tswapal(msgp->msg_name),
2691 if (ret == -TARGET_EFAULT) {
2692 /* For connected sockets msg_name and msg_namelen must
2693 * be ignored, so returning EFAULT immediately is wrong.
2694 * Instead, pass a bad msg_name to the host kernel, and
2695 * let it decide whether to return EFAULT or not.
2697 msg.msg_name = (void *)-1;
2702 msg.msg_name = NULL;
2703 msg.msg_namelen = 0;
2705 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2706 msg.msg_control = alloca(msg.msg_controllen);
2707 memset(msg.msg_control, 0, msg.msg_controllen);
2709 msg.msg_flags = tswap32(msgp->msg_flags);
2711 count = tswapal(msgp->msg_iovlen);
2712 target_vec = tswapal(msgp->msg_iov);
2714 if (count > IOV_MAX) {
2715 /* sendmsg/recvmsg return a different errno for this condition than
2716 * readv/writev do, so we must catch it here before lock_iovec() does.
2718 ret = -TARGET_EMSGSIZE;
2722 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2723 target_vec, count, send);
2725 ret = -host_to_target_errno(errno);
2728 msg.msg_iovlen = count;
2732 if (fd_trans_target_to_host_data(fd)) {
2735 host_msg = g_malloc(msg.msg_iov->iov_len);
2736 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2737 ret = fd_trans_target_to_host_data(fd)(host_msg,
2738 msg.msg_iov->iov_len);
2740 msg.msg_iov->iov_base = host_msg;
2741 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2745 ret = target_to_host_cmsg(&msg, msgp);
2747 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2751 ret = get_errno(safe_recvmsg(fd, &msg, flags));
2752 if (!is_error(ret)) {
2754 if (fd_trans_host_to_target_data(fd)) {
2755 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2756 MIN(msg.msg_iov->iov_len, len));
2758 ret = host_to_target_cmsg(msgp, &msg);
2760 if (!is_error(ret)) {
2761 msgp->msg_namelen = tswap32(msg.msg_namelen);
2762 msgp->msg_flags = tswap32(msg.msg_flags);
2763 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2764 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2765 msg.msg_name, msg.msg_namelen);
2777 unlock_iovec(vec, target_vec, count, !send);
2782 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2783 int flags, int send)
2786 struct target_msghdr *msgp;
2788 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2792 return -TARGET_EFAULT;
2794 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2795 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2799 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2800 * so it might not have this *mmsg-specific flag either.
2802 #ifndef MSG_WAITFORONE
2803 #define MSG_WAITFORONE 0x10000
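/* 0x10000 matches the value the Linux kernel uses for MSG_WAITFORONE;
 * the loop below implements its semantics itself by OR-ing in
 * MSG_DONTWAIT once the first datagram has been transferred.
 */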
2806 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2807 unsigned int vlen, unsigned int flags,
2810 struct target_mmsghdr *mmsgp;
2814 if (vlen > UIO_MAXIOV) {
2818 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2820 return -TARGET_EFAULT;
2823 for (i = 0; i < vlen; i++) {
2824 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2825 if (is_error(ret)) {
2828 mmsgp[i].msg_len = tswap32(ret);
2829 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2830 if (flags & MSG_WAITFORONE) {
2831 flags |= MSG_DONTWAIT;
2835 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2837 /* Return number of datagrams sent if we sent any at all;
2838 * otherwise return the error.
2846 /* do_accept4() must return target values and target errnos. */
2847 static abi_long do_accept4(int fd, abi_ulong target_addr,
2848 abi_ulong target_addrlen_addr, int flags)
2850 socklen_t addrlen, ret_addrlen;
2855 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
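/* On Linux, SOCK_NONBLOCK and SOCK_CLOEXEC share their values with
 * O_NONBLOCK and O_CLOEXEC, so the fcntl flag table yields the right
 * host accept4() flags.
 */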
2857 if (target_addr == 0) {
2858 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2861 /* Linux returns EINVAL if the addrlen pointer is invalid */
2862 if (get_user_u32(addrlen, target_addrlen_addr))
2863 return -TARGET_EINVAL;
2865 if ((int)addrlen < 0) {
2866 return -TARGET_EINVAL;
2869 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2870 return -TARGET_EINVAL;
2872 addr = alloca(addrlen);
2874 ret_addrlen = addrlen;
2875 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
2876 if (!is_error(ret)) {
2877 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2878 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2879 ret = -TARGET_EFAULT;
2885 /* do_getpeername() must return target values and target errnos. */
2886 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2887 abi_ulong target_addrlen_addr)
2889 socklen_t addrlen, ret_addrlen;
2893 if (get_user_u32(addrlen, target_addrlen_addr))
2894 return -TARGET_EFAULT;
2896 if ((int)addrlen < 0) {
2897 return -TARGET_EINVAL;
2900 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2901 return -TARGET_EFAULT;
2903 addr = alloca(addrlen);
2905 ret_addrlen = addrlen;
2906 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
2907 if (!is_error(ret)) {
2908 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2909 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2910 ret = -TARGET_EFAULT;
2916 /* do_getsockname() must return target values and target errnos. */
2917 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2918 abi_ulong target_addrlen_addr)
2920 socklen_t addrlen, ret_addrlen;
2924 if (get_user_u32(addrlen, target_addrlen_addr))
2925 return -TARGET_EFAULT;
2927 if ((int)addrlen < 0) {
2928 return -TARGET_EINVAL;
2931 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2932 return -TARGET_EFAULT;
2934 addr = alloca(addrlen);
2936 ret_addrlen = addrlen;
2937 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
2938 if (!is_error(ret)) {
2939 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2940 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2941 ret = -TARGET_EFAULT;
2947 /* do_socketpair() must return target values and target errnos. */
2948 static abi_long do_socketpair(int domain, int type, int protocol,
2949 abi_ulong target_tab_addr)
2954 target_to_host_sock_type(&type);
2956 ret = get_errno(socketpair(domain, type, protocol, tab));
2957 if (!is_error(ret)) {
2958 if (put_user_s32(tab[0], target_tab_addr)
2959 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2960 ret = -TARGET_EFAULT;
2965 /* do_sendto() must return target values and target errnos. */
2966 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2967 abi_ulong target_addr, socklen_t addrlen)
2971 void *copy_msg = NULL;
2974 if ((int)addrlen < 0) {
2975 return -TARGET_EINVAL;
2978 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2980 return -TARGET_EFAULT;
2981 if (fd_trans_target_to_host_data(fd)) {
2982 copy_msg = host_msg;
2983 host_msg = g_malloc(len);
2984 memcpy(host_msg, copy_msg, len);
2985 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
2991 addr = alloca(addrlen+1);
2992 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
2996 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
2998 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3003 host_msg = copy_msg;
3005 unlock_user(host_msg, msg, 0);
3009 /* do_recvfrom() must return target values and target errnos. */
3010 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3011 abi_ulong target_addr,
3012 abi_ulong target_addrlen)
3014 socklen_t addrlen, ret_addrlen;
3019 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3021 return -TARGET_EFAULT;
3023 if (get_user_u32(addrlen, target_addrlen)) {
3024 ret = -TARGET_EFAULT;
3027 if ((int)addrlen < 0) {
3028 ret = -TARGET_EINVAL;
3031 addr = alloca(addrlen);
3032 ret_addrlen = addrlen;
3033 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3034 addr, &ret_addrlen));
3036 addr = NULL; /* To keep compiler quiet. */
3037 addrlen = 0; /* To keep compiler quiet. */
3038 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3040 if (!is_error(ret)) {
3041 if (fd_trans_host_to_target_data(fd)) {
3043 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3044 if (is_error(trans)) {
3050 host_to_target_sockaddr(target_addr, addr,
3051 MIN(addrlen, ret_addrlen));
3052 if (put_user_u32(ret_addrlen, target_addrlen)) {
3053 ret = -TARGET_EFAULT;
3057 unlock_user(host_msg, msg, len);
3060 unlock_user(host_msg, msg, 0);
3065 #ifdef TARGET_NR_socketcall
3066 /* do_socketcall() must return target values and target errnos. */
3067 static abi_long do_socketcall(int num, abi_ulong vptr)
3069 static const unsigned nargs[] = { /* number of arguments per operation */
3070 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3071 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3072 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3073 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3074 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3075 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3076 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3077 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3078 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3079 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3080 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3081 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3082 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3083 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3084 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3085 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3086 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3087 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3088 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3089 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3091 abi_long a[6]; /* max 6 args */
3094 /* check the range of the first argument num */
3095 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3096 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3097 return -TARGET_EINVAL;
3099 /* ensure we have space for args */
3100 if (nargs[num] > ARRAY_SIZE(a)) {
3101 return -TARGET_EINVAL;
3103 /* collect the arguments in a[] according to nargs[] */
3104 for (i = 0; i < nargs[num]; ++i) {
3105 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3106 return -TARGET_EFAULT;
3109 /* now that we have the args, invoke the appropriate underlying function */
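/* e.g. a guest socket(AF_INET, SOCK_STREAM, 0) issued via socketcall
 * arrives with num == TARGET_SYS_SOCKET and a[] == {domain, type,
 * protocol}, and is simply forwarded to do_socket().
 */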
3111 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3112 return do_socket(a[0], a[1], a[2]);
3113 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3114 return do_bind(a[0], a[1], a[2]);
3115 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3116 return do_connect(a[0], a[1], a[2]);
3117 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3118 return get_errno(listen(a[0], a[1]));
3119 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3120 return do_accept4(a[0], a[1], a[2], 0);
3121 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3122 return do_getsockname(a[0], a[1], a[2]);
3123 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3124 return do_getpeername(a[0], a[1], a[2]);
3125 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3126 return do_socketpair(a[0], a[1], a[2], a[3]);
3127 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3128 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3129 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3130 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3131 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3132 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3133 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3134 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3135 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3136 return get_errno(shutdown(a[0], a[1]));
3137 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3138 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3139 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3140 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3141 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3142 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3143 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3144 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3145 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3146 return do_accept4(a[0], a[1], a[2], a[3]);
3147 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3148 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3149 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3150 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3152 gemu_log("Unsupported socketcall: %d\n", num);
3153 return -TARGET_EINVAL;
3158 #define N_SHM_REGIONS 32
3160 static struct shm_region {
3164 } shm_regions[N_SHM_REGIONS];
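/* Registry of active guest shmat() attachments: do_shmat() records
 * each mapping here so that do_shmdt() can later clear the page
 * flags over the right length. Slots are found by linear search.
 */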
3166 #ifndef TARGET_SEMID64_DS
3167 /* asm-generic version of this struct */
3168 struct target_semid64_ds
3170 struct target_ipc_perm sem_perm;
3171 abi_ulong sem_otime;
3172 #if TARGET_ABI_BITS == 32
3173 abi_ulong __unused1;
3175 abi_ulong sem_ctime;
3176 #if TARGET_ABI_BITS == 32
3177 abi_ulong __unused2;
3179 abi_ulong sem_nsems;
3180 abi_ulong __unused3;
3181 abi_ulong __unused4;
3185 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3186 abi_ulong target_addr)
3188 struct target_ipc_perm *target_ip;
3189 struct target_semid64_ds *target_sd;
3191 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3192 return -TARGET_EFAULT;
3193 target_ip = &(target_sd->sem_perm);
3194 host_ip->__key = tswap32(target_ip->__key);
3195 host_ip->uid = tswap32(target_ip->uid);
3196 host_ip->gid = tswap32(target_ip->gid);
3197 host_ip->cuid = tswap32(target_ip->cuid);
3198 host_ip->cgid = tswap32(target_ip->cgid);
3199 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3200 host_ip->mode = tswap32(target_ip->mode);
3202 host_ip->mode = tswap16(target_ip->mode);
3204 #if defined(TARGET_PPC)
3205 host_ip->__seq = tswap32(target_ip->__seq);
3207 host_ip->__seq = tswap16(target_ip->__seq);
3209 unlock_user_struct(target_sd, target_addr, 0);
3213 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3214 struct ipc_perm *host_ip)
3216 struct target_ipc_perm *target_ip;
3217 struct target_semid64_ds *target_sd;
3219 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3220 return -TARGET_EFAULT;
3221 target_ip = &(target_sd->sem_perm);
3222 target_ip->__key = tswap32(host_ip->__key);
3223 target_ip->uid = tswap32(host_ip->uid);
3224 target_ip->gid = tswap32(host_ip->gid);
3225 target_ip->cuid = tswap32(host_ip->cuid);
3226 target_ip->cgid = tswap32(host_ip->cgid);
3227 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3228 target_ip->mode = tswap32(host_ip->mode);
3230 target_ip->mode = tswap16(host_ip->mode);
3232 #if defined(TARGET_PPC)
3233 target_ip->__seq = tswap32(host_ip->__seq);
3235 target_ip->__seq = tswap16(host_ip->__seq);
3237 unlock_user_struct(target_sd, target_addr, 1);
3241 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3242 abi_ulong target_addr)
3244 struct target_semid64_ds *target_sd;
3246 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3247 return -TARGET_EFAULT;
3248 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3249 return -TARGET_EFAULT;
3250 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3251 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3252 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3253 unlock_user_struct(target_sd, target_addr, 0);
3257 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3258 struct semid_ds *host_sd)
3260 struct target_semid64_ds *target_sd;
3262 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3263 return -TARGET_EFAULT;
3264 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3265 return -TARGET_EFAULT;
3266 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3267 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3268 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3269 unlock_user_struct(target_sd, target_addr, 1);
3273 struct target_seminfo {
3286 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3287 struct seminfo *host_seminfo)
3289 struct target_seminfo *target_seminfo;
3290 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3291 return -TARGET_EFAULT;
3292 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3293 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3294 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3295 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3296 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3297 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3298 __put_user(host_seminfo->semume, &target_seminfo->semume);
3299 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3300 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3301 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3302 unlock_user_struct(target_seminfo, target_addr, 1);
3308 struct semid_ds *buf;
3309 unsigned short *array;
3310 struct seminfo *__buf;
3313 union target_semun {
3320 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3321 abi_ulong target_addr)
3324 unsigned short *array;
3326 struct semid_ds semid_ds;
3329 semun.buf = &semid_ds;
3331 ret = semctl(semid, 0, IPC_STAT, semun);
3333 return get_errno(ret);
3335 nsems = semid_ds.sem_nsems;
3337 *host_array = g_try_new(unsigned short, nsems);
3339 return -TARGET_ENOMEM;
3341 array = lock_user(VERIFY_READ, target_addr,
3342 nsems*sizeof(unsigned short), 1);
3344 g_free(*host_array);
3345 return -TARGET_EFAULT;
3348 for(i=0; i<nsems; i++) {
3349 __get_user((*host_array)[i], &array[i]);
3351 unlock_user(array, target_addr, 0);
3356 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3357 unsigned short **host_array)
3360 unsigned short *array;
3362 struct semid_ds semid_ds;
3365 semun.buf = &semid_ds;
3367 ret = semctl(semid, 0, IPC_STAT, semun);
3369 return get_errno(ret);
3371 nsems = semid_ds.sem_nsems;
3373 array = lock_user(VERIFY_WRITE, target_addr,
3374 nsems*sizeof(unsigned short), 0);
3376 return -TARGET_EFAULT;
3378 for(i=0; i<nsems; i++) {
3379 __put_user((*host_array)[i], &array[i]);
3381 g_free(*host_array);
3382 unlock_user(array, target_addr, 1);
3387 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3388 abi_ulong target_arg)
3390 union target_semun target_su = { .buf = target_arg };
3392 struct semid_ds dsarg;
3393 unsigned short *array = NULL;
3394 struct seminfo seminfo;
3395 abi_long ret = -TARGET_EINVAL;
3402 /* In 64 bit cross-endian situations, we will erroneously pick up
3403 * the wrong half of the union for the "val" element. To rectify
3404 * this, the entire 8-byte structure is byteswapped, followed by
3405 * a swap of the 4 byte val field. In other cases, the data is
3406 * already in proper host byte order. */
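/* Illustrative walk-through: the semun arrives as a host-byte-order
 * abi_ulong, but a cross-endian guest stored its 4-byte 'val' in what
 * is now the wrong half of the 8 bytes. tswapal() restores the
 * guest's original byte layout so the union's val member lines up
 * again, and tswap32() then converts that value to host order.
 */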
3407 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3408 target_su.buf = tswapal(target_su.buf);
3409 arg.val = tswap32(target_su.val);
3411 arg.val = target_su.val;
3413 ret = get_errno(semctl(semid, semnum, cmd, arg));
3417 err = target_to_host_semarray(semid, &array, target_su.array);
3421 ret = get_errno(semctl(semid, semnum, cmd, arg));
3422 err = host_to_target_semarray(semid, target_su.array, &array);
3429 err = target_to_host_semid_ds(&dsarg, target_su.buf);
3433 ret = get_errno(semctl(semid, semnum, cmd, arg));
3434 err = host_to_target_semid_ds(target_su.buf, &dsarg);
3440 arg.__buf = &seminfo;
3441 ret = get_errno(semctl(semid, semnum, cmd, arg));
3442 err = host_to_target_seminfo(target_su.__buf, &seminfo);
3450 ret = get_errno(semctl(semid, semnum, cmd, NULL));
3457 struct target_sembuf {
3458 unsigned short sem_num;
3463 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3464 abi_ulong target_addr,
3467 struct target_sembuf *target_sembuf;
3470 target_sembuf = lock_user(VERIFY_READ, target_addr,
3471 nsops*sizeof(struct target_sembuf), 1);
3473 return -TARGET_EFAULT;
3475 for(i=0; i<nsops; i++) {
3476 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3477 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3478 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3481 unlock_user(target_sembuf, target_addr, 0);
3486 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3488 struct sembuf sops[nsops];
3490 if (target_to_host_sembuf(sops, ptr, nsops))
3491 return -TARGET_EFAULT;
3493 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3496 struct target_msqid_ds
3498 struct target_ipc_perm msg_perm;
3499 abi_ulong msg_stime;
3500 #if TARGET_ABI_BITS == 32
3501 abi_ulong __unused1;
3503 abi_ulong msg_rtime;
3504 #if TARGET_ABI_BITS == 32
3505 abi_ulong __unused2;
3507 abi_ulong msg_ctime;
3508 #if TARGET_ABI_BITS == 32
3509 abi_ulong __unused3;
3511 abi_ulong __msg_cbytes;
3513 abi_ulong msg_qbytes;
3514 abi_ulong msg_lspid;
3515 abi_ulong msg_lrpid;
3516 abi_ulong __unused4;
3517 abi_ulong __unused5;
3520 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3521 abi_ulong target_addr)
3523 struct target_msqid_ds *target_md;
3525 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3526 return -TARGET_EFAULT;
3527 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3528 return -TARGET_EFAULT;
3529 host_md->msg_stime = tswapal(target_md->msg_stime);
3530 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3531 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3532 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3533 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3534 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3535 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3536 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3537 unlock_user_struct(target_md, target_addr, 0);
3541 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3542 struct msqid_ds *host_md)
3544 struct target_msqid_ds *target_md;
3546 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3547 return -TARGET_EFAULT;
3548 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3549 return -TARGET_EFAULT;
3550 target_md->msg_stime = tswapal(host_md->msg_stime);
3551 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3552 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3553 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3554 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3555 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3556 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3557 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3558 unlock_user_struct(target_md, target_addr, 1);
3562 struct target_msginfo {
3570 unsigned short int msgseg;
3573 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3574 struct msginfo *host_msginfo)
3576 struct target_msginfo *target_msginfo;
3577 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3578 return -TARGET_EFAULT;
3579 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3580 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3581 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3582 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3583 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3584 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3585 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3586 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3587 unlock_user_struct(target_msginfo, target_addr, 1);
3591 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3593 struct msqid_ds dsarg;
3594 struct msginfo msginfo;
3595 abi_long ret = -TARGET_EINVAL;
3603 if (target_to_host_msqid_ds(&dsarg,ptr))
3604 return -TARGET_EFAULT;
3605 ret = get_errno(msgctl(msgid, cmd, &dsarg));
3606 if (host_to_target_msqid_ds(ptr,&dsarg))
3607 return -TARGET_EFAULT;
3610 ret = get_errno(msgctl(msgid, cmd, NULL));
3614 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3615 if (host_to_target_msginfo(ptr, &msginfo))
3616 return -TARGET_EFAULT;
3623 struct target_msgbuf {
3628 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3629 ssize_t msgsz, int msgflg)
3631 struct target_msgbuf *target_mb;
3632 struct msgbuf *host_mb;
3636 return -TARGET_EINVAL;
3639 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3640 return -TARGET_EFAULT;
3641 host_mb = g_try_malloc(msgsz + sizeof(long));
3643 unlock_user_struct(target_mb, msgp, 0);
3644 return -TARGET_ENOMEM;
3646 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3647 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3648 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3650 unlock_user_struct(target_mb, msgp, 0);
3655 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3656 ssize_t msgsz, abi_long msgtyp,
3659 struct target_msgbuf *target_mb;
3661 struct msgbuf *host_mb;
3665 return -TARGET_EINVAL;
3668 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3669 return -TARGET_EFAULT;
3671 host_mb = g_try_malloc(msgsz + sizeof(long));
3673 ret = -TARGET_ENOMEM;
3676 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3679 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3680 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3681 if (!target_mtext) {
3682 ret = -TARGET_EFAULT;
3685 memcpy(target_mb->mtext, host_mb->mtext, ret);
3686 unlock_user(target_mtext, target_mtext_addr, ret);
3689 target_mb->mtype = tswapal(host_mb->mtype);
3693 unlock_user_struct(target_mb, msgp, 1);
3698 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3699 abi_ulong target_addr)
3701 struct target_shmid_ds *target_sd;
3703 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3704 return -TARGET_EFAULT;
3705 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3706 return -TARGET_EFAULT;
3707 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3708 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3709 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3710 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3711 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3712 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3713 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3714 unlock_user_struct(target_sd, target_addr, 0);
3718 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3719 struct shmid_ds *host_sd)
3721 struct target_shmid_ds *target_sd;
3723 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3724 return -TARGET_EFAULT;
3725 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3726 return -TARGET_EFAULT;
3727 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3728 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3729 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3730 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3731 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3732 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3733 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3734 unlock_user_struct(target_sd, target_addr, 1);
3738 struct target_shminfo {
3746 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3747 struct shminfo *host_shminfo)
3749 struct target_shminfo *target_shminfo;
3750 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3751 return -TARGET_EFAULT;
3752 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3753 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3754 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3755 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3756 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3757 unlock_user_struct(target_shminfo, target_addr, 1);
3761 struct target_shm_info {
3766 abi_ulong swap_attempts;
3767 abi_ulong swap_successes;
3770 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3771 struct shm_info *host_shm_info)
3773 struct target_shm_info *target_shm_info;
3774 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3775 return -TARGET_EFAULT;
3776 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3777 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3778 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3779 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3780 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3781 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3782 unlock_user_struct(target_shm_info, target_addr, 1);
3786 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3788 struct shmid_ds dsarg;
3789 struct shminfo shminfo;
3790 struct shm_info shm_info;
3791 abi_long ret = -TARGET_EINVAL;
3799 if (target_to_host_shmid_ds(&dsarg, buf))
3800 return -TARGET_EFAULT;
3801 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3802 if (host_to_target_shmid_ds(buf, &dsarg))
3803 return -TARGET_EFAULT;
3806 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3807 if (host_to_target_shminfo(buf, &shminfo))
3808 return -TARGET_EFAULT;
3811 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3812 if (host_to_target_shm_info(buf, &shm_info))
3813 return -TARGET_EFAULT;
3818 ret = get_errno(shmctl(shmid, cmd, NULL));
3825 #ifndef TARGET_FORCE_SHMLBA
3826 /* For most architectures, SHMLBA is the same as the page size;
3827 * some architectures have larger values, in which case they should
3828 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3829 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3830 * and defining its own value for SHMLBA.
3832 * The kernel also permits SHMLBA to be set by the architecture to a
3833 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3834 * this means that addresses are rounded to the larger size if
3835 * SHM_RND is set but addresses not aligned to that size are not rejected
3836 * as long as they are at least page-aligned. Since the only architecture
3837 * which uses this is ia64, this code doesn't provide for that oddity.
3839 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
3841 return TARGET_PAGE_SIZE;
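/* Illustrative only (not part of this file): an architecture with a
 * larger SHMLBA would instead define TARGET_FORCE_SHMLBA in its
 * target headers and provide its own version along these lines:
 *
 *   static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *   {
 *       return 4 * TARGET_PAGE_SIZE;   // hypothetical alignment
 *   }
 */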
3845 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
3846 int shmid, abi_ulong shmaddr, int shmflg)
3850 struct shmid_ds shm_info;
3854 /* find out the length of the shared memory segment */
3855 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3856 if (is_error(ret)) {
3857 /* can't get length, bail out */
3861 shmlba = target_shmlba(cpu_env);
3863 if (shmaddr & (shmlba - 1)) {
3864 if (shmflg & SHM_RND) {
3865 shmaddr &= ~(shmlba - 1);
3867 return -TARGET_EINVAL;
3870 if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
3871 return -TARGET_EINVAL;
3877 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3879 abi_ulong mmap_start;
3881 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3883 if (mmap_start == -1) {
3885 host_raddr = (void *)-1;
3887 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3890 if (host_raddr == (void *)-1) {
3892 return get_errno((long)host_raddr);
3894 raddr=h2g((unsigned long)host_raddr);
3896 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3897 PAGE_VALID | PAGE_READ |
3898 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3900 for (i = 0; i < N_SHM_REGIONS; i++) {
3901 if (!shm_regions[i].in_use) {
3902 shm_regions[i].in_use = true;
3903 shm_regions[i].start = raddr;
3904 shm_regions[i].size = shm_info.shm_segsz;
3914 static inline abi_long do_shmdt(abi_ulong shmaddr)
3921 for (i = 0; i < N_SHM_REGIONS; ++i) {
3922 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3923 shm_regions[i].in_use = false;
3924 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3928 rv = get_errno(shmdt(g2h(shmaddr)));
3935 #ifdef TARGET_NR_ipc
3936 /* ??? This only works with linear mappings. */
3937 /* do_ipc() must return target values and target errnos. */
3938 static abi_long do_ipc(CPUArchState *cpu_env,
3939 unsigned int call, abi_long first,
3940 abi_long second, abi_long third,
3941 abi_long ptr, abi_long fifth)
3946 version = call >> 16;
3951 ret = do_semop(first, ptr, second);
3955 ret = get_errno(semget(first, second, third));
3958 case IPCOP_semctl: {
3959 /* The semun argument to semctl is passed by value, so dereference the
3962 get_user_ual(atptr, ptr);
3963 ret = do_semctl(first, second, third, atptr);
3968 ret = get_errno(msgget(first, second));
3972 ret = do_msgsnd(first, ptr, second, third);
3976 ret = do_msgctl(first, second, ptr);
3983 struct target_ipc_kludge {
3988 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3989 ret = -TARGET_EFAULT;
3993 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3995 unlock_user_struct(tmp, ptr, 0);
3999 ret = do_msgrcv(first, ptr, second, fifth, third);
4008 raddr = do_shmat(cpu_env, first, ptr, second);
4009 if (is_error(raddr))
4010 return get_errno(raddr);
4011 if (put_user_ual(raddr, third))
4012 return -TARGET_EFAULT;
4016 ret = -TARGET_EINVAL;
4021 ret = do_shmdt(ptr);
4025 /* IPC_* flag values are the same on all Linux platforms */
4026 ret = get_errno(shmget(first, second, third));
4029 /* IPC_* and SHM_* command values are the same on all Linux platforms */
4031 ret = do_shmctl(first, second, ptr);
4034 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4035 ret = -TARGET_ENOSYS;
4042 /* kernel structure type definitions */
4044 #define STRUCT(name, ...) STRUCT_ ## name,
4045 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4047 #include "syscall_types.h"
4051 #undef STRUCT_SPECIAL
4053 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4054 #define STRUCT_SPECIAL(name)
4055 #include "syscall_types.h"
4057 #undef STRUCT_SPECIAL
4059 typedef struct IOCTLEntry IOCTLEntry;
4061 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4062 int fd, int cmd, abi_long arg);
4066 unsigned int host_cmd;
4069 do_ioctl_fn *do_ioctl;
4070 const argtype arg_type[5];
4073 #define IOC_R 0x0001
4074 #define IOC_W 0x0002
4075 #define IOC_RW (IOC_R | IOC_W)
4077 #define MAX_STRUCT_SIZE 4096
4079 #ifdef CONFIG_FIEMAP
4080 /* So fiemap access checks don't overflow on 32 bit systems.
4081 * This is very slightly smaller than the limit imposed by
4082 * the underlying kernel.
4084 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4085 / sizeof(struct fiemap_extent))
4087 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4088 int fd, int cmd, abi_long arg)
4090 /* The parameter for this ioctl is a struct fiemap followed
4091 * by an array of struct fiemap_extent whose size is set
4092 * in fiemap->fm_extent_count. The array is filled in by the
* kernel. */
4095 int target_size_in, target_size_out;
4097 const argtype *arg_type = ie->arg_type;
4098 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4101 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4105 assert(arg_type[0] == TYPE_PTR);
4106 assert(ie->access == IOC_RW);
4108 target_size_in = thunk_type_size(arg_type, 0);
4109 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4111 return -TARGET_EFAULT;
4113 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4114 unlock_user(argptr, arg, 0);
4115 fm = (struct fiemap *)buf_temp;
4116 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4117 return -TARGET_EINVAL;
4120 outbufsz = sizeof (*fm) +
4121 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4123 if (outbufsz > MAX_STRUCT_SIZE) {
4124 /* We can't fit all the extents into the fixed size buffer.
4125 * Allocate one that is large enough and use it instead.
4127 fm = g_try_malloc(outbufsz);
4129 return -TARGET_ENOMEM;
4131 memcpy(fm, buf_temp, sizeof(struct fiemap));
4134 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4135 if (!is_error(ret)) {
4136 target_size_out = target_size_in;
4137 /* An extent_count of 0 means we were only counting the extents,
4138 * so there are no structs to copy
4140 if (fm->fm_extent_count != 0) {
4141 target_size_out += fm->fm_mapped_extents * extent_size;
4143 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4145 ret = -TARGET_EFAULT;
4147 /* Convert the struct fiemap */
4148 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4149 if (fm->fm_extent_count != 0) {
4150 p = argptr + target_size_in;
4151 /* ...and then all the struct fiemap_extents */
4152 for (i = 0; i < fm->fm_mapped_extents; i++) {
4153 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4158 unlock_user(argptr, arg, target_size_out);
4168 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4169 int fd, int cmd, abi_long arg)
4171 const argtype *arg_type = ie->arg_type;
4175 struct ifconf *host_ifconf;
4177 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4178 int target_ifreq_size;
4183 abi_long target_ifc_buf;
4187 assert(arg_type[0] == TYPE_PTR);
4188 assert(ie->access == IOC_RW);
4191 target_size = thunk_type_size(arg_type, 0);
4193 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4195 return -TARGET_EFAULT;
4196 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4197 unlock_user(argptr, arg, 0);
4199 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4200 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4201 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4203 if (target_ifc_buf != 0) {
4204 target_ifc_len = host_ifconf->ifc_len;
4205 nb_ifreq = target_ifc_len / target_ifreq_size;
4206 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4208 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4209 if (outbufsz > MAX_STRUCT_SIZE) {
4211 * We can't fit all the ifreq entries into the fixed size buffer.
4212 * Allocate one that is large enough and use it instead.
4214 host_ifconf = malloc(outbufsz);
4216 return -TARGET_ENOMEM;
4218 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4221 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4223 host_ifconf->ifc_len = host_ifc_len;
4225 host_ifc_buf = NULL;
4227 host_ifconf->ifc_buf = host_ifc_buf;
4229 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4230 if (!is_error(ret)) {
4231 /* convert host ifc_len to target ifc_len */
4233 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4234 target_ifc_len = nb_ifreq * target_ifreq_size;
4235 host_ifconf->ifc_len = target_ifc_len;
4237 /* restore target ifc_buf */
4239 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4241 /* copy struct ifconf to target user */
4243 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4245 return -TARGET_EFAULT;
4246 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4247 unlock_user(argptr, arg, target_size);
4249 if (target_ifc_buf != 0) {
4250 /* copy ifreq[] to target user */
4251 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4252 for (i = 0; i < nb_ifreq ; i++) {
4253 thunk_convert(argptr + i * target_ifreq_size,
4254 host_ifc_buf + i * sizeof(struct ifreq),
4255 ifreq_arg_type, THUNK_TARGET);
4257 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4268 #if defined(CONFIG_USBFS)
4269 #if HOST_LONG_BITS > 64
4270 #error USBDEVFS thunks do not support >64 bit hosts yet.
4273 uint64_t target_urb_adr;
4274 uint64_t target_buf_adr;
4275 char *target_buf_ptr;
4276 struct usbdevfs_urb host_urb;
4279 static GHashTable *usbdevfs_urb_hashtable(void)
4281 static GHashTable *urb_hashtable;
4283 if (!urb_hashtable) {
4284 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4286 return urb_hashtable;
4289 static void urb_hashtable_insert(struct live_urb *urb)
4291 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4292 g_hash_table_insert(urb_hashtable, urb, urb);
4295 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4297 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4298 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4301 static void urb_hashtable_remove(struct live_urb *urb)
4303 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4304 g_hash_table_remove(urb_hashtable, urb);
4308 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4309 int fd, int cmd, abi_long arg)
4311 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4312 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4313 struct live_urb *lurb;
4317 uintptr_t target_urb_adr;
4320 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4322 memset(buf_temp, 0, sizeof(uint64_t));
4323 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4324 if (is_error(ret)) {
4328 memcpy(&hurb, buf_temp, sizeof(uint64_t));
4329 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4330 if (!lurb->target_urb_adr) {
4331 return -TARGET_EFAULT;
4333 urb_hashtable_remove(lurb);
4334 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4335 lurb->host_urb.buffer_length);
4336 lurb->target_buf_ptr = NULL;
4338 /* restore the guest buffer pointer */
4339 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4341 /* update the guest urb struct */
4342 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4345 return -TARGET_EFAULT;
4347 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4348 unlock_user(argptr, lurb->target_urb_adr, target_size);
4350 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4351 /* write back the urb handle */
4352 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4355 return -TARGET_EFAULT;
4358 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4359 target_urb_adr = lurb->target_urb_adr;
4360 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4361 unlock_user(argptr, arg, target_size);
4368 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4369 uint8_t *buf_temp __attribute__((unused)),
4370 int fd, int cmd, abi_long arg)
4372 struct live_urb *lurb;
4374 /* map target address back to host URB with metadata. */
4375 lurb = urb_hashtable_lookup(arg);
4377 return -TARGET_EFAULT;
4379 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4383 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4384 int fd, int cmd, abi_long arg)
4386 const argtype *arg_type = ie->arg_type;
4391 struct live_urb *lurb;
4394 * each submitted URB needs to map to a unique ID for the
4395 * kernel, and that unique ID needs to be a pointer to
4396 * host memory. Hence, we need to malloc for each URB.
4397 * Isochronous transfers have a variable length struct.
4400 target_size = thunk_type_size(arg_type, THUNK_TARGET);
4402 /* construct host copy of urb and metadata */
4403 lurb = g_try_malloc0(sizeof(struct live_urb));
4405 return -TARGET_ENOMEM;
4408 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4411 return -TARGET_EFAULT;
4413 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4414 unlock_user(argptr, arg, 0);
4416 lurb->target_urb_adr = arg;
4417 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4419 /* buffer space used depends on endpoint type, so lock the entire buffer */
4420 /* control type URBs should check the buffer contents for true direction */
4421 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4422 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4423 lurb->host_urb.buffer_length, 1);
4424 if (lurb->target_buf_ptr == NULL) {
4426 return -TARGET_EFAULT;
4429 /* update buffer pointer in host copy */
4430 lurb->host_urb.buffer = lurb->target_buf_ptr;
4432 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4433 if (is_error(ret)) {
4434 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4437 urb_hashtable_insert(lurb);
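/* The URB stays in the hashtable, with the guest buffer still locked,
 * until USBDEVFS_REAPURB returns it and do_ioctl_usbdevfs_reapurb()
 * unlocks the buffer and removes the entry.
 */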
4442 #endif /* CONFIG_USBFS */
4444 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4445 int cmd, abi_long arg)
4448 struct dm_ioctl *host_dm;
4449 abi_long guest_data;
4450 uint32_t guest_data_size;
4452 const argtype *arg_type = ie->arg_type;
4454 void *big_buf = NULL;
4458 target_size = thunk_type_size(arg_type, 0);
4459 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4461 ret = -TARGET_EFAULT;
4464 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4465 unlock_user(argptr, arg, 0);
4467 /* buf_temp is too small, so fetch things into a bigger buffer */
4468 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4469 memcpy(big_buf, buf_temp, target_size);
4473 guest_data = arg + host_dm->data_start;
4474 if ((guest_data - arg) < 0) {
4475 ret = -TARGET_EINVAL;
4478 guest_data_size = host_dm->data_size - host_dm->data_start;
4479 host_data = (char*)host_dm + host_dm->data_start;
4481 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4483 ret = -TARGET_EFAULT;
4487 switch (ie->host_cmd) {
4489 case DM_LIST_DEVICES:
4492 case DM_DEV_SUSPEND:
4495 case DM_TABLE_STATUS:
4496 case DM_TABLE_CLEAR:
4498 case DM_LIST_VERSIONS:
4502 case DM_DEV_SET_GEOMETRY:
4503 /* data contains only strings */
4504 memcpy(host_data, argptr, guest_data_size);
4507 memcpy(host_data, argptr, guest_data_size);
4508 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4512 void *gspec = argptr;
4513 void *cur_data = host_data;
4514 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4515 int spec_size = thunk_type_size(arg_type, 0);
4518 for (i = 0; i < host_dm->target_count; i++) {
4519 struct dm_target_spec *spec = cur_data;
4523 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4524 slen = strlen((char*)gspec + spec_size) + 1;
4526 spec->next = sizeof(*spec) + slen;
4527 strcpy((char*)&spec[1], gspec + spec_size);
4529 cur_data += spec->next;
4534 ret = -TARGET_EINVAL;
4535 unlock_user(argptr, guest_data, 0);
4538 unlock_user(argptr, guest_data, 0);
4540 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4541 if (!is_error(ret)) {
4542 guest_data = arg + host_dm->data_start;
4543 guest_data_size = host_dm->data_size - host_dm->data_start;
4544 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4545 switch (ie->host_cmd) {
4550 case DM_DEV_SUSPEND:
4553 case DM_TABLE_CLEAR:
4555 case DM_DEV_SET_GEOMETRY:
4556 /* no return data */
4558 case DM_LIST_DEVICES:
4560 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4561 uint32_t remaining_data = guest_data_size;
4562 void *cur_data = argptr;
4563 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4564 int nl_size = 12; /* can't use thunk_type_size() due to alignment */
4567 uint32_t next = nl->next;
4569 nl->next = nl_size + (strlen(nl->name) + 1);
4571 if (remaining_data < nl->next) {
4572 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4575 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4576 strcpy(cur_data + nl_size, nl->name);
4577 cur_data += nl->next;
4578 remaining_data -= nl->next;
4582 nl = (void*)nl + next;
4587 case DM_TABLE_STATUS:
4589 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4590 void *cur_data = argptr;
4591 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4592 int spec_size = thunk_type_size(arg_type, 0);
4595 for (i = 0; i < host_dm->target_count; i++) {
4596 uint32_t next = spec->next;
4597 int slen = strlen((char*)&spec[1]) + 1;
4598 spec->next = (cur_data - argptr) + spec_size + slen;
4599 if (guest_data_size < spec->next) {
4600 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4603 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4604 strcpy(cur_data + spec_size, (char*)&spec[1]);
4605 cur_data = argptr + spec->next;
4606 spec = (void*)host_dm + host_dm->data_start + next;
4612 void *hdata = (void*)host_dm + host_dm->data_start;
4613 int count = *(uint32_t*)hdata;
4614 uint64_t *hdev = hdata + 8;
4615 uint64_t *gdev = argptr + 8;
4618 *(uint32_t*)argptr = tswap32(count);
4619 for (i = 0; i < count; i++) {
4620 *gdev = tswap64(*hdev);
4626 case DM_LIST_VERSIONS:
4628 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4629 uint32_t remaining_data = guest_data_size;
4630 void *cur_data = argptr;
4631 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4632 int vers_size = thunk_type_size(arg_type, 0);
4635 uint32_t next = vers->next;
4637 vers->next = vers_size + (strlen(vers->name) + 1);
4639 if (remaining_data < vers->next) {
4640 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4643 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4644 strcpy(cur_data + vers_size, vers->name);
4645 cur_data += vers->next;
4646 remaining_data -= vers->next;
4650 vers = (void*)vers + next;
4655 unlock_user(argptr, guest_data, 0);
4656 ret = -TARGET_EINVAL;
4659 unlock_user(argptr, guest_data, guest_data_size);
4661 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4663 ret = -TARGET_EFAULT;
4666 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4667 unlock_user(argptr, arg, target_size);
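/* Note: BLKPG is a nested-pointer ioctl: struct blkpg_ioctl_arg embeds
 * a pointer to a struct blkpg_partition payload.  After converting the
 * outer structure, the payload is fetched and converted separately and
 * the host copy's data pointer is redirected to the local result.
 */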
4674 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4675 int cmd, abi_long arg)
4679 const argtype *arg_type = ie->arg_type;
4680 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4683 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4684 struct blkpg_partition host_part;
4686 /* Read and convert blkpg */
4688 target_size = thunk_type_size(arg_type, 0);
4689 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4691 ret = -TARGET_EFAULT;
4694 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4695 unlock_user(argptr, arg, 0);
4697 switch (host_blkpg->op) {
4698 case BLKPG_ADD_PARTITION:
4699 case BLKPG_DEL_PARTITION:
4700 /* payload is struct blkpg_partition */
4703 /* Unknown opcode */
4704 ret = -TARGET_EINVAL;
4708 /* Read and convert blkpg->data */
4709 arg = (abi_long)(uintptr_t)host_blkpg->data;
4710 target_size = thunk_type_size(part_arg_type, 0);
4711 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4713 ret = -TARGET_EFAULT;
4716 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4717 unlock_user(argptr, arg, 0);
4719 /* Swizzle the data pointer to our local copy and call! */
4720 host_blkpg->data = &host_part;
4721 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
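/* Note: SIOCADDRT/SIOCDELRT take a struct rtentry whose rt_dev field is
 * a pointer to a device-name string.  The generic thunk machinery can't
 * chase that pointer, so the struct is converted field by field below
 * and rt_dev is locked and translated separately.
 */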
4727 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4728 int fd, int cmd, abi_long arg)
4730 const argtype *arg_type = ie->arg_type;
4731 const StructEntry *se;
4732 const argtype *field_types;
4733 const int *dst_offsets, *src_offsets;
4736 abi_ulong *target_rt_dev_ptr;
4737 unsigned long *host_rt_dev_ptr;
4741 assert(ie->access == IOC_W);
4742 assert(*arg_type == TYPE_PTR);
4744 assert(*arg_type == TYPE_STRUCT);
4745 target_size = thunk_type_size(arg_type, 0);
4746 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4748 return -TARGET_EFAULT;
4751 assert(*arg_type == (int)STRUCT_rtentry);
4752 se = struct_entries + *arg_type++;
4753 assert(se->convert[0] == NULL);
4754 /* convert struct here to be able to catch rt_dev string */
4755 field_types = se->field_types;
4756 dst_offsets = se->field_offsets[THUNK_HOST];
4757 src_offsets = se->field_offsets[THUNK_TARGET];
4758 for (i = 0; i < se->nb_fields; i++) {
4759 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4760 assert(*field_types == TYPE_PTRVOID);
4761 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4762 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4763 if (*target_rt_dev_ptr != 0) {
4764 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4765 tswapal(*target_rt_dev_ptr));
4766 if (!*host_rt_dev_ptr) {
4767 unlock_user(argptr, arg, 0);
4768 return -TARGET_EFAULT;
4771 *host_rt_dev_ptr = 0;
4776 field_types = thunk_convert(buf_temp + dst_offsets[i],
4777 argptr + src_offsets[i],
4778 field_types, THUNK_HOST);
4780 unlock_user(argptr, arg, 0);
4782 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4783 if (*host_rt_dev_ptr != 0) {
4784 unlock_user((void *)*host_rt_dev_ptr,
4785 *target_rt_dev_ptr, 0);
4790 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4791 int fd, int cmd, abi_long arg)
4793 int sig = target_to_host_signal(arg);
4794 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4798 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
4799 int fd, int cmd, abi_long arg)
4801 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
4802 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
4806 static IOCTLEntry ioctl_entries[] = {
4807 #define IOCTL(cmd, access, ...) \
4808 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4809 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4810 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4811 #define IOCTL_IGNORE(cmd) \
4812 { TARGET_ ## cmd, 0, #cmd },
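/* IOCTL() pairs a target command with the equivalent host command and a
 * thunk type description of its argument; IOCTL_SPECIAL() additionally
 * installs a custom handler; IOCTL_IGNORE() records the command with
 * host_cmd == 0 so it fails cleanly rather than being warned about.
 * The table ends with a sentinel whose target_cmd is 0, which is how
 * the lookup loop in do_ioctl() detects an unsupported command.
 */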
4817 /* ??? Implement proper locking for ioctls. */
4818 /* do_ioctl() must return target values and target errnos. */
4819 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4821 const IOCTLEntry *ie;
4822 const argtype *arg_type;
4824 uint8_t buf_temp[MAX_STRUCT_SIZE];
4830 if (ie->target_cmd == 0) {
4831 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4832 return -TARGET_ENOSYS;
4834 if (ie->target_cmd == cmd)
4838 arg_type = ie->arg_type;
4840 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4841 } else if (!ie->host_cmd) {
4842 /* Some architectures define BSD ioctls in their headers
4843 that are not implemented in Linux. */
4844 return -TARGET_ENOSYS;
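/* Generic conversion path: the thunk type description drives everything
 * from here on.  TYPE_NULL means no argument, TYPE_INT passes the value
 * through unchanged, and TYPE_PTR dispatches on ie->access: IOC_R
 * converts the host result out to the guest, IOC_W converts the guest
 * input in before the call, and IOC_RW does both.
 */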
4847 switch(arg_type[0]) {
4850 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4854 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4858 target_size = thunk_type_size(arg_type, 0);
4859 switch(ie->access) {
4861 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4862 if (!is_error(ret)) {
4863 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4865 return -TARGET_EFAULT;
4866 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4867 unlock_user(argptr, arg, target_size);
4871 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4873 return -TARGET_EFAULT;
4874 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4875 unlock_user(argptr, arg, 0);
4876 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4880 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4882 return -TARGET_EFAULT;
4883 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4884 unlock_user(argptr, arg, 0);
4885 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4886 if (!is_error(ret)) {
4887 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4889 return -TARGET_EFAULT;
4890 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4891 unlock_user(argptr, arg, target_size);
4897 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4898 (long)cmd, arg_type[0]);
4899 ret = -TARGET_ENOSYS;
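/* The termios translation tables below follow the bitmask_transtbl
 * layout { target_mask, target_bits, host_mask, host_bits }:
 * target_to_host_bitmask() ORs host_bits into the result whenever
 * (value & target_mask) == target_bits, and the reverse direction is
 * symmetric.  Single-bit flags repeat the same flag in all four slots;
 * multi-bit fields such as NLDLY or CBAUD get one entry per value.
 */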
4905 static const bitmask_transtbl iflag_tbl[] = {
4906 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4907 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4908 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4909 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4910 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4911 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4912 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4913 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4914 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4915 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4916 { TARGET_IXON, TARGET_IXON, IXON, IXON },
4917 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4918 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4919 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4923 static const bitmask_transtbl oflag_tbl[] = {
4924 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4925 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4926 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4927 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4928 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4929 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4930 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4931 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4932 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4933 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4934 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4935 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4936 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4937 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4938 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4939 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4940 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4941 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4942 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4943 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4944 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4945 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4946 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4947 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4951 static const bitmask_transtbl cflag_tbl[] = {
4952 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4953 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4954 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4955 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4956 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4957 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4958 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4959 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4960 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4961 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4962 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4963 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4964 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4965 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4966 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4967 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4968 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4969 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4970 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4971 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4972 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4973 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4974 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4975 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4976 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4977 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4978 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4979 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4980 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4981 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4982 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4986 static const bitmask_transtbl lflag_tbl[] = {
4987 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4988 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4989 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4990 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4991 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4992 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4993 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4994 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4995 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4996 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4997 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4998 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4999 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5000 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5001 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5005 static void target_to_host_termios (void *dst, const void *src)
5007 struct host_termios *host = dst;
5008 const struct target_termios *target = src;
5011 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5013 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5015 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5017 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5018 host->c_line = target->c_line;
5020 memset(host->c_cc, 0, sizeof(host->c_cc));
5021 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5022 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5023 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5024 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5025 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5026 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5027 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5028 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5029 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5030 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5031 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5032 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5033 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5034 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5035 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5036 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5037 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5040 static void host_to_target_termios (void *dst, const void *src)
5042 struct target_termios *target = dst;
5043 const struct host_termios *host = src;
5046 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5048 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5050 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5052 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5053 target->c_line = host->c_line;
5055 memset(target->c_cc, 0, sizeof(target->c_cc));
5056 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5057 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5058 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5059 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5060 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5061 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5062 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5063 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5064 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5065 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5066 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5067 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5068 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5069 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5070 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5071 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5072 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5075 static const StructEntry struct_termios_def = {
5076 .convert = { host_to_target_termios, target_to_host_termios },
5077 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5078 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
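/* Note: termios can't be converted by the generic field-by-field walk
 * because the flag words need bitmask translation and c_cc needs index
 * remapping, so this StructEntry registers the two hand-written
 * converters above for the thunk core to call instead.
 */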
5081 static bitmask_transtbl mmap_flags_tbl[] = {
5082 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5083 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5084 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5085 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5086 MAP_ANONYMOUS, MAP_ANONYMOUS },
5087 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5088 MAP_GROWSDOWN, MAP_GROWSDOWN },
5089 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5090 MAP_DENYWRITE, MAP_DENYWRITE },
5091 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5092 MAP_EXECUTABLE, MAP_EXECUTABLE },
5093 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5094 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5095 MAP_NORESERVE, MAP_NORESERVE },
5096 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5097 /* MAP_STACK has been ignored by the kernel for quite some time.
5098 Recognize it for the target insofar as we do not want to pass
5099 it through to the host. */
5100 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5104 #if defined(TARGET_I386)
5106 /* NOTE: there is really only one LDT shared by all the threads */
5107 static uint8_t *ldt_table;
5109 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5116 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5117 if (size > bytecount)
5119 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5121 return -TARGET_EFAULT;
5122 /* ??? Should this be byteswapped? */
5123 memcpy(p, ldt_table, size);
5124 unlock_user(p, ptr, size);
5128 /* XXX: add locking support */
5129 static abi_long write_ldt(CPUX86State *env,
5130 abi_ulong ptr, unsigned long bytecount, int oldmode)
5132 struct target_modify_ldt_ldt_s ldt_info;
5133 struct target_modify_ldt_ldt_s *target_ldt_info;
5134 int seg_32bit, contents, read_exec_only, limit_in_pages;
5135 int seg_not_present, useable, lm;
5136 uint32_t *lp, entry_1, entry_2;
5138 if (bytecount != sizeof(ldt_info))
5139 return -TARGET_EINVAL;
5140 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5141 return -TARGET_EFAULT;
5142 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5143 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5144 ldt_info.limit = tswap32(target_ldt_info->limit);
5145 ldt_info.flags = tswap32(target_ldt_info->flags);
5146 unlock_user_struct(target_ldt_info, ptr, 0);
5148 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5149 return -TARGET_EINVAL;
5150 seg_32bit = ldt_info.flags & 1;
5151 contents = (ldt_info.flags >> 1) & 3;
5152 read_exec_only = (ldt_info.flags >> 3) & 1;
5153 limit_in_pages = (ldt_info.flags >> 4) & 1;
5154 seg_not_present = (ldt_info.flags >> 5) & 1;
5155 useable = (ldt_info.flags >> 6) & 1;
5159 lm = (ldt_info.flags >> 7) & 1;
5161 if (contents == 3) {
5163 return -TARGET_EINVAL;
5164 if (seg_not_present == 0)
5165 return -TARGET_EINVAL;
5167 /* allocate the LDT */
5169 env->ldt.base = target_mmap(0,
5170 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5171 PROT_READ|PROT_WRITE,
5172 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5173 if (env->ldt.base == -1)
5174 return -TARGET_ENOMEM;
5175 memset(g2h(env->ldt.base), 0,
5176 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5177 env->ldt.limit = 0xffff;
5178 ldt_table = g2h(env->ldt.base);
5181 /* NOTE: same code as Linux kernel */
5182 /* Allow LDTs to be cleared by the user. */
5183 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5186 read_exec_only == 1 &&
5188 limit_in_pages == 0 &&
5189 seg_not_present == 1 &&
5197 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5198 (ldt_info.limit & 0x0ffff);
5199 entry_2 = (ldt_info.base_addr & 0xff000000) |
5200 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5201 (ldt_info.limit & 0xf0000) |
5202 ((read_exec_only ^ 1) << 9) |
5204 ((seg_not_present ^ 1) << 15) |
5206 (limit_in_pages << 23) |
5210 entry_2 |= (useable << 20);
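/* At this point entry_1/entry_2 hold the two 32-bit words of an x86
 * segment descriptor in hardware format: entry_1 packs base[15:0] in
 * its high half above limit[15:0], while entry_2 carries base[31:24]
 * and base[23:16] around limit[19:16] plus the access and flag bits
 * assembled above.
 */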
5212 /* Install the new entry ... */
5214 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5215 lp[0] = tswap32(entry_1);
5216 lp[1] = tswap32(entry_2);
5220 /* specific and weird i386 syscalls */
5221 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5222 unsigned long bytecount)
5228 ret = read_ldt(ptr, bytecount);
5231 ret = write_ldt(env, ptr, bytecount, 1);
5234 ret = write_ldt(env, ptr, bytecount, 0);
5237 ret = -TARGET_ENOSYS;
5243 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5244 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5246 uint64_t *gdt_table = g2h(env->gdt.base);
5247 struct target_modify_ldt_ldt_s ldt_info;
5248 struct target_modify_ldt_ldt_s *target_ldt_info;
5249 int seg_32bit, contents, read_exec_only, limit_in_pages;
5250 int seg_not_present, useable, lm;
5251 uint32_t *lp, entry_1, entry_2;
5254 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5255 if (!target_ldt_info)
5256 return -TARGET_EFAULT;
5257 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5258 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5259 ldt_info.limit = tswap32(target_ldt_info->limit);
5260 ldt_info.flags = tswap32(target_ldt_info->flags);
5261 if (ldt_info.entry_number == -1) {
5262 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5263 if (gdt_table[i] == 0) {
5264 ldt_info.entry_number = i;
5265 target_ldt_info->entry_number = tswap32(i);
5270 unlock_user_struct(target_ldt_info, ptr, 1);
5272 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5273 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5274 return -TARGET_EINVAL;
5275 seg_32bit = ldt_info.flags & 1;
5276 contents = (ldt_info.flags >> 1) & 3;
5277 read_exec_only = (ldt_info.flags >> 3) & 1;
5278 limit_in_pages = (ldt_info.flags >> 4) & 1;
5279 seg_not_present = (ldt_info.flags >> 5) & 1;
5280 useable = (ldt_info.flags >> 6) & 1;
5284 lm = (ldt_info.flags >> 7) & 1;
5287 if (contents == 3) {
5288 if (seg_not_present == 0)
5289 return -TARGET_EINVAL;
5292 /* NOTE: same code as Linux kernel */
5293 /* Allow LDTs to be cleared by the user. */
5294 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5295 if ((contents == 0 &&
5296 read_exec_only == 1 &&
5298 limit_in_pages == 0 &&
5299 seg_not_present == 1 &&
5307 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5308 (ldt_info.limit & 0x0ffff);
5309 entry_2 = (ldt_info.base_addr & 0xff000000) |
5310 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5311 (ldt_info.limit & 0xf0000) |
5312 ((read_exec_only ^ 1) << 9) |
5314 ((seg_not_present ^ 1) << 15) |
5316 (limit_in_pages << 23) |
5321 /* Install the new entry ... */
5323 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5324 lp[0] = tswap32(entry_1);
5325 lp[1] = tswap32(entry_2);
5329 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5331 struct target_modify_ldt_ldt_s *target_ldt_info;
5332 uint64_t *gdt_table = g2h(env->gdt.base);
5333 uint32_t base_addr, limit, flags;
5334 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5335 int seg_not_present, useable, lm;
5336 uint32_t *lp, entry_1, entry_2;
5338 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5339 if (!target_ldt_info)
5340 return -TARGET_EFAULT;
5341 idx = tswap32(target_ldt_info->entry_number);
5342 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5343 idx > TARGET_GDT_ENTRY_TLS_MAX) {
5344 unlock_user_struct(target_ldt_info, ptr, 1);
5345 return -TARGET_EINVAL;
5347 lp = (uint32_t *)(gdt_table + idx);
5348 entry_1 = tswap32(lp[0]);
5349 entry_2 = tswap32(lp[1]);
5351 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5352 contents = (entry_2 >> 10) & 3;
5353 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5354 seg_32bit = (entry_2 >> 22) & 1;
5355 limit_in_pages = (entry_2 >> 23) & 1;
5356 useable = (entry_2 >> 20) & 1;
5360 lm = (entry_2 >> 21) & 1;
5362 flags = (seg_32bit << 0) | (contents << 1) |
5363 (read_exec_only << 3) | (limit_in_pages << 4) |
5364 (seg_not_present << 5) | (useable << 6) | (lm << 7);
5365 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
5366 base_addr = (entry_1 >> 16) |
5367 (entry_2 & 0xff000000) |
5368 ((entry_2 & 0xff) << 16);
5369 target_ldt_info->base_addr = tswapal(base_addr);
5370 target_ldt_info->limit = tswap32(limit);
5371 target_ldt_info->flags = tswap32(flags);
5372 unlock_user_struct(target_ldt_info, ptr, 1);
5375 #endif /* TARGET_I386 && TARGET_ABI32 */
5377 #ifndef TARGET_ABI32
5378 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5385 case TARGET_ARCH_SET_GS:
5386 case TARGET_ARCH_SET_FS:
5387 if (code == TARGET_ARCH_SET_GS)
5391 cpu_x86_load_seg(env, idx, 0);
5392 env->segs[idx].base = addr;
5394 case TARGET_ARCH_GET_GS:
5395 case TARGET_ARCH_GET_FS:
5396 if (code == TARGET_ARCH_GET_GS)
5400 val = env->segs[idx].base;
5401 if (put_user(val, addr, abi_ulong))
5402 ret = -TARGET_EFAULT;
5405 ret = -TARGET_EINVAL;
5412 #endif /* defined(TARGET_I386) */
5414 #define NEW_STACK_SIZE 0x40000
5417 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5420 pthread_mutex_t mutex;
5421 pthread_cond_t cond;
5424 abi_ulong child_tidptr;
5425 abi_ulong parent_tidptr;
5429 static void *clone_func(void *arg)
5431 new_thread_info *info = arg;
5436 rcu_register_thread();
5437 tcg_register_thread();
5439 cpu = ENV_GET_CPU(env);
5441 ts = (TaskState *)cpu->opaque;
5442 info->tid = gettid();
5444 if (info->child_tidptr)
5445 put_user_u32(info->tid, info->child_tidptr);
5446 if (info->parent_tidptr)
5447 put_user_u32(info->tid, info->parent_tidptr);
5448 /* Enable signals. */
5449 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5450 /* Signal to the parent that we're ready. */
5451 pthread_mutex_lock(&info->mutex);
5452 pthread_cond_broadcast(&info->cond);
5453 pthread_mutex_unlock(&info->mutex);
5454 /* Wait until the parent has finished initializing the tls state. */
5455 pthread_mutex_lock(&clone_lock);
5456 pthread_mutex_unlock(&clone_lock);
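/* Thread start-up handshake: the parent takes clone_lock before calling
 * pthread_create() and releases it only once the new CPU and TLS state
 * are fully set up.  The child first publishes its TID and signals
 * info->cond so the parent's do_fork() can proceed, then briefly
 * acquires clone_lock itself to wait for that set-up to finish.
 */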
5462 /* do_fork() must return host values and target errnos (unlike most
5463 do_*() functions). */
5464 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5465 abi_ulong parent_tidptr, target_ulong newtls,
5466 abi_ulong child_tidptr)
5468 CPUState *cpu = ENV_GET_CPU(env);
5472 CPUArchState *new_env;
5475 flags &= ~CLONE_IGNORED_FLAGS;
5477 /* Emulate vfork() with fork() */
5478 if (flags & CLONE_VFORK)
5479 flags &= ~(CLONE_VFORK | CLONE_VM);
5481 if (flags & CLONE_VM) {
5482 TaskState *parent_ts = (TaskState *)cpu->opaque;
5483 new_thread_info info;
5484 pthread_attr_t attr;
5486 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5487 (flags & CLONE_INVALID_THREAD_FLAGS)) {
5488 return -TARGET_EINVAL;
5491 ts = g_new0(TaskState, 1);
5492 init_task_state(ts);
5494 /* Grab a mutex so that thread setup appears atomic. */
5495 pthread_mutex_lock(&clone_lock);
5497 /* we create a new CPU instance. */
5498 new_env = cpu_copy(env);
5499 /* Init regs that differ from the parent. */
5500 cpu_clone_regs(new_env, newsp);
5501 new_cpu = ENV_GET_CPU(new_env);
5502 new_cpu->opaque = ts;
5503 ts->bprm = parent_ts->bprm;
5504 ts->info = parent_ts->info;
5505 ts->signal_mask = parent_ts->signal_mask;
5507 if (flags & CLONE_CHILD_CLEARTID) {
5508 ts->child_tidptr = child_tidptr;
5511 if (flags & CLONE_SETTLS) {
5512 cpu_set_tls (new_env, newtls);
5515 memset(&info, 0, sizeof(info));
5516 pthread_mutex_init(&info.mutex, NULL);
5517 pthread_mutex_lock(&info.mutex);
5518 pthread_cond_init(&info.cond, NULL);
5520 if (flags & CLONE_CHILD_SETTID) {
5521 info.child_tidptr = child_tidptr;
5523 if (flags & CLONE_PARENT_SETTID) {
5524 info.parent_tidptr = parent_tidptr;
5527 ret = pthread_attr_init(&attr);
5528 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5529 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5530 /* It is not safe to deliver signals until the child has finished
5531 initializing, so temporarily block all signals. */
5532 sigfillset(&sigmask);
5533 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5535 /* If this is our first additional thread, we need to ensure we
5536 * generate code for parallel execution and flush old translations.
5538 if (!parallel_cpus) {
5539 parallel_cpus = true;
5543 ret = pthread_create(&info.thread, &attr, clone_func, &info);
5544 /* TODO: Free new CPU state if thread creation failed. */
5546 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5547 pthread_attr_destroy(&attr);
5549 /* Wait for the child to initialize. */
5550 pthread_cond_wait(&info.cond, &info.mutex);
5555 pthread_mutex_unlock(&info.mutex);
5556 pthread_cond_destroy(&info.cond);
5557 pthread_mutex_destroy(&info.mutex);
5558 pthread_mutex_unlock(&clone_lock);
5560 /* if CLONE_VM is not set, we consider it a fork */
5561 if (flags & CLONE_INVALID_FORK_FLAGS) {
5562 return -TARGET_EINVAL;
5565 /* We can't support custom termination signals */
5566 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5567 return -TARGET_EINVAL;
5570 if (block_signals()) {
5571 return -TARGET_ERESTARTSYS;
5577 /* Child Process. */
5578 cpu_clone_regs(env, newsp);
5580 /* There is a race condition here. The parent process could
5581 theoretically read the TID in the child process before the child
5582 tid is set. Avoiding this would require either using ptrace
5583 (not implemented) or having *_tidptr point at a shared memory
5584 mapping. We can't repeat the spinlock hack used above because
5585 the child process gets its own copy of the lock. */
5586 if (flags & CLONE_CHILD_SETTID)
5587 put_user_u32(gettid(), child_tidptr);
5588 if (flags & CLONE_PARENT_SETTID)
5589 put_user_u32(gettid(), parent_tidptr);
5590 ts = (TaskState *)cpu->opaque;
5591 if (flags & CLONE_SETTLS)
5592 cpu_set_tls (env, newtls);
5593 if (flags & CLONE_CHILD_CLEARTID)
5594 ts->child_tidptr = child_tidptr;
5602 /* warning: doesn't handle Linux-specific flags... */
5603 static int target_to_host_fcntl_cmd(int cmd)
5608 case TARGET_F_DUPFD:
5609 case TARGET_F_GETFD:
5610 case TARGET_F_SETFD:
5611 case TARGET_F_GETFL:
5612 case TARGET_F_SETFL:
5615 case TARGET_F_GETLK:
5618 case TARGET_F_SETLK:
5621 case TARGET_F_SETLKW:
5624 case TARGET_F_GETOWN:
5627 case TARGET_F_SETOWN:
5630 case TARGET_F_GETSIG:
5633 case TARGET_F_SETSIG:
5636 #if TARGET_ABI_BITS == 32
5637 case TARGET_F_GETLK64:
5640 case TARGET_F_SETLK64:
5643 case TARGET_F_SETLKW64:
5647 case TARGET_F_SETLEASE:
5650 case TARGET_F_GETLEASE:
5653 #ifdef F_DUPFD_CLOEXEC
5654 case TARGET_F_DUPFD_CLOEXEC:
5655 ret = F_DUPFD_CLOEXEC;
5658 case TARGET_F_NOTIFY:
5662 case TARGET_F_GETOWN_EX:
5667 case TARGET_F_SETOWN_EX:
5672 case TARGET_F_SETPIPE_SZ:
5675 case TARGET_F_GETPIPE_SZ:
5680 ret = -TARGET_EINVAL;
5684 #if defined(__powerpc64__)
5685 /* On PPC64, the glibc headers define the F_*LK* constants as 12, 13 and
5686 * 14, which are not supported by the kernel. The glibc fcntl wrapper
5687 * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
5688 * the syscall directly, adjust to what the kernel supports.
5690 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5691 ret -= F_GETLK64 - 5;
5698 #define FLOCK_TRANSTBL \
5700 TRANSTBL_CONVERT(F_RDLCK); \
5701 TRANSTBL_CONVERT(F_WRLCK); \
5702 TRANSTBL_CONVERT(F_UNLCK); \
5703 TRANSTBL_CONVERT(F_EXLCK); \
5704 TRANSTBL_CONVERT(F_SHLCK); \
5707 static int target_to_host_flock(int type)
5709 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5711 #undef TRANSTBL_CONVERT
5712 return -TARGET_EINVAL;
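/* FLOCK_TRANSTBL is a small X-macro: each conversion function defines
 * TRANSTBL_CONVERT() to emit a case label for one direction, expands
 * the list, then undefines it again, so a single list of lock types
 * serves both target-to-host and host-to-target translation.
 */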
5715 static int host_to_target_flock(int type)
5717 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5719 #undef TRANSTBL_CONVERT
5720 /* if we don't know how to convert the value coming
5721 * from the host, we copy it to the target field as-is
5726 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5727 abi_ulong target_flock_addr)
5729 struct target_flock *target_fl;
5732 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5733 return -TARGET_EFAULT;
5736 __get_user(l_type, &target_fl->l_type);
5737 l_type = target_to_host_flock(l_type);
5741 fl->l_type = l_type;
5742 __get_user(fl->l_whence, &target_fl->l_whence);
5743 __get_user(fl->l_start, &target_fl->l_start);
5744 __get_user(fl->l_len, &target_fl->l_len);
5745 __get_user(fl->l_pid, &target_fl->l_pid);
5746 unlock_user_struct(target_fl, target_flock_addr, 0);
5750 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5751 const struct flock64 *fl)
5753 struct target_flock *target_fl;
5756 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5757 return -TARGET_EFAULT;
5760 l_type = host_to_target_flock(fl->l_type);
5761 __put_user(l_type, &target_fl->l_type);
5762 __put_user(fl->l_whence, &target_fl->l_whence);
5763 __put_user(fl->l_start, &target_fl->l_start);
5764 __put_user(fl->l_len, &target_fl->l_len);
5765 __put_user(fl->l_pid, &target_fl->l_pid);
5766 unlock_user_struct(target_fl, target_flock_addr, 1);
5770 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5771 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
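/* Note: these function-pointer typedefs let the fcntl64 path pick the
 * matching pair of flock64 copy helpers at run time; this presumably
 * matters for 32-bit ARM, where the old OABI lays out flock64
 * differently from EABI (see the variants below).
 */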
5773 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5774 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5775 abi_ulong target_flock_addr)
5777 struct target_oabi_flock64 *target_fl;
5780 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5781 return -TARGET_EFAULT;
5784 __get_user(l_type, &target_fl->l_type);
5785 l_type = target_to_host_flock(l_type);
5789 fl->l_type = l_type;
5790 __get_user(fl->l_whence, &target_fl->l_whence);
5791 __get_user(fl->l_start, &target_fl->l_start);
5792 __get_user(fl->l_len, &target_fl->l_len);
5793 __get_user(fl->l_pid, &target_fl->l_pid);
5794 unlock_user_struct(target_fl, target_flock_addr, 0);
5798 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5799 const struct flock64 *fl)
5801 struct target_oabi_flock64 *target_fl;
5804 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5805 return -TARGET_EFAULT;
5808 l_type = host_to_target_flock(fl->l_type);
5809 __put_user(l_type, &target_fl->l_type);
5810 __put_user(fl->l_whence, &target_fl->l_whence);
5811 __put_user(fl->l_start, &target_fl->l_start);
5812 __put_user(fl->l_len, &target_fl->l_len);
5813 __put_user(fl->l_pid, &target_fl->l_pid);
5814 unlock_user_struct(target_fl, target_flock_addr, 1);
5819 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5820 abi_ulong target_flock_addr)
5822 struct target_flock64 *target_fl;
5825 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5826 return -TARGET_EFAULT;
5829 __get_user(l_type, &target_fl->l_type);
5830 l_type = target_to_host_flock(l_type);
5834 fl->l_type = l_type;
5835 __get_user(fl->l_whence, &target_fl->l_whence);
5836 __get_user(fl->l_start, &target_fl->l_start);
5837 __get_user(fl->l_len, &target_fl->l_len);
5838 __get_user(fl->l_pid, &target_fl->l_pid);
5839 unlock_user_struct(target_fl, target_flock_addr, 0);
5843 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5844 const struct flock64 *fl)
5846 struct target_flock64 *target_fl;
5849 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5850 return -TARGET_EFAULT;
5853 l_type = host_to_target_flock(fl->l_type);
5854 __put_user(l_type, &target_fl->l_type);
5855 __put_user(fl->l_whence, &target_fl->l_whence);
5856 __put_user(fl->l_start, &target_fl->l_start);
5857 __put_user(fl->l_len, &target_fl->l_len);
5858 __put_user(fl->l_pid, &target_fl->l_pid);
5859 unlock_user_struct(target_fl, target_flock_addr, 1);
5863 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5865 struct flock64 fl64;
5867 struct f_owner_ex fox;
5868 struct target_f_owner_ex *target_fox;
5871 int host_cmd = target_to_host_fcntl_cmd(cmd);
5873 if (host_cmd == -TARGET_EINVAL)
5877 case TARGET_F_GETLK:
5878 ret = copy_from_user_flock(&fl64, arg);
5882 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5884 ret = copy_to_user_flock(arg, &fl64);
5888 case TARGET_F_SETLK:
5889 case TARGET_F_SETLKW:
5890 ret = copy_from_user_flock(&fl64, arg);
5894 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5897 case TARGET_F_GETLK64:
5898 ret = copy_from_user_flock64(&fl64, arg);
5902 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5904 ret = copy_to_user_flock64(arg, &fl64);
5907 case TARGET_F_SETLK64:
5908 case TARGET_F_SETLKW64:
5909 ret = copy_from_user_flock64(&fl64, arg);
5913 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5916 case TARGET_F_GETFL:
5917 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5919 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5923 case TARGET_F_SETFL:
5924 ret = get_errno(safe_fcntl(fd, host_cmd,
5925 target_to_host_bitmask(arg,
5930 case TARGET_F_GETOWN_EX:
5931 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5933 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5934 return -TARGET_EFAULT;
5935 target_fox->type = tswap32(fox.type);
5936 target_fox->pid = tswap32(fox.pid);
5937 unlock_user_struct(target_fox, arg, 1);
5943 case TARGET_F_SETOWN_EX:
5944 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5945 return -TARGET_EFAULT;
5946 fox.type = tswap32(target_fox->type);
5947 fox.pid = tswap32(target_fox->pid);
5948 unlock_user_struct(target_fox, arg, 0);
5949 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5953 case TARGET_F_SETOWN:
5954 case TARGET_F_GETOWN:
5955 case TARGET_F_SETSIG:
5956 case TARGET_F_GETSIG:
5957 case TARGET_F_SETLEASE:
5958 case TARGET_F_GETLEASE:
5959 case TARGET_F_SETPIPE_SZ:
5960 case TARGET_F_GETPIPE_SZ:
5961 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5965 ret = get_errno(safe_fcntl(fd, cmd, arg));
5973 static inline int high2lowuid(int uid)
5981 static inline int high2lowgid(int gid)
5989 static inline int low2highuid(int uid)
5991 if ((int16_t)uid == -1)
5997 static inline int low2highgid(int gid)
5999 if ((int16_t)gid == -1)
6004 static inline int tswapid(int id)
6009 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6011 #else /* !USE_UID16 */
6012 static inline int high2lowuid(int uid)
6016 static inline int high2lowgid(int gid)
6020 static inline int low2highuid(int uid)
6024 static inline int low2highgid(int gid)
6028 static inline int tswapid(int id)
6033 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6035 #endif /* USE_UID16 */
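/* Note: with USE_UID16 the guest ABI carries 16-bit IDs, so the
 * high2low*() helpers squeeze host IDs into 16 bits, while the
 * low2high*() helpers widen guest IDs and, as the (int16_t)id == -1
 * checks above show, keep the special value -1 ("no change") from
 * turning into 65535.
 */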
6037 /* We must do direct syscalls for setting UID/GID, because we want to
6038 * implement the Linux system call semantics of "change only for this thread",
6039 * not the libc/POSIX semantics of "change for all threads in process".
6040 * (See http://ewontfix.com/17/ for more details.)
6041 * We use the 32-bit version of the syscalls if present; if it is not
6042 * then either the host architecture supports 32-bit UIDs natively with
6043 * the standard syscall, or the 16-bit UID is the best we can do.
6045 #ifdef __NR_setuid32
6046 #define __NR_sys_setuid __NR_setuid32
6048 #define __NR_sys_setuid __NR_setuid
6050 #ifdef __NR_setgid32
6051 #define __NR_sys_setgid __NR_setgid32
6053 #define __NR_sys_setgid __NR_setgid
6055 #ifdef __NR_setresuid32
6056 #define __NR_sys_setresuid __NR_setresuid32
6058 #define __NR_sys_setresuid __NR_setresuid
6060 #ifdef __NR_setresgid32
6061 #define __NR_sys_setresgid __NR_setresgid32
6063 #define __NR_sys_setresgid __NR_setresgid
6066 _syscall1(int, sys_setuid, uid_t, uid)
6067 _syscall1(int, sys_setgid, gid_t, gid)
6068 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6069 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
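/* The _syscallN() macros (defined earlier in this file) expand to tiny
 * wrappers that invoke syscall() directly with the __NR_sys_* numbers
 * chosen above, so e.g. sys_setuid() bypasses the libc wrapper and gets
 * the per-thread kernel semantics described in the comment above.
 */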
6071 void syscall_init(void)
6074 const argtype *arg_type;
6078 thunk_init(STRUCT_MAX);
6080 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6081 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6082 #include "syscall_types.h"
6084 #undef STRUCT_SPECIAL
6086 /* Build target_to_host_errno_table[] table from
6087 * host_to_target_errno_table[]. */
6088 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6089 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6092 /* We patch the ioctl size if necessary. We rely on the fact that
6093 no ioctl has all bits set to '1' in the size field */
6095 while (ie->target_cmd != 0) {
6096 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6097 TARGET_IOC_SIZEMASK) {
6098 arg_type = ie->arg_type;
6099 if (arg_type[0] != TYPE_PTR) {
6100 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6105 size = thunk_type_size(arg_type, 0);
6106 ie->target_cmd = (ie->target_cmd &
6107 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6108 (size << TARGET_IOC_SIZESHIFT);
6111 /* automatic consistency check if same arch */
6112 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6113 (defined(__x86_64__) && defined(TARGET_X86_64))
6114 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6115 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6116 ie->name, ie->target_cmd, ie->host_cmd);
6123 #if TARGET_ABI_BITS == 32
6124 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6126 #ifdef TARGET_WORDS_BIGENDIAN
6127 return ((uint64_t)word0 << 32) | word1;
6129 return ((uint64_t)word1 << 32) | word0;
6132 #else /* TARGET_ABI_BITS == 32 */
6133 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6137 #endif /* TARGET_ABI_BITS != 32 */
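/* Note: on 32-bit ABIs a 64-bit file offset arrives split across two
 * registers and target_offset64() reassembles it according to guest
 * endianness.  For example, offset 0x123456789 on a little-endian
 * 32-bit guest arrives as word0 = 0x23456789 and word1 = 0x00000001.
 */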
6139 #ifdef TARGET_NR_truncate64
6140 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6145 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6149 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6153 #ifdef TARGET_NR_ftruncate64
6154 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6159 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6163 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6167 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6168 abi_ulong target_addr)
6170 struct target_timespec *target_ts;
6172 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6173 return -TARGET_EFAULT;
6174 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6175 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6176 unlock_user_struct(target_ts, target_addr, 0);
6180 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6181 struct timespec *host_ts)
6183 struct target_timespec *target_ts;
6185 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6186 return -TARGET_EFAULT;
6187 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6188 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6189 unlock_user_struct(target_ts, target_addr, 1);
6193 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6194 abi_ulong target_addr)
6196 struct target_itimerspec *target_itspec;
6198 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6199 return -TARGET_EFAULT;
6202 host_itspec->it_interval.tv_sec =
6203 tswapal(target_itspec->it_interval.tv_sec);
6204 host_itspec->it_interval.tv_nsec =
6205 tswapal(target_itspec->it_interval.tv_nsec);
6206 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6207 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6209 unlock_user_struct(target_itspec, target_addr, 1);
6213 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6214 struct itimerspec *host_its)
6216 struct target_itimerspec *target_itspec;
6218 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6219 return -TARGET_EFAULT;
6222 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6223 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6225 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6226 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6228 unlock_user_struct(target_itspec, target_addr, 0);
6232 static inline abi_long target_to_host_timex(struct timex *host_tx,
6233 abi_long target_addr)
6235 struct target_timex *target_tx;
6237 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6238 return -TARGET_EFAULT;
6241 __get_user(host_tx->modes, &target_tx->modes);
6242 __get_user(host_tx->offset, &target_tx->offset);
6243 __get_user(host_tx->freq, &target_tx->freq);
6244 __get_user(host_tx->maxerror, &target_tx->maxerror);
6245 __get_user(host_tx->esterror, &target_tx->esterror);
6246 __get_user(host_tx->status, &target_tx->status);
6247 __get_user(host_tx->constant, &target_tx->constant);
6248 __get_user(host_tx->precision, &target_tx->precision);
6249 __get_user(host_tx->tolerance, &target_tx->tolerance);
6250 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6251 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6252 __get_user(host_tx->tick, &target_tx->tick);
6253 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6254 __get_user(host_tx->jitter, &target_tx->jitter);
6255 __get_user(host_tx->shift, &target_tx->shift);
6256 __get_user(host_tx->stabil, &target_tx->stabil);
6257 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6258 __get_user(host_tx->calcnt, &target_tx->calcnt);
6259 __get_user(host_tx->errcnt, &target_tx->errcnt);
6260 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6261 __get_user(host_tx->tai, &target_tx->tai);
6263 unlock_user_struct(target_tx, target_addr, 0);
6267 static inline abi_long host_to_target_timex(abi_long target_addr,
6268 struct timex *host_tx)
6270 struct target_timex *target_tx;
6272 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6273 return -TARGET_EFAULT;
6276 __put_user(host_tx->modes, &target_tx->modes);
6277 __put_user(host_tx->offset, &target_tx->offset);
6278 __put_user(host_tx->freq, &target_tx->freq);
6279 __put_user(host_tx->maxerror, &target_tx->maxerror);
6280 __put_user(host_tx->esterror, &target_tx->esterror);
6281 __put_user(host_tx->status, &target_tx->status);
6282 __put_user(host_tx->constant, &target_tx->constant);
6283 __put_user(host_tx->precision, &target_tx->precision);
6284 __put_user(host_tx->tolerance, &target_tx->tolerance);
6285 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6286 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6287 __put_user(host_tx->tick, &target_tx->tick);
6288 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6289 __put_user(host_tx->jitter, &target_tx->jitter);
6290 __put_user(host_tx->shift, &target_tx->shift);
6291 __put_user(host_tx->stabil, &target_tx->stabil);
6292 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6293 __put_user(host_tx->calcnt, &target_tx->calcnt);
6294 __put_user(host_tx->errcnt, &target_tx->errcnt);
6295 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6296 __put_user(host_tx->tai, &target_tx->tai);
6298 unlock_user_struct(target_tx, target_addr, 1);
6303 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6304 abi_ulong target_addr)
6306 struct target_sigevent *target_sevp;
6308 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6309 return -TARGET_EFAULT;
6312 /* This union is awkward on 64-bit systems because it has a 32-bit
6313 * integer and a pointer in it; we follow the conversion approach
6314 * used for handling sigval types in signal.c so the guest should get
6315 * the correct value back even if we did a 64-bit byteswap and it's
6316 * using the 32-bit integer.
6318 host_sevp->sigev_value.sival_ptr =
6319 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6320 host_sevp->sigev_signo =
6321 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6322 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6323 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6325 unlock_user_struct(target_sevp, target_addr, 1);
6329 #if defined(TARGET_NR_mlockall)
6330 static inline int target_to_host_mlockall_arg(int arg)
6334 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6335 result |= MCL_CURRENT;
6337 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6338 result |= MCL_FUTURE;
6344 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
6345 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
6346 defined(TARGET_NR_newfstatat))
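/* Note: ARM/EABI guests use their own target_eabi_stat64 layout, so
 * that case is handled first; all other targets go through
 * target_stat64, or plain target_stat when the target defines no
 * stat64 variant of its own.
 */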
6347 static inline abi_long host_to_target_stat64(void *cpu_env,
6348 abi_ulong target_addr,
6349 struct stat *host_st)
6351 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6352 if (((CPUARMState *)cpu_env)->eabi) {
6353 struct target_eabi_stat64 *target_st;
6355 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6356 return -TARGET_EFAULT;
6357 memset(target_st, 0, sizeof(struct target_eabi_stat64));
6358 __put_user(host_st->st_dev, &target_st->st_dev);
6359 __put_user(host_st->st_ino, &target_st->st_ino);
6360 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6361 __put_user(host_st->st_ino, &target_st->__st_ino);
6363 __put_user(host_st->st_mode, &target_st->st_mode);
6364 __put_user(host_st->st_nlink, &target_st->st_nlink);
6365 __put_user(host_st->st_uid, &target_st->st_uid);
6366 __put_user(host_st->st_gid, &target_st->st_gid);
6367 __put_user(host_st->st_rdev, &target_st->st_rdev);
6368 __put_user(host_st->st_size, &target_st->st_size);
6369 __put_user(host_st->st_blksize, &target_st->st_blksize);
6370 __put_user(host_st->st_blocks, &target_st->st_blocks);
6371 __put_user(host_st->st_atime, &target_st->target_st_atime);
6372 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6373 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6374 unlock_user_struct(target_st, target_addr, 1);
6378 #if defined(TARGET_HAS_STRUCT_STAT64)
6379 struct target_stat64 *target_st;
6381 struct target_stat *target_st;
6384 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6385 return -TARGET_EFAULT;
6386 memset(target_st, 0, sizeof(*target_st));
6387 __put_user(host_st->st_dev, &target_st->st_dev);
6388 __put_user(host_st->st_ino, &target_st->st_ino);
6389 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6390 __put_user(host_st->st_ino, &target_st->__st_ino);
6392 __put_user(host_st->st_mode, &target_st->st_mode);
6393 __put_user(host_st->st_nlink, &target_st->st_nlink);
6394 __put_user(host_st->st_uid, &target_st->st_uid);
6395 __put_user(host_st->st_gid, &target_st->st_gid);
6396 __put_user(host_st->st_rdev, &target_st->st_rdev);
6397 /* XXX: better use of kernel struct */
6398 __put_user(host_st->st_size, &target_st->st_size);
6399 __put_user(host_st->st_blksize, &target_st->st_blksize);
6400 __put_user(host_st->st_blocks, &target_st->st_blocks);
6401 __put_user(host_st->st_atime, &target_st->target_st_atime);
6402 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6403 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6404 unlock_user_struct(target_st, target_addr, 1);
6411 /* ??? Using host futex calls even when target atomic operations
6412 are not really atomic probably breaks things. However, implementing
6413 futexes locally would make futexes shared between multiple processes
6414 tricky. That said, they're probably useless anyway, because guest
6415 atomic operations won't work either. */
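/* Note: uaddr is a guest virtual address; g2h() translates it to the
 * corresponding host pointer, so the host futex syscall waits on and
 * wakes exactly the word the guest code is synchronizing on.
 */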
6416 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6417 target_ulong uaddr2, int val3)
6419 struct timespec ts, *pts;
6422 /* ??? We assume FUTEX_* constants are the same on both host and target. */
6424 #ifdef FUTEX_CMD_MASK
6425 base_op = op & FUTEX_CMD_MASK;
6431 case FUTEX_WAIT_BITSET:
6434 target_to_host_timespec(pts, timeout);
6438 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6441 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6443 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6445 case FUTEX_CMP_REQUEUE:
6447 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6448 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6449 But the prototype takes a `struct timespec *'; insert casts
6450 to satisfy the compiler. We do not need to tswap TIMEOUT
6451 since it's not compared to guest memory. */
6452 pts = (struct timespec *)(uintptr_t) timeout;
6453 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6455 (base_op == FUTEX_CMP_REQUEUE
6459 return -TARGET_ENOSYS;
6462 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6463 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6464 abi_long handle, abi_long mount_id,
6467 struct file_handle *target_fh;
6468 struct file_handle *fh;
6472 unsigned int size, total_size;
6474 if (get_user_s32(size, handle)) {
6475 return -TARGET_EFAULT;
6478 name = lock_user_string(pathname);
6480 return -TARGET_EFAULT;
6483 total_size = sizeof(struct file_handle) + size;
6484 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6486 unlock_user(name, pathname, 0);
6487 return -TARGET_EFAULT;
6490 fh = g_malloc0(total_size);
6491 fh->handle_bytes = size;
6493 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6494 unlock_user(name, pathname, 0);
6496 /* man name_to_handle_at(2):
6497 * Other than the use of the handle_bytes field, the caller should treat
6498 * the file_handle structure as an opaque data type
6501 memcpy(target_fh, fh, total_size);
6502 target_fh->handle_bytes = tswap32(fh->handle_bytes);
6503 target_fh->handle_type = tswap32(fh->handle_type);
6505 unlock_user(target_fh, handle, total_size);
6507 if (put_user_s32(mid, mount_id)) {
6508 return -TARGET_EFAULT;
6516 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6517 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6520 struct file_handle *target_fh;
6521 struct file_handle *fh;
6522 unsigned int size, total_size;
6525 if (get_user_s32(size, handle)) {
6526 return -TARGET_EFAULT;
6529 total_size = sizeof(struct file_handle) + size;
6530 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6532 return -TARGET_EFAULT;
6535 fh = g_memdup(target_fh, total_size);
6536 fh->handle_bytes = size;
6537 fh->handle_type = tswap32(target_fh->handle_type);
6539 ret = get_errno(open_by_handle_at(mount_fd, fh,
6540 target_to_host_bitmask(flags, fcntl_flags_tbl)));
6544 unlock_user(target_fh, handle, total_size);
6550 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
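/* signalfd returns a descriptor whose reads yield struct
 * signalfd_siginfo records that need byte-swapping for the guest,
 * hence the fd_trans_register() call below attaching a translator to
 * the new fd.
 */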
6552 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6555 target_sigset_t *target_mask;
6559 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6560 return -TARGET_EINVAL;
6562 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6563 return -TARGET_EFAULT;
6566 target_to_host_sigset(&host_mask, target_mask);
6568 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6570 ret = get_errno(signalfd(fd, &host_mask, host_flags));
6572 fd_trans_register(ret, &target_signalfd_trans);
6575 unlock_user_struct(target_mask, mask, 0);
6581 /* Map host to target signal numbers for the wait family of syscalls.
6582 Assume all other status bits are the same. */
6583 int host_to_target_waitstatus(int status)
6585 if (WIFSIGNALED(status)) {
6586 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6588 if (WIFSTOPPED(status)) {
6589 return (host_to_target_signal(WSTOPSIG(status)) << 8)
6595 static int open_self_cmdline(void *cpu_env, int fd)
6597 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6598 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6601 for (i = 0; i < bprm->argc; i++) {
6602 size_t len = strlen(bprm->argv[i]) + 1;
6604 if (write(fd, bprm->argv[i], len) != len) {
6612 static int open_self_maps(void *cpu_env, int fd)
6614 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6615 TaskState *ts = cpu->opaque;
6621 fp = fopen("/proc/self/maps", "r");
6626 while ((read = getline(&line, &len, fp)) != -1) {
6627 int fields, dev_maj, dev_min, inode;
6628 uint64_t min, max, offset;
6629 char flag_r, flag_w, flag_x, flag_p;
6630 char path[512] = "";
6631 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6632 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6633 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6635 if ((fields < 10) || (fields > 11)) {
6638 if (h2g_valid(min)) {
6639 int flags = page_get_flags(h2g(min));
6640 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6641 if (page_check_range(h2g(min), max - min, flags) == -1) {
6644 if (h2g(min) == ts->info->stack_limit) {
6645 pstrcpy(path, sizeof(path), " [stack]");
6647 dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6648 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6649 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6650 flag_x, flag_p, offset, dev_maj, dev_min, inode,
6651 path[0] ? " " : "", path);
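/* Illustrative example of the rewrite above (addresses made up): a
 * guest-visible host mapping
 *   7f2e40001000-7f2e40002000 r-xp 00000000 08:01 1234  /lib/foo.so
 * is re-emitted with h2g() applied to both endpoints, so a 32-bit
 * guest might instead see
 *   00401000-00402000 r-xp 00000000 08:01 1234  /lib/foo.so
 */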
6661 static int open_self_stat(void *cpu_env, int fd)
6663 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6664 TaskState *ts = cpu->opaque;
6665 abi_ulong start_stack = ts->info->start_stack;
6668 for (i = 0; i < 44; i++) {
6676 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6677 } else if (i == 1) {
6679 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6680 } else if (i == 27) {
6683 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6685 /* for the rest, there is MasterCard: every remaining field reads as 0 */
6686 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6690 if (write(fd, buf, len) != len) {
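/* Sketch of the /proc/self/stat contents synthesized above: field 0 is
 * the process id, field 1 is "(argv[0])", field 27 is the guest's
 * stack start, and every other one of the 44 fields reads as 0, e.g.
 * (pid made up):
 *   4242 (a.out) 0 0 ... 0 <start_stack> 0 ... 0
 */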
6698 static int open_self_auxv(void *cpu_env, int fd)
6700 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6701 TaskState *ts = cpu->opaque;
6702 abi_ulong auxv = ts->info->saved_auxv;
6703 abi_ulong len = ts->info->auxv_len;
6707 * The auxiliary vector is stored on the target process stack.
6708 * Read in the whole auxv vector and copy it out to the file.
6710 ptr = lock_user(VERIFY_READ, auxv, len, 0);
6714 r = write(fd, ptr, len);
6721 lseek(fd, 0, SEEK_SET);
6722 unlock_user(ptr, auxv, len);
6728 static int is_proc_myself(const char *filename, const char *entry)
6730 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6731 filename += strlen("/proc/");
6732 if (!strncmp(filename, "self/", strlen("self/"))) {
6733 filename += strlen("self/");
6734 } else if (*filename >= '1' && *filename <= '9') {
6736 snprintf(myself, sizeof(myself), "%d/", getpid());
6737 if (!strncmp(filename, myself, strlen(myself))) {
6738 filename += strlen(myself);
6745 if (!strcmp(filename, entry)) {
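/* Usage examples for is_proc_myself(), assuming getpid() == 4242:
 *   is_proc_myself("/proc/self/maps", "maps")  -> match
 *   is_proc_myself("/proc/4242/maps", "maps")  -> match
 *   is_proc_myself("/proc/4243/maps", "maps")  -> no match (other pid)
 *   is_proc_myself("/etc/maps",       "maps")  -> no match
 */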
6752 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6753 static int is_proc(const char *filename, const char *entry)
6755 return strcmp(filename, entry) == 0;
6758 static int open_net_route(void *cpu_env, int fd)
6765 fp = fopen("/proc/net/route", "r");
6772 read = getline(&line, &len, fp);
6773 dprintf(fd, "%s", line);
6777 while ((read = getline(&line, &len, fp)) != -1) {
6779 uint32_t dest, gw, mask;
6780 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6783 fields = sscanf(line,
6784 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6785 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6786 &mask, &mtu, &window, &irtt);
6790 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6791 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6792 metric, tswap32(mask), mtu, window, irtt);
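/* The rewrite above is only compiled when host and guest endianness
 * differ (see the #if guarding this block): the dest, gateway and mask
 * columns of /proc/net/route are raw 32-bit values printed in host
 * byte order, so they must be tswap32()ed for the guest, while the
 * remaining columns pass through untouched.
 */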
6802 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6805 const char *filename;
6806 int (*fill)(void *cpu_env, int fd);
6807 int (*cmp)(const char *s1, const char *s2);
6809 const struct fake_open *fake_open;
6810 static const struct fake_open fakes[] = {
6811 { "maps", open_self_maps, is_proc_myself },
6812 { "stat", open_self_stat, is_proc_myself },
6813 { "auxv", open_self_auxv, is_proc_myself },
6814 { "cmdline", open_self_cmdline, is_proc_myself },
6815 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6816 { "/proc/net/route", open_net_route, is_proc },
6818 { NULL, NULL, NULL }
6821 if (is_proc_myself(pathname, "exe")) {
6822 int execfd = qemu_getauxval(AT_EXECFD);
6823 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6826 for (fake_open = fakes; fake_open->filename; fake_open++) {
6827 if (fake_open->cmp(pathname, fake_open->filename)) {
6832 if (fake_open->filename) {
6834 char filename[PATH_MAX];
6837 /* create temporary file to map stat to */
6838 tmpdir = getenv("TMPDIR");
6841 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6842 fd = mkstemp(filename);
6848 if ((r = fake_open->fill(cpu_env, fd))) {
6854 lseek(fd, 0, SEEK_SET);
6859 return safe_openat(dirfd, path(pathname), flags, mode);
6862 #define TIMER_MAGIC 0x0caf0000
6863 #define TIMER_MAGIC_MASK 0xffff0000
6865 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
6866 static target_timer_t get_timer_id(abi_long arg)
6868 target_timer_t timerid = arg;
6870 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6871 return -TARGET_EINVAL;
6876 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6877 return -TARGET_EINVAL;
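/* Timer IDs handed out to the guest are the internal table index
 * tagged with TIMER_MAGIC in the high 16 bits, so index 3 travels as
 * 0x0caf0003; the helper above checks the tag and recovers the index
 * by masking the magic back off.
 */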
6883 static int target_to_host_cpu_mask(unsigned long *host_mask,
6885 abi_ulong target_addr,
6888 unsigned target_bits = sizeof(abi_ulong) * 8;
6889 unsigned host_bits = sizeof(*host_mask) * 8;
6890 abi_ulong *target_mask;
6893 assert(host_size >= target_size);
6895 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
6897 return -TARGET_EFAULT;
6899 memset(host_mask, 0, host_size);
6901 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6902 unsigned bit = i * target_bits;
6905 __get_user(val, &target_mask[i]);
6906 for (j = 0; j < target_bits; j++, bit++) {
6907 if (val & (1UL << j)) {
6908 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
6913 unlock_user(target_mask, target_addr, 0);
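/* Example of the bit remapping above, assuming a 32-bit guest
 * (abi_ulong is 32 bits) on a 64-bit host: guest words 0 and 1 cover
 * CPUs 0-31 and 32-63 and both land in host word 0, so guest word 1
 * bit 5 (CPU 37) becomes host word 0 bit 37.
 */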
6917 static int host_to_target_cpu_mask(const unsigned long *host_mask,
6919 abi_ulong target_addr,
6922 unsigned target_bits = sizeof(abi_ulong) * 8;
6923 unsigned host_bits = sizeof(*host_mask) * 8;
6924 abi_ulong *target_mask;
6927 assert(host_size >= target_size);
6929 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
6931 return -TARGET_EFAULT;
6934 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6935 unsigned bit = i * target_bits;
6938 for (j = 0; j < target_bits; j++, bit++) {
6939 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
6943 __put_user(val, &target_mask[i]);
6946 unlock_user(target_mask, target_addr, target_size);
6950 /* This is an internal helper for do_syscall that provides a single
6951 * return point, so that actions such as logging of syscall results
6952 * can be performed in one place.
6953 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
6955 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
6956 abi_long arg2, abi_long arg3, abi_long arg4,
6957 abi_long arg5, abi_long arg6, abi_long arg7,
6960 CPUState *cpu = ENV_GET_CPU(cpu_env);
6962 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
6963 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
6964 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
6967 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
6968 || defined(TARGET_NR_fstatfs)
6974 case TARGET_NR_exit:
6975 /* In old applications this may be used to implement _exit(2).
6976 However in threaded applications it is used for thread termination,
6977 and _exit_group is used for application termination.
6978 Do thread termination if we have more than one thread. */
6980 if (block_signals()) {
6981 return -TARGET_ERESTARTSYS;
6986 if (CPU_NEXT(first_cpu)) {
6989 /* Remove the CPU from the list. */
6990 QTAILQ_REMOVE_RCU(&cpus, cpu, node);
6995 if (ts->child_tidptr) {
6996 put_user_u32(0, ts->child_tidptr);
6997 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7001 object_unref(OBJECT(cpu));
7003 rcu_unregister_thread();
7008 preexit_cleanup(cpu_env, arg1);
7010 return 0; /* avoid warning */
7011 case TARGET_NR_read:
7015 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7016 return -TARGET_EFAULT;
7017 ret = get_errno(safe_read(arg1, p, arg3));
7019 fd_trans_host_to_target_data(arg1)) {
7020 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7022 unlock_user(p, arg2, ret);
7025 case TARGET_NR_write:
7026 if (arg2 == 0 && arg3 == 0) {
7027 return get_errno(safe_write(arg1, 0, 0));
7029 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7030 return -TARGET_EFAULT;
7031 if (fd_trans_target_to_host_data(arg1)) {
7032 void *copy = g_malloc(arg3);
7033 memcpy(copy, p, arg3);
7034 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7036 ret = get_errno(safe_write(arg1, copy, ret));
7040 ret = get_errno(safe_write(arg1, p, arg3));
7042 unlock_user(p, arg2, 0);
7045 #ifdef TARGET_NR_open
7046 case TARGET_NR_open:
7047 if (!(p = lock_user_string(arg1)))
7048 return -TARGET_EFAULT;
7049 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7050 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7052 fd_trans_unregister(ret);
7053 unlock_user(p, arg1, 0);
7056 case TARGET_NR_openat:
7057 if (!(p = lock_user_string(arg2)))
7058 return -TARGET_EFAULT;
7059 ret = get_errno(do_openat(cpu_env, arg1, p,
7060 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7062 fd_trans_unregister(ret);
7063 unlock_user(p, arg2, 0);
7065 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7066 case TARGET_NR_name_to_handle_at:
7067 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7070 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7071 case TARGET_NR_open_by_handle_at:
7072 ret = do_open_by_handle_at(arg1, arg2, arg3);
7073 fd_trans_unregister(ret);
7076 case TARGET_NR_close:
7077 fd_trans_unregister(arg1);
7078 return get_errno(close(arg1));
7081 return do_brk(arg1);
7082 #ifdef TARGET_NR_fork
7083 case TARGET_NR_fork:
7084 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7086 #ifdef TARGET_NR_waitpid
7087 case TARGET_NR_waitpid:
7090 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7091 if (!is_error(ret) && arg2 && ret
7092 && put_user_s32(host_to_target_waitstatus(status), arg2))
7093 return -TARGET_EFAULT;
7097 #ifdef TARGET_NR_waitid
7098 case TARGET_NR_waitid:
7102 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7103 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7104 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7105 return -TARGET_EFAULT;
7106 host_to_target_siginfo(p, &info);
7107 unlock_user(p, arg3, sizeof(target_siginfo_t));
7112 #ifdef TARGET_NR_creat /* not on alpha */
7113 case TARGET_NR_creat:
7114 if (!(p = lock_user_string(arg1)))
7115 return -TARGET_EFAULT;
7116 ret = get_errno(creat(p, arg2));
7117 fd_trans_unregister(ret);
7118 unlock_user(p, arg1, 0);
7121 #ifdef TARGET_NR_link
7122 case TARGET_NR_link:
7125 p = lock_user_string(arg1);
7126 p2 = lock_user_string(arg2);
7128 ret = -TARGET_EFAULT;
7130 ret = get_errno(link(p, p2));
7131 unlock_user(p2, arg2, 0);
7132 unlock_user(p, arg1, 0);
7136 #if defined(TARGET_NR_linkat)
7137 case TARGET_NR_linkat:
7141 return -TARGET_EFAULT;
7142 p = lock_user_string(arg2);
7143 p2 = lock_user_string(arg4);
7145 ret = -TARGET_EFAULT;
7147 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7148 unlock_user(p, arg2, 0);
7149 unlock_user(p2, arg4, 0);
7153 #ifdef TARGET_NR_unlink
7154 case TARGET_NR_unlink:
7155 if (!(p = lock_user_string(arg1)))
7156 return -TARGET_EFAULT;
7157 ret = get_errno(unlink(p));
7158 unlock_user(p, arg1, 0);
7161 #if defined(TARGET_NR_unlinkat)
7162 case TARGET_NR_unlinkat:
7163 if (!(p = lock_user_string(arg2)))
7164 return -TARGET_EFAULT;
7165 ret = get_errno(unlinkat(arg1, p, arg3));
7166 unlock_user(p, arg2, 0);
7169 case TARGET_NR_execve:
7171 char **argp, **envp;
7174 abi_ulong guest_argp;
7175 abi_ulong guest_envp;
7182 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7183 if (get_user_ual(addr, gp))
7184 return -TARGET_EFAULT;
7191 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7192 if (get_user_ual(addr, gp))
7193 return -TARGET_EFAULT;
7199 argp = g_new0(char *, argc + 1);
7200 envp = g_new0(char *, envc + 1);
7202 for (gp = guest_argp, q = argp; gp;
7203 gp += sizeof(abi_ulong), q++) {
7204 if (get_user_ual(addr, gp))
7208 if (!(*q = lock_user_string(addr)))
7210 total_size += strlen(*q) + 1;
7214 for (gp = guest_envp, q = envp; gp;
7215 gp += sizeof(abi_ulong), q++) {
7216 if (get_user_ual(addr, gp))
7220 if (!(*q = lock_user_string(addr)))
7222 total_size += strlen(*q) + 1;
7226 if (!(p = lock_user_string(arg1)))
7228 /* Although execve() is not an interruptible syscall it is
7229 * a special case where we must use the safe_syscall wrapper:
7230 * if we allow a signal to happen before we make the host
7231 * syscall then we will 'lose' it, because at the point of
7232 * execve the process leaves QEMU's control. So we use the
7233 * safe syscall wrapper to ensure that we either take the
7234 * signal as a guest signal, or else it does not happen
7235 * before the execve completes and makes it the other
7236 * program's problem.
7238 ret = get_errno(safe_execve(p, argp, envp));
7239 unlock_user(p, arg1, 0);
7244 ret = -TARGET_EFAULT;
7247 for (gp = guest_argp, q = argp; *q;
7248 gp += sizeof(abi_ulong), q++) {
7249 if (get_user_ual(addr, gp)
7252 unlock_user(*q, addr, 0);
7254 for (gp = guest_envp, q = envp; *q;
7255 gp += sizeof(abi_ulong), q++) {
7256 if (get_user_ual(addr, gp)
7259 unlock_user(*q, addr, 0);
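/* Note on the execve marshalling above: the guest passes argv and envp
 * as arrays of guest pointers terminated by a NULL entry.  The code
 * makes one pass over each array to count the entries and a second to
 * lock every string into host memory, with matching unlock loops on
 * the exit path.
 */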
7266 case TARGET_NR_chdir:
7267 if (!(p = lock_user_string(arg1)))
7268 return -TARGET_EFAULT;
7269 ret = get_errno(chdir(p));
7270 unlock_user(p, arg1, 0);
7272 #ifdef TARGET_NR_time
7273 case TARGET_NR_time:
7276 ret = get_errno(time(&host_time));
7279 && put_user_sal(host_time, arg1))
7280 return -TARGET_EFAULT;
7284 #ifdef TARGET_NR_mknod
7285 case TARGET_NR_mknod:
7286 if (!(p = lock_user_string(arg1)))
7287 return -TARGET_EFAULT;
7288 ret = get_errno(mknod(p, arg2, arg3));
7289 unlock_user(p, arg1, 0);
7292 #if defined(TARGET_NR_mknodat)
7293 case TARGET_NR_mknodat:
7294 if (!(p = lock_user_string(arg2)))
7295 return -TARGET_EFAULT;
7296 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7297 unlock_user(p, arg2, 0);
7300 #ifdef TARGET_NR_chmod
7301 case TARGET_NR_chmod:
7302 if (!(p = lock_user_string(arg1)))
7303 return -TARGET_EFAULT;
7304 ret = get_errno(chmod(p, arg2));
7305 unlock_user(p, arg1, 0);
7308 #ifdef TARGET_NR_lseek
7309 case TARGET_NR_lseek:
7310 return get_errno(lseek(arg1, arg2, arg3));
7312 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7313 /* Alpha specific */
7314 case TARGET_NR_getxpid:
7315 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7316 return get_errno(getpid());
7318 #ifdef TARGET_NR_getpid
7319 case TARGET_NR_getpid:
7320 return get_errno(getpid());
7322 case TARGET_NR_mount:
7324 /* need to look at the data field */
7328 p = lock_user_string(arg1);
7330 return -TARGET_EFAULT;
7336 p2 = lock_user_string(arg2);
7339 unlock_user(p, arg1, 0);
7341 return -TARGET_EFAULT;
7345 p3 = lock_user_string(arg3);
7348 unlock_user(p, arg1, 0);
7350 unlock_user(p2, arg2, 0);
7351 return -TARGET_EFAULT;
7357 /* FIXME - arg5 should be locked, but it isn't clear how to
7358 * do that since it's not guaranteed to be a NULL-terminated type. */
7362 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7364 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7366 ret = get_errno(ret);
7369 unlock_user(p, arg1, 0);
7371 unlock_user(p2, arg2, 0);
7373 unlock_user(p3, arg3, 0);
7377 #ifdef TARGET_NR_umount
7378 case TARGET_NR_umount:
7379 if (!(p = lock_user_string(arg1)))
7380 return -TARGET_EFAULT;
7381 ret = get_errno(umount(p));
7382 unlock_user(p, arg1, 0);
7385 #ifdef TARGET_NR_stime /* not on alpha */
7386 case TARGET_NR_stime:
7389 if (get_user_sal(host_time, arg1))
7390 return -TARGET_EFAULT;
7391 return get_errno(stime(&host_time));
7394 #ifdef TARGET_NR_alarm /* not on alpha */
7395 case TARGET_NR_alarm:
7398 #ifdef TARGET_NR_pause /* not on alpha */
7399 case TARGET_NR_pause:
7400 if (!block_signals()) {
7401 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7403 return -TARGET_EINTR;
7405 #ifdef TARGET_NR_utime
7406 case TARGET_NR_utime:
7408 struct utimbuf tbuf, *host_tbuf;
7409 struct target_utimbuf *target_tbuf;
7411 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7412 return -TARGET_EFAULT;
7413 tbuf.actime = tswapal(target_tbuf->actime);
7414 tbuf.modtime = tswapal(target_tbuf->modtime);
7415 unlock_user_struct(target_tbuf, arg2, 0);
7420 if (!(p = lock_user_string(arg1)))
7421 return -TARGET_EFAULT;
7422 ret = get_errno(utime(p, host_tbuf));
7423 unlock_user(p, arg1, 0);
7427 #ifdef TARGET_NR_utimes
7428 case TARGET_NR_utimes:
7430 struct timeval *tvp, tv[2];
7432 if (copy_from_user_timeval(&tv[0], arg2)
7433 || copy_from_user_timeval(&tv[1],
7434 arg2 + sizeof(struct target_timeval)))
7435 return -TARGET_EFAULT;
7440 if (!(p = lock_user_string(arg1)))
7441 return -TARGET_EFAULT;
7442 ret = get_errno(utimes(p, tvp));
7443 unlock_user(p, arg1, 0);
7447 #if defined(TARGET_NR_futimesat)
7448 case TARGET_NR_futimesat:
7450 struct timeval *tvp, tv[2];
7452 if (copy_from_user_timeval(&tv[0], arg3)
7453 || copy_from_user_timeval(&tv[1],
7454 arg3 + sizeof(struct target_timeval)))
7455 return -TARGET_EFAULT;
7460 if (!(p = lock_user_string(arg2))) {
7461 return -TARGET_EFAULT;
7463 ret = get_errno(futimesat(arg1, path(p), tvp));
7464 unlock_user(p, arg2, 0);
7468 #ifdef TARGET_NR_access
7469 case TARGET_NR_access:
7470 if (!(p = lock_user_string(arg1))) {
7471 return -TARGET_EFAULT;
7473 ret = get_errno(access(path(p), arg2));
7474 unlock_user(p, arg1, 0);
7477 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7478 case TARGET_NR_faccessat:
7479 if (!(p = lock_user_string(arg2))) {
7480 return -TARGET_EFAULT;
7482 ret = get_errno(faccessat(arg1, p, arg3, 0));
7483 unlock_user(p, arg2, 0);
7486 #ifdef TARGET_NR_nice /* not on alpha */
7487 case TARGET_NR_nice:
7488 return get_errno(nice(arg1));
7490 case TARGET_NR_sync:
7493 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7494 case TARGET_NR_syncfs:
7495 return get_errno(syncfs(arg1));
7497 case TARGET_NR_kill:
7498 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7499 #ifdef TARGET_NR_rename
7500 case TARGET_NR_rename:
7503 p = lock_user_string(arg1);
7504 p2 = lock_user_string(arg2);
7506 ret = -TARGET_EFAULT;
7508 ret = get_errno(rename(p, p2));
7509 unlock_user(p2, arg2, 0);
7510 unlock_user(p, arg1, 0);
7514 #if defined(TARGET_NR_renameat)
7515 case TARGET_NR_renameat:
7518 p = lock_user_string(arg2);
7519 p2 = lock_user_string(arg4);
7521 ret = -TARGET_EFAULT;
7523 ret = get_errno(renameat(arg1, p, arg3, p2));
7524 unlock_user(p2, arg4, 0);
7525 unlock_user(p, arg2, 0);
7529 #if defined(TARGET_NR_renameat2)
7530 case TARGET_NR_renameat2:
7533 p = lock_user_string(arg2);
7534 p2 = lock_user_string(arg4);
7536 ret = -TARGET_EFAULT;
7538 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7540 unlock_user(p2, arg4, 0);
7541 unlock_user(p, arg2, 0);
7545 #ifdef TARGET_NR_mkdir
7546 case TARGET_NR_mkdir:
7547 if (!(p = lock_user_string(arg1)))
7548 return -TARGET_EFAULT;
7549 ret = get_errno(mkdir(p, arg2));
7550 unlock_user(p, arg1, 0);
7553 #if defined(TARGET_NR_mkdirat)
7554 case TARGET_NR_mkdirat:
7555 if (!(p = lock_user_string(arg2)))
7556 return -TARGET_EFAULT;
7557 ret = get_errno(mkdirat(arg1, p, arg3));
7558 unlock_user(p, arg2, 0);
7561 #ifdef TARGET_NR_rmdir
7562 case TARGET_NR_rmdir:
7563 if (!(p = lock_user_string(arg1)))
7564 return -TARGET_EFAULT;
7565 ret = get_errno(rmdir(p));
7566 unlock_user(p, arg1, 0);
7570 ret = get_errno(dup(arg1));
7572 fd_trans_dup(arg1, ret);
7575 #ifdef TARGET_NR_pipe
7576 case TARGET_NR_pipe:
7577 return do_pipe(cpu_env, arg1, 0, 0);
7579 #ifdef TARGET_NR_pipe2
7580 case TARGET_NR_pipe2:
7581 return do_pipe(cpu_env, arg1,
7582 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7584 case TARGET_NR_times:
7586 struct target_tms *tmsp;
7588 ret = get_errno(times(&tms));
7590 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7592 return -TARGET_EFAULT;
7593 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7594 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7595 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7596 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7599 ret = host_to_target_clock_t(ret);
7602 case TARGET_NR_acct:
7604 ret = get_errno(acct(NULL));
7606 if (!(p = lock_user_string(arg1))) {
7607 return -TARGET_EFAULT;
7609 ret = get_errno(acct(path(p)));
7610 unlock_user(p, arg1, 0);
7613 #ifdef TARGET_NR_umount2
7614 case TARGET_NR_umount2:
7615 if (!(p = lock_user_string(arg1)))
7616 return -TARGET_EFAULT;
7617 ret = get_errno(umount2(p, arg2));
7618 unlock_user(p, arg1, 0);
7621 case TARGET_NR_ioctl:
7622 return do_ioctl(arg1, arg2, arg3);
7623 #ifdef TARGET_NR_fcntl
7624 case TARGET_NR_fcntl:
7625 return do_fcntl(arg1, arg2, arg3);
7627 case TARGET_NR_setpgid:
7628 return get_errno(setpgid(arg1, arg2));
7629 case TARGET_NR_umask:
7630 return get_errno(umask(arg1));
7631 case TARGET_NR_chroot:
7632 if (!(p = lock_user_string(arg1)))
7633 return -TARGET_EFAULT;
7634 ret = get_errno(chroot(p));
7635 unlock_user(p, arg1, 0);
7637 #ifdef TARGET_NR_dup2
7638 case TARGET_NR_dup2:
7639 ret = get_errno(dup2(arg1, arg2));
7641 fd_trans_dup(arg1, arg2);
7645 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7646 case TARGET_NR_dup3:
7650 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7653 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7654 ret = get_errno(dup3(arg1, arg2, host_flags));
7656 fd_trans_dup(arg1, arg2);
7661 #ifdef TARGET_NR_getppid /* not on alpha */
7662 case TARGET_NR_getppid:
7663 return get_errno(getppid());
7665 #ifdef TARGET_NR_getpgrp
7666 case TARGET_NR_getpgrp:
7667 return get_errno(getpgrp());
7669 case TARGET_NR_setsid:
7670 return get_errno(setsid());
7671 #ifdef TARGET_NR_sigaction
7672 case TARGET_NR_sigaction:
7674 #if defined(TARGET_ALPHA)
7675 struct target_sigaction act, oact, *pact = 0;
7676 struct target_old_sigaction *old_act;
7678 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7679 return -TARGET_EFAULT;
7680 act._sa_handler = old_act->_sa_handler;
7681 target_siginitset(&act.sa_mask, old_act->sa_mask);
7682 act.sa_flags = old_act->sa_flags;
7683 act.sa_restorer = 0;
7684 unlock_user_struct(old_act, arg2, 0);
7687 ret = get_errno(do_sigaction(arg1, pact, &oact));
7688 if (!is_error(ret) && arg3) {
7689 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7690 return -TARGET_EFAULT;
7691 old_act->_sa_handler = oact._sa_handler;
7692 old_act->sa_mask = oact.sa_mask.sig[0];
7693 old_act->sa_flags = oact.sa_flags;
7694 unlock_user_struct(old_act, arg3, 1);
7696 #elif defined(TARGET_MIPS)
7697 struct target_sigaction act, oact, *pact, *old_act;
7700 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7701 return -TARGET_EFAULT;
7702 act._sa_handler = old_act->_sa_handler;
7703 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7704 act.sa_flags = old_act->sa_flags;
7705 unlock_user_struct(old_act, arg2, 0);
7711 ret = get_errno(do_sigaction(arg1, pact, &oact));
7713 if (!is_error(ret) && arg3) {
7714 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7715 return -TARGET_EFAULT;
7716 old_act->_sa_handler = oact._sa_handler;
7717 old_act->sa_flags = oact.sa_flags;
7718 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7719 old_act->sa_mask.sig[1] = 0;
7720 old_act->sa_mask.sig[2] = 0;
7721 old_act->sa_mask.sig[3] = 0;
7722 unlock_user_struct(old_act, arg3, 1);
7725 struct target_old_sigaction *old_act;
7726 struct target_sigaction act, oact, *pact;
7728 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7729 return -TARGET_EFAULT;
7730 act._sa_handler = old_act->_sa_handler;
7731 target_siginitset(&act.sa_mask, old_act->sa_mask);
7732 act.sa_flags = old_act->sa_flags;
7733 act.sa_restorer = old_act->sa_restorer;
7734 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7735 act.ka_restorer = 0;
7737 unlock_user_struct(old_act, arg2, 0);
7742 ret = get_errno(do_sigaction(arg1, pact, &oact));
7743 if (!is_error(ret) && arg3) {
7744 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7745 return -TARGET_EFAULT;
7746 old_act->_sa_handler = oact._sa_handler;
7747 old_act->sa_mask = oact.sa_mask.sig[0];
7748 old_act->sa_flags = oact.sa_flags;
7749 old_act->sa_restorer = oact.sa_restorer;
7750 unlock_user_struct(old_act, arg3, 1);
7756 case TARGET_NR_rt_sigaction:
7758 #if defined(TARGET_ALPHA)
7759 /* For Alpha and SPARC this is a 5 argument syscall, with
7760 * a 'restorer' parameter which must be copied into the
7761 * sa_restorer field of the sigaction struct.
7762 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7763 * and arg5 is the sigsetsize.
7764 * Alpha also has a separate rt_sigaction struct that it uses
7765 * here; SPARC uses the usual sigaction struct.
7767 struct target_rt_sigaction *rt_act;
7768 struct target_sigaction act, oact, *pact = 0;
7770 if (arg4 != sizeof(target_sigset_t)) {
7771 return -TARGET_EINVAL;
7774 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7775 return -TARGET_EFAULT;
7776 act._sa_handler = rt_act->_sa_handler;
7777 act.sa_mask = rt_act->sa_mask;
7778 act.sa_flags = rt_act->sa_flags;
7779 act.sa_restorer = arg5;
7780 unlock_user_struct(rt_act, arg2, 0);
7783 ret = get_errno(do_sigaction(arg1, pact, &oact));
7784 if (!is_error(ret) && arg3) {
7785 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7786 return -TARGET_EFAULT;
7787 rt_act->_sa_handler = oact._sa_handler;
7788 rt_act->sa_mask = oact.sa_mask;
7789 rt_act->sa_flags = oact.sa_flags;
7790 unlock_user_struct(rt_act, arg3, 1);
7794 target_ulong restorer = arg4;
7795 target_ulong sigsetsize = arg5;
7797 target_ulong sigsetsize = arg4;
7799 struct target_sigaction *act;
7800 struct target_sigaction *oact;
7802 if (sigsetsize != sizeof(target_sigset_t)) {
7803 return -TARGET_EINVAL;
7806 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7807 return -TARGET_EFAULT;
7809 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7810 act->ka_restorer = restorer;
7816 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7817 ret = -TARGET_EFAULT;
7818 goto rt_sigaction_fail;
7822 ret = get_errno(do_sigaction(arg1, act, oact));
7825 unlock_user_struct(act, arg2, 0);
7827 unlock_user_struct(oact, arg3, 1);
7831 #ifdef TARGET_NR_sgetmask /* not on alpha */
7832 case TARGET_NR_sgetmask:
7835 abi_ulong target_set;
7836 ret = do_sigprocmask(0, NULL, &cur_set);
7838 host_to_target_old_sigset(&target_set, &cur_set);
7844 #ifdef TARGET_NR_ssetmask /* not on alpha */
7845 case TARGET_NR_ssetmask:
7848 abi_ulong target_set = arg1;
7849 target_to_host_old_sigset(&set, &target_set);
7850 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7852 host_to_target_old_sigset(&target_set, &oset);
7858 #ifdef TARGET_NR_sigprocmask
7859 case TARGET_NR_sigprocmask:
7861 #if defined(TARGET_ALPHA)
7862 sigset_t set, oldset;
7867 case TARGET_SIG_BLOCK:
7870 case TARGET_SIG_UNBLOCK:
7873 case TARGET_SIG_SETMASK:
7877 return -TARGET_EINVAL;
7880 target_to_host_old_sigset(&set, &mask);
7882 ret = do_sigprocmask(how, &set, &oldset);
7883 if (!is_error(ret)) {
7884 host_to_target_old_sigset(&mask, &oldset);
7886 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7889 sigset_t set, oldset, *set_ptr;
7894 case TARGET_SIG_BLOCK:
7897 case TARGET_SIG_UNBLOCK:
7900 case TARGET_SIG_SETMASK:
7904 return -TARGET_EINVAL;
7906 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7907 return -TARGET_EFAULT;
7908 target_to_host_old_sigset(&set, p);
7909 unlock_user(p, arg2, 0);
7915 ret = do_sigprocmask(how, set_ptr, &oldset);
7916 if (!is_error(ret) && arg3) {
7917 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7918 return -TARGET_EFAULT;
7919 host_to_target_old_sigset(p, &oldset);
7920 unlock_user(p, arg3, sizeof(target_sigset_t));
7926 case TARGET_NR_rt_sigprocmask:
7929 sigset_t set, oldset, *set_ptr;
7931 if (arg4 != sizeof(target_sigset_t)) {
7932 return -TARGET_EINVAL;
7937 case TARGET_SIG_BLOCK:
7940 case TARGET_SIG_UNBLOCK:
7943 case TARGET_SIG_SETMASK:
7947 return -TARGET_EINVAL;
7949 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7950 return -TARGET_EFAULT;
7951 target_to_host_sigset(&set, p);
7952 unlock_user(p, arg2, 0);
7958 ret = do_sigprocmask(how, set_ptr, &oldset);
7959 if (!is_error(ret) && arg3) {
7960 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7961 return -TARGET_EFAULT;
7962 host_to_target_sigset(p, &oldset);
7963 unlock_user(p, arg3, sizeof(target_sigset_t));
7967 #ifdef TARGET_NR_sigpending
7968 case TARGET_NR_sigpending:
7971 ret = get_errno(sigpending(&set));
7972 if (!is_error(ret)) {
7973 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7974 return -TARGET_EFAULT;
7975 host_to_target_old_sigset(p, &set);
7976 unlock_user(p, arg1, sizeof(target_sigset_t));
7981 case TARGET_NR_rt_sigpending:
7985 /* Yes, this check is >, not != like most. We follow the kernel's
7986 * logic here: the kernel implements NR_sigpending through the same
7987 * code path, and in that case the old_sigset_t is smaller in
7988 * size.
7990 if (arg2 > sizeof(target_sigset_t)) {
7991 return -TARGET_EINVAL;
7994 ret = get_errno(sigpending(&set));
7995 if (!is_error(ret)) {
7996 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7997 return -TARGET_EFAULT;
7998 host_to_target_sigset(p, &set);
7999 unlock_user(p, arg1, sizeof(target_sigset_t));
8003 #ifdef TARGET_NR_sigsuspend
8004 case TARGET_NR_sigsuspend:
8006 TaskState *ts = cpu->opaque;
8007 #if defined(TARGET_ALPHA)
8008 abi_ulong mask = arg1;
8009 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8011 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8012 return -TARGET_EFAULT;
8013 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8014 unlock_user(p, arg1, 0);
8016 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8018 if (ret != -TARGET_ERESTARTSYS) {
8019 ts->in_sigsuspend = 1;
8024 case TARGET_NR_rt_sigsuspend:
8026 TaskState *ts = cpu->opaque;
8028 if (arg2 != sizeof(target_sigset_t)) {
8029 return -TARGET_EINVAL;
8031 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8032 return -TARGET_EFAULT;
8033 target_to_host_sigset(&ts->sigsuspend_mask, p);
8034 unlock_user(p, arg1, 0);
8035 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8037 if (ret != -TARGET_ERESTARTSYS) {
8038 ts->in_sigsuspend = 1;
8042 case TARGET_NR_rt_sigtimedwait:
8045 struct timespec uts, *puts;
8048 if (arg4 != sizeof(target_sigset_t)) {
8049 return -TARGET_EINVAL;
8052 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8053 return -TARGET_EFAULT;
8054 target_to_host_sigset(&set, p);
8055 unlock_user(p, arg1, 0);
8058 target_to_host_timespec(puts, arg3);
8062 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8064 if (!is_error(ret)) {
8066 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8069 return -TARGET_EFAULT;
8071 host_to_target_siginfo(p, &uinfo);
8072 unlock_user(p, arg2, sizeof(target_siginfo_t));
8074 ret = host_to_target_signal(ret);
8078 case TARGET_NR_rt_sigqueueinfo:
8082 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8084 return -TARGET_EFAULT;
8086 target_to_host_siginfo(&uinfo, p);
8087 unlock_user(p, arg3, 0);
8088 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8091 case TARGET_NR_rt_tgsigqueueinfo:
8095 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8097 return -TARGET_EFAULT;
8099 target_to_host_siginfo(&uinfo, p);
8100 unlock_user(p, arg4, 0);
8101 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8104 #ifdef TARGET_NR_sigreturn
8105 case TARGET_NR_sigreturn:
8106 if (block_signals()) {
8107 return -TARGET_ERESTARTSYS;
8109 return do_sigreturn(cpu_env);
8111 case TARGET_NR_rt_sigreturn:
8112 if (block_signals()) {
8113 return -TARGET_ERESTARTSYS;
8115 return do_rt_sigreturn(cpu_env);
8116 case TARGET_NR_sethostname:
8117 if (!(p = lock_user_string(arg1)))
8118 return -TARGET_EFAULT;
8119 ret = get_errno(sethostname(p, arg2));
8120 unlock_user(p, arg1, 0);
8122 #ifdef TARGET_NR_setrlimit
8123 case TARGET_NR_setrlimit:
8125 int resource = target_to_host_resource(arg1);
8126 struct target_rlimit *target_rlim;
8128 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8129 return -TARGET_EFAULT;
8130 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8131 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8132 unlock_user_struct(target_rlim, arg2, 0);
8134 * If we just passed through resource limit settings for memory then
8135 * they would also apply to QEMU's own allocations, and QEMU will
8136 * crash or hang or die if its allocations fail. Ideally we would
8137 * track the guest allocations in QEMU and apply the limits ourselves.
8138 * For now, just tell the guest the call succeeded but don't actually limit anything. */
8141 if (resource != RLIMIT_AS &&
8142 resource != RLIMIT_DATA &&
8143 resource != RLIMIT_STACK) {
8144 return get_errno(setrlimit(resource, &rlim));
8150 #ifdef TARGET_NR_getrlimit
8151 case TARGET_NR_getrlimit:
8153 int resource = target_to_host_resource(arg1);
8154 struct target_rlimit *target_rlim;
8157 ret = get_errno(getrlimit(resource, &rlim));
8158 if (!is_error(ret)) {
8159 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8160 return -TARGET_EFAULT;
8161 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8162 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8163 unlock_user_struct(target_rlim, arg2, 1);
8168 case TARGET_NR_getrusage:
8170 struct rusage rusage;
8171 ret = get_errno(getrusage(arg1, &rusage));
8172 if (!is_error(ret)) {
8173 ret = host_to_target_rusage(arg2, &rusage);
8177 case TARGET_NR_gettimeofday:
8180 ret = get_errno(gettimeofday(&tv, NULL));
8181 if (!is_error(ret)) {
8182 if (copy_to_user_timeval(arg1, &tv))
8183 return -TARGET_EFAULT;
8187 case TARGET_NR_settimeofday:
8189 struct timeval tv, *ptv = NULL;
8190 struct timezone tz, *ptz = NULL;
8193 if (copy_from_user_timeval(&tv, arg1)) {
8194 return -TARGET_EFAULT;
8200 if (copy_from_user_timezone(&tz, arg2)) {
8201 return -TARGET_EFAULT;
8206 return get_errno(settimeofday(ptv, ptz));
8208 #if defined(TARGET_NR_select)
8209 case TARGET_NR_select:
8210 #if defined(TARGET_WANT_NI_OLD_SELECT)
8211 /* some architectures used to have old_select here
8212 * but now return ENOSYS for it.
8214 ret = -TARGET_ENOSYS;
8215 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8216 ret = do_old_select(arg1);
8218 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8222 #ifdef TARGET_NR_pselect6
8223 case TARGET_NR_pselect6:
8225 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8226 fd_set rfds, wfds, efds;
8227 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8228 struct timespec ts, *ts_ptr;
8231 * The 6th arg is actually two args smashed together,
8232 * so we cannot use the C library.
8240 abi_ulong arg_sigset, arg_sigsize, *arg7;
8241 target_sigset_t *target_sigset;
8249 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8253 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8257 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8263 * This takes a timespec, and not a timeval, so we cannot
8264 * use the do_select() helper ...
8267 if (target_to_host_timespec(&ts, ts_addr)) {
8268 return -TARGET_EFAULT;
8275 /* Extract the two packed args for the sigset */
8278 sig.size = SIGSET_T_SIZE;
8280 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8282 return -TARGET_EFAULT;
8284 arg_sigset = tswapal(arg7[0]);
8285 arg_sigsize = tswapal(arg7[1]);
8286 unlock_user(arg7, arg6, 0);
8290 if (arg_sigsize != sizeof(*target_sigset)) {
8291 /* Like the kernel, we enforce correct size sigsets */
8292 return -TARGET_EINVAL;
8294 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8295 sizeof(*target_sigset), 1);
8296 if (!target_sigset) {
8297 return -TARGET_EFAULT;
8299 target_to_host_sigset(&set, target_sigset);
8300 unlock_user(target_sigset, arg_sigset, 0);
8308 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8311 if (!is_error(ret)) {
8312 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8313 return -TARGET_EFAULT;
8314 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8315 return -TARGET_EFAULT;
8316 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8317 return -TARGET_EFAULT;
8319 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8320 return -TARGET_EFAULT;
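/* The sixth pselect6 argument unpacked above is a guest pointer to two
 * packed words, conceptually:
 *
 *   struct {
 *       abi_ulong sigset;      // guest pointer to a target_sigset_t
 *       abi_ulong sigsetsize;  // must equal sizeof(target_sigset_t)
 *   };
 *
 * which is why it cannot be passed straight through the C library and
 * is fetched with lock_user() instead.
 */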
8325 #ifdef TARGET_NR_symlink
8326 case TARGET_NR_symlink:
8329 p = lock_user_string(arg1);
8330 p2 = lock_user_string(arg2);
8332 ret = -TARGET_EFAULT;
8334 ret = get_errno(symlink(p, p2));
8335 unlock_user(p2, arg2, 0);
8336 unlock_user(p, arg1, 0);
8340 #if defined(TARGET_NR_symlinkat)
8341 case TARGET_NR_symlinkat:
8344 p = lock_user_string(arg1);
8345 p2 = lock_user_string(arg3);
8347 ret = -TARGET_EFAULT;
8349 ret = get_errno(symlinkat(p, arg2, p2));
8350 unlock_user(p2, arg3, 0);
8351 unlock_user(p, arg1, 0);
8355 #ifdef TARGET_NR_readlink
8356 case TARGET_NR_readlink:
8359 p = lock_user_string(arg1);
8360 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8362 ret = -TARGET_EFAULT;
8364 /* Short circuit this for the magic exe check. */
8365 ret = -TARGET_EINVAL;
8366 } else if (is_proc_myself((const char *)p, "exe")) {
8367 char real[PATH_MAX], *temp;
8368 temp = realpath(exec_path, real);
8369 /* Return value is # of bytes that we wrote to the buffer. */
8371 ret = get_errno(-1);
8373 /* Don't worry about sign mismatch as earlier mapping
8374 * logic would have thrown a bad address error. */
8375 ret = MIN(strlen(real), arg3);
8376 /* We cannot NUL terminate the string. */
8377 memcpy(p2, real, ret);
8380 ret = get_errno(readlink(path(p), p2, arg3));
8382 unlock_user(p2, arg2, ret);
8383 unlock_user(p, arg1, 0);
8387 #if defined(TARGET_NR_readlinkat)
8388 case TARGET_NR_readlinkat:
8391 p = lock_user_string(arg2);
8392 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8394 ret = -TARGET_EFAULT;
8395 } else if (is_proc_myself((const char *)p, "exe")) {
8396 char real[PATH_MAX], *temp;
8397 temp = realpath(exec_path, real);
8398 ret = temp == NULL ? get_errno(-1) : strlen(real);
8399 snprintf((char *)p2, arg4, "%s", real);
8401 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8403 unlock_user(p2, arg3, ret);
8404 unlock_user(p, arg2, 0);
8408 #ifdef TARGET_NR_swapon
8409 case TARGET_NR_swapon:
8410 if (!(p = lock_user_string(arg1)))
8411 return -TARGET_EFAULT;
8412 ret = get_errno(swapon(p, arg2));
8413 unlock_user(p, arg1, 0);
8416 case TARGET_NR_reboot:
8417 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8418 /* arg4 must be ignored in all other cases */
8419 p = lock_user_string(arg4);
8421 return -TARGET_EFAULT;
8423 ret = get_errno(reboot(arg1, arg2, arg3, p));
8424 unlock_user(p, arg4, 0);
8426 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8429 #ifdef TARGET_NR_mmap
8430 case TARGET_NR_mmap:
8431 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8432 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8433 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8434 || defined(TARGET_S390X)
8437 abi_ulong v1, v2, v3, v4, v5, v6;
8438 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8439 return -TARGET_EFAULT;
8446 unlock_user(v, arg1, 0);
8447 ret = get_errno(target_mmap(v1, v2, v3,
8448 target_to_host_bitmask(v4, mmap_flags_tbl),
8452 ret = get_errno(target_mmap(arg1, arg2, arg3,
8453 target_to_host_bitmask(arg4, mmap_flags_tbl),
8459 #ifdef TARGET_NR_mmap2
8460 case TARGET_NR_mmap2:
8462 #define MMAP_SHIFT 12
8464 ret = target_mmap(arg1, arg2, arg3,
8465 target_to_host_bitmask(arg4, mmap_flags_tbl),
8466 arg5, arg6 << MMAP_SHIFT);
8467 return get_errno(ret);
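/* mmap2 takes its file offset in fixed 4096-byte units (hence the
 * constant MMAP_SHIFT above, independent of the target page size):
 * e.g. arg6 == 0x10 means byte offset 0x10 << 12 == 0x10000.
 */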
8469 case TARGET_NR_munmap:
8470 return get_errno(target_munmap(arg1, arg2));
8471 case TARGET_NR_mprotect:
8473 TaskState *ts = cpu->opaque;
8474 /* Special hack to detect libc making the stack executable. */
8475 if ((arg3 & PROT_GROWSDOWN)
8476 && arg1 >= ts->info->stack_limit
8477 && arg1 <= ts->info->start_stack) {
8478 arg3 &= ~PROT_GROWSDOWN;
8479 arg2 = arg2 + arg1 - ts->info->stack_limit;
8480 arg1 = ts->info->stack_limit;
8483 return get_errno(target_mprotect(arg1, arg2, arg3));
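/* Illustrative numbers for the PROT_GROWSDOWN hack above: with
 * stack_limit == 0x40000000 and start_stack == 0x40800000, a guest
 * mprotect(0x40400000, 0x1000, PROT_READ|PROT_WRITE|PROT_GROWSDOWN)
 * is widened to cover [0x40000000, 0x40401000), i.e. everything from
 * the stack limit up to the requested end, with PROT_GROWSDOWN
 * dropped since the widened range no longer needs to grow.
 */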
8484 #ifdef TARGET_NR_mremap
8485 case TARGET_NR_mremap:
8486 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8488 /* ??? msync/mlock/munlock are broken for softmmu. */
8489 #ifdef TARGET_NR_msync
8490 case TARGET_NR_msync:
8491 return get_errno(msync(g2h(arg1), arg2, arg3));
8493 #ifdef TARGET_NR_mlock
8494 case TARGET_NR_mlock:
8495 return get_errno(mlock(g2h(arg1), arg2));
8497 #ifdef TARGET_NR_munlock
8498 case TARGET_NR_munlock:
8499 return get_errno(munlock(g2h(arg1), arg2));
8501 #ifdef TARGET_NR_mlockall
8502 case TARGET_NR_mlockall:
8503 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8505 #ifdef TARGET_NR_munlockall
8506 case TARGET_NR_munlockall:
8507 return get_errno(munlockall());
8509 #ifdef TARGET_NR_truncate
8510 case TARGET_NR_truncate:
8511 if (!(p = lock_user_string(arg1)))
8512 return -TARGET_EFAULT;
8513 ret = get_errno(truncate(p, arg2));
8514 unlock_user(p, arg1, 0);
8517 #ifdef TARGET_NR_ftruncate
8518 case TARGET_NR_ftruncate:
8519 return get_errno(ftruncate(arg1, arg2));
8521 case TARGET_NR_fchmod:
8522 return get_errno(fchmod(arg1, arg2));
8523 #if defined(TARGET_NR_fchmodat)
8524 case TARGET_NR_fchmodat:
8525 if (!(p = lock_user_string(arg2)))
8526 return -TARGET_EFAULT;
8527 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8528 unlock_user(p, arg2, 0);
8531 case TARGET_NR_getpriority:
8532 /* Note that negative values are valid for getpriority, so we must
8533 differentiate based on errno settings. */
8535 ret = getpriority(arg1, arg2);
8536 if (ret == -1 && errno != 0) {
8537 return -host_to_target_errno(errno);
8540 /* Return value is the unbiased priority. Signal no error. */
8541 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8543 /* Return value is a biased priority to avoid negative numbers. */
8547 case TARGET_NR_setpriority:
8548 return get_errno(setpriority(arg1, arg2, arg3));
8549 #ifdef TARGET_NR_statfs
8550 case TARGET_NR_statfs:
8551 if (!(p = lock_user_string(arg1))) {
8552 return -TARGET_EFAULT;
8554 ret = get_errno(statfs(path(p), &stfs));
8555 unlock_user(p, arg1, 0);
8557 if (!is_error(ret)) {
8558 struct target_statfs *target_stfs;
8560 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8561 return -TARGET_EFAULT;
8562 __put_user(stfs.f_type, &target_stfs->f_type);
8563 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8564 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8565 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8566 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8567 __put_user(stfs.f_files, &target_stfs->f_files);
8568 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8569 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8570 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8571 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8572 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8573 #ifdef _STATFS_F_FLAGS
8574 __put_user(stfs.f_flags, &target_stfs->f_flags);
8576 __put_user(0, &target_stfs->f_flags);
8578 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8579 unlock_user_struct(target_stfs, arg2, 1);
8583 #ifdef TARGET_NR_fstatfs
8584 case TARGET_NR_fstatfs:
8585 ret = get_errno(fstatfs(arg1, &stfs));
8586 goto convert_statfs;
8588 #ifdef TARGET_NR_statfs64
8589 case TARGET_NR_statfs64:
8590 if (!(p = lock_user_string(arg1))) {
8591 return -TARGET_EFAULT;
8593 ret = get_errno(statfs(path(p), &stfs));
8594 unlock_user(p, arg1, 0);
8596 if (!is_error(ret)) {
8597 struct target_statfs64 *target_stfs;
8599 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8600 return -TARGET_EFAULT;
8601 __put_user(stfs.f_type, &target_stfs->f_type);
8602 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8603 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8604 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8605 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8606 __put_user(stfs.f_files, &target_stfs->f_files);
8607 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8608 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8609 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8610 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8611 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8612 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8613 unlock_user_struct(target_stfs, arg3, 1);
8616 case TARGET_NR_fstatfs64:
8617 ret = get_errno(fstatfs(arg1, &stfs));
8618 goto convert_statfs64;
8620 #ifdef TARGET_NR_socketcall
8621 case TARGET_NR_socketcall:
8622 return do_socketcall(arg1, arg2);
8624 #ifdef TARGET_NR_accept
8625 case TARGET_NR_accept:
8626 return do_accept4(arg1, arg2, arg3, 0);
8628 #ifdef TARGET_NR_accept4
8629 case TARGET_NR_accept4:
8630 return do_accept4(arg1, arg2, arg3, arg4);
8632 #ifdef TARGET_NR_bind
8633 case TARGET_NR_bind:
8634 return do_bind(arg1, arg2, arg3);
8636 #ifdef TARGET_NR_connect
8637 case TARGET_NR_connect:
8638 return do_connect(arg1, arg2, arg3);
8640 #ifdef TARGET_NR_getpeername
8641 case TARGET_NR_getpeername:
8642 return do_getpeername(arg1, arg2, arg3);
8644 #ifdef TARGET_NR_getsockname
8645 case TARGET_NR_getsockname:
8646 return do_getsockname(arg1, arg2, arg3);
8648 #ifdef TARGET_NR_getsockopt
8649 case TARGET_NR_getsockopt:
8650 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8652 #ifdef TARGET_NR_listen
8653 case TARGET_NR_listen:
8654 return get_errno(listen(arg1, arg2));
8656 #ifdef TARGET_NR_recv
8657 case TARGET_NR_recv:
8658 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8660 #ifdef TARGET_NR_recvfrom
8661 case TARGET_NR_recvfrom:
8662 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8664 #ifdef TARGET_NR_recvmsg
8665 case TARGET_NR_recvmsg:
8666 return do_sendrecvmsg(arg1, arg2, arg3, 0);
8668 #ifdef TARGET_NR_send
8669 case TARGET_NR_send:
8670 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8672 #ifdef TARGET_NR_sendmsg
8673 case TARGET_NR_sendmsg:
8674 return do_sendrecvmsg(arg1, arg2, arg3, 1);
8676 #ifdef TARGET_NR_sendmmsg
8677 case TARGET_NR_sendmmsg:
8678 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8679 case TARGET_NR_recvmmsg:
8680 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8682 #ifdef TARGET_NR_sendto
8683 case TARGET_NR_sendto:
8684 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8686 #ifdef TARGET_NR_shutdown
8687 case TARGET_NR_shutdown:
8688 return get_errno(shutdown(arg1, arg2));
8690 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8691 case TARGET_NR_getrandom:
8692 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8694 return -TARGET_EFAULT;
8696 ret = get_errno(getrandom(p, arg2, arg3));
8697 unlock_user(p, arg1, ret);
8700 #ifdef TARGET_NR_socket
8701 case TARGET_NR_socket:
8702 return do_socket(arg1, arg2, arg3);
8704 #ifdef TARGET_NR_socketpair
8705 case TARGET_NR_socketpair:
8706 return do_socketpair(arg1, arg2, arg3, arg4);
8708 #ifdef TARGET_NR_setsockopt
8709 case TARGET_NR_setsockopt:
8710 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8712 #if defined(TARGET_NR_syslog)
8713 case TARGET_NR_syslog:
8718 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
8719 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
8720 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
8721 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
8722 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
8723 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8724 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
8725 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
8726 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8727 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
8728 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
8729 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
8732 return -TARGET_EINVAL;
8737 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8739 return -TARGET_EFAULT;
8741 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8742 unlock_user(p, arg2, arg3);
8746 return -TARGET_EINVAL;
8751 case TARGET_NR_setitimer:
8753 struct itimerval value, ovalue, *pvalue;
8757 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8758 || copy_from_user_timeval(&pvalue->it_value,
8759 arg2 + sizeof(struct target_timeval)))
8760 return -TARGET_EFAULT;
8764 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8765 if (!is_error(ret) && arg3) {
8766 if (copy_to_user_timeval(arg3,
8767 &ovalue.it_interval)
8768 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8770 return -TARGET_EFAULT;
8774 case TARGET_NR_getitimer:
8776 struct itimerval value;
8778 ret = get_errno(getitimer(arg1, &value));
8779 if (!is_error(ret) && arg2) {
8780 if (copy_to_user_timeval(arg2,
8782 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8784 return -TARGET_EFAULT;
8788 #ifdef TARGET_NR_stat
8789 case TARGET_NR_stat:
8790 if (!(p = lock_user_string(arg1))) {
8791 return -TARGET_EFAULT;
8793 ret = get_errno(stat(path(p), &st));
8794 unlock_user(p, arg1, 0);
8797 #ifdef TARGET_NR_lstat
8798 case TARGET_NR_lstat:
8799 if (!(p = lock_user_string(arg1))) {
8800 return -TARGET_EFAULT;
8802 ret = get_errno(lstat(path(p), &st));
8803 unlock_user(p, arg1, 0);
8806 #ifdef TARGET_NR_fstat
8807 case TARGET_NR_fstat:
8809 ret = get_errno(fstat(arg1, &st));
8810 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8813 if (!is_error(ret)) {
8814 struct target_stat *target_st;
8816 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8817 return -TARGET_EFAULT;
8818 memset(target_st, 0, sizeof(*target_st));
8819 __put_user(st.st_dev, &target_st->st_dev);
8820 __put_user(st.st_ino, &target_st->st_ino);
8821 __put_user(st.st_mode, &target_st->st_mode);
8822 __put_user(st.st_uid, &target_st->st_uid);
8823 __put_user(st.st_gid, &target_st->st_gid);
8824 __put_user(st.st_nlink, &target_st->st_nlink);
8825 __put_user(st.st_rdev, &target_st->st_rdev);
8826 __put_user(st.st_size, &target_st->st_size);
8827 __put_user(st.st_blksize, &target_st->st_blksize);
8828 __put_user(st.st_blocks, &target_st->st_blocks);
8829 __put_user(st.st_atime, &target_st->target_st_atime);
8830 __put_user(st.st_mtime, &target_st->target_st_mtime);
8831 __put_user(st.st_ctime, &target_st->target_st_ctime);
8832 unlock_user_struct(target_st, arg2, 1);
8837 case TARGET_NR_vhangup:
8838 return get_errno(vhangup());
8839 #ifdef TARGET_NR_syscall
8840 case TARGET_NR_syscall:
8841 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8842 arg6, arg7, arg8, 0);
8844 case TARGET_NR_wait4:
8847 abi_long status_ptr = arg2;
8848 struct rusage rusage, *rusage_ptr;
8849 abi_ulong target_rusage = arg4;
8850 abi_long rusage_err;
8852 rusage_ptr = &rusage;
8855 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8856 if (!is_error(ret)) {
8857 if (status_ptr && ret) {
8858 status = host_to_target_waitstatus(status);
8859 if (put_user_s32(status, status_ptr))
8860 return -TARGET_EFAULT;
8862 if (target_rusage) {
8863 rusage_err = host_to_target_rusage(target_rusage, &rusage);
8871 #ifdef TARGET_NR_swapoff
8872 case TARGET_NR_swapoff:
8873 if (!(p = lock_user_string(arg1)))
8874 return -TARGET_EFAULT;
8875 ret = get_errno(swapoff(p));
8876 unlock_user(p, arg1, 0);
8879 case TARGET_NR_sysinfo:
8881 struct target_sysinfo *target_value;
8882 struct sysinfo value;
8883 ret = get_errno(sysinfo(&value));
8884 if (!is_error(ret) && arg1)
8886 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8887 return -TARGET_EFAULT;
8888 __put_user(value.uptime, &target_value->uptime);
8889 __put_user(value.loads[0], &target_value->loads[0]);
8890 __put_user(value.loads[1], &target_value->loads[1]);
8891 __put_user(value.loads[2], &target_value->loads[2]);
8892 __put_user(value.totalram, &target_value->totalram);
8893 __put_user(value.freeram, &target_value->freeram);
8894 __put_user(value.sharedram, &target_value->sharedram);
8895 __put_user(value.bufferram, &target_value->bufferram);
8896 __put_user(value.totalswap, &target_value->totalswap);
8897 __put_user(value.freeswap, &target_value->freeswap);
8898 __put_user(value.procs, &target_value->procs);
8899 __put_user(value.totalhigh, &target_value->totalhigh);
8900 __put_user(value.freehigh, &target_value->freehigh);
8901 __put_user(value.mem_unit, &target_value->mem_unit);
8902 unlock_user_struct(target_value, arg1, 1);
8906 #ifdef TARGET_NR_ipc
8908 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
8910 #ifdef TARGET_NR_semget
8911 case TARGET_NR_semget:
8912 return get_errno(semget(arg1, arg2, arg3));
8914 #ifdef TARGET_NR_semop
8915 case TARGET_NR_semop:
8916 return do_semop(arg1, arg2, arg3);
8918 #ifdef TARGET_NR_semctl
8919 case TARGET_NR_semctl:
8920 return do_semctl(arg1, arg2, arg3, arg4);
8922 #ifdef TARGET_NR_msgctl
8923 case TARGET_NR_msgctl:
8924 return do_msgctl(arg1, arg2, arg3);
8926 #ifdef TARGET_NR_msgget
8927 case TARGET_NR_msgget:
8928 return get_errno(msgget(arg1, arg2));
8930 #ifdef TARGET_NR_msgrcv
8931 case TARGET_NR_msgrcv:
8932 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8934 #ifdef TARGET_NR_msgsnd
8935 case TARGET_NR_msgsnd:
8936 return do_msgsnd(arg1, arg2, arg3, arg4);
8938 #ifdef TARGET_NR_shmget
8939 case TARGET_NR_shmget:
8940 return get_errno(shmget(arg1, arg2, arg3));
8942 #ifdef TARGET_NR_shmctl
8943 case TARGET_NR_shmctl:
8944 return do_shmctl(arg1, arg2, arg3);
8946 #ifdef TARGET_NR_shmat
8947 case TARGET_NR_shmat:
8948 return do_shmat(cpu_env, arg1, arg2, arg3);
8950 #ifdef TARGET_NR_shmdt
8951 case TARGET_NR_shmdt:
8952 return do_shmdt(arg1);
8954 case TARGET_NR_fsync:
8955 return get_errno(fsync(arg1));
8956 case TARGET_NR_clone:
8957 /* Linux manages to have three different orderings for its
8958 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8959 * match the kernel's CONFIG_CLONE_* settings.
8960 * Microblaze is further special in that it uses a sixth
8961 * implicit argument to clone for the TLS pointer.
8963 #if defined(TARGET_MICROBLAZE)
8964 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
8965 #elif defined(TARGET_CLONE_BACKWARDS)
8966 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
8967 #elif defined(TARGET_CLONE_BACKWARDS2)
8968 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
8970 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
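/* Guest-side argument orders corresponding to the do_fork() calls
 * above (a summary sketch):
 *
 *   default:     clone(flags, newsp, parent_tidptr, child_tidptr, tls)
 *   BACKWARDS:   clone(flags, newsp, parent_tidptr, tls, child_tidptr)
 *   BACKWARDS2:  clone(newsp, flags, parent_tidptr, child_tidptr, tls)
 *   Microblaze:  clone(flags, newsp, <unused>, parent_tidptr,
 *                      child_tidptr, tls) -- TLS in the implicit
 *                      sixth argument, arg3 skipped here
 */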
8973 #ifdef __NR_exit_group
8974 /* new thread calls */
8975 case TARGET_NR_exit_group:
8976 preexit_cleanup(cpu_env, arg1);
8977 return get_errno(exit_group(arg1));
8979 case TARGET_NR_setdomainname:
8980 if (!(p = lock_user_string(arg1)))
8981 return -TARGET_EFAULT;
8982 ret = get_errno(setdomainname(p, arg2));
8983 unlock_user(p, arg1, 0);
8985 case TARGET_NR_uname:
8986 /* no need to transcode because we use the linux syscall */
8988 struct new_utsname * buf;
8990 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
8991 return -TARGET_EFAULT;
8992 ret = get_errno(sys_uname(buf));
8993 if (!is_error(ret)) {
8994 /* Overwrite the native machine name with whatever is being emulated. */
8996 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
8997 sizeof(buf->machine));
8998 /* Allow the user to override the reported release. */
8999 if (qemu_uname_release && *qemu_uname_release) {
9000 g_strlcpy(buf->release, qemu_uname_release,
9001 sizeof(buf->release));
9004 unlock_user_struct(buf, arg1, 1);
9008 case TARGET_NR_modify_ldt:
9009 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9010 #if !defined(TARGET_X86_64)
9011 case TARGET_NR_vm86:
9012 return do_vm86(cpu_env, arg1, arg2);
9015 case TARGET_NR_adjtimex:
9017 struct timex host_buf;
9019 if (target_to_host_timex(&host_buf, arg1) != 0) {
9020 return -TARGET_EFAULT;
9022 ret = get_errno(adjtimex(&host_buf));
9023 if (!is_error(ret)) {
9024 if (host_to_target_timex(arg1, &host_buf) != 0) {
9025 return -TARGET_EFAULT;
9030 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9031 case TARGET_NR_clock_adjtime:
9033 struct timex htx, *phtx = &htx;
9035 if (target_to_host_timex(phtx, arg2) != 0) {
9036 return -TARGET_EFAULT;
9038 ret = get_errno(clock_adjtime(arg1, phtx));
9039 if (!is_error(ret) && phtx) {
9040 if (host_to_target_timex(arg2, phtx) != 0) {
9041 return -TARGET_EFAULT;
9047 case TARGET_NR_getpgid:
9048 return get_errno(getpgid(arg1));
9049 case TARGET_NR_fchdir:
9050 return get_errno(fchdir(arg1));
9051 case TARGET_NR_personality:
9052 return get_errno(personality(arg1));
9053 #ifdef TARGET_NR__llseek /* Not on alpha */
9054 case TARGET_NR__llseek:
9057 #if !defined(__NR_llseek)
9058 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9060 ret = get_errno(res);
9065 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9067 if ((ret == 0) && put_user_s64(res, arg4)) {
9068 return -TARGET_EFAULT;
9073 #ifdef TARGET_NR_getdents
9074 case TARGET_NR_getdents:
9075 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9076 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9078 struct target_dirent *target_dirp;
9079 struct linux_dirent *dirp;
9080 abi_long count = arg3;
9082 dirp = g_try_malloc(count);
9084 return -TARGET_ENOMEM;
9087 ret = get_errno(sys_getdents(arg1, dirp, count));
9088 if (!is_error(ret)) {
9089 struct linux_dirent *de;
9090 struct target_dirent *tde;
9092 int reclen, treclen;
9093 int count1, tnamelen;
9097 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9098 return -TARGET_EFAULT;
9101 reclen = de->d_reclen;
9102 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9103 assert(tnamelen >= 0);
9104 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9105 assert(count1 + treclen <= count);
9106 tde->d_reclen = tswap16(treclen);
9107 tde->d_ino = tswapal(de->d_ino);
9108 tde->d_off = tswapal(de->d_off);
9109 memcpy(tde->d_name, de->d_name, tnamelen);
9110 de = (struct linux_dirent *)((char *)de + reclen);
9112 tde = (struct target_dirent *)((char *)tde + treclen);
9116 unlock_user(target_dirp, arg2, ret);
9122 struct linux_dirent *dirp;
9123 abi_long count = arg3;
9125 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9126 return -TARGET_EFAULT;
9127 ret = get_errno(sys_getdents(arg1, dirp, count));
9128 if (!is_error(ret)) {
9129 struct linux_dirent *de;
9134 reclen = de->d_reclen;
9137 de->d_reclen = tswap16(reclen);
9138 tswapls(&de->d_ino);
9139 tswapls(&de->d_off);
9140 de = (struct linux_dirent *)((char *)de + reclen);
9144 unlock_user(dirp, arg2, ret);
9148 /* Implement getdents in terms of getdents64 */
9150 struct linux_dirent64 *dirp;
9151 abi_long count = arg3;
            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
            if (!dirp) {
                return -TARGET_EFAULT;
            }
9157 ret = get_errno(sys_getdents64(arg1, dirp, count));
9158 if (!is_error(ret)) {
9159 /* Convert the dirent64 structs to target dirent. We do this
9160 * in-place, since we can guarantee that a target_dirent is no
9161 * larger than a dirent64; however this means we have to be
9162 * careful to read everything before writing in the new format.
9164 struct linux_dirent64 *de;
9165 struct target_dirent *tde;
9170 tde = (struct target_dirent *)dirp;
9172 int namelen, treclen;
9173 int reclen = de->d_reclen;
9174 uint64_t ino = de->d_ino;
9175 int64_t off = de->d_off;
9176 uint8_t type = de->d_type;
9178 namelen = strlen(de->d_name);
                treclen = offsetof(struct target_dirent, d_name)
                    + namelen + 2;
                treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9183 memmove(tde->d_name, de->d_name, namelen + 1);
9184 tde->d_ino = tswapal(ino);
9185 tde->d_off = tswapal(off);
9186 tde->d_reclen = tswap16(treclen);
9187 /* The target_dirent type is in what was formerly a padding
9188 * byte at the end of the structure:
9190 *(((char *)tde) + treclen - 1) = type;
9192 de = (struct linux_dirent64 *)((char *)de + reclen);
9193 tde = (struct target_dirent *)((char *)tde + treclen);
9199 unlock_user(dirp, arg2, ret);
9203 #endif /* TARGET_NR_getdents */
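/* A minimal sketch (illustration, not QEMU code) of why the in-place
 * dirent64 -> target_dirent conversion above is safe: each output record
 * is no longer than the corresponding input record, so the write cursor
 * can never overtake the read cursor as long as every input field is
 * loaded before the output record is stored.  The helpers named below
 * are hypothetical.
 */
#if 0
char *rd = buf, *wr = buf;              /* both cursors start at the front */
while (rd < buf + len) {
    int in_len = input_reclen(rd);      /* hypothetical accessors */
    struct rec out = read_record(rd);   /* read everything first... */
    int out_len = record_out_len(&out); /* out_len <= in_len by layout */
    write_record(wr, &out);             /* ...then overwrite in place */
    rd += in_len;
    wr += out_len;
}
#endif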
9204 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9205 case TARGET_NR_getdents64:
9207 struct linux_dirent64 *dirp;
9208 abi_long count = arg3;
9209 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9210 return -TARGET_EFAULT;
9211 ret = get_errno(sys_getdents64(arg1, dirp, count));
9212 if (!is_error(ret)) {
9213 struct linux_dirent64 *de;
9218 reclen = de->d_reclen;
9221 de->d_reclen = tswap16(reclen);
9222 tswap64s((uint64_t *)&de->d_ino);
9223 tswap64s((uint64_t *)&de->d_off);
9224 de = (struct linux_dirent64 *)((char *)de + reclen);
9228 unlock_user(dirp, arg2, ret);
9231 #endif /* TARGET_NR_getdents64 */
9232 #if defined(TARGET_NR__newselect)
9233 case TARGET_NR__newselect:
9234 return do_select(arg1, arg2, arg3, arg4, arg5);
9236 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9237 # ifdef TARGET_NR_poll
9238 case TARGET_NR_poll:
9240 # ifdef TARGET_NR_ppoll
9241 case TARGET_NR_ppoll:
9244 struct target_pollfd *target_pfd;
9245 unsigned int nfds = arg2;
9252 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9253 return -TARGET_EINVAL;
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }
9262 pfd = alloca(sizeof(struct pollfd) * nfds);
9263 for (i = 0; i < nfds; i++) {
9264 pfd[i].fd = tswap32(target_pfd[i].fd);
9265 pfd[i].events = tswap16(target_pfd[i].events);
9270 # ifdef TARGET_NR_ppoll
9271 case TARGET_NR_ppoll:
9273 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9274 target_sigset_t *target_set;
9275 sigset_t _set, *set = &_set;
9278 if (target_to_host_timespec(timeout_ts, arg3)) {
9279 unlock_user(target_pfd, arg1, 0);
9280 return -TARGET_EFAULT;
9287 if (arg5 != sizeof(target_sigset_t)) {
9288 unlock_user(target_pfd, arg1, 0);
9289 return -TARGET_EINVAL;
                target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
9297 target_to_host_sigset(set, target_set);
9302 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9303 set, SIGSET_T_SIZE));
9305 if (!is_error(ret) && arg3) {
9306 host_to_target_timespec(arg3, timeout_ts);
9309 unlock_user(target_set, arg4, 0);
9314 # ifdef TARGET_NR_poll
9315 case TARGET_NR_poll:
        {
            struct timespec ts, *pts;

            if (arg3 >= 0) {
                /* Convert ms to secs, ns */
                ts.tv_sec = arg3 / 1000;
                ts.tv_nsec = (arg3 % 1000) * 1000000LL;
                pts = &ts;
            } else {
                /* -ve poll() timeout means "infinite" */
                pts = NULL;
            }
            ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
            break;
        }
9333 g_assert_not_reached();
9336 if (!is_error(ret)) {
9337 for(i = 0; i < nfds; i++) {
9338 target_pfd[i].revents = tswap16(pfd[i].revents);
9341 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
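        /* Round-trip summary (illustration): fd and events are converted
         * to host byte order on the way in, revents is converted back on
         * the way out, and the VERIFY_WRITE unlock_user() above is what
         * commits the revents array to guest memory.
         */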
9345 case TARGET_NR_flock:
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
9349 case TARGET_NR_readv:
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
9360 case TARGET_NR_writev:
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
9371 #if defined(TARGET_NR_preadv)
9372 case TARGET_NR_preadv:
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
9387 #if defined(TARGET_NR_pwritev)
9388 case TARGET_NR_pwritev:
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
9403 case TARGET_NR_getsid:
9404 return get_errno(getsid(arg1));
9405 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9406 case TARGET_NR_fdatasync:
9407 return get_errno(fdatasync(arg1));
9409 #ifdef TARGET_NR__sysctl
9410 case TARGET_NR__sysctl:
        /* We don't implement this, but ENOTDIR is always a safe
           return value. */
        return -TARGET_ENOTDIR;
9415 case TARGET_NR_sched_getaffinity:
9417 unsigned int mask_size;
9418 unsigned long *mask;
            /*
             * sched_getaffinity needs multiples of ulong, so we need to
             * take care of mismatches between target ulong and host ulong
             * sizes.
             */
9424 if (arg2 & (sizeof(abi_ulong) - 1)) {
9425 return -TARGET_EINVAL;
9427 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9429 mask = alloca(mask_size);
9430 memset(mask, 0, mask_size);
9431 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9433 if (!is_error(ret)) {
9435 /* More data returned than the caller's buffer will fit.
9436 * This only happens if sizeof(abi_long) < sizeof(long)
9437 * and the caller passed us a buffer holding an odd number
9438 * of abi_longs. If the host kernel is actually using the
9439 * extra 4 bytes then fail EINVAL; otherwise we can just
                 * ignore them and only copy the interesting part.
                 */
9442 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9443 if (numcpus > arg2 * 8) {
9444 return -TARGET_EINVAL;
9449 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9450 return -TARGET_EFAULT;
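        /* Worked example (illustration): a 32-bit guest passing arg2 = 12
         * on a 64-bit host satisfies the abi_ulong alignment check and
         * yields mask_size = (12 + 7) & ~7 = 16, i.e. the kernel is handed
         * a buffer rounded up to whole host unsigned longs.
         */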
9455 case TARGET_NR_sched_setaffinity:
9457 unsigned int mask_size;
9458 unsigned long *mask;
            /*
             * sched_setaffinity needs multiples of ulong, so we need to
             * take care of mismatches between target ulong and host ulong
             * sizes.
             */
9464 if (arg2 & (sizeof(abi_ulong) - 1)) {
9465 return -TARGET_EINVAL;
9467 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9468 mask = alloca(mask_size);
9470 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9475 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
9486 if (arg1 && put_user_u32(cpu, arg1)) {
9487 return -TARGET_EFAULT;
9489 if (arg2 && put_user_u32(node, arg2)) {
9490 return -TARGET_EFAULT;
9494 case TARGET_NR_sched_setparam:
9496 struct sched_param *target_schp;
9497 struct sched_param schp;
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
9502 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9503 return -TARGET_EFAULT;
9504 schp.sched_priority = tswap32(target_schp->sched_priority);
9505 unlock_user_struct(target_schp, arg2, 0);
9506 return get_errno(sched_setparam(arg1, &schp));
9508 case TARGET_NR_sched_getparam:
9510 struct sched_param *target_schp;
9511 struct sched_param schp;
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
9516 ret = get_errno(sched_getparam(arg1, &schp));
9517 if (!is_error(ret)) {
9518 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9519 return -TARGET_EFAULT;
9520 target_schp->sched_priority = tswap32(schp.sched_priority);
9521 unlock_user_struct(target_schp, arg2, 1);
9525 case TARGET_NR_sched_setscheduler:
9527 struct sched_param *target_schp;
9528 struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
9532 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9533 return -TARGET_EFAULT;
9534 schp.sched_priority = tswap32(target_schp->sched_priority);
9535 unlock_user_struct(target_schp, arg3, 0);
9536 return get_errno(sched_setscheduler(arg1, arg2, &schp));
9538 case TARGET_NR_sched_getscheduler:
9539 return get_errno(sched_getscheduler(arg1));
9540 case TARGET_NR_sched_yield:
9541 return get_errno(sched_yield());
9542 case TARGET_NR_sched_get_priority_max:
9543 return get_errno(sched_get_priority_max(arg1));
9544 case TARGET_NR_sched_get_priority_min:
9545 return get_errno(sched_get_priority_min(arg1));
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
9550 if (!is_error(ret)) {
9551 ret = host_to_target_timespec(arg2, &ts);
9555 case TARGET_NR_nanosleep:
9557 struct timespec req, rem;
9558 target_to_host_timespec(&req, arg1);
9559 ret = get_errno(safe_nanosleep(&req, &rem));
9560 if (is_error(ret) && arg2) {
9561 host_to_target_timespec(arg2, &rem);
    case TARGET_NR_prctl:
        switch (arg1) {
        case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
            if (!is_error(ret) && arg2
                && put_user_ual(deathsig, arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
        case PR_GET_NAME:
        {
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
        case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
9602 case TARGET_PR_GET_FP_MODE:
        {
            CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
            ret = 0;
            if (env->CP0_Status & (1 << CP0St_FR)) {
                ret |= TARGET_PR_FP_MODE_FR;
            }
            if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
                ret |= TARGET_PR_FP_MODE_FRE;
            }
            return ret;
        }
9614 case TARGET_PR_SET_FP_MODE:
9616 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9617 bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9618 bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9619 bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9620 bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9622 const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9623 TARGET_PR_FP_MODE_FRE;
9625 /* If nothing to change, return right away, successfully. */
            if (old_fr == new_fr && old_fre == new_fre) {
                return 0;
            }
9629 /* Check the value is valid */
9630 if (arg2 & ~known_bits) {
9631 return -TARGET_EOPNOTSUPP;
9633 /* Setting FRE without FR is not supported. */
9634 if (new_fre && !new_fr) {
9635 return -TARGET_EOPNOTSUPP;
9637 if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9638 /* FR1 is not supported */
9639 return -TARGET_EOPNOTSUPP;
9641 if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9642 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9643 /* cannot set FR=0 */
9644 return -TARGET_EOPNOTSUPP;
9646 if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9647 /* Cannot set FRE=1 */
9648 return -TARGET_EOPNOTSUPP;
9652 fpr_t *fpr = env->active_fpu.fpr;
9653 for (i = 0; i < 32 ; i += 2) {
9654 if (!old_fr && new_fr) {
9655 fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9656 } else if (old_fr && !new_fr) {
9657 fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
            if (new_fr) {
                env->CP0_Status |= (1 << CP0St_FR);
                env->hflags |= MIPS_HFLAG_F64;
            } else {
                env->CP0_Status &= ~(1 << CP0St_FR);
                env->hflags &= ~MIPS_HFLAG_F64;
            }
            if (new_fre) {
                env->CP0_Config5 |= (1 << CP0C5_FRE);
                if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
                    env->hflags |= MIPS_HFLAG_FRE;
                }
            } else {
                env->CP0_Config5 &= ~(1 << CP0C5_FRE);
                env->hflags &= ~MIPS_HFLAG_FRE;
            }
9681 #ifdef TARGET_AARCH64
9682 case TARGET_PR_SVE_SET_VL:
            /*
             * We cannot support either PR_SVE_SET_VL_ONEXEC or
             * PR_SVE_VL_INHERIT.  Note the kernel definition
             * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
             * even though the current architectural maximum is VQ=16.
             */
9689 ret = -TARGET_EINVAL;
9690 if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
9691 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9692 CPUARMState *env = cpu_env;
9693 ARMCPU *cpu = arm_env_get_cpu(env);
9694 uint32_t vq, old_vq;
9696 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9697 vq = MAX(arg2 / 16, 1);
9698 vq = MIN(vq, cpu->sve_max_vq);
                if (vq < old_vq) {
                    aarch64_sve_narrow_vq(env, vq);
                }
9703 env->vfp.zcr_el[1] = vq - 1;
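                /* Worked example (illustration): arg2 = 48 gives
                 * vq = MAX(48 / 16, 1) = 3 (clamped to sve_max_vq), so
                 * ZCR_EL1.LEN is stored as vq - 1 = 2 and a subsequent
                 * PR_SVE_GET_VL reports (2 + 1) * 16 = 48 again.
                 */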
9707 case TARGET_PR_SVE_GET_VL:
9708 ret = -TARGET_EINVAL;
9710 ARMCPU *cpu = arm_env_get_cpu(cpu_env);
9711 if (cpu_isar_feature(aa64_sve, cpu)) {
9712 ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9716 case TARGET_PR_PAC_RESET_KEYS:
9718 CPUARMState *env = cpu_env;
9719 ARMCPU *cpu = arm_env_get_cpu(env);
9721 if (arg3 || arg4 || arg5) {
9722 return -TARGET_EINVAL;
9724 if (cpu_isar_feature(aa64_pauth, cpu)) {
9725 int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
9726 TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
9727 TARGET_PR_PAC_APGAKEY);
                if (arg2 == 0) {
                    arg2 = all;
                } else if (arg2 & ~all) {
                    return -TARGET_EINVAL;
                }
9733 if (arg2 & TARGET_PR_PAC_APIAKEY) {
9734 arm_init_pauth_key(&env->apia_key);
9736 if (arg2 & TARGET_PR_PAC_APIBKEY) {
9737 arm_init_pauth_key(&env->apib_key);
9739 if (arg2 & TARGET_PR_PAC_APDAKEY) {
9740 arm_init_pauth_key(&env->apda_key);
9742 if (arg2 & TARGET_PR_PAC_APDBKEY) {
9743 arm_init_pauth_key(&env->apdb_key);
9745 if (arg2 & TARGET_PR_PAC_APGAKEY) {
9746 arm_init_pauth_key(&env->apga_key);
9751 return -TARGET_EINVAL;
9752 #endif /* AARCH64 */
9753 case PR_GET_SECCOMP:
9754 case PR_SET_SECCOMP:
        /* Disable seccomp to prevent the target disabling syscalls we
         * need. */
        return -TARGET_EINVAL;
9759 /* Most prctl options have no pointer arguments */
9760 return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9763 #ifdef TARGET_NR_arch_prctl
9764 case TARGET_NR_arch_prctl:
9765 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9766 return do_arch_prctl(cpu_env, arg1, arg2);
9771 #ifdef TARGET_NR_pread64
9772 case TARGET_NR_pread64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
9786 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9787 unlock_user(p, arg2, ret);
9789 case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
9803 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9804 unlock_user(p, arg2, 0);
9807 case TARGET_NR_getcwd:
9808 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9809 return -TARGET_EFAULT;
9810 ret = get_errno(sys_getcwd1(p, arg2));
9811 unlock_user(p, arg1, ret);
9813 case TARGET_NR_capget:
9814 case TARGET_NR_capset:
9816 struct target_user_cap_header *target_header;
9817 struct target_user_cap_data *target_data = NULL;
9818 struct __user_cap_header_struct header;
9819 struct __user_cap_data_struct data[2];
9820 struct __user_cap_data_struct *dataptr = NULL;
9821 int i, target_datalen;
9824 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9825 return -TARGET_EFAULT;
9827 header.version = tswap32(target_header->version);
9828 header.pid = tswap32(target_header->pid);
9830 if (header.version != _LINUX_CAPABILITY_VERSION) {
9831 /* Version 2 and up takes pointer to two user_data structs */
9835 target_datalen = sizeof(*target_data) * data_items;
9838 if (num == TARGET_NR_capget) {
9839 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9841 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9844 unlock_user_struct(target_header, arg1, 0);
9845 return -TARGET_EFAULT;
9848 if (num == TARGET_NR_capset) {
9849 for (i = 0; i < data_items; i++) {
9850 data[i].effective = tswap32(target_data[i].effective);
9851 data[i].permitted = tswap32(target_data[i].permitted);
9852 data[i].inheritable = tswap32(target_data[i].inheritable);
9859 if (num == TARGET_NR_capget) {
9860 ret = get_errno(capget(&header, dataptr));
9862 ret = get_errno(capset(&header, dataptr));
9865 /* The kernel always updates version for both capget and capset */
9866 target_header->version = tswap32(header.version);
9867 unlock_user_struct(target_header, arg1, 1);
9870 if (num == TARGET_NR_capget) {
9871 for (i = 0; i < data_items; i++) {
9872 target_data[i].effective = tswap32(data[i].effective);
9873 target_data[i].permitted = tswap32(data[i].permitted);
9874 target_data[i].inheritable = tswap32(data[i].inheritable);
9876 unlock_user(target_data, arg2, target_datalen);
9878 unlock_user(target_data, arg2, 0);
9883 case TARGET_NR_sigaltstack:
9884 return do_sigaltstack(arg1, arg2,
9885 get_sp_from_cpustate((CPUArchState *)cpu_env));
9887 #ifdef CONFIG_SENDFILE
9888 #ifdef TARGET_NR_sendfile
9889 case TARGET_NR_sendfile:
9894 ret = get_user_sal(off, arg3);
9895 if (is_error(ret)) {
9900 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9901 if (!is_error(ret) && arg3) {
9902 abi_long ret2 = put_user_sal(off, arg3);
9903 if (is_error(ret2)) {
9910 #ifdef TARGET_NR_sendfile64
9911 case TARGET_NR_sendfile64:
9916 ret = get_user_s64(off, arg3);
9917 if (is_error(ret)) {
9922 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9923 if (!is_error(ret) && arg3) {
9924 abi_long ret2 = put_user_s64(off, arg3);
9925 if (is_error(ret2)) {
9933 #ifdef TARGET_NR_vfork
9934 case TARGET_NR_vfork:
9935 return get_errno(do_fork(cpu_env,
9936 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
9939 #ifdef TARGET_NR_ugetrlimit
9940 case TARGET_NR_ugetrlimit:
9943 int resource = target_to_host_resource(arg1);
9944 ret = get_errno(getrlimit(resource, &rlim));
9945 if (!is_error(ret)) {
9946 struct target_rlimit *target_rlim;
9947 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9948 return -TARGET_EFAULT;
9949 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9950 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9951 unlock_user_struct(target_rlim, arg2, 1);
9956 #ifdef TARGET_NR_truncate64
9957 case TARGET_NR_truncate64:
9958 if (!(p = lock_user_string(arg1)))
9959 return -TARGET_EFAULT;
9960 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
9961 unlock_user(p, arg1, 0);
9964 #ifdef TARGET_NR_ftruncate64
9965 case TARGET_NR_ftruncate64:
9966 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
9968 #ifdef TARGET_NR_stat64
9969 case TARGET_NR_stat64:
9970 if (!(p = lock_user_string(arg1))) {
9971 return -TARGET_EFAULT;
9973 ret = get_errno(stat(path(p), &st));
9974 unlock_user(p, arg1, 0);
9976 ret = host_to_target_stat64(cpu_env, arg2, &st);
9979 #ifdef TARGET_NR_lstat64
9980 case TARGET_NR_lstat64:
9981 if (!(p = lock_user_string(arg1))) {
9982 return -TARGET_EFAULT;
9984 ret = get_errno(lstat(path(p), &st));
9985 unlock_user(p, arg1, 0);
9987 ret = host_to_target_stat64(cpu_env, arg2, &st);
9990 #ifdef TARGET_NR_fstat64
9991 case TARGET_NR_fstat64:
9992 ret = get_errno(fstat(arg1, &st));
9994 ret = host_to_target_stat64(cpu_env, arg2, &st);
9997 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9998 #ifdef TARGET_NR_fstatat64
9999 case TARGET_NR_fstatat64:
10001 #ifdef TARGET_NR_newfstatat
10002 case TARGET_NR_newfstatat:
10004 if (!(p = lock_user_string(arg2))) {
10005 return -TARGET_EFAULT;
10007 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10008 unlock_user(p, arg2, 0);
10009 if (!is_error(ret))
10010 ret = host_to_target_stat64(cpu_env, arg3, &st);
10013 #ifdef TARGET_NR_lchown
10014 case TARGET_NR_lchown:
10015 if (!(p = lock_user_string(arg1)))
10016 return -TARGET_EFAULT;
10017 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10018 unlock_user(p, arg1, 0);
10021 #ifdef TARGET_NR_getuid
10022 case TARGET_NR_getuid:
10023 return get_errno(high2lowuid(getuid()));
10025 #ifdef TARGET_NR_getgid
10026 case TARGET_NR_getgid:
10027 return get_errno(high2lowgid(getgid()));
10029 #ifdef TARGET_NR_geteuid
10030 case TARGET_NR_geteuid:
10031 return get_errno(high2lowuid(geteuid()));
10033 #ifdef TARGET_NR_getegid
10034 case TARGET_NR_getegid:
10035 return get_errno(high2lowgid(getegid()));
10037 case TARGET_NR_setreuid:
10038 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10039 case TARGET_NR_setregid:
10040 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10041 case TARGET_NR_getgroups:
10043 int gidsetsize = arg1;
10044 target_id *target_grouplist;
10048 grouplist = alloca(gidsetsize * sizeof(gid_t));
10049 ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
10052 if (!is_error(ret)) {
10053 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10054 if (!target_grouplist)
10055 return -TARGET_EFAULT;
10056 for(i = 0;i < ret; i++)
10057 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10058 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10062 case TARGET_NR_setgroups:
10064 int gidsetsize = arg1;
10065 target_id *target_grouplist;
10066 gid_t *grouplist = NULL;
10069 grouplist = alloca(gidsetsize * sizeof(gid_t));
10070 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10071 if (!target_grouplist) {
10072 return -TARGET_EFAULT;
10074 for (i = 0; i < gidsetsize; i++) {
10075 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10077 unlock_user(target_grouplist, arg2, 0);
10079 return get_errno(setgroups(gidsetsize, grouplist));
10081 case TARGET_NR_fchown:
10082 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10083 #if defined(TARGET_NR_fchownat)
10084 case TARGET_NR_fchownat:
10085 if (!(p = lock_user_string(arg2)))
10086 return -TARGET_EFAULT;
10087 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10088 low2highgid(arg4), arg5));
10089 unlock_user(p, arg2, 0);
10092 #ifdef TARGET_NR_setresuid
10093 case TARGET_NR_setresuid:
10094 return get_errno(sys_setresuid(low2highuid(arg1),
10096 low2highuid(arg3)));
10098 #ifdef TARGET_NR_getresuid
10099 case TARGET_NR_getresuid:
10101 uid_t ruid, euid, suid;
10102 ret = get_errno(getresuid(&ruid, &euid, &suid));
10103 if (!is_error(ret)) {
10104 if (put_user_id(high2lowuid(ruid), arg1)
10105 || put_user_id(high2lowuid(euid), arg2)
10106 || put_user_id(high2lowuid(suid), arg3))
10107 return -TARGET_EFAULT;
10112 #ifdef TARGET_NR_getresgid
10113 case TARGET_NR_setresgid:
10114 return get_errno(sys_setresgid(low2highgid(arg1),
10116 low2highgid(arg3)));
10118 #ifdef TARGET_NR_getresgid
10119 case TARGET_NR_getresgid:
10121 gid_t rgid, egid, sgid;
10122 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10123 if (!is_error(ret)) {
10124 if (put_user_id(high2lowgid(rgid), arg1)
10125 || put_user_id(high2lowgid(egid), arg2)
10126 || put_user_id(high2lowgid(sgid), arg3))
10127 return -TARGET_EFAULT;
10132 #ifdef TARGET_NR_chown
10133 case TARGET_NR_chown:
10134 if (!(p = lock_user_string(arg1)))
10135 return -TARGET_EFAULT;
10136 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10137 unlock_user(p, arg1, 0);
10140 case TARGET_NR_setuid:
10141 return get_errno(sys_setuid(low2highuid(arg1)));
10142 case TARGET_NR_setgid:
10143 return get_errno(sys_setgid(low2highgid(arg1)));
10144 case TARGET_NR_setfsuid:
10145 return get_errno(setfsuid(arg1));
10146 case TARGET_NR_setfsgid:
10147 return get_errno(setfsgid(arg1));
10149 #ifdef TARGET_NR_lchown32
10150 case TARGET_NR_lchown32:
10151 if (!(p = lock_user_string(arg1)))
10152 return -TARGET_EFAULT;
10153 ret = get_errno(lchown(p, arg2, arg3));
10154 unlock_user(p, arg1, 0);
10157 #ifdef TARGET_NR_getuid32
10158 case TARGET_NR_getuid32:
10159 return get_errno(getuid());
10162 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10163 /* Alpha specific */
10164 case TARGET_NR_getxuid:
10168 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10170 return get_errno(getuid());
10172 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10173 /* Alpha specific */
10174 case TARGET_NR_getxgid:
10178 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10180 return get_errno(getgid());
10182 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10183 /* Alpha specific */
10184 case TARGET_NR_osf_getsysinfo:
10185 ret = -TARGET_EOPNOTSUPP;
10187 case TARGET_GSI_IEEE_FP_CONTROL:
10189 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10191 /* Copied from linux ieee_fpcr_to_swcr. */
10192 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10193 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10194 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10195 | SWCR_TRAP_ENABLE_DZE
10196 | SWCR_TRAP_ENABLE_OVF);
10197 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10198 | SWCR_TRAP_ENABLE_INE);
10199 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10200 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10202 if (put_user_u64 (swcr, arg2))
10203 return -TARGET_EFAULT;
        /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
           case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
           case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
           case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
        */
        }
        return ret;
#endif
10220 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10221 /* Alpha specific */
10222 case TARGET_NR_osf_setsysinfo:
10223 ret = -TARGET_EOPNOTSUPP;
10225 case TARGET_SSI_IEEE_FP_CONTROL:
10227 uint64_t swcr, fpcr, orig_fpcr;
10229 if (get_user_u64 (swcr, arg2)) {
10230 return -TARGET_EFAULT;
10232 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10233 fpcr = orig_fpcr & FPCR_DYN_MASK;
10235 /* Copied from linux ieee_swcr_to_fpcr. */
10236 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10237 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10238 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10239 | SWCR_TRAP_ENABLE_DZE
10240 | SWCR_TRAP_ENABLE_OVF)) << 48;
10241 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10242 | SWCR_TRAP_ENABLE_INE)) << 57;
10243 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10244 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10246 cpu_alpha_store_fpcr(cpu_env, fpcr);
10251 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10253 uint64_t exc, fpcr, orig_fpcr;
10256 if (get_user_u64(exc, arg2)) {
10257 return -TARGET_EFAULT;
10260 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10262 /* We only add to the exception status here. */
10263 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10265 cpu_alpha_store_fpcr(cpu_env, fpcr);
10268 /* Old exceptions are not signaled. */
10269 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
                /* If any exceptions were set by this call and
                   are unmasked, send a signal.  */
10274 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10275 si_code = TARGET_FPE_FLTRES;
10277 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10278 si_code = TARGET_FPE_FLTUND;
10280 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10281 si_code = TARGET_FPE_FLTOVF;
10283 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10284 si_code = TARGET_FPE_FLTDIV;
10286 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10287 si_code = TARGET_FPE_FLTINV;
10289 if (si_code != 0) {
10290 target_siginfo_t info;
10291 info.si_signo = SIGFPE;
10293 info.si_code = si_code;
10294 info._sifields._sigfault._addr
10295 = ((CPUArchState *)cpu_env)->pc;
10296 queue_signal((CPUArchState *)cpu_env, info.si_signo,
10297 QEMU_SI_FAULT, &info);
10302 /* case SSI_NVPAIRS:
10303 -- Used with SSIN_UACPROC to enable unaligned accesses.
10304 case SSI_IEEE_STATE_AT_SIGNAL:
           case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
        */
        }
        return ret;
#endif
10311 #ifdef TARGET_NR_osf_sigprocmask
10312 /* Alpha specific. */
10313 case TARGET_NR_osf_sigprocmask:
10317 sigset_t set, oldset;
10320 case TARGET_SIG_BLOCK:
10323 case TARGET_SIG_UNBLOCK:
10326 case TARGET_SIG_SETMASK:
10330 return -TARGET_EINVAL;
10333 target_to_host_old_sigset(&set, &mask);
10334 ret = do_sigprocmask(how, &set, &oldset);
10336 host_to_target_old_sigset(&mask, &oldset);
10343 #ifdef TARGET_NR_getgid32
10344 case TARGET_NR_getgid32:
10345 return get_errno(getgid());
10347 #ifdef TARGET_NR_geteuid32
10348 case TARGET_NR_geteuid32:
10349 return get_errno(geteuid());
10351 #ifdef TARGET_NR_getegid32
10352 case TARGET_NR_getegid32:
10353 return get_errno(getegid());
10355 #ifdef TARGET_NR_setreuid32
10356 case TARGET_NR_setreuid32:
10357 return get_errno(setreuid(arg1, arg2));
10359 #ifdef TARGET_NR_setregid32
10360 case TARGET_NR_setregid32:
10361 return get_errno(setregid(arg1, arg2));
10363 #ifdef TARGET_NR_getgroups32
10364 case TARGET_NR_getgroups32:
10366 int gidsetsize = arg1;
10367 uint32_t *target_grouplist;
10371 grouplist = alloca(gidsetsize * sizeof(gid_t));
10372 ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
10375 if (!is_error(ret)) {
10376 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10377 if (!target_grouplist) {
10378 return -TARGET_EFAULT;
10380 for(i = 0;i < ret; i++)
10381 target_grouplist[i] = tswap32(grouplist[i]);
10382 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10387 #ifdef TARGET_NR_setgroups32
10388 case TARGET_NR_setgroups32:
10390 int gidsetsize = arg1;
10391 uint32_t *target_grouplist;
10395 grouplist = alloca(gidsetsize * sizeof(gid_t));
10396 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10397 if (!target_grouplist) {
10398 return -TARGET_EFAULT;
10400 for(i = 0;i < gidsetsize; i++)
10401 grouplist[i] = tswap32(target_grouplist[i]);
10402 unlock_user(target_grouplist, arg2, 0);
10403 return get_errno(setgroups(gidsetsize, grouplist));
10406 #ifdef TARGET_NR_fchown32
10407 case TARGET_NR_fchown32:
10408 return get_errno(fchown(arg1, arg2, arg3));
10410 #ifdef TARGET_NR_setresuid32
10411 case TARGET_NR_setresuid32:
10412 return get_errno(sys_setresuid(arg1, arg2, arg3));
10414 #ifdef TARGET_NR_getresuid32
10415 case TARGET_NR_getresuid32:
10417 uid_t ruid, euid, suid;
10418 ret = get_errno(getresuid(&ruid, &euid, &suid));
10419 if (!is_error(ret)) {
10420 if (put_user_u32(ruid, arg1)
10421 || put_user_u32(euid, arg2)
10422 || put_user_u32(suid, arg3))
10423 return -TARGET_EFAULT;
10428 #ifdef TARGET_NR_setresgid32
10429 case TARGET_NR_setresgid32:
10430 return get_errno(sys_setresgid(arg1, arg2, arg3));
10432 #ifdef TARGET_NR_getresgid32
10433 case TARGET_NR_getresgid32:
10435 gid_t rgid, egid, sgid;
10436 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10437 if (!is_error(ret)) {
10438 if (put_user_u32(rgid, arg1)
10439 || put_user_u32(egid, arg2)
10440 || put_user_u32(sgid, arg3))
10441 return -TARGET_EFAULT;
10446 #ifdef TARGET_NR_chown32
10447 case TARGET_NR_chown32:
10448 if (!(p = lock_user_string(arg1)))
10449 return -TARGET_EFAULT;
10450 ret = get_errno(chown(p, arg2, arg3));
10451 unlock_user(p, arg1, 0);
10454 #ifdef TARGET_NR_setuid32
10455 case TARGET_NR_setuid32:
10456 return get_errno(sys_setuid(arg1));
10458 #ifdef TARGET_NR_setgid32
10459 case TARGET_NR_setgid32:
10460 return get_errno(sys_setgid(arg1));
10462 #ifdef TARGET_NR_setfsuid32
10463 case TARGET_NR_setfsuid32:
10464 return get_errno(setfsuid(arg1));
10466 #ifdef TARGET_NR_setfsgid32
10467 case TARGET_NR_setfsgid32:
10468 return get_errno(setfsgid(arg1));
10470 #ifdef TARGET_NR_mincore
10471 case TARGET_NR_mincore:
10473 void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10475 return -TARGET_ENOMEM;
10477 p = lock_user_string(arg3);
10479 ret = -TARGET_EFAULT;
10481 ret = get_errno(mincore(a, arg2, p));
10482 unlock_user(p, arg3, ret);
10484 unlock_user(a, arg1, 0);
10488 #ifdef TARGET_NR_arm_fadvise64_64
10489 case TARGET_NR_arm_fadvise64_64:
10490 /* arm_fadvise64_64 looks like fadvise64_64 but
10491 * with different argument order: fd, advice, offset, len
10492 * rather than the usual fd, offset, len, advice.
10493 * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
10496 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10497 target_offset64(arg5, arg6), arg2);
10498 return -host_to_target_errno(ret);
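        /* Illustration: a guest call fadvise64_64(fd, advice, off, len)
         * therefore arrives here as arg1 = fd, arg2 = advice, with the
         * 64-bit offset split across (arg3, arg4) and the 64-bit length
         * split across (arg5, arg6); target_offset64() reassembles each
         * register pair.
         */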
10501 #if TARGET_ABI_BITS == 32
10503 #ifdef TARGET_NR_fadvise64_64
10504 case TARGET_NR_fadvise64_64:
10505 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10506 /* 6 args: fd, advice, offset (high, low), len (high, low) */
10514 /* 6 args: fd, offset (high, low), len (high, low), advice */
10515 if (regpairs_aligned(cpu_env, num)) {
10516 /* offset is in (3,4), len in (5,6) and advice in 7 */
10524 ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10525 target_offset64(arg4, arg5), arg6);
10526 return -host_to_target_errno(ret);
10529 #ifdef TARGET_NR_fadvise64
10530 case TARGET_NR_fadvise64:
10531 /* 5 args: fd, offset (high, low), len, advice */
10532 if (regpairs_aligned(cpu_env, num)) {
10533 /* offset is in (3,4), len in 5 and advice in 6 */
10539 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10540 return -host_to_target_errno(ret);
10543 #else /* not a 32-bit ABI */
10544 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10545 #ifdef TARGET_NR_fadvise64_64
10546 case TARGET_NR_fadvise64_64:
10548 #ifdef TARGET_NR_fadvise64
10549 case TARGET_NR_fadvise64:
10551 #ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10554 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10555 case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        }
#endif
10560 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10562 #endif /* end of 64-bit ABI fadvise handling */
10564 #ifdef TARGET_NR_madvise
10565 case TARGET_NR_madvise:
10566 /* A straight passthrough may not be safe because qemu sometimes
10567 turns private file-backed mappings into anonymous mappings.
10568 This will break MADV_DONTNEED.
10569 This is a hint, so ignoring and returning success is ok. */
10572 #if TARGET_ABI_BITS == 32
10573 case TARGET_NR_fcntl64:
10577 from_flock64_fn *copyfrom = copy_from_user_flock64;
10578 to_flock64_fn *copyto = copy_to_user_flock64;
10581 if (!((CPUARMState *)cpu_env)->eabi) {
10582 copyfrom = copy_from_user_oabi_flock64;
10583 copyto = copy_to_user_oabi_flock64;
10587 cmd = target_to_host_fcntl_cmd(arg2);
10588 if (cmd == -TARGET_EINVAL) {
10593 case TARGET_F_GETLK64:
10594 ret = copyfrom(&fl, arg3);
10598 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10600 ret = copyto(arg3, &fl);
10604 case TARGET_F_SETLK64:
10605 case TARGET_F_SETLKW64:
10606 ret = copyfrom(&fl, arg3);
10610 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10613 ret = do_fcntl(arg1, arg2, arg3);
10619 #ifdef TARGET_NR_cacheflush
10620 case TARGET_NR_cacheflush:
10621 /* self-modifying code is handled automatically, so nothing needed */
10624 #ifdef TARGET_NR_getpagesize
10625 case TARGET_NR_getpagesize:
10626 return TARGET_PAGE_SIZE;
10628 case TARGET_NR_gettid:
10629 return get_errno(gettid());
10630 #ifdef TARGET_NR_readahead
10631 case TARGET_NR_readahead:
10632 #if TARGET_ABI_BITS == 32
10633 if (regpairs_aligned(cpu_env, num)) {
10638 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
10640 ret = get_errno(readahead(arg1, arg2, arg3));
10645 #ifdef TARGET_NR_setxattr
10646 case TARGET_NR_listxattr:
10647 case TARGET_NR_llistxattr:
10651 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10653 return -TARGET_EFAULT;
10656 p = lock_user_string(arg1);
10658 if (num == TARGET_NR_listxattr) {
10659 ret = get_errno(listxattr(p, b, arg3));
10661 ret = get_errno(llistxattr(p, b, arg3));
10664 ret = -TARGET_EFAULT;
10666 unlock_user(p, arg1, 0);
10667 unlock_user(b, arg2, arg3);
10670 case TARGET_NR_flistxattr:
10674 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10676 return -TARGET_EFAULT;
10679 ret = get_errno(flistxattr(arg1, b, arg3));
10680 unlock_user(b, arg2, arg3);
10683 case TARGET_NR_setxattr:
10684 case TARGET_NR_lsetxattr:
10686 void *p, *n, *v = 0;
10688 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10690 return -TARGET_EFAULT;
10693 p = lock_user_string(arg1);
10694 n = lock_user_string(arg2);
10696 if (num == TARGET_NR_setxattr) {
10697 ret = get_errno(setxattr(p, n, v, arg4, arg5));
10699 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10702 ret = -TARGET_EFAULT;
10704 unlock_user(p, arg1, 0);
10705 unlock_user(n, arg2, 0);
10706 unlock_user(v, arg3, 0);
10709 case TARGET_NR_fsetxattr:
10713 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10715 return -TARGET_EFAULT;
10718 n = lock_user_string(arg2);
10720 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10722 ret = -TARGET_EFAULT;
10724 unlock_user(n, arg2, 0);
10725 unlock_user(v, arg3, 0);
10728 case TARGET_NR_getxattr:
10729 case TARGET_NR_lgetxattr:
10731 void *p, *n, *v = 0;
10733 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10735 return -TARGET_EFAULT;
10738 p = lock_user_string(arg1);
10739 n = lock_user_string(arg2);
10741 if (num == TARGET_NR_getxattr) {
10742 ret = get_errno(getxattr(p, n, v, arg4));
10744 ret = get_errno(lgetxattr(p, n, v, arg4));
10747 ret = -TARGET_EFAULT;
10749 unlock_user(p, arg1, 0);
10750 unlock_user(n, arg2, 0);
10751 unlock_user(v, arg3, arg4);
10754 case TARGET_NR_fgetxattr:
10758 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10760 return -TARGET_EFAULT;
10763 n = lock_user_string(arg2);
10765 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10767 ret = -TARGET_EFAULT;
10769 unlock_user(n, arg2, 0);
10770 unlock_user(v, arg3, arg4);
10773 case TARGET_NR_removexattr:
10774 case TARGET_NR_lremovexattr:
10777 p = lock_user_string(arg1);
10778 n = lock_user_string(arg2);
10780 if (num == TARGET_NR_removexattr) {
10781 ret = get_errno(removexattr(p, n));
10783 ret = get_errno(lremovexattr(p, n));
10786 ret = -TARGET_EFAULT;
10788 unlock_user(p, arg1, 0);
10789 unlock_user(n, arg2, 0);
10792 case TARGET_NR_fremovexattr:
10795 n = lock_user_string(arg2);
10797 ret = get_errno(fremovexattr(arg1, n));
10799 ret = -TARGET_EFAULT;
10801 unlock_user(n, arg2, 0);
10805 #endif /* CONFIG_ATTR */
10806 #ifdef TARGET_NR_set_thread_area
10807 case TARGET_NR_set_thread_area:
10808 #if defined(TARGET_MIPS)
10809 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10811 #elif defined(TARGET_CRIS)
10813 ret = -TARGET_EINVAL;
10815 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10819 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10820 return do_set_thread_area(cpu_env, arg1);
10821 #elif defined(TARGET_M68K)
10823 TaskState *ts = cpu->opaque;
10824 ts->tp_value = arg1;
10828 return -TARGET_ENOSYS;
10831 #ifdef TARGET_NR_get_thread_area
10832 case TARGET_NR_get_thread_area:
10833 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10834 return do_get_thread_area(cpu_env, arg1);
10835 #elif defined(TARGET_M68K)
10837 TaskState *ts = cpu->opaque;
10838 return ts->tp_value;
10841 return -TARGET_ENOSYS;
10844 #ifdef TARGET_NR_getdomainname
10845 case TARGET_NR_getdomainname:
10846 return -TARGET_ENOSYS;
10849 #ifdef TARGET_NR_clock_settime
10850 case TARGET_NR_clock_settime:
10852 struct timespec ts;
10854 ret = target_to_host_timespec(&ts, arg2);
10855 if (!is_error(ret)) {
10856 ret = get_errno(clock_settime(arg1, &ts));
10861 #ifdef TARGET_NR_clock_gettime
10862 case TARGET_NR_clock_gettime:
10864 struct timespec ts;
10865 ret = get_errno(clock_gettime(arg1, &ts));
10866 if (!is_error(ret)) {
10867 ret = host_to_target_timespec(arg2, &ts);
10872 #ifdef TARGET_NR_clock_getres
10873 case TARGET_NR_clock_getres:
10875 struct timespec ts;
10876 ret = get_errno(clock_getres(arg1, &ts));
10877 if (!is_error(ret)) {
10878 host_to_target_timespec(arg2, &ts);
10883 #ifdef TARGET_NR_clock_nanosleep
10884 case TARGET_NR_clock_nanosleep:
10886 struct timespec ts;
10887 target_to_host_timespec(&ts, arg3);
10888 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10889 &ts, arg4 ? &ts : NULL));
10891 host_to_target_timespec(arg4, &ts);
10893 #if defined(TARGET_PPC)
10894 /* clock_nanosleep is odd in that it returns positive errno values.
10895 * On PPC, CR0 bit 3 should be set in such a situation. */
10896 if (ret && ret != -TARGET_ERESTARTSYS) {
10897 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10904 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10905 case TARGET_NR_set_tid_address:
10906 return get_errno(set_tid_address((int *)g2h(arg1)));
10909 case TARGET_NR_tkill:
10910 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10912 case TARGET_NR_tgkill:
10913 return get_errno(safe_tgkill((int)arg1, (int)arg2,
10914 target_to_host_signal(arg3)));
10916 #ifdef TARGET_NR_set_robust_list
10917 case TARGET_NR_set_robust_list:
10918 case TARGET_NR_get_robust_list:
10919 /* The ABI for supporting robust futexes has userspace pass
10920 * the kernel a pointer to a linked list which is updated by
10921 * userspace after the syscall; the list is walked by the kernel
10922 * when the thread exits. Since the linked list in QEMU guest
10923 * memory isn't a valid linked list for the host and we have
10924 * no way to reliably intercept the thread-death event, we can't
10925 * support these. Silently return ENOSYS so that guest userspace
10926 * falls back to a non-robust futex implementation (which should
10927 * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
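        /* Illustration of the fallback this relies on (not QEMU code):
         * a libc typically registers its robust list at thread start
         * roughly as below, and simply records that the feature is
         * unavailable when it gets ENOSYS back.
         */
#if 0
#include <sys/syscall.h>
#include <unistd.h>

static void try_register_robust_list(struct robust_list_head *head)
{
    if (syscall(__NR_set_robust_list, head, sizeof(*head)) != 0) {
        /* ENOSYS here: fall back to non-robust futexes */
    }
}
#endif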
10934 #if defined(TARGET_NR_utimensat)
10935 case TARGET_NR_utimensat:
10937 struct timespec *tsp, ts[2];
10941 target_to_host_timespec(ts, arg3);
10942 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
10946 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
10948 if (!(p = lock_user_string(arg2))) {
10949 return -TARGET_EFAULT;
10951 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10952 unlock_user(p, arg2, 0);
10957 case TARGET_NR_futex:
10958 return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10959 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10960 case TARGET_NR_inotify_init:
10961 ret = get_errno(sys_inotify_init());
10963 fd_trans_register(ret, &target_inotify_trans);
10967 #ifdef CONFIG_INOTIFY1
10968 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10969 case TARGET_NR_inotify_init1:
10970 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
10971 fcntl_flags_tbl)));
10973 fd_trans_register(ret, &target_inotify_trans);
10978 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10979 case TARGET_NR_inotify_add_watch:
10980 p = lock_user_string(arg2);
10981 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
10982 unlock_user(p, arg2, 0);
10985 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10986 case TARGET_NR_inotify_rm_watch:
10987 return get_errno(sys_inotify_rm_watch(arg1, arg2));
10990 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10991 case TARGET_NR_mq_open:
10993 struct mq_attr posix_mq_attr;
10994 struct mq_attr *pposix_mq_attr;
10997 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
10998 pposix_mq_attr = NULL;
11000 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11001 return -TARGET_EFAULT;
11003 pposix_mq_attr = &posix_mq_attr;
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
11009 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11010 unlock_user (p, arg1, 0);
11014 case TARGET_NR_mq_unlink:
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
11019 ret = get_errno(mq_unlink(p));
11020 unlock_user (p, arg1, 0);
11023 case TARGET_NR_mq_timedsend:
11025 struct timespec ts;
11027 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11029 target_to_host_timespec(&ts, arg5);
11030 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11031 host_to_target_timespec(arg5, &ts);
11033 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11035 unlock_user (p, arg2, arg3);
11039 case TARGET_NR_mq_timedreceive:
11041 struct timespec ts;
11044 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11046 target_to_host_timespec(&ts, arg5);
11047 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11049 host_to_target_timespec(arg5, &ts);
11051 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11054 unlock_user (p, arg2, arg3);
11056 put_user_u32(prio, arg4);
11060 /* Not implemented for now... */
11061 /* case TARGET_NR_mq_notify: */
11064 case TARGET_NR_mq_getsetattr:
11066 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11069 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11070 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11071 &posix_mq_attr_out));
11072 } else if (arg3 != 0) {
11073 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11075 if (ret == 0 && arg3 != 0) {
11076 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11082 #ifdef CONFIG_SPLICE
11083 #ifdef TARGET_NR_tee
11084 case TARGET_NR_tee:
11086 ret = get_errno(tee(arg1,arg2,arg3,arg4));
11090 #ifdef TARGET_NR_splice
11091 case TARGET_NR_splice:
11093 loff_t loff_in, loff_out;
11094 loff_t *ploff_in = NULL, *ploff_out = NULL;
11096 if (get_user_u64(loff_in, arg2)) {
11097 return -TARGET_EFAULT;
11099 ploff_in = &loff_in;
11102 if (get_user_u64(loff_out, arg4)) {
11103 return -TARGET_EFAULT;
11105 ploff_out = &loff_out;
11107 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11109 if (put_user_u64(loff_in, arg2)) {
11110 return -TARGET_EFAULT;
11114 if (put_user_u64(loff_out, arg4)) {
11115 return -TARGET_EFAULT;
11121 #ifdef TARGET_NR_vmsplice
11122 case TARGET_NR_vmsplice:
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
11134 #endif /* CONFIG_SPLICE */
11135 #ifdef CONFIG_EVENTFD
11136 #if defined(TARGET_NR_eventfd)
11137 case TARGET_NR_eventfd:
11138 ret = get_errno(eventfd(arg1, 0));
11140 fd_trans_register(ret, &target_eventfd_trans);
11144 #if defined(TARGET_NR_eventfd2)
11145 case TARGET_NR_eventfd2:
11147 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11148 if (arg2 & TARGET_O_NONBLOCK) {
11149 host_flags |= O_NONBLOCK;
11151 if (arg2 & TARGET_O_CLOEXEC) {
11152 host_flags |= O_CLOEXEC;
11154 ret = get_errno(eventfd(arg1, host_flags));
11156 fd_trans_register(ret, &target_eventfd_trans);
11161 #endif /* CONFIG_EVENTFD */
11162 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11163 case TARGET_NR_fallocate:
11164 #if TARGET_ABI_BITS == 32
11165 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11166 target_offset64(arg5, arg6)));
11168 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11172 #if defined(CONFIG_SYNC_FILE_RANGE)
11173 #if defined(TARGET_NR_sync_file_range)
11174 case TARGET_NR_sync_file_range:
11175 #if TARGET_ABI_BITS == 32
11176 #if defined(TARGET_MIPS)
11177 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11178 target_offset64(arg5, arg6), arg7));
11180 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11181 target_offset64(arg4, arg5), arg6));
11182 #endif /* !TARGET_MIPS */
11184 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11188 #if defined(TARGET_NR_sync_file_range2)
11189 case TARGET_NR_sync_file_range2:
11190 /* This is like sync_file_range but the arguments are reordered */
11191 #if TARGET_ABI_BITS == 32
11192 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11193 target_offset64(arg5, arg6), arg2));
11195 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11200 #if defined(TARGET_NR_signalfd4)
11201 case TARGET_NR_signalfd4:
11202 return do_signalfd4(arg1, arg2, arg4);
11204 #if defined(TARGET_NR_signalfd)
11205 case TARGET_NR_signalfd:
11206 return do_signalfd4(arg1, arg2, 0);
11208 #if defined(CONFIG_EPOLL)
11209 #if defined(TARGET_NR_epoll_create)
11210 case TARGET_NR_epoll_create:
11211 return get_errno(epoll_create(arg1));
11213 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11214 case TARGET_NR_epoll_create1:
11215 return get_errno(epoll_create1(arg1));
11217 #if defined(TARGET_NR_epoll_ctl)
11218 case TARGET_NR_epoll_ctl:
11220 struct epoll_event ep;
11221 struct epoll_event *epp = 0;
11223 struct target_epoll_event *target_ep;
11224 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11225 return -TARGET_EFAULT;
11227 ep.events = tswap32(target_ep->events);
11228 /* The epoll_data_t union is just opaque data to the kernel,
11229 * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
11232 ep.data.u64 = tswap64(target_ep->data.u64);
11233 unlock_user_struct(target_ep, arg4, 0);
11236 return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
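    /* Illustration (not QEMU code): whatever member the guest stored in
     * epoll_data_t (ptr, fd, u32 or u64), byte-swapping the full 64-bit
     * view both here and on the epoll_wait() return path restores the
     * original bytes, because the kernel echoes the union back verbatim.
     */
#if 0
union example_epoll_data {
    void *ptr;
    int fd;
    uint32_t u32;
    uint64_t u64;   /* widest member: swapping this covers them all */
};
#endif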
11240 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11241 #if defined(TARGET_NR_epoll_wait)
11242 case TARGET_NR_epoll_wait:
11244 #if defined(TARGET_NR_epoll_pwait)
11245 case TARGET_NR_epoll_pwait:
11248 struct target_epoll_event *target_ep;
11249 struct epoll_event *ep;
11251 int maxevents = arg3;
11252 int timeout = arg4;
11254 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11255 return -TARGET_EINVAL;
11258 target_ep = lock_user(VERIFY_WRITE, arg2,
11259 maxevents * sizeof(struct target_epoll_event), 1);
11261 return -TARGET_EFAULT;
11264 ep = g_try_new(struct epoll_event, maxevents);
11266 unlock_user(target_ep, arg2, 0);
11267 return -TARGET_ENOMEM;
11271 #if defined(TARGET_NR_epoll_pwait)
11272 case TARGET_NR_epoll_pwait:
11274 target_sigset_t *target_set;
11275 sigset_t _set, *set = &_set;
11278 if (arg6 != sizeof(target_sigset_t)) {
11279 ret = -TARGET_EINVAL;
11283 target_set = lock_user(VERIFY_READ, arg5,
11284 sizeof(target_sigset_t), 1);
11286 ret = -TARGET_EFAULT;
11289 target_to_host_sigset(set, target_set);
11290 unlock_user(target_set, arg5, 0);
11295 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11296 set, SIGSET_T_SIZE));
11300 #if defined(TARGET_NR_epoll_wait)
11301 case TARGET_NR_epoll_wait:
11302 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11307 ret = -TARGET_ENOSYS;
11309 if (!is_error(ret)) {
11311 for (i = 0; i < ret; i++) {
11312 target_ep[i].events = tswap32(ep[i].events);
11313 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11315 unlock_user(target_ep, arg2,
11316 ret * sizeof(struct target_epoll_event));
11318 unlock_user(target_ep, arg2, 0);
11325 #ifdef TARGET_NR_prlimit64
11326 case TARGET_NR_prlimit64:
11328 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11329 struct target_rlimit64 *target_rnew, *target_rold;
11330 struct host_rlimit64 rnew, rold, *rnewp = 0;
11331 int resource = target_to_host_resource(arg2);
11333 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11334 return -TARGET_EFAULT;
11336 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11337 rnew.rlim_max = tswap64(target_rnew->rlim_max);
11338 unlock_user_struct(target_rnew, arg3, 0);
11342 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11343 if (!is_error(ret) && arg4) {
11344 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11345 return -TARGET_EFAULT;
11347 target_rold->rlim_cur = tswap64(rold.rlim_cur);
11348 target_rold->rlim_max = tswap64(rold.rlim_max);
11349 unlock_user_struct(target_rold, arg4, 1);
11354 #ifdef TARGET_NR_gethostname
11355 case TARGET_NR_gethostname:
        {
            char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
            if (name) {
                ret = get_errno(gethostname(name, arg2));
                unlock_user(name, arg1, arg2);
            } else {
                ret = -TARGET_EFAULT;
            }
11367 #ifdef TARGET_NR_atomic_cmpxchg_32
11368 case TARGET_NR_atomic_cmpxchg_32:
11370 /* should use start_exclusive from main.c */
11371 abi_ulong mem_value;
11372 if (get_user_u32(mem_value, arg6)) {
11373 target_siginfo_t info;
11374 info.si_signo = SIGSEGV;
11376 info.si_code = TARGET_SEGV_MAPERR;
11377 info._sifields._sigfault._addr = arg6;
11378 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11379 QEMU_SI_FAULT, &info);
11383 if (mem_value == arg2)
11384 put_user_u32(arg1, arg6);
11388 #ifdef TARGET_NR_atomic_barrier
11389 case TARGET_NR_atomic_barrier:
11390 /* Like the kernel implementation and the
11391 qemu arm barrier, no-op this? */
11395 #ifdef TARGET_NR_timer_create
11396 case TARGET_NR_timer_create:
11398 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11400 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11403 int timer_index = next_free_host_timer();
11405 if (timer_index < 0) {
11406 ret = -TARGET_EAGAIN;
11408 timer_t *phtimer = g_posix_timers + timer_index;
11411 phost_sevp = &host_sevp;
11412 ret = target_to_host_sigevent(phost_sevp, arg2);
11418 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11422 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11423 return -TARGET_EFAULT;
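            /* Sketch (illustration): guest-visible timer ids are the host
             * table index tagged with TIMER_MAGIC, i.e. id = TIMER_MAGIC |
             * index, so get_timer_id() in the timer_settime/gettime/delete
             * cases below can reject ids this emulation never issued and
             * recover the index; the exact extraction depends on
             * TIMER_MAGIC's definition.
             */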
11431 #ifdef TARGET_NR_timer_settime
11432 case TARGET_NR_timer_settime:
11434 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11435 * struct itimerspec * old_value */
11436 target_timer_t timerid = get_timer_id(arg1);
11440 } else if (arg3 == 0) {
11441 ret = -TARGET_EINVAL;
11443 timer_t htimer = g_posix_timers[timerid];
11444 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11446 if (target_to_host_itimerspec(&hspec_new, arg3)) {
11447 return -TARGET_EFAULT;
11450 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11451 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11452 return -TARGET_EFAULT;
11459 #ifdef TARGET_NR_timer_gettime
11460 case TARGET_NR_timer_gettime:
11462 /* args: timer_t timerid, struct itimerspec *curr_value */
11463 target_timer_t timerid = get_timer_id(arg1);
11467 } else if (!arg2) {
11468 ret = -TARGET_EFAULT;
11470 timer_t htimer = g_posix_timers[timerid];
11471 struct itimerspec hspec;
11472 ret = get_errno(timer_gettime(htimer, &hspec));
11474 if (host_to_target_itimerspec(arg2, &hspec)) {
11475 ret = -TARGET_EFAULT;
11482 #ifdef TARGET_NR_timer_getoverrun
11483 case TARGET_NR_timer_getoverrun:
11485 /* args: timer_t timerid */
11486 target_timer_t timerid = get_timer_id(arg1);
11491 timer_t htimer = g_posix_timers[timerid];
11492 ret = get_errno(timer_getoverrun(htimer));
11494 fd_trans_unregister(ret);
11499 #ifdef TARGET_NR_timer_delete
11500 case TARGET_NR_timer_delete:
11502 /* args: timer_t timerid */
11503 target_timer_t timerid = get_timer_id(arg1);
11508 timer_t htimer = g_posix_timers[timerid];
11509 ret = get_errno(timer_delete(htimer));
11510 g_posix_timers[timerid] = 0;
11516 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11517 case TARGET_NR_timerfd_create:
11518 return get_errno(timerfd_create(arg1,
11519 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11522 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11523 case TARGET_NR_timerfd_gettime:
11525 struct itimerspec its_curr;
11527 ret = get_errno(timerfd_gettime(arg1, &its_curr));
11529 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11530 return -TARGET_EFAULT;
11536 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11537 case TARGET_NR_timerfd_settime:
11539 struct itimerspec its_new, its_old, *p_new;
11542 if (target_to_host_itimerspec(&its_new, arg3)) {
11543 return -TARGET_EFAULT;
11550 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11552 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11553 return -TARGET_EFAULT;
11559 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11560 case TARGET_NR_ioprio_get:
11561 return get_errno(ioprio_get(arg1, arg2));
11564 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11565 case TARGET_NR_ioprio_set:
11566 return get_errno(ioprio_set(arg1, arg2, arg3));
11569 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11570 case TARGET_NR_setns:
11571 return get_errno(setns(arg1, arg2));
11573 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11574 case TARGET_NR_unshare:
11575 return get_errno(unshare(arg1));
11577 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11578 case TARGET_NR_kcmp:
11579 return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11581 #ifdef TARGET_NR_swapcontext
11582 case TARGET_NR_swapcontext:
11583 /* PowerPC specific. */
11584 return do_swapcontext(cpu_env, arg1, arg2, arg3);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
11594 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11595 abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    abi_long ret;
11602 #ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -TARGET_ERESTARTSYS;
        }
    }
#endif
11616 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11617 arg5, arg6, arg7, arg8);
11619 if (unlikely(do_strace)) {
11620 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11621 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11622 arg5, arg6, arg7, arg8);
11623 print_syscall_ret(num, ret);
11625 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11626 arg5, arg6, arg7, arg8);
    trace_guest_user_syscall_ret(cpu, num, ret);

    return ret;
}