4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
62 #include <sys/timerfd.h>
65 #include <sys/eventfd.h>
68 #include <sys/epoll.h>
71 #include "qemu/xattr.h"
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
93 #include <linux/mtio.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
116 #include <linux/btrfs.h>
119 #include <libdrm/drm.h>
120 #include <libdrm/i915_drm.h>
122 #include "linux_loop.h"
126 #include "qemu/guest-random.h"
127 #include "qemu/selfmap.h"
128 #include "user/syscall-trace.h"
129 #include "qapi/error.h"
130 #include "fd-trans.h"
134 #define CLONE_IO 0x80000000 /* Clone io context */
137 /* We can't directly call the host clone syscall, because this will
138 * badly confuse libc (breaking mutexes, for example). So we must
139 * divide clone flags into:
140 * * flag combinations that look like pthread_create()
141 * * flag combinations that look like fork()
142 * * flags we can implement within QEMU itself
143 * * flags we can't support and will return an error for
145 /* For thread creation, all these flags must be present; for
146 * fork, none must be present.
148 #define CLONE_THREAD_FLAGS \
149 (CLONE_VM | CLONE_FS | CLONE_FILES | \
150 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
152 /* These flags are ignored:
153 * CLONE_DETACHED is now ignored by the kernel;
154 * CLONE_IO is just an optimisation hint to the I/O scheduler
156 #define CLONE_IGNORED_FLAGS \
157 (CLONE_DETACHED | CLONE_IO)
159 /* Flags for fork which we can implement within QEMU itself */
160 #define CLONE_OPTIONAL_FORK_FLAGS \
161 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
162 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
164 /* Flags for thread creation which we can implement within QEMU itself */
165 #define CLONE_OPTIONAL_THREAD_FLAGS \
166 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
167 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
169 #define CLONE_INVALID_FORK_FLAGS \
170 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
172 #define CLONE_INVALID_THREAD_FLAGS \
173 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
174 CLONE_IGNORED_FLAGS))
176 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
177 * have almost all been allocated. We cannot support any of
178 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
179 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
180 * The checks against the invalid thread masks above will catch these.
181 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
184 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
185 * once. This exercises the codepaths for restart.
187 //#define DEBUG_ERESTARTSYS
189 //#include <linux/msdos_fs.h>
190 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
191 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
201 #define _syscall0(type,name) \
202 static type name (void) \
204 return syscall(__NR_##name); \
207 #define _syscall1(type,name,type1,arg1) \
208 static type name (type1 arg1) \
210 return syscall(__NR_##name, arg1); \
213 #define _syscall2(type,name,type1,arg1,type2,arg2) \
214 static type name (type1 arg1,type2 arg2) \
216 return syscall(__NR_##name, arg1, arg2); \
219 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
220 static type name (type1 arg1,type2 arg2,type3 arg3) \
222 return syscall(__NR_##name, arg1, arg2, arg3); \
225 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
226 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
228 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
231 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
233 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
235 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
239 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
240 type5,arg5,type6,arg6) \
241 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
244 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
248 #define __NR_sys_uname __NR_uname
249 #define __NR_sys_getcwd1 __NR_getcwd
250 #define __NR_sys_getdents __NR_getdents
251 #define __NR_sys_getdents64 __NR_getdents64
252 #define __NR_sys_getpriority __NR_getpriority
253 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
254 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
255 #define __NR_sys_syslog __NR_syslog
256 #if defined(__NR_futex)
257 # define __NR_sys_futex __NR_futex
259 #if defined(__NR_futex_time64)
260 # define __NR_sys_futex_time64 __NR_futex_time64
262 #define __NR_sys_inotify_init __NR_inotify_init
263 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
264 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
265 #define __NR_sys_statx __NR_statx
267 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
268 #define __NR__llseek __NR_lseek
271 /* Newer kernel ports have llseek() instead of _llseek() */
272 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
273 #define TARGET_NR__llseek TARGET_NR_llseek
276 #define __NR_sys_gettid __NR_gettid
277 _syscall0(int, sys_gettid)
279 /* For the 64-bit guest on 32-bit host case we must emulate
280 * getdents using getdents64, because otherwise the host
281 * might hand us back more dirent records than we can fit
282 * into the guest buffer after structure format conversion.
283 * Otherwise we emulate getdents with getdents if the host has it.
285 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
286 #define EMULATE_GETDENTS_WITH_GETDENTS
289 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
290 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
292 #if (defined(TARGET_NR_getdents) && \
293 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
294 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
295 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
297 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
298 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
299 loff_t *, res, uint, wh);
301 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
302 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
304 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
305 #ifdef __NR_exit_group
306 _syscall1(int,exit_group,int,error_code)
308 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
309 _syscall1(int,set_tid_address,int *,tidptr)
311 #if defined(__NR_futex)
312 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
313 const struct timespec *,timeout,int *,uaddr2,int,val3)
315 #if defined(__NR_futex_time64)
316 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
317 const struct timespec *,timeout,int *,uaddr2,int,val3)
319 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
320 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
321 unsigned long *, user_mask_ptr);
322 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
323 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
324 unsigned long *, user_mask_ptr);
325 #define __NR_sys_getcpu __NR_getcpu
326 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
327 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
329 _syscall2(int, capget, struct __user_cap_header_struct *, header,
330 struct __user_cap_data_struct *, data);
331 _syscall2(int, capset, struct __user_cap_header_struct *, header,
332 struct __user_cap_data_struct *, data);
333 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
334 _syscall2(int, ioprio_get, int, which, int, who)
336 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
337 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
339 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
340 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
343 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
344 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
345 unsigned long, idx1, unsigned long, idx2)
349 * It is assumed that struct statx is architecture independent.
351 #if defined(TARGET_NR_statx) && defined(__NR_statx)
352 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
353 unsigned int, mask, struct target_statx *, statxbuf)
355 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
356 _syscall2(int, membarrier, int, cmd, int, flags)
359 static bitmask_transtbl fcntl_flags_tbl[] = {
360 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
361 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
362 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
363 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
364 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
365 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
366 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
367 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
368 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
369 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
370 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
371 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
372 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
373 #if defined(O_DIRECT)
374 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
376 #if defined(O_NOATIME)
377 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
379 #if defined(O_CLOEXEC)
380 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
383 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
385 #if defined(O_TMPFILE)
386 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
388 /* Don't terminate the list prematurely on 64-bit host+guest. */
389 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
390 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
395 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
397 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
398 #if defined(__NR_utimensat)
399 #define __NR_sys_utimensat __NR_utimensat
400 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
401 const struct timespec *,tsp,int,flags)
403 static int sys_utimensat(int dirfd, const char *pathname,
404 const struct timespec times[2], int flags)
410 #endif /* TARGET_NR_utimensat */
412 #ifdef TARGET_NR_renameat2
413 #if defined(__NR_renameat2)
414 #define __NR_sys_renameat2 __NR_renameat2
415 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
416 const char *, new, unsigned int, flags)
418 static int sys_renameat2(int oldfd, const char *old,
419 int newfd, const char *new, int flags)
422 return renameat(oldfd, old, newfd, new);
428 #endif /* TARGET_NR_renameat2 */
430 #ifdef CONFIG_INOTIFY
431 #include <sys/inotify.h>
433 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
434 static int sys_inotify_init(void)
436 return (inotify_init());
439 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
440 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
442 return (inotify_add_watch(fd, pathname, mask));
445 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
446 static int sys_inotify_rm_watch(int fd, int32_t wd)
448 return (inotify_rm_watch(fd, wd));
451 #ifdef CONFIG_INOTIFY1
452 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
453 static int sys_inotify_init1(int flags)
455 return (inotify_init1(flags));
460 /* Userspace can usually survive runtime without inotify */
461 #undef TARGET_NR_inotify_init
462 #undef TARGET_NR_inotify_init1
463 #undef TARGET_NR_inotify_add_watch
464 #undef TARGET_NR_inotify_rm_watch
465 #endif /* CONFIG_INOTIFY */
467 #if defined(TARGET_NR_prlimit64)
468 #ifndef __NR_prlimit64
469 # define __NR_prlimit64 -1
471 #define __NR_sys_prlimit64 __NR_prlimit64
472 /* The glibc rlimit structure may not be that used by the underlying syscall */
473 struct host_rlimit64 {
477 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
478 const struct host_rlimit64 *, new_limit,
479 struct host_rlimit64 *, old_limit)
483 #if defined(TARGET_NR_timer_create)
484 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
485 static timer_t g_posix_timers[32] = { 0, } ;
487 static inline int next_free_host_timer(void)
490 /* FIXME: Does finding the next free slot require a lock? */
491 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
492 if (g_posix_timers[k] == 0) {
493 g_posix_timers[k] = (timer_t) 1;
501 #define ERRNO_TABLE_SIZE 1200
503 /* target_to_host_errno_table[] is initialized from
504 * host_to_target_errno_table[] in syscall_init(). */
505 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
509 * This list is the union of errno values overridden in asm-<arch>/errno.h
510 * minus the errnos that are not actually generic to all archs.
512 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
513 [EAGAIN] = TARGET_EAGAIN,
514 [EIDRM] = TARGET_EIDRM,
515 [ECHRNG] = TARGET_ECHRNG,
516 [EL2NSYNC] = TARGET_EL2NSYNC,
517 [EL3HLT] = TARGET_EL3HLT,
518 [EL3RST] = TARGET_EL3RST,
519 [ELNRNG] = TARGET_ELNRNG,
520 [EUNATCH] = TARGET_EUNATCH,
521 [ENOCSI] = TARGET_ENOCSI,
522 [EL2HLT] = TARGET_EL2HLT,
523 [EDEADLK] = TARGET_EDEADLK,
524 [ENOLCK] = TARGET_ENOLCK,
525 [EBADE] = TARGET_EBADE,
526 [EBADR] = TARGET_EBADR,
527 [EXFULL] = TARGET_EXFULL,
528 [ENOANO] = TARGET_ENOANO,
529 [EBADRQC] = TARGET_EBADRQC,
530 [EBADSLT] = TARGET_EBADSLT,
531 [EBFONT] = TARGET_EBFONT,
532 [ENOSTR] = TARGET_ENOSTR,
533 [ENODATA] = TARGET_ENODATA,
534 [ETIME] = TARGET_ETIME,
535 [ENOSR] = TARGET_ENOSR,
536 [ENONET] = TARGET_ENONET,
537 [ENOPKG] = TARGET_ENOPKG,
538 [EREMOTE] = TARGET_EREMOTE,
539 [ENOLINK] = TARGET_ENOLINK,
540 [EADV] = TARGET_EADV,
541 [ESRMNT] = TARGET_ESRMNT,
542 [ECOMM] = TARGET_ECOMM,
543 [EPROTO] = TARGET_EPROTO,
544 [EDOTDOT] = TARGET_EDOTDOT,
545 [EMULTIHOP] = TARGET_EMULTIHOP,
546 [EBADMSG] = TARGET_EBADMSG,
547 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
548 [EOVERFLOW] = TARGET_EOVERFLOW,
549 [ENOTUNIQ] = TARGET_ENOTUNIQ,
550 [EBADFD] = TARGET_EBADFD,
551 [EREMCHG] = TARGET_EREMCHG,
552 [ELIBACC] = TARGET_ELIBACC,
553 [ELIBBAD] = TARGET_ELIBBAD,
554 [ELIBSCN] = TARGET_ELIBSCN,
555 [ELIBMAX] = TARGET_ELIBMAX,
556 [ELIBEXEC] = TARGET_ELIBEXEC,
557 [EILSEQ] = TARGET_EILSEQ,
558 [ENOSYS] = TARGET_ENOSYS,
559 [ELOOP] = TARGET_ELOOP,
560 [ERESTART] = TARGET_ERESTART,
561 [ESTRPIPE] = TARGET_ESTRPIPE,
562 [ENOTEMPTY] = TARGET_ENOTEMPTY,
563 [EUSERS] = TARGET_EUSERS,
564 [ENOTSOCK] = TARGET_ENOTSOCK,
565 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
566 [EMSGSIZE] = TARGET_EMSGSIZE,
567 [EPROTOTYPE] = TARGET_EPROTOTYPE,
568 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
569 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
570 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
571 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
572 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
573 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
574 [EADDRINUSE] = TARGET_EADDRINUSE,
575 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
576 [ENETDOWN] = TARGET_ENETDOWN,
577 [ENETUNREACH] = TARGET_ENETUNREACH,
578 [ENETRESET] = TARGET_ENETRESET,
579 [ECONNABORTED] = TARGET_ECONNABORTED,
580 [ECONNRESET] = TARGET_ECONNRESET,
581 [ENOBUFS] = TARGET_ENOBUFS,
582 [EISCONN] = TARGET_EISCONN,
583 [ENOTCONN] = TARGET_ENOTCONN,
584 [EUCLEAN] = TARGET_EUCLEAN,
585 [ENOTNAM] = TARGET_ENOTNAM,
586 [ENAVAIL] = TARGET_ENAVAIL,
587 [EISNAM] = TARGET_EISNAM,
588 [EREMOTEIO] = TARGET_EREMOTEIO,
589 [EDQUOT] = TARGET_EDQUOT,
590 [ESHUTDOWN] = TARGET_ESHUTDOWN,
591 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
592 [ETIMEDOUT] = TARGET_ETIMEDOUT,
593 [ECONNREFUSED] = TARGET_ECONNREFUSED,
594 [EHOSTDOWN] = TARGET_EHOSTDOWN,
595 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
596 [EALREADY] = TARGET_EALREADY,
597 [EINPROGRESS] = TARGET_EINPROGRESS,
598 [ESTALE] = TARGET_ESTALE,
599 [ECANCELED] = TARGET_ECANCELED,
600 [ENOMEDIUM] = TARGET_ENOMEDIUM,
601 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
603 [ENOKEY] = TARGET_ENOKEY,
606 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
609 [EKEYREVOKED] = TARGET_EKEYREVOKED,
612 [EKEYREJECTED] = TARGET_EKEYREJECTED,
615 [EOWNERDEAD] = TARGET_EOWNERDEAD,
617 #ifdef ENOTRECOVERABLE
618 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
621 [ENOMSG] = TARGET_ENOMSG,
624 [ERFKILL] = TARGET_ERFKILL,
627 [EHWPOISON] = TARGET_EHWPOISON,
631 static inline int host_to_target_errno(int err)
633 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
634 host_to_target_errno_table[err]) {
635 return host_to_target_errno_table[err];
640 static inline int target_to_host_errno(int err)
642 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
643 target_to_host_errno_table[err]) {
644 return target_to_host_errno_table[err];
649 static inline abi_long get_errno(abi_long ret)
652 return -host_to_target_errno(errno);
657 const char *target_strerror(int err)
659 if (err == TARGET_ERESTARTSYS) {
660 return "To be restarted";
662 if (err == TARGET_QEMU_ESIGRETURN) {
663 return "Successful exit from sigreturn";
666 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
669 return strerror(target_to_host_errno(err));
672 #define safe_syscall0(type, name) \
673 static type safe_##name(void) \
675 return safe_syscall(__NR_##name); \
678 #define safe_syscall1(type, name, type1, arg1) \
679 static type safe_##name(type1 arg1) \
681 return safe_syscall(__NR_##name, arg1); \
684 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
685 static type safe_##name(type1 arg1, type2 arg2) \
687 return safe_syscall(__NR_##name, arg1, arg2); \
690 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
691 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
693 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
696 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
698 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
700 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
703 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
704 type4, arg4, type5, arg5) \
705 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
708 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
711 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
712 type4, arg4, type5, arg5, type6, arg6) \
713 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
714 type5 arg5, type6 arg6) \
716 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
719 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
720 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
721 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
722 int, flags, mode_t, mode)
723 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
724 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
725 struct rusage *, rusage)
727 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
728 int, options, struct rusage *, rusage)
729 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
730 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
731 defined(TARGET_NR_pselect6)
732 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
733 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
735 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
736 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
737 struct timespec *, tsp, const sigset_t *, sigmask,
740 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
741 int, maxevents, int, timeout, const sigset_t *, sigmask,
743 #if defined(__NR_futex)
744 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
745 const struct timespec *,timeout,int *,uaddr2,int,val3)
747 #if defined(__NR_futex_time64)
748 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
749 const struct timespec *,timeout,int *,uaddr2,int,val3)
751 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
752 safe_syscall2(int, kill, pid_t, pid, int, sig)
753 safe_syscall2(int, tkill, int, tid, int, sig)
754 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
755 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
756 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
757 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
758 unsigned long, pos_l, unsigned long, pos_h)
759 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
760 unsigned long, pos_l, unsigned long, pos_h)
761 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
763 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
764 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
765 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
766 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
767 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
768 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
769 safe_syscall2(int, flock, int, fd, int, operation)
770 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
771 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
772 const struct timespec *, uts, size_t, sigsetsize)
774 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
776 #if defined(TARGET_NR_nanosleep)
777 safe_syscall2(int, nanosleep, const struct timespec *, req,
778 struct timespec *, rem)
780 #if defined(TARGET_NR_clock_nanosleep) || \
781 defined(TARGET_NR_clock_nanosleep_time64)
782 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
783 const struct timespec *, req, struct timespec *, rem)
787 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
790 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
791 void *, ptr, long, fifth)
795 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
799 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
800 long, msgtype, int, flags)
802 #ifdef __NR_semtimedop
803 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
804 unsigned, nsops, const struct timespec *, timeout)
806 #if defined(TARGET_NR_mq_timedsend) || \
807 defined(TARGET_NR_mq_timedsend_time64)
808 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
809 size_t, len, unsigned, prio, const struct timespec *, timeout)
811 #if defined(TARGET_NR_mq_timedreceive) || \
812 defined(TARGET_NR_mq_timedreceive_time64)
813 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
814 size_t, len, unsigned *, prio, const struct timespec *, timeout)
816 /* We do ioctl like this rather than via safe_syscall3 to preserve the
817 * "third argument might be integer or pointer or not present" behaviour of
820 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
821 /* Similarly for fcntl. Note that callers must always:
822 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
823 * use the flock64 struct rather than unsuffixed flock
824 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
827 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
829 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
832 static inline int host_to_target_sock_type(int host_type)
836 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
838 target_type = TARGET_SOCK_DGRAM;
841 target_type = TARGET_SOCK_STREAM;
844 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
848 #if defined(SOCK_CLOEXEC)
849 if (host_type & SOCK_CLOEXEC) {
850 target_type |= TARGET_SOCK_CLOEXEC;
854 #if defined(SOCK_NONBLOCK)
855 if (host_type & SOCK_NONBLOCK) {
856 target_type |= TARGET_SOCK_NONBLOCK;
863 static abi_ulong target_brk;
864 static abi_ulong target_original_brk;
865 static abi_ulong brk_page;
867 void target_set_brk(abi_ulong new_brk)
869 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
870 brk_page = HOST_PAGE_ALIGN(target_brk);
873 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
874 #define DEBUGF_BRK(message, args...)
876 /* do_brk() must return target values and target errnos. */
877 abi_long do_brk(abi_ulong new_brk)
879 abi_long mapped_addr;
880 abi_ulong new_alloc_size;
882 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
885 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
888 if (new_brk < target_original_brk) {
889 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
894 /* If the new brk is less than the highest page reserved to the
895 * target heap allocation, set it and we're almost done... */
896 if (new_brk <= brk_page) {
897 /* Heap contents are initialized to zero, as for anonymous
899 if (new_brk > target_brk) {
900 memset(g2h(target_brk), 0, new_brk - target_brk);
902 target_brk = new_brk;
903 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
907 /* We need to allocate more memory after the brk... Note that
908 * we don't use MAP_FIXED because that will map over the top of
909 * any existing mapping (like the one with the host libc or qemu
910 * itself); instead we treat "mapped but at wrong address" as
911 * a failure and unmap again.
913 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
914 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
915 PROT_READ|PROT_WRITE,
916 MAP_ANON|MAP_PRIVATE, 0, 0));
918 if (mapped_addr == brk_page) {
919 /* Heap contents are initialized to zero, as for anonymous
920 * mapped pages. Technically the new pages are already
921 * initialized to zero since they *are* anonymous mapped
922 * pages, however we have to take care with the contents that
923 * come from the remaining part of the previous page: it may
924 * contains garbage data due to a previous heap usage (grown
926 memset(g2h(target_brk), 0, brk_page - target_brk);
928 target_brk = new_brk;
929 brk_page = HOST_PAGE_ALIGN(target_brk);
930 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
933 } else if (mapped_addr != -1) {
934 /* Mapped but at wrong address, meaning there wasn't actually
935 * enough space for this brk.
937 target_munmap(mapped_addr, new_alloc_size);
939 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
942 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
945 #if defined(TARGET_ALPHA)
946 /* We (partially) emulate OSF/1 on Alpha, which requires we
947 return a proper errno, not an unchanged brk value. */
948 return -TARGET_ENOMEM;
950 /* For everything else, return the previous break. */
954 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
955 defined(TARGET_NR_pselect6)
956 static inline abi_long copy_from_user_fdset(fd_set *fds,
957 abi_ulong target_fds_addr,
961 abi_ulong b, *target_fds;
963 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
964 if (!(target_fds = lock_user(VERIFY_READ,
966 sizeof(abi_ulong) * nw,
968 return -TARGET_EFAULT;
972 for (i = 0; i < nw; i++) {
973 /* grab the abi_ulong */
974 __get_user(b, &target_fds[i]);
975 for (j = 0; j < TARGET_ABI_BITS; j++) {
976 /* check the bit inside the abi_ulong */
983 unlock_user(target_fds, target_fds_addr, 0);
988 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
989 abi_ulong target_fds_addr,
992 if (target_fds_addr) {
993 if (copy_from_user_fdset(fds, target_fds_addr, n))
994 return -TARGET_EFAULT;
1002 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1008 abi_ulong *target_fds;
1010 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1011 if (!(target_fds = lock_user(VERIFY_WRITE,
1013 sizeof(abi_ulong) * nw,
1015 return -TARGET_EFAULT;
1018 for (i = 0; i < nw; i++) {
1020 for (j = 0; j < TARGET_ABI_BITS; j++) {
1021 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1024 __put_user(v, &target_fds[i]);
1027 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1033 #if defined(__alpha__)
1034 #define HOST_HZ 1024
1039 static inline abi_long host_to_target_clock_t(long ticks)
1041 #if HOST_HZ == TARGET_HZ
1044 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1048 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1049 const struct rusage *rusage)
1051 struct target_rusage *target_rusage;
1053 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1054 return -TARGET_EFAULT;
1055 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1056 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1057 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1058 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1059 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1060 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1061 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1062 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1063 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1064 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1065 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1066 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1067 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1068 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1069 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1070 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1071 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1072 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1073 unlock_user_struct(target_rusage, target_addr, 1);
1078 #ifdef TARGET_NR_setrlimit
1079 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1081 abi_ulong target_rlim_swap;
1084 target_rlim_swap = tswapal(target_rlim);
1085 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1086 return RLIM_INFINITY;
1088 result = target_rlim_swap;
1089 if (target_rlim_swap != (rlim_t)result)
1090 return RLIM_INFINITY;
1096 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1097 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1099 abi_ulong target_rlim_swap;
1102 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1103 target_rlim_swap = TARGET_RLIM_INFINITY;
1105 target_rlim_swap = rlim;
1106 result = tswapal(target_rlim_swap);
1112 static inline int target_to_host_resource(int code)
1115 case TARGET_RLIMIT_AS:
1117 case TARGET_RLIMIT_CORE:
1119 case TARGET_RLIMIT_CPU:
1121 case TARGET_RLIMIT_DATA:
1123 case TARGET_RLIMIT_FSIZE:
1124 return RLIMIT_FSIZE;
1125 case TARGET_RLIMIT_LOCKS:
1126 return RLIMIT_LOCKS;
1127 case TARGET_RLIMIT_MEMLOCK:
1128 return RLIMIT_MEMLOCK;
1129 case TARGET_RLIMIT_MSGQUEUE:
1130 return RLIMIT_MSGQUEUE;
1131 case TARGET_RLIMIT_NICE:
1133 case TARGET_RLIMIT_NOFILE:
1134 return RLIMIT_NOFILE;
1135 case TARGET_RLIMIT_NPROC:
1136 return RLIMIT_NPROC;
1137 case TARGET_RLIMIT_RSS:
1139 case TARGET_RLIMIT_RTPRIO:
1140 return RLIMIT_RTPRIO;
1141 case TARGET_RLIMIT_SIGPENDING:
1142 return RLIMIT_SIGPENDING;
1143 case TARGET_RLIMIT_STACK:
1144 return RLIMIT_STACK;
1150 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1151 abi_ulong target_tv_addr)
1153 struct target_timeval *target_tv;
1155 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1156 return -TARGET_EFAULT;
1159 __get_user(tv->tv_sec, &target_tv->tv_sec);
1160 __get_user(tv->tv_usec, &target_tv->tv_usec);
1162 unlock_user_struct(target_tv, target_tv_addr, 0);
1167 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1168 const struct timeval *tv)
1170 struct target_timeval *target_tv;
1172 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1173 return -TARGET_EFAULT;
1176 __put_user(tv->tv_sec, &target_tv->tv_sec);
1177 __put_user(tv->tv_usec, &target_tv->tv_usec);
1179 unlock_user_struct(target_tv, target_tv_addr, 1);
1184 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1185 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1186 abi_ulong target_tv_addr)
1188 struct target__kernel_sock_timeval *target_tv;
1190 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1191 return -TARGET_EFAULT;
1194 __get_user(tv->tv_sec, &target_tv->tv_sec);
1195 __get_user(tv->tv_usec, &target_tv->tv_usec);
1197 unlock_user_struct(target_tv, target_tv_addr, 0);
1203 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1204 const struct timeval *tv)
1206 struct target__kernel_sock_timeval *target_tv;
1208 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1209 return -TARGET_EFAULT;
1212 __put_user(tv->tv_sec, &target_tv->tv_sec);
1213 __put_user(tv->tv_usec, &target_tv->tv_usec);
1215 unlock_user_struct(target_tv, target_tv_addr, 1);
1220 #if defined(TARGET_NR_futex) || \
1221 defined(TARGET_NR_rt_sigtimedwait) || \
1222 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1223 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1224 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1225 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1226 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1227 defined(TARGET_NR_timer_settime) || \
1228 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1229 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1230 abi_ulong target_addr)
1232 struct target_timespec *target_ts;
1234 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1235 return -TARGET_EFAULT;
1237 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1238 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1239 unlock_user_struct(target_ts, target_addr, 0);
1244 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1245 defined(TARGET_NR_timer_settime64) || \
1246 defined(TARGET_NR_mq_timedsend_time64) || \
1247 defined(TARGET_NR_mq_timedreceive_time64) || \
1248 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1249 defined(TARGET_NR_clock_nanosleep_time64) || \
1250 defined(TARGET_NR_rt_sigtimedwait_time64) || \
1251 defined(TARGET_NR_utimensat) || \
1252 defined(TARGET_NR_utimensat_time64) || \
1253 defined(TARGET_NR_semtimedop_time64)
1254 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1255 abi_ulong target_addr)
1257 struct target__kernel_timespec *target_ts;
1259 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1260 return -TARGET_EFAULT;
1262 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1263 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1264 /* in 32bit mode, this drops the padding */
1265 host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1266 unlock_user_struct(target_ts, target_addr, 0);
1271 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1272 struct timespec *host_ts)
1274 struct target_timespec *target_ts;
1276 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1277 return -TARGET_EFAULT;
1279 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1280 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1281 unlock_user_struct(target_ts, target_addr, 1);
1285 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1286 struct timespec *host_ts)
1288 struct target__kernel_timespec *target_ts;
1290 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1291 return -TARGET_EFAULT;
1293 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1294 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1295 unlock_user_struct(target_ts, target_addr, 1);
1299 #if defined(TARGET_NR_gettimeofday)
1300 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1301 struct timezone *tz)
1303 struct target_timezone *target_tz;
1305 if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1306 return -TARGET_EFAULT;
1309 __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1310 __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1312 unlock_user_struct(target_tz, target_tz_addr, 1);
1318 #if defined(TARGET_NR_settimeofday)
1319 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1320 abi_ulong target_tz_addr)
1322 struct target_timezone *target_tz;
1324 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1325 return -TARGET_EFAULT;
1328 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1329 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1331 unlock_user_struct(target_tz, target_tz_addr, 0);
1337 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1340 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1341 abi_ulong target_mq_attr_addr)
1343 struct target_mq_attr *target_mq_attr;
1345 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1346 target_mq_attr_addr, 1))
1347 return -TARGET_EFAULT;
1349 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1350 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1351 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1352 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1354 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1359 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1360 const struct mq_attr *attr)
1362 struct target_mq_attr *target_mq_attr;
1364 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1365 target_mq_attr_addr, 0))
1366 return -TARGET_EFAULT;
1368 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1369 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1370 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1371 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1373 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1379 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1380 /* do_select() must return target values and target errnos. */
1381 static abi_long do_select(int n,
1382 abi_ulong rfd_addr, abi_ulong wfd_addr,
1383 abi_ulong efd_addr, abi_ulong target_tv_addr)
1385 fd_set rfds, wfds, efds;
1386 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1388 struct timespec ts, *ts_ptr;
1391 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1395 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1399 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1404 if (target_tv_addr) {
1405 if (copy_from_user_timeval(&tv, target_tv_addr))
1406 return -TARGET_EFAULT;
1407 ts.tv_sec = tv.tv_sec;
1408 ts.tv_nsec = tv.tv_usec * 1000;
1414 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1417 if (!is_error(ret)) {
1418 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1419 return -TARGET_EFAULT;
1420 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1421 return -TARGET_EFAULT;
1422 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1423 return -TARGET_EFAULT;
1425 if (target_tv_addr) {
1426 tv.tv_sec = ts.tv_sec;
1427 tv.tv_usec = ts.tv_nsec / 1000;
1428 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1429 return -TARGET_EFAULT;
1437 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1438 static abi_long do_old_select(abi_ulong arg1)
1440 struct target_sel_arg_struct *sel;
1441 abi_ulong inp, outp, exp, tvp;
1444 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1445 return -TARGET_EFAULT;
1448 nsel = tswapal(sel->n);
1449 inp = tswapal(sel->inp);
1450 outp = tswapal(sel->outp);
1451 exp = tswapal(sel->exp);
1452 tvp = tswapal(sel->tvp);
1454 unlock_user_struct(sel, arg1, 0);
1456 return do_select(nsel, inp, outp, exp, tvp);
1461 static abi_long do_pipe2(int host_pipe[], int flags)
1464 return pipe2(host_pipe, flags);
1470 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1471 int flags, int is_pipe2)
1475 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1478 return get_errno(ret);
1480 /* Several targets have special calling conventions for the original
1481 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1483 #if defined(TARGET_ALPHA)
1484 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1485 return host_pipe[0];
1486 #elif defined(TARGET_MIPS)
1487 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1488 return host_pipe[0];
1489 #elif defined(TARGET_SH4)
1490 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1491 return host_pipe[0];
1492 #elif defined(TARGET_SPARC)
1493 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1494 return host_pipe[0];
1498 if (put_user_s32(host_pipe[0], pipedes)
1499 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1500 return -TARGET_EFAULT;
1501 return get_errno(ret);
1504 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1505 abi_ulong target_addr,
1508 struct target_ip_mreqn *target_smreqn;
1510 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1512 return -TARGET_EFAULT;
1513 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1514 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1515 if (len == sizeof(struct target_ip_mreqn))
1516 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1517 unlock_user(target_smreqn, target_addr, 0);
1522 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1523 abi_ulong target_addr,
1526 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1527 sa_family_t sa_family;
1528 struct target_sockaddr *target_saddr;
1530 if (fd_trans_target_to_host_addr(fd)) {
1531 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1534 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1536 return -TARGET_EFAULT;
1538 sa_family = tswap16(target_saddr->sa_family);
1540 /* Oops. The caller might send a incomplete sun_path; sun_path
1541 * must be terminated by \0 (see the manual page), but
1542 * unfortunately it is quite common to specify sockaddr_un
1543 * length as "strlen(x->sun_path)" while it should be
1544 * "strlen(...) + 1". We'll fix that here if needed.
1545 * Linux kernel has a similar feature.
1548 if (sa_family == AF_UNIX) {
1549 if (len < unix_maxlen && len > 0) {
1550 char *cp = (char*)target_saddr;
1552 if ( cp[len-1] && !cp[len] )
1555 if (len > unix_maxlen)
1559 memcpy(addr, target_saddr, len);
1560 addr->sa_family = sa_family;
1561 if (sa_family == AF_NETLINK) {
1562 struct sockaddr_nl *nladdr;
1564 nladdr = (struct sockaddr_nl *)addr;
1565 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1566 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1567 } else if (sa_family == AF_PACKET) {
1568 struct target_sockaddr_ll *lladdr;
1570 lladdr = (struct target_sockaddr_ll *)addr;
1571 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1572 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1574 unlock_user(target_saddr, target_addr, 0);
1579 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1580 struct sockaddr *addr,
1583 struct target_sockaddr *target_saddr;
1590 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1592 return -TARGET_EFAULT;
1593 memcpy(target_saddr, addr, len);
1594 if (len >= offsetof(struct target_sockaddr, sa_family) +
1595 sizeof(target_saddr->sa_family)) {
1596 target_saddr->sa_family = tswap16(addr->sa_family);
1598 if (addr->sa_family == AF_NETLINK &&
1599 len >= sizeof(struct target_sockaddr_nl)) {
1600 struct target_sockaddr_nl *target_nl =
1601 (struct target_sockaddr_nl *)target_saddr;
1602 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1603 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1604 } else if (addr->sa_family == AF_PACKET) {
1605 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1606 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1607 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1608 } else if (addr->sa_family == AF_INET6 &&
1609 len >= sizeof(struct target_sockaddr_in6)) {
1610 struct target_sockaddr_in6 *target_in6 =
1611 (struct target_sockaddr_in6 *)target_saddr;
1612 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1614 unlock_user(target_saddr, target_addr, len);
1619 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1620 struct target_msghdr *target_msgh)
1622 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1623 abi_long msg_controllen;
1624 abi_ulong target_cmsg_addr;
1625 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1626 socklen_t space = 0;
1628 msg_controllen = tswapal(target_msgh->msg_controllen);
1629 if (msg_controllen < sizeof (struct target_cmsghdr))
1631 target_cmsg_addr = tswapal(target_msgh->msg_control);
1632 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1633 target_cmsg_start = target_cmsg;
1635 return -TARGET_EFAULT;
1637 while (cmsg && target_cmsg) {
1638 void *data = CMSG_DATA(cmsg);
1639 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1641 int len = tswapal(target_cmsg->cmsg_len)
1642 - sizeof(struct target_cmsghdr);
1644 space += CMSG_SPACE(len);
1645 if (space > msgh->msg_controllen) {
1646 space -= CMSG_SPACE(len);
1647 /* This is a QEMU bug, since we allocated the payload
1648 * area ourselves (unlike overflow in host-to-target
1649 * conversion, which is just the guest giving us a buffer
1650 * that's too small). It can't happen for the payload types
1651 * we currently support; if it becomes an issue in future
1652 * we would need to improve our allocation strategy to
1653 * something more intelligent than "twice the size of the
1654 * target buffer we're reading from".
1656 qemu_log_mask(LOG_UNIMP,
1657 ("Unsupported ancillary data %d/%d: "
1658 "unhandled msg size\n"),
1659 tswap32(target_cmsg->cmsg_level),
1660 tswap32(target_cmsg->cmsg_type));
1664 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1665 cmsg->cmsg_level = SOL_SOCKET;
1667 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1669 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1670 cmsg->cmsg_len = CMSG_LEN(len);
1672 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1673 int *fd = (int *)data;
1674 int *target_fd = (int *)target_data;
1675 int i, numfds = len / sizeof(int);
1677 for (i = 0; i < numfds; i++) {
1678 __get_user(fd[i], target_fd + i);
1680 } else if (cmsg->cmsg_level == SOL_SOCKET
1681 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1682 struct ucred *cred = (struct ucred *)data;
1683 struct target_ucred *target_cred =
1684 (struct target_ucred *)target_data;
1686 __get_user(cred->pid, &target_cred->pid);
1687 __get_user(cred->uid, &target_cred->uid);
1688 __get_user(cred->gid, &target_cred->gid);
1690 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1691 cmsg->cmsg_level, cmsg->cmsg_type);
1692 memcpy(data, target_data, len);
1695 cmsg = CMSG_NXTHDR(msgh, cmsg);
1696 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1699 unlock_user(target_cmsg, target_cmsg_addr, 0);
1701 msgh->msg_controllen = space;
1705 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1706 struct msghdr *msgh)
1708 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1709 abi_long msg_controllen;
1710 abi_ulong target_cmsg_addr;
1711 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1712 socklen_t space = 0;
1714 msg_controllen = tswapal(target_msgh->msg_controllen);
1715 if (msg_controllen < sizeof (struct target_cmsghdr))
1717 target_cmsg_addr = tswapal(target_msgh->msg_control);
1718 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1719 target_cmsg_start = target_cmsg;
1721 return -TARGET_EFAULT;
1723 while (cmsg && target_cmsg) {
1724 void *data = CMSG_DATA(cmsg);
1725 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1727 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1728 int tgt_len, tgt_space;
1730 /* We never copy a half-header but may copy half-data;
1731 * this is Linux's behaviour in put_cmsg(). Note that
1732 * truncation here is a guest problem (which we report
1733 * to the guest via the CTRUNC bit), unlike truncation
1734 * in target_to_host_cmsg, which is a QEMU bug.
1736 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1737 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1741 if (cmsg->cmsg_level == SOL_SOCKET) {
1742 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1744 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1746 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1748 /* Payload types which need a different size of payload on
1749 * the target must adjust tgt_len here.
1752 switch (cmsg->cmsg_level) {
1754 switch (cmsg->cmsg_type) {
1756 tgt_len = sizeof(struct target_timeval);
1766 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1767 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1768 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1771 /* We must now copy-and-convert len bytes of payload
1772 * into tgt_len bytes of destination space. Bear in mind
1773 * that in both source and destination we may be dealing
1774 * with a truncated value!
1776 switch (cmsg->cmsg_level) {
1778 switch (cmsg->cmsg_type) {
1781 int *fd = (int *)data;
1782 int *target_fd = (int *)target_data;
1783 int i, numfds = tgt_len / sizeof(int);
1785 for (i = 0; i < numfds; i++) {
1786 __put_user(fd[i], target_fd + i);
1792 struct timeval *tv = (struct timeval *)data;
1793 struct target_timeval *target_tv =
1794 (struct target_timeval *)target_data;
1796 if (len != sizeof(struct timeval) ||
1797 tgt_len != sizeof(struct target_timeval)) {
1801 /* copy struct timeval to target */
1802 __put_user(tv->tv_sec, &target_tv->tv_sec);
1803 __put_user(tv->tv_usec, &target_tv->tv_usec);
1806 case SCM_CREDENTIALS:
1808 struct ucred *cred = (struct ucred *)data;
1809 struct target_ucred *target_cred =
1810 (struct target_ucred *)target_data;
1812 __put_user(cred->pid, &target_cred->pid);
1813 __put_user(cred->uid, &target_cred->uid);
1814 __put_user(cred->gid, &target_cred->gid);
1823 switch (cmsg->cmsg_type) {
1826 uint32_t *v = (uint32_t *)data;
1827 uint32_t *t_int = (uint32_t *)target_data;
1829 if (len != sizeof(uint32_t) ||
1830 tgt_len != sizeof(uint32_t)) {
1833 __put_user(*v, t_int);
1839 struct sock_extended_err ee;
1840 struct sockaddr_in offender;
1842 struct errhdr_t *errh = (struct errhdr_t *)data;
1843 struct errhdr_t *target_errh =
1844 (struct errhdr_t *)target_data;
1846 if (len != sizeof(struct errhdr_t) ||
1847 tgt_len != sizeof(struct errhdr_t)) {
1850 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1851 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1852 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1853 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1854 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1855 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1856 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1857 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1858 (void *) &errh->offender, sizeof(errh->offender));
1867 switch (cmsg->cmsg_type) {
1870 uint32_t *v = (uint32_t *)data;
1871 uint32_t *t_int = (uint32_t *)target_data;
1873 if (len != sizeof(uint32_t) ||
1874 tgt_len != sizeof(uint32_t)) {
1877 __put_user(*v, t_int);
1883 struct sock_extended_err ee;
1884 struct sockaddr_in6 offender;
1886 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1887 struct errhdr6_t *target_errh =
1888 (struct errhdr6_t *)target_data;
1890 if (len != sizeof(struct errhdr6_t) ||
1891 tgt_len != sizeof(struct errhdr6_t)) {
1894 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1895 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1896 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1897 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1898 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1899 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1900 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1901 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1902 (void *) &errh->offender, sizeof(errh->offender));
1912 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1913 cmsg->cmsg_level, cmsg->cmsg_type);
1914 memcpy(target_data, data, MIN(len, tgt_len));
1915 if (tgt_len > len) {
1916 memset(target_data + len, 0, tgt_len - len);
1920 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1921 tgt_space = TARGET_CMSG_SPACE(tgt_len);
1922 if (msg_controllen < tgt_space) {
1923 tgt_space = msg_controllen;
1925 msg_controllen -= tgt_space;
1927 cmsg = CMSG_NXTHDR(msgh, cmsg);
1928 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1931 unlock_user(target_cmsg, target_cmsg_addr, space);
1933 target_msgh->msg_controllen = tswapal(space);
1937 /* do_setsockopt() Must return target values and target errnos. */
1938 static abi_long do_setsockopt(int sockfd, int level, int optname,
1939 abi_ulong optval_addr, socklen_t optlen)
1943 struct ip_mreqn *ip_mreq;
1944 struct ip_mreq_source *ip_mreq_source;
1948 /* TCP options all take an 'int' value. */
1949 if (optlen < sizeof(uint32_t))
1950 return -TARGET_EINVAL;
1952 if (get_user_u32(val, optval_addr))
1953 return -TARGET_EFAULT;
1954 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1961 case IP_ROUTER_ALERT:
1965 case IP_MTU_DISCOVER:
1972 case IP_MULTICAST_TTL:
1973 case IP_MULTICAST_LOOP:
1975 if (optlen >= sizeof(uint32_t)) {
1976 if (get_user_u32(val, optval_addr))
1977 return -TARGET_EFAULT;
1978 } else if (optlen >= 1) {
1979 if (get_user_u8(val, optval_addr))
1980 return -TARGET_EFAULT;
1982 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1984 case IP_ADD_MEMBERSHIP:
1985 case IP_DROP_MEMBERSHIP:
1986 if (optlen < sizeof (struct target_ip_mreq) ||
1987 optlen > sizeof (struct target_ip_mreqn))
1988 return -TARGET_EINVAL;
1990 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1991 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1992 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1995 case IP_BLOCK_SOURCE:
1996 case IP_UNBLOCK_SOURCE:
1997 case IP_ADD_SOURCE_MEMBERSHIP:
1998 case IP_DROP_SOURCE_MEMBERSHIP:
1999 if (optlen != sizeof (struct target_ip_mreq_source))
2000 return -TARGET_EINVAL;
2002 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2003 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2004 unlock_user (ip_mreq_source, optval_addr, 0);
2013 case IPV6_MTU_DISCOVER:
2016 case IPV6_RECVPKTINFO:
2017 case IPV6_UNICAST_HOPS:
2018 case IPV6_MULTICAST_HOPS:
2019 case IPV6_MULTICAST_LOOP:
2021 case IPV6_RECVHOPLIMIT:
2022 case IPV6_2292HOPLIMIT:
2025 case IPV6_2292PKTINFO:
2026 case IPV6_RECVTCLASS:
2027 case IPV6_RECVRTHDR:
2028 case IPV6_2292RTHDR:
2029 case IPV6_RECVHOPOPTS:
2030 case IPV6_2292HOPOPTS:
2031 case IPV6_RECVDSTOPTS:
2032 case IPV6_2292DSTOPTS:
2034 #ifdef IPV6_RECVPATHMTU
2035 case IPV6_RECVPATHMTU:
2037 #ifdef IPV6_TRANSPARENT
2038 case IPV6_TRANSPARENT:
2040 #ifdef IPV6_FREEBIND
2043 #ifdef IPV6_RECVORIGDSTADDR
2044 case IPV6_RECVORIGDSTADDR:
2047 if (optlen < sizeof(uint32_t)) {
2048 return -TARGET_EINVAL;
2050 if (get_user_u32(val, optval_addr)) {
2051 return -TARGET_EFAULT;
2053 ret = get_errno(setsockopt(sockfd, level, optname,
2054 &val, sizeof(val)));
2058 struct in6_pktinfo pki;
2060 if (optlen < sizeof(pki)) {
2061 return -TARGET_EINVAL;
2064 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2065 return -TARGET_EFAULT;
2068 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2070 ret = get_errno(setsockopt(sockfd, level, optname,
2071 &pki, sizeof(pki)));
2074 case IPV6_ADD_MEMBERSHIP:
2075 case IPV6_DROP_MEMBERSHIP:
2077 struct ipv6_mreq ipv6mreq;
2079 if (optlen < sizeof(ipv6mreq)) {
2080 return -TARGET_EINVAL;
2083 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2084 return -TARGET_EFAULT;
2087 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2089 ret = get_errno(setsockopt(sockfd, level, optname,
2090 &ipv6mreq, sizeof(ipv6mreq)));
2101 struct icmp6_filter icmp6f;
2103 if (optlen > sizeof(icmp6f)) {
2104 optlen = sizeof(icmp6f);
2107 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2108 return -TARGET_EFAULT;
2111 for (val = 0; val < 8; val++) {
2112 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2115 ret = get_errno(setsockopt(sockfd, level, optname,
2127 /* those take an u32 value */
2128 if (optlen < sizeof(uint32_t)) {
2129 return -TARGET_EINVAL;
2132 if (get_user_u32(val, optval_addr)) {
2133 return -TARGET_EFAULT;
2135 ret = get_errno(setsockopt(sockfd, level, optname,
2136 &val, sizeof(val)));
2143 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2148 char *alg_key = g_malloc(optlen);
2151 return -TARGET_ENOMEM;
2153 if (copy_from_user(alg_key, optval_addr, optlen)) {
2155 return -TARGET_EFAULT;
2157 ret = get_errno(setsockopt(sockfd, level, optname,
2162 case ALG_SET_AEAD_AUTHSIZE:
2164 ret = get_errno(setsockopt(sockfd, level, optname,
2173 case TARGET_SOL_SOCKET:
2175 case TARGET_SO_RCVTIMEO:
2179 optname = SO_RCVTIMEO;
2182 if (optlen != sizeof(struct target_timeval)) {
2183 return -TARGET_EINVAL;
2186 if (copy_from_user_timeval(&tv, optval_addr)) {
2187 return -TARGET_EFAULT;
2190 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2194 case TARGET_SO_SNDTIMEO:
2195 optname = SO_SNDTIMEO;
2197 case TARGET_SO_ATTACH_FILTER:
2199 struct target_sock_fprog *tfprog;
2200 struct target_sock_filter *tfilter;
2201 struct sock_fprog fprog;
2202 struct sock_filter *filter;
2205 if (optlen != sizeof(*tfprog)) {
2206 return -TARGET_EINVAL;
2208 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2209 return -TARGET_EFAULT;
2211 if (!lock_user_struct(VERIFY_READ, tfilter,
2212 tswapal(tfprog->filter), 0)) {
2213 unlock_user_struct(tfprog, optval_addr, 1);
2214 return -TARGET_EFAULT;
2217 fprog.len = tswap16(tfprog->len);
2218 filter = g_try_new(struct sock_filter, fprog.len);
2219 if (filter == NULL) {
2220 unlock_user_struct(tfilter, tfprog->filter, 1);
2221 unlock_user_struct(tfprog, optval_addr, 1);
2222 return -TARGET_ENOMEM;
2224 for (i = 0; i < fprog.len; i++) {
2225 filter[i].code = tswap16(tfilter[i].code);
2226 filter[i].jt = tfilter[i].jt;
2227 filter[i].jf = tfilter[i].jf;
2228 filter[i].k = tswap32(tfilter[i].k);
2230 fprog.filter = filter;
2232 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2233 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2236 unlock_user_struct(tfilter, tfprog->filter, 1);
2237 unlock_user_struct(tfprog, optval_addr, 1);
2240 case TARGET_SO_BINDTODEVICE:
2242 char *dev_ifname, *addr_ifname;
2244 if (optlen > IFNAMSIZ - 1) {
2245 optlen = IFNAMSIZ - 1;
2247 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2249 return -TARGET_EFAULT;
2251 optname = SO_BINDTODEVICE;
2252 addr_ifname = alloca(IFNAMSIZ);
2253 memcpy(addr_ifname, dev_ifname, optlen);
2254 addr_ifname[optlen] = 0;
2255 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2256 addr_ifname, optlen));
2257 unlock_user (dev_ifname, optval_addr, 0);
2260 case TARGET_SO_LINGER:
2263 struct target_linger *tlg;
2265 if (optlen != sizeof(struct target_linger)) {
2266 return -TARGET_EINVAL;
2268 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2269 return -TARGET_EFAULT;
2271 __get_user(lg.l_onoff, &tlg->l_onoff);
2272 __get_user(lg.l_linger, &tlg->l_linger);
2273 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2275 unlock_user_struct(tlg, optval_addr, 0);
2278 /* Options with 'int' argument. */
2279 case TARGET_SO_DEBUG:
2282 case TARGET_SO_REUSEADDR:
2283 optname = SO_REUSEADDR;
2286 case TARGET_SO_REUSEPORT:
2287 optname = SO_REUSEPORT;
2290 case TARGET_SO_TYPE:
2293 case TARGET_SO_ERROR:
2296 case TARGET_SO_DONTROUTE:
2297 optname = SO_DONTROUTE;
2299 case TARGET_SO_BROADCAST:
2300 optname = SO_BROADCAST;
2302 case TARGET_SO_SNDBUF:
2303 optname = SO_SNDBUF;
2305 case TARGET_SO_SNDBUFFORCE:
2306 optname = SO_SNDBUFFORCE;
2308 case TARGET_SO_RCVBUF:
2309 optname = SO_RCVBUF;
2311 case TARGET_SO_RCVBUFFORCE:
2312 optname = SO_RCVBUFFORCE;
2314 case TARGET_SO_KEEPALIVE:
2315 optname = SO_KEEPALIVE;
2317 case TARGET_SO_OOBINLINE:
2318 optname = SO_OOBINLINE;
2320 case TARGET_SO_NO_CHECK:
2321 optname = SO_NO_CHECK;
2323 case TARGET_SO_PRIORITY:
2324 optname = SO_PRIORITY;
2327 case TARGET_SO_BSDCOMPAT:
2328 optname = SO_BSDCOMPAT;
2331 case TARGET_SO_PASSCRED:
2332 optname = SO_PASSCRED;
2334 case TARGET_SO_PASSSEC:
2335 optname = SO_PASSSEC;
2337 case TARGET_SO_TIMESTAMP:
2338 optname = SO_TIMESTAMP;
2340 case TARGET_SO_RCVLOWAT:
2341 optname = SO_RCVLOWAT;
2346 if (optlen < sizeof(uint32_t))
2347 return -TARGET_EINVAL;
2349 if (get_user_u32(val, optval_addr))
2350 return -TARGET_EFAULT;
2351 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2356 case NETLINK_PKTINFO:
2357 case NETLINK_ADD_MEMBERSHIP:
2358 case NETLINK_DROP_MEMBERSHIP:
2359 case NETLINK_BROADCAST_ERROR:
2360 case NETLINK_NO_ENOBUFS:
2361 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2362 case NETLINK_LISTEN_ALL_NSID:
2363 case NETLINK_CAP_ACK:
2364 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2365 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2366 case NETLINK_EXT_ACK:
2367 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2368 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2369 case NETLINK_GET_STRICT_CHK:
2370 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2376 if (optlen < sizeof(uint32_t)) {
2377 return -TARGET_EINVAL;
2379 if (get_user_u32(val, optval_addr)) {
2380 return -TARGET_EFAULT;
2382 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2385 #endif /* SOL_NETLINK */
2388 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2390 ret = -TARGET_ENOPROTOOPT;
2395 /* do_getsockopt() Must return target values and target errnos. */
2396 static abi_long do_getsockopt(int sockfd, int level, int optname,
2397 abi_ulong optval_addr, abi_ulong optlen)
2404 case TARGET_SOL_SOCKET:
2407 /* These don't just return a single integer */
2408 case TARGET_SO_PEERNAME:
2410 case TARGET_SO_RCVTIMEO: {
2414 optname = SO_RCVTIMEO;
2417 if (get_user_u32(len, optlen)) {
2418 return -TARGET_EFAULT;
2421 return -TARGET_EINVAL;
2425 ret = get_errno(getsockopt(sockfd, level, optname,
2430 if (len > sizeof(struct target_timeval)) {
2431 len = sizeof(struct target_timeval);
2433 if (copy_to_user_timeval(optval_addr, &tv)) {
2434 return -TARGET_EFAULT;
2436 if (put_user_u32(len, optlen)) {
2437 return -TARGET_EFAULT;
2441 case TARGET_SO_SNDTIMEO:
2442 optname = SO_SNDTIMEO;
2444 case TARGET_SO_PEERCRED: {
2447 struct target_ucred *tcr;
2449 if (get_user_u32(len, optlen)) {
2450 return -TARGET_EFAULT;
2453 return -TARGET_EINVAL;
2457 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2465 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2466 return -TARGET_EFAULT;
2468 __put_user(cr.pid, &tcr->pid);
2469 __put_user(cr.uid, &tcr->uid);
2470 __put_user(cr.gid, &tcr->gid);
2471 unlock_user_struct(tcr, optval_addr, 1);
2472 if (put_user_u32(len, optlen)) {
2473 return -TARGET_EFAULT;
2477 case TARGET_SO_PEERSEC: {
2480 if (get_user_u32(len, optlen)) {
2481 return -TARGET_EFAULT;
2484 return -TARGET_EINVAL;
2486 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2488 return -TARGET_EFAULT;
2491 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2493 if (put_user_u32(lv, optlen)) {
2494 ret = -TARGET_EFAULT;
2496 unlock_user(name, optval_addr, lv);
2499 case TARGET_SO_LINGER:
2503 struct target_linger *tlg;
2505 if (get_user_u32(len, optlen)) {
2506 return -TARGET_EFAULT;
2509 return -TARGET_EINVAL;
2513 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2521 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2522 return -TARGET_EFAULT;
2524 __put_user(lg.l_onoff, &tlg->l_onoff);
2525 __put_user(lg.l_linger, &tlg->l_linger);
2526 unlock_user_struct(tlg, optval_addr, 1);
2527 if (put_user_u32(len, optlen)) {
2528 return -TARGET_EFAULT;
2532 /* Options with 'int' argument. */
2533 case TARGET_SO_DEBUG:
2536 case TARGET_SO_REUSEADDR:
2537 optname = SO_REUSEADDR;
2540 case TARGET_SO_REUSEPORT:
2541 optname = SO_REUSEPORT;
2544 case TARGET_SO_TYPE:
2547 case TARGET_SO_ERROR:
2550 case TARGET_SO_DONTROUTE:
2551 optname = SO_DONTROUTE;
2553 case TARGET_SO_BROADCAST:
2554 optname = SO_BROADCAST;
2556 case TARGET_SO_SNDBUF:
2557 optname = SO_SNDBUF;
2559 case TARGET_SO_RCVBUF:
2560 optname = SO_RCVBUF;
2562 case TARGET_SO_KEEPALIVE:
2563 optname = SO_KEEPALIVE;
2565 case TARGET_SO_OOBINLINE:
2566 optname = SO_OOBINLINE;
2568 case TARGET_SO_NO_CHECK:
2569 optname = SO_NO_CHECK;
2571 case TARGET_SO_PRIORITY:
2572 optname = SO_PRIORITY;
2575 case TARGET_SO_BSDCOMPAT:
2576 optname = SO_BSDCOMPAT;
2579 case TARGET_SO_PASSCRED:
2580 optname = SO_PASSCRED;
2582 case TARGET_SO_TIMESTAMP:
2583 optname = SO_TIMESTAMP;
2585 case TARGET_SO_RCVLOWAT:
2586 optname = SO_RCVLOWAT;
2588 case TARGET_SO_ACCEPTCONN:
2589 optname = SO_ACCEPTCONN;
2596 /* TCP options all take an 'int' value. */
2598 if (get_user_u32(len, optlen))
2599 return -TARGET_EFAULT;
2601 return -TARGET_EINVAL;
2603 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2606 if (optname == SO_TYPE) {
2607 val = host_to_target_sock_type(val);
2612 if (put_user_u32(val, optval_addr))
2613 return -TARGET_EFAULT;
2615 if (put_user_u8(val, optval_addr))
2616 return -TARGET_EFAULT;
2618 if (put_user_u32(len, optlen))
2619 return -TARGET_EFAULT;
2626 case IP_ROUTER_ALERT:
2630 case IP_MTU_DISCOVER:
2636 case IP_MULTICAST_TTL:
2637 case IP_MULTICAST_LOOP:
2638 if (get_user_u32(len, optlen))
2639 return -TARGET_EFAULT;
2641 return -TARGET_EINVAL;
2643 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2646 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2648 if (put_user_u32(len, optlen)
2649 || put_user_u8(val, optval_addr))
2650 return -TARGET_EFAULT;
2652 if (len > sizeof(int))
2654 if (put_user_u32(len, optlen)
2655 || put_user_u32(val, optval_addr))
2656 return -TARGET_EFAULT;
2660 ret = -TARGET_ENOPROTOOPT;
2666 case IPV6_MTU_DISCOVER:
2669 case IPV6_RECVPKTINFO:
2670 case IPV6_UNICAST_HOPS:
2671 case IPV6_MULTICAST_HOPS:
2672 case IPV6_MULTICAST_LOOP:
2674 case IPV6_RECVHOPLIMIT:
2675 case IPV6_2292HOPLIMIT:
2678 case IPV6_2292PKTINFO:
2679 case IPV6_RECVTCLASS:
2680 case IPV6_RECVRTHDR:
2681 case IPV6_2292RTHDR:
2682 case IPV6_RECVHOPOPTS:
2683 case IPV6_2292HOPOPTS:
2684 case IPV6_RECVDSTOPTS:
2685 case IPV6_2292DSTOPTS:
2687 #ifdef IPV6_RECVPATHMTU
2688 case IPV6_RECVPATHMTU:
2690 #ifdef IPV6_TRANSPARENT
2691 case IPV6_TRANSPARENT:
2693 #ifdef IPV6_FREEBIND
2696 #ifdef IPV6_RECVORIGDSTADDR
2697 case IPV6_RECVORIGDSTADDR:
2699 if (get_user_u32(len, optlen))
2700 return -TARGET_EFAULT;
2702 return -TARGET_EINVAL;
2704 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2707 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2709 if (put_user_u32(len, optlen)
2710 || put_user_u8(val, optval_addr))
2711 return -TARGET_EFAULT;
2713 if (len > sizeof(int))
2715 if (put_user_u32(len, optlen)
2716 || put_user_u32(val, optval_addr))
2717 return -TARGET_EFAULT;
2721 ret = -TARGET_ENOPROTOOPT;
2728 case NETLINK_PKTINFO:
2729 case NETLINK_BROADCAST_ERROR:
2730 case NETLINK_NO_ENOBUFS:
2731 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2732 case NETLINK_LISTEN_ALL_NSID:
2733 case NETLINK_CAP_ACK:
2734 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2735 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2736 case NETLINK_EXT_ACK:
2737 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2738 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2739 case NETLINK_GET_STRICT_CHK:
2740 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2741 if (get_user_u32(len, optlen)) {
2742 return -TARGET_EFAULT;
2744 if (len != sizeof(val)) {
2745 return -TARGET_EINVAL;
2748 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2752 if (put_user_u32(lv, optlen)
2753 || put_user_u32(val, optval_addr)) {
2754 return -TARGET_EFAULT;
2757 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2758 case NETLINK_LIST_MEMBERSHIPS:
2762 if (get_user_u32(len, optlen)) {
2763 return -TARGET_EFAULT;
2766 return -TARGET_EINVAL;
2768 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2770 return -TARGET_EFAULT;
2773 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2775 unlock_user(results, optval_addr, 0);
2778 /* swap host endianess to target endianess. */
2779 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2780 results[i] = tswap32(results[i]);
2782 if (put_user_u32(lv, optlen)) {
2783 return -TARGET_EFAULT;
2785 unlock_user(results, optval_addr, 0);
2788 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2793 #endif /* SOL_NETLINK */
2796 qemu_log_mask(LOG_UNIMP,
2797 "getsockopt level=%d optname=%d not yet supported\n",
2799 ret = -TARGET_EOPNOTSUPP;
2805 /* Convert target low/high pair representing file offset into the host
2806 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2807 * as the kernel doesn't handle them either.
2809 static void target_to_host_low_high(abi_ulong tlow,
2811 unsigned long *hlow,
2812 unsigned long *hhigh)
2814 uint64_t off = tlow |
2815 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2816 TARGET_LONG_BITS / 2;
2819 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2822 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2823 abi_ulong count, int copy)
2825 struct target_iovec *target_vec;
2827 abi_ulong total_len, max_len;
2830 bool bad_address = false;
2836 if (count > IOV_MAX) {
2841 vec = g_try_new0(struct iovec, count);
2847 target_vec = lock_user(VERIFY_READ, target_addr,
2848 count * sizeof(struct target_iovec), 1);
2849 if (target_vec == NULL) {
2854 /* ??? If host page size > target page size, this will result in a
2855 value larger than what we can actually support. */
2856 max_len = 0x7fffffff & TARGET_PAGE_MASK;
2859 for (i = 0; i < count; i++) {
2860 abi_ulong base = tswapal(target_vec[i].iov_base);
2861 abi_long len = tswapal(target_vec[i].iov_len);
2866 } else if (len == 0) {
2867 /* Zero length pointer is ignored. */
2868 vec[i].iov_base = 0;
2870 vec[i].iov_base = lock_user(type, base, len, copy);
2871 /* If the first buffer pointer is bad, this is a fault. But
2872 * subsequent bad buffers will result in a partial write; this
2873 * is realized by filling the vector with null pointers and
2875 if (!vec[i].iov_base) {
2886 if (len > max_len - total_len) {
2887 len = max_len - total_len;
2890 vec[i].iov_len = len;
2894 unlock_user(target_vec, target_addr, 0);
2899 if (tswapal(target_vec[i].iov_len) > 0) {
2900 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2903 unlock_user(target_vec, target_addr, 0);
2910 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2911 abi_ulong count, int copy)
2913 struct target_iovec *target_vec;
2916 target_vec = lock_user(VERIFY_READ, target_addr,
2917 count * sizeof(struct target_iovec), 1);
2919 for (i = 0; i < count; i++) {
2920 abi_ulong base = tswapal(target_vec[i].iov_base);
2921 abi_long len = tswapal(target_vec[i].iov_len);
2925 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2927 unlock_user(target_vec, target_addr, 0);
2933 static inline int target_to_host_sock_type(int *type)
2936 int target_type = *type;
2938 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2939 case TARGET_SOCK_DGRAM:
2940 host_type = SOCK_DGRAM;
2942 case TARGET_SOCK_STREAM:
2943 host_type = SOCK_STREAM;
2946 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2949 if (target_type & TARGET_SOCK_CLOEXEC) {
2950 #if defined(SOCK_CLOEXEC)
2951 host_type |= SOCK_CLOEXEC;
2953 return -TARGET_EINVAL;
2956 if (target_type & TARGET_SOCK_NONBLOCK) {
2957 #if defined(SOCK_NONBLOCK)
2958 host_type |= SOCK_NONBLOCK;
2959 #elif !defined(O_NONBLOCK)
2960 return -TARGET_EINVAL;
2967 /* Try to emulate socket type flags after socket creation. */
2968 static int sock_flags_fixup(int fd, int target_type)
2970 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2971 if (target_type & TARGET_SOCK_NONBLOCK) {
2972 int flags = fcntl(fd, F_GETFL);
2973 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2975 return -TARGET_EINVAL;
2982 /* do_socket() Must return target values and target errnos. */
2983 static abi_long do_socket(int domain, int type, int protocol)
2985 int target_type = type;
2988 ret = target_to_host_sock_type(&type);
2993 if (domain == PF_NETLINK && !(
2994 #ifdef CONFIG_RTNETLINK
2995 protocol == NETLINK_ROUTE ||
2997 protocol == NETLINK_KOBJECT_UEVENT ||
2998 protocol == NETLINK_AUDIT)) {
2999 return -TARGET_EPROTONOSUPPORT;
3002 if (domain == AF_PACKET ||
3003 (domain == AF_INET && type == SOCK_PACKET)) {
3004 protocol = tswap16(protocol);
3007 ret = get_errno(socket(domain, type, protocol));
3009 ret = sock_flags_fixup(ret, target_type);
3010 if (type == SOCK_PACKET) {
3011 /* Manage an obsolete case :
3012 * if socket type is SOCK_PACKET, bind by name
3014 fd_trans_register(ret, &target_packet_trans);
3015 } else if (domain == PF_NETLINK) {
3017 #ifdef CONFIG_RTNETLINK
3019 fd_trans_register(ret, &target_netlink_route_trans);
3022 case NETLINK_KOBJECT_UEVENT:
3023 /* nothing to do: messages are strings */
3026 fd_trans_register(ret, &target_netlink_audit_trans);
3029 g_assert_not_reached();
3036 /* do_bind() Must return target values and target errnos. */
3037 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3043 if ((int)addrlen < 0) {
3044 return -TARGET_EINVAL;
3047 addr = alloca(addrlen+1);
3049 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3053 return get_errno(bind(sockfd, addr, addrlen));
3056 /* do_connect() Must return target values and target errnos. */
3057 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3063 if ((int)addrlen < 0) {
3064 return -TARGET_EINVAL;
3067 addr = alloca(addrlen+1);
3069 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3073 return get_errno(safe_connect(sockfd, addr, addrlen));
3076 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3077 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3078 int flags, int send)
3084 abi_ulong target_vec;
3086 if (msgp->msg_name) {
3087 msg.msg_namelen = tswap32(msgp->msg_namelen);
3088 msg.msg_name = alloca(msg.msg_namelen+1);
3089 ret = target_to_host_sockaddr(fd, msg.msg_name,
3090 tswapal(msgp->msg_name),
3092 if (ret == -TARGET_EFAULT) {
3093 /* For connected sockets msg_name and msg_namelen must
3094 * be ignored, so returning EFAULT immediately is wrong.
3095 * Instead, pass a bad msg_name to the host kernel, and
3096 * let it decide whether to return EFAULT or not.
3098 msg.msg_name = (void *)-1;
3103 msg.msg_name = NULL;
3104 msg.msg_namelen = 0;
3106 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3107 msg.msg_control = alloca(msg.msg_controllen);
3108 memset(msg.msg_control, 0, msg.msg_controllen);
3110 msg.msg_flags = tswap32(msgp->msg_flags);
3112 count = tswapal(msgp->msg_iovlen);
3113 target_vec = tswapal(msgp->msg_iov);
3115 if (count > IOV_MAX) {
3116 /* sendrcvmsg returns a different errno for this condition than
3117 * readv/writev, so we must catch it here before lock_iovec() does.
3119 ret = -TARGET_EMSGSIZE;
3123 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3124 target_vec, count, send);
3126 ret = -host_to_target_errno(errno);
3129 msg.msg_iovlen = count;
3133 if (fd_trans_target_to_host_data(fd)) {
3136 host_msg = g_malloc(msg.msg_iov->iov_len);
3137 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3138 ret = fd_trans_target_to_host_data(fd)(host_msg,
3139 msg.msg_iov->iov_len);
3141 msg.msg_iov->iov_base = host_msg;
3142 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3146 ret = target_to_host_cmsg(&msg, msgp);
3148 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3152 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3153 if (!is_error(ret)) {
3155 if (fd_trans_host_to_target_data(fd)) {
3156 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3157 MIN(msg.msg_iov->iov_len, len));
3159 ret = host_to_target_cmsg(msgp, &msg);
3161 if (!is_error(ret)) {
3162 msgp->msg_namelen = tswap32(msg.msg_namelen);
3163 msgp->msg_flags = tswap32(msg.msg_flags);
3164 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3165 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3166 msg.msg_name, msg.msg_namelen);
3178 unlock_iovec(vec, target_vec, count, !send);
3183 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3184 int flags, int send)
3187 struct target_msghdr *msgp;
3189 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3193 return -TARGET_EFAULT;
3195 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3196 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3200 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3201 * so it might not have this *mmsg-specific flag either.
3203 #ifndef MSG_WAITFORONE
3204 #define MSG_WAITFORONE 0x10000
3207 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3208 unsigned int vlen, unsigned int flags,
3211 struct target_mmsghdr *mmsgp;
3215 if (vlen > UIO_MAXIOV) {
3219 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3221 return -TARGET_EFAULT;
3224 for (i = 0; i < vlen; i++) {
3225 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3226 if (is_error(ret)) {
3229 mmsgp[i].msg_len = tswap32(ret);
3230 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3231 if (flags & MSG_WAITFORONE) {
3232 flags |= MSG_DONTWAIT;
3236 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3238 /* Return number of datagrams sent if we sent any at all;
3239 * otherwise return the error.
3247 /* do_accept4() Must return target values and target errnos. */
3248 static abi_long do_accept4(int fd, abi_ulong target_addr,
3249 abi_ulong target_addrlen_addr, int flags)
3251 socklen_t addrlen, ret_addrlen;
3256 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3258 if (target_addr == 0) {
3259 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3262 /* linux returns EINVAL if addrlen pointer is invalid */
3263 if (get_user_u32(addrlen, target_addrlen_addr))
3264 return -TARGET_EINVAL;
3266 if ((int)addrlen < 0) {
3267 return -TARGET_EINVAL;
3270 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3271 return -TARGET_EINVAL;
3273 addr = alloca(addrlen);
3275 ret_addrlen = addrlen;
3276 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3277 if (!is_error(ret)) {
3278 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3279 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3280 ret = -TARGET_EFAULT;
3286 /* do_getpeername() Must return target values and target errnos. */
3287 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3288 abi_ulong target_addrlen_addr)
3290 socklen_t addrlen, ret_addrlen;
3294 if (get_user_u32(addrlen, target_addrlen_addr))
3295 return -TARGET_EFAULT;
3297 if ((int)addrlen < 0) {
3298 return -TARGET_EINVAL;
3301 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3302 return -TARGET_EFAULT;
3304 addr = alloca(addrlen);
3306 ret_addrlen = addrlen;
3307 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3308 if (!is_error(ret)) {
3309 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3310 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3311 ret = -TARGET_EFAULT;
3317 /* do_getsockname() Must return target values and target errnos. */
3318 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3319 abi_ulong target_addrlen_addr)
3321 socklen_t addrlen, ret_addrlen;
3325 if (get_user_u32(addrlen, target_addrlen_addr))
3326 return -TARGET_EFAULT;
3328 if ((int)addrlen < 0) {
3329 return -TARGET_EINVAL;
3332 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3333 return -TARGET_EFAULT;
3335 addr = alloca(addrlen);
3337 ret_addrlen = addrlen;
3338 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3339 if (!is_error(ret)) {
3340 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3341 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3342 ret = -TARGET_EFAULT;
3348 /* do_socketpair() Must return target values and target errnos. */
3349 static abi_long do_socketpair(int domain, int type, int protocol,
3350 abi_ulong target_tab_addr)
3355 target_to_host_sock_type(&type);
3357 ret = get_errno(socketpair(domain, type, protocol, tab));
3358 if (!is_error(ret)) {
3359 if (put_user_s32(tab[0], target_tab_addr)
3360 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3361 ret = -TARGET_EFAULT;
3366 /* do_sendto() Must return target values and target errnos. */
3367 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3368 abi_ulong target_addr, socklen_t addrlen)
3372 void *copy_msg = NULL;
3375 if ((int)addrlen < 0) {
3376 return -TARGET_EINVAL;
3379 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3381 return -TARGET_EFAULT;
3382 if (fd_trans_target_to_host_data(fd)) {
3383 copy_msg = host_msg;
3384 host_msg = g_malloc(len);
3385 memcpy(host_msg, copy_msg, len);
3386 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3392 addr = alloca(addrlen+1);
3393 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3397 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3399 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3404 host_msg = copy_msg;
3406 unlock_user(host_msg, msg, 0);
3410 /* do_recvfrom() Must return target values and target errnos. */
3411 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3412 abi_ulong target_addr,
3413 abi_ulong target_addrlen)
3415 socklen_t addrlen, ret_addrlen;
3420 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3422 return -TARGET_EFAULT;
3424 if (get_user_u32(addrlen, target_addrlen)) {
3425 ret = -TARGET_EFAULT;
3428 if ((int)addrlen < 0) {
3429 ret = -TARGET_EINVAL;
3432 addr = alloca(addrlen);
3433 ret_addrlen = addrlen;
3434 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3435 addr, &ret_addrlen));
3437 addr = NULL; /* To keep compiler quiet. */
3438 addrlen = 0; /* To keep compiler quiet. */
3439 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3441 if (!is_error(ret)) {
3442 if (fd_trans_host_to_target_data(fd)) {
3444 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3445 if (is_error(trans)) {
3451 host_to_target_sockaddr(target_addr, addr,
3452 MIN(addrlen, ret_addrlen));
3453 if (put_user_u32(ret_addrlen, target_addrlen)) {
3454 ret = -TARGET_EFAULT;
3458 unlock_user(host_msg, msg, len);
3461 unlock_user(host_msg, msg, 0);
3466 #ifdef TARGET_NR_socketcall
3467 /* do_socketcall() must return target values and target errnos. */
3468 static abi_long do_socketcall(int num, abi_ulong vptr)
3470 static const unsigned nargs[] = { /* number of arguments per operation */
3471 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3472 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3473 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3474 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3475 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3476 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3477 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3478 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3479 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3480 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3481 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3482 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3483 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3484 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3485 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3486 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3487 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3488 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3489 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3490 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3492 abi_long a[6]; /* max 6 args */
3495 /* check the range of the first argument num */
3496 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3497 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3498 return -TARGET_EINVAL;
3500 /* ensure we have space for args */
3501 if (nargs[num] > ARRAY_SIZE(a)) {
3502 return -TARGET_EINVAL;
3504 /* collect the arguments in a[] according to nargs[] */
3505 for (i = 0; i < nargs[num]; ++i) {
3506 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3507 return -TARGET_EFAULT;
3510 /* now when we have the args, invoke the appropriate underlying function */
3512 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3513 return do_socket(a[0], a[1], a[2]);
3514 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3515 return do_bind(a[0], a[1], a[2]);
3516 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3517 return do_connect(a[0], a[1], a[2]);
3518 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3519 return get_errno(listen(a[0], a[1]));
3520 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3521 return do_accept4(a[0], a[1], a[2], 0);
3522 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3523 return do_getsockname(a[0], a[1], a[2]);
3524 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3525 return do_getpeername(a[0], a[1], a[2]);
3526 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3527 return do_socketpair(a[0], a[1], a[2], a[3]);
3528 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3529 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3530 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3531 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3532 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3533 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3534 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3535 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3536 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3537 return get_errno(shutdown(a[0], a[1]));
3538 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3539 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3540 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3541 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3542 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3543 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3544 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3545 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3546 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3547 return do_accept4(a[0], a[1], a[2], a[3]);
3548 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3549 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3550 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3551 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3553 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3554 return -TARGET_EINVAL;
3559 #define N_SHM_REGIONS 32
3561 static struct shm_region {
3565 } shm_regions[N_SHM_REGIONS];
3567 #ifndef TARGET_SEMID64_DS
3568 /* asm-generic version of this struct */
3569 struct target_semid64_ds
3571 struct target_ipc_perm sem_perm;
3572 abi_ulong sem_otime;
3573 #if TARGET_ABI_BITS == 32
3574 abi_ulong __unused1;
3576 abi_ulong sem_ctime;
3577 #if TARGET_ABI_BITS == 32
3578 abi_ulong __unused2;
3580 abi_ulong sem_nsems;
3581 abi_ulong __unused3;
3582 abi_ulong __unused4;
3586 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3587 abi_ulong target_addr)
3589 struct target_ipc_perm *target_ip;
3590 struct target_semid64_ds *target_sd;
3592 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3593 return -TARGET_EFAULT;
3594 target_ip = &(target_sd->sem_perm);
3595 host_ip->__key = tswap32(target_ip->__key);
3596 host_ip->uid = tswap32(target_ip->uid);
3597 host_ip->gid = tswap32(target_ip->gid);
3598 host_ip->cuid = tswap32(target_ip->cuid);
3599 host_ip->cgid = tswap32(target_ip->cgid);
3600 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3601 host_ip->mode = tswap32(target_ip->mode);
3603 host_ip->mode = tswap16(target_ip->mode);
3605 #if defined(TARGET_PPC)
3606 host_ip->__seq = tswap32(target_ip->__seq);
3608 host_ip->__seq = tswap16(target_ip->__seq);
3610 unlock_user_struct(target_sd, target_addr, 0);
3614 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3615 struct ipc_perm *host_ip)
3617 struct target_ipc_perm *target_ip;
3618 struct target_semid64_ds *target_sd;
3620 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3621 return -TARGET_EFAULT;
3622 target_ip = &(target_sd->sem_perm);
3623 target_ip->__key = tswap32(host_ip->__key);
3624 target_ip->uid = tswap32(host_ip->uid);
3625 target_ip->gid = tswap32(host_ip->gid);
3626 target_ip->cuid = tswap32(host_ip->cuid);
3627 target_ip->cgid = tswap32(host_ip->cgid);
3628 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3629 target_ip->mode = tswap32(host_ip->mode);
3631 target_ip->mode = tswap16(host_ip->mode);
3633 #if defined(TARGET_PPC)
3634 target_ip->__seq = tswap32(host_ip->__seq);
3636 target_ip->__seq = tswap16(host_ip->__seq);
3638 unlock_user_struct(target_sd, target_addr, 1);
3642 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3643 abi_ulong target_addr)
3645 struct target_semid64_ds *target_sd;
3647 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3648 return -TARGET_EFAULT;
3649 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3650 return -TARGET_EFAULT;
3651 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3652 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3653 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3654 unlock_user_struct(target_sd, target_addr, 0);
3658 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3659 struct semid_ds *host_sd)
3661 struct target_semid64_ds *target_sd;
3663 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3664 return -TARGET_EFAULT;
3665 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3666 return -TARGET_EFAULT;
3667 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3668 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3669 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3670 unlock_user_struct(target_sd, target_addr, 1);
3674 struct target_seminfo {
3687 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3688 struct seminfo *host_seminfo)
3690 struct target_seminfo *target_seminfo;
3691 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3692 return -TARGET_EFAULT;
3693 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3694 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3695 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3696 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3697 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3698 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3699 __put_user(host_seminfo->semume, &target_seminfo->semume);
3700 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3701 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3702 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3703 unlock_user_struct(target_seminfo, target_addr, 1);
3709 struct semid_ds *buf;
3710 unsigned short *array;
3711 struct seminfo *__buf;
3714 union target_semun {
3721 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3722 abi_ulong target_addr)
3725 unsigned short *array;
3727 struct semid_ds semid_ds;
3730 semun.buf = &semid_ds;
3732 ret = semctl(semid, 0, IPC_STAT, semun);
3734 return get_errno(ret);
3736 nsems = semid_ds.sem_nsems;
3738 *host_array = g_try_new(unsigned short, nsems);
3740 return -TARGET_ENOMEM;
3742 array = lock_user(VERIFY_READ, target_addr,
3743 nsems*sizeof(unsigned short), 1);
3745 g_free(*host_array);
3746 return -TARGET_EFAULT;
3749 for(i=0; i<nsems; i++) {
3750 __get_user((*host_array)[i], &array[i]);
3752 unlock_user(array, target_addr, 0);
3757 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3758 unsigned short **host_array)
3761 unsigned short *array;
3763 struct semid_ds semid_ds;
3766 semun.buf = &semid_ds;
3768 ret = semctl(semid, 0, IPC_STAT, semun);
3770 return get_errno(ret);
3772 nsems = semid_ds.sem_nsems;
3774 array = lock_user(VERIFY_WRITE, target_addr,
3775 nsems*sizeof(unsigned short), 0);
3777 return -TARGET_EFAULT;
3779 for(i=0; i<nsems; i++) {
3780 __put_user((*host_array)[i], &array[i]);
3782 g_free(*host_array);
3783 unlock_user(array, target_addr, 1);
3788 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3789 abi_ulong target_arg)
3791 union target_semun target_su = { .buf = target_arg };
3793 struct semid_ds dsarg;
3794 unsigned short *array = NULL;
3795 struct seminfo seminfo;
3796 abi_long ret = -TARGET_EINVAL;
3803 /* In 64 bit cross-endian situations, we will erroneously pick up
3804 * the wrong half of the union for the "val" element. To rectify
3805 * this, the entire 8-byte structure is byteswapped, followed by
3806 * a swap of the 4 byte val field. In other cases, the data is
3807 * already in proper host byte order. */
3808 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3809 target_su.buf = tswapal(target_su.buf);
3810 arg.val = tswap32(target_su.val);
3812 arg.val = target_su.val;
3814 ret = get_errno(semctl(semid, semnum, cmd, arg));
3818 err = target_to_host_semarray(semid, &array, target_su.array);
3822 ret = get_errno(semctl(semid, semnum, cmd, arg));
3823 err = host_to_target_semarray(semid, target_su.array, &array);
3830 err = target_to_host_semid_ds(&dsarg, target_su.buf);
3834 ret = get_errno(semctl(semid, semnum, cmd, arg));
3835 err = host_to_target_semid_ds(target_su.buf, &dsarg);
3841 arg.__buf = &seminfo;
3842 ret = get_errno(semctl(semid, semnum, cmd, arg));
3843 err = host_to_target_seminfo(target_su.__buf, &seminfo);
3851 ret = get_errno(semctl(semid, semnum, cmd, NULL));
3858 struct target_sembuf {
3859 unsigned short sem_num;
3864 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3865 abi_ulong target_addr,
3868 struct target_sembuf *target_sembuf;
3871 target_sembuf = lock_user(VERIFY_READ, target_addr,
3872 nsops*sizeof(struct target_sembuf), 1);
3874 return -TARGET_EFAULT;
3876 for(i=0; i<nsops; i++) {
3877 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3878 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3879 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3882 unlock_user(target_sembuf, target_addr, 0);
3887 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
3888 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
3891 * This macro is required to handle the s390 variants, which passes the
3892 * arguments in a different order than default.
3895 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3896 (__nsops), (__timeout), (__sops)
3898 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3899 (__nsops), 0, (__sops), (__timeout)
3902 static inline abi_long do_semtimedop(int semid,
3905 abi_long timeout, bool time64)
3907 struct sembuf *sops;
3908 struct timespec ts, *pts = NULL;
3914 if (target_to_host_timespec64(pts, timeout)) {
3915 return -TARGET_EFAULT;
3918 if (target_to_host_timespec(pts, timeout)) {
3919 return -TARGET_EFAULT;
3924 if (nsops > TARGET_SEMOPM) {
3925 return -TARGET_E2BIG;
3928 sops = g_new(struct sembuf, nsops);
3930 if (target_to_host_sembuf(sops, ptr, nsops)) {
3932 return -TARGET_EFAULT;
3935 ret = -TARGET_ENOSYS;
3936 #ifdef __NR_semtimedop
3937 ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
3940 if (ret == -TARGET_ENOSYS) {
3941 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
3942 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
3950 struct target_msqid_ds
3952 struct target_ipc_perm msg_perm;
3953 abi_ulong msg_stime;
3954 #if TARGET_ABI_BITS == 32
3955 abi_ulong __unused1;
3957 abi_ulong msg_rtime;
3958 #if TARGET_ABI_BITS == 32
3959 abi_ulong __unused2;
3961 abi_ulong msg_ctime;
3962 #if TARGET_ABI_BITS == 32
3963 abi_ulong __unused3;
3965 abi_ulong __msg_cbytes;
3967 abi_ulong msg_qbytes;
3968 abi_ulong msg_lspid;
3969 abi_ulong msg_lrpid;
3970 abi_ulong __unused4;
3971 abi_ulong __unused5;
3974 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3975 abi_ulong target_addr)
3977 struct target_msqid_ds *target_md;
3979 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3980 return -TARGET_EFAULT;
3981 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3982 return -TARGET_EFAULT;
3983 host_md->msg_stime = tswapal(target_md->msg_stime);
3984 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3985 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3986 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3987 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3988 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3989 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3990 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3991 unlock_user_struct(target_md, target_addr, 0);
3995 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3996 struct msqid_ds *host_md)
3998 struct target_msqid_ds *target_md;
4000 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4001 return -TARGET_EFAULT;
4002 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4003 return -TARGET_EFAULT;
4004 target_md->msg_stime = tswapal(host_md->msg_stime);
4005 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4006 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4007 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4008 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4009 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4010 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4011 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4012 unlock_user_struct(target_md, target_addr, 1);
4016 struct target_msginfo {
4024 unsigned short int msgseg;
4027 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4028 struct msginfo *host_msginfo)
4030 struct target_msginfo *target_msginfo;
4031 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4032 return -TARGET_EFAULT;
4033 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4034 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4035 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4036 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4037 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4038 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4039 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4040 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4041 unlock_user_struct(target_msginfo, target_addr, 1);
4045 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4047 struct msqid_ds dsarg;
4048 struct msginfo msginfo;
4049 abi_long ret = -TARGET_EINVAL;
4057 if (target_to_host_msqid_ds(&dsarg,ptr))
4058 return -TARGET_EFAULT;
4059 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4060 if (host_to_target_msqid_ds(ptr,&dsarg))
4061 return -TARGET_EFAULT;
4064 ret = get_errno(msgctl(msgid, cmd, NULL));
4068 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4069 if (host_to_target_msginfo(ptr, &msginfo))
4070 return -TARGET_EFAULT;
4077 struct target_msgbuf {
4082 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4083 ssize_t msgsz, int msgflg)
4085 struct target_msgbuf *target_mb;
4086 struct msgbuf *host_mb;
4090 return -TARGET_EINVAL;
4093 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4094 return -TARGET_EFAULT;
4095 host_mb = g_try_malloc(msgsz + sizeof(long));
4097 unlock_user_struct(target_mb, msgp, 0);
4098 return -TARGET_ENOMEM;
4100 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4101 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4102 ret = -TARGET_ENOSYS;
4104 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4107 if (ret == -TARGET_ENOSYS) {
4109 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4112 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4118 unlock_user_struct(target_mb, msgp, 0);
4124 #if defined(__sparc__)
4125 /* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
4126 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4127 #elif defined(__s390x__)
4128 /* The s390 sys_ipc variant has only five parameters. */
4129 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4130 ((long int[]){(long int)__msgp, __msgtyp})
4132 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4133 ((long int[]){(long int)__msgp, __msgtyp}), 0
4137 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4138 ssize_t msgsz, abi_long msgtyp,
4141 struct target_msgbuf *target_mb;
4143 struct msgbuf *host_mb;
4147 return -TARGET_EINVAL;
4150 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4151 return -TARGET_EFAULT;
4153 host_mb = g_try_malloc(msgsz + sizeof(long));
4155 ret = -TARGET_ENOMEM;
4158 ret = -TARGET_ENOSYS;
4160 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4163 if (ret == -TARGET_ENOSYS) {
4164 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4165 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4170 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4171 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4172 if (!target_mtext) {
4173 ret = -TARGET_EFAULT;
4176 memcpy(target_mb->mtext, host_mb->mtext, ret);
4177 unlock_user(target_mtext, target_mtext_addr, ret);
4180 target_mb->mtype = tswapal(host_mb->mtype);
4184 unlock_user_struct(target_mb, msgp, 1);
4189 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4190 abi_ulong target_addr)
4192 struct target_shmid_ds *target_sd;
4194 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4195 return -TARGET_EFAULT;
4196 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4197 return -TARGET_EFAULT;
4198 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4199 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4200 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4201 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4202 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4203 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4204 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4205 unlock_user_struct(target_sd, target_addr, 0);
4209 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4210 struct shmid_ds *host_sd)
4212 struct target_shmid_ds *target_sd;
4214 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4215 return -TARGET_EFAULT;
4216 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4217 return -TARGET_EFAULT;
4218 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4219 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4220 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4221 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4222 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4223 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4224 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4225 unlock_user_struct(target_sd, target_addr, 1);
4229 struct target_shminfo {
4237 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4238 struct shminfo *host_shminfo)
4240 struct target_shminfo *target_shminfo;
4241 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4242 return -TARGET_EFAULT;
4243 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4244 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4245 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4246 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4247 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4248 unlock_user_struct(target_shminfo, target_addr, 1);
4252 struct target_shm_info {
4257 abi_ulong swap_attempts;
4258 abi_ulong swap_successes;
4261 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4262 struct shm_info *host_shm_info)
4264 struct target_shm_info *target_shm_info;
4265 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4266 return -TARGET_EFAULT;
4267 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4268 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4269 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4270 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4271 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4272 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4273 unlock_user_struct(target_shm_info, target_addr, 1);
4277 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4279 struct shmid_ds dsarg;
4280 struct shminfo shminfo;
4281 struct shm_info shm_info;
4282 abi_long ret = -TARGET_EINVAL;
4290 if (target_to_host_shmid_ds(&dsarg, buf))
4291 return -TARGET_EFAULT;
4292 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4293 if (host_to_target_shmid_ds(buf, &dsarg))
4294 return -TARGET_EFAULT;
4297 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4298 if (host_to_target_shminfo(buf, &shminfo))
4299 return -TARGET_EFAULT;
4302 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4303 if (host_to_target_shm_info(buf, &shm_info))
4304 return -TARGET_EFAULT;
4309 ret = get_errno(shmctl(shmid, cmd, NULL));
4316 #ifndef TARGET_FORCE_SHMLBA
4317 /* For most architectures, SHMLBA is the same as the page size;
4318 * some architectures have larger values, in which case they should
4319 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4320 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4321 * and defining its own value for SHMLBA.
4323 * The kernel also permits SHMLBA to be set by the architecture to a
4324 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4325 * this means that addresses are rounded to the large size if
4326 * SHM_RND is set but addresses not aligned to that size are not rejected
4327 * as long as they are at least page-aligned. Since the only architecture
4328 * which uses this is ia64 this code doesn't provide for that oddity.
4330 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4332 return TARGET_PAGE_SIZE;
4336 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4337 int shmid, abi_ulong shmaddr, int shmflg)
4341 struct shmid_ds shm_info;
4345 /* find out the length of the shared memory segment */
4346 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4347 if (is_error(ret)) {
4348 /* can't get length, bail out */
4352 shmlba = target_shmlba(cpu_env);
4354 if (shmaddr & (shmlba - 1)) {
4355 if (shmflg & SHM_RND) {
4356 shmaddr &= ~(shmlba - 1);
4358 return -TARGET_EINVAL;
4361 if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4362 return -TARGET_EINVAL;
4368 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4370 abi_ulong mmap_start;
4372 /* In order to use the host shmat, we need to honor host SHMLBA. */
4373 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4375 if (mmap_start == -1) {
4377 host_raddr = (void *)-1;
4379 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4382 if (host_raddr == (void *)-1) {
4384 return get_errno((long)host_raddr);
4386 raddr=h2g((unsigned long)host_raddr);
4388 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4389 PAGE_VALID | PAGE_READ |
4390 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4392 for (i = 0; i < N_SHM_REGIONS; i++) {
4393 if (!shm_regions[i].in_use) {
4394 shm_regions[i].in_use = true;
4395 shm_regions[i].start = raddr;
4396 shm_regions[i].size = shm_info.shm_segsz;
4406 static inline abi_long do_shmdt(abi_ulong shmaddr)
4413 for (i = 0; i < N_SHM_REGIONS; ++i) {
4414 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4415 shm_regions[i].in_use = false;
4416 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4420 rv = get_errno(shmdt(g2h(shmaddr)));
4427 #ifdef TARGET_NR_ipc
4428 /* ??? This only works with linear mappings. */
4429 /* do_ipc() must return target values and target errnos. */
4430 static abi_long do_ipc(CPUArchState *cpu_env,
4431 unsigned int call, abi_long first,
4432 abi_long second, abi_long third,
4433 abi_long ptr, abi_long fifth)
4438 version = call >> 16;
4443 ret = do_semtimedop(first, ptr, second, 0, false);
4445 case IPCOP_semtimedop:
4447 * The s390 sys_ipc variant has only five parameters instead of six
4448 * (as for default variant) and the only difference is the handling of
4449 * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4450 * to a struct timespec where the generic variant uses fifth parameter.
4452 #if defined(TARGET_S390X)
4453 ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4455 ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4460 ret = get_errno(semget(first, second, third));
4463 case IPCOP_semctl: {
4464 /* The semun argument to semctl is passed by value, so dereference the
4467 get_user_ual(atptr, ptr);
4468 ret = do_semctl(first, second, third, atptr);
4473 ret = get_errno(msgget(first, second));
4477 ret = do_msgsnd(first, ptr, second, third);
4481 ret = do_msgctl(first, second, ptr);
4488 struct target_ipc_kludge {
4493 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4494 ret = -TARGET_EFAULT;
4498 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4500 unlock_user_struct(tmp, ptr, 0);
4504 ret = do_msgrcv(first, ptr, second, fifth, third);
4513 raddr = do_shmat(cpu_env, first, ptr, second);
4514 if (is_error(raddr))
4515 return get_errno(raddr);
4516 if (put_user_ual(raddr, third))
4517 return -TARGET_EFAULT;
4521 ret = -TARGET_EINVAL;
4526 ret = do_shmdt(ptr);
4530 /* IPC_* flag values are the same on all linux platforms */
4531 ret = get_errno(shmget(first, second, third));
4534 /* IPC_* and SHM_* command values are the same on all linux platforms */
4536 ret = do_shmctl(first, second, ptr);
4539 qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4541 ret = -TARGET_ENOSYS;
4548 /* kernel structure types definitions */
4550 #define STRUCT(name, ...) STRUCT_ ## name,
4551 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4553 #include "syscall_types.h"
4557 #undef STRUCT_SPECIAL
4559 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4560 #define STRUCT_SPECIAL(name)
4561 #include "syscall_types.h"
4563 #undef STRUCT_SPECIAL
4565 #define MAX_STRUCT_SIZE 4096
4567 #ifdef CONFIG_FIEMAP
4568 /* So fiemap access checks don't overflow on 32 bit systems.
4569 * This is very slightly smaller than the limit imposed by
4570 * the underlying kernel.
4572 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4573 / sizeof(struct fiemap_extent))
4575 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4576 int fd, int cmd, abi_long arg)
4578 /* The parameter for this ioctl is a struct fiemap followed
4579 * by an array of struct fiemap_extent whose size is set
4580 * in fiemap->fm_extent_count. The array is filled in by the
4583 int target_size_in, target_size_out;
4585 const argtype *arg_type = ie->arg_type;
4586 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4589 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4593 assert(arg_type[0] == TYPE_PTR);
4594 assert(ie->access == IOC_RW);
4596 target_size_in = thunk_type_size(arg_type, 0);
4597 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4599 return -TARGET_EFAULT;
4601 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4602 unlock_user(argptr, arg, 0);
4603 fm = (struct fiemap *)buf_temp;
4604 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4605 return -TARGET_EINVAL;
4608 outbufsz = sizeof (*fm) +
4609 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4611 if (outbufsz > MAX_STRUCT_SIZE) {
4612 /* We can't fit all the extents into the fixed size buffer.
4613 * Allocate one that is large enough and use it instead.
4615 fm = g_try_malloc(outbufsz);
4617 return -TARGET_ENOMEM;
4619 memcpy(fm, buf_temp, sizeof(struct fiemap));
4622 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4623 if (!is_error(ret)) {
4624 target_size_out = target_size_in;
4625 /* An extent_count of 0 means we were only counting the extents
4626 * so there are no structs to copy
4628 if (fm->fm_extent_count != 0) {
4629 target_size_out += fm->fm_mapped_extents * extent_size;
4631 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4633 ret = -TARGET_EFAULT;
4635 /* Convert the struct fiemap */
4636 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4637 if (fm->fm_extent_count != 0) {
4638 p = argptr + target_size_in;
4639 /* ...and then all the struct fiemap_extents */
4640 for (i = 0; i < fm->fm_mapped_extents; i++) {
4641 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4646 unlock_user(argptr, arg, target_size_out);
4656 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4657 int fd, int cmd, abi_long arg)
4659 const argtype *arg_type = ie->arg_type;
4663 struct ifconf *host_ifconf;
4665 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4666 int target_ifreq_size;
4671 abi_long target_ifc_buf;
4675 assert(arg_type[0] == TYPE_PTR);
4676 assert(ie->access == IOC_RW);
4679 target_size = thunk_type_size(arg_type, 0);
4681 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4683 return -TARGET_EFAULT;
4684 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4685 unlock_user(argptr, arg, 0);
4687 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4688 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4689 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4691 if (target_ifc_buf != 0) {
4692 target_ifc_len = host_ifconf->ifc_len;
4693 nb_ifreq = target_ifc_len / target_ifreq_size;
4694 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4696 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4697 if (outbufsz > MAX_STRUCT_SIZE) {
4699 * We can't fit all the extents into the fixed size buffer.
4700 * Allocate one that is large enough and use it instead.
4702 host_ifconf = malloc(outbufsz);
4704 return -TARGET_ENOMEM;
4706 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4709 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4711 host_ifconf->ifc_len = host_ifc_len;
4713 host_ifc_buf = NULL;
4715 host_ifconf->ifc_buf = host_ifc_buf;
4717 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4718 if (!is_error(ret)) {
4719 /* convert host ifc_len to target ifc_len */
4721 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4722 target_ifc_len = nb_ifreq * target_ifreq_size;
4723 host_ifconf->ifc_len = target_ifc_len;
4725 /* restore target ifc_buf */
4727 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4729 /* copy struct ifconf to target user */
4731 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4733 return -TARGET_EFAULT;
4734 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4735 unlock_user(argptr, arg, target_size);
4737 if (target_ifc_buf != 0) {
4738 /* copy ifreq[] to target user */
4739 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4740 for (i = 0; i < nb_ifreq ; i++) {
4741 thunk_convert(argptr + i * target_ifreq_size,
4742 host_ifc_buf + i * sizeof(struct ifreq),
4743 ifreq_arg_type, THUNK_TARGET);
4745 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4756 #if defined(CONFIG_USBFS)
4757 #if HOST_LONG_BITS > 64
4758 #error USBDEVFS thunks do not support >64 bit hosts yet.
4761 uint64_t target_urb_adr;
4762 uint64_t target_buf_adr;
4763 char *target_buf_ptr;
4764 struct usbdevfs_urb host_urb;
4767 static GHashTable *usbdevfs_urb_hashtable(void)
4769 static GHashTable *urb_hashtable;
4771 if (!urb_hashtable) {
4772 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4774 return urb_hashtable;
4777 static void urb_hashtable_insert(struct live_urb *urb)
4779 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4780 g_hash_table_insert(urb_hashtable, urb, urb);
4783 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4785 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4786 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4789 static void urb_hashtable_remove(struct live_urb *urb)
4791 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4792 g_hash_table_remove(urb_hashtable, urb);
4796 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4797 int fd, int cmd, abi_long arg)
4799 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4800 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4801 struct live_urb *lurb;
4805 uintptr_t target_urb_adr;
4808 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4810 memset(buf_temp, 0, sizeof(uint64_t));
4811 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4812 if (is_error(ret)) {
4816 memcpy(&hurb, buf_temp, sizeof(uint64_t));
4817 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4818 if (!lurb->target_urb_adr) {
4819 return -TARGET_EFAULT;
4821 urb_hashtable_remove(lurb);
4822 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4823 lurb->host_urb.buffer_length);
4824 lurb->target_buf_ptr = NULL;
4826 /* restore the guest buffer pointer */
4827 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4829 /* update the guest urb struct */
4830 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4833 return -TARGET_EFAULT;
4835 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4836 unlock_user(argptr, lurb->target_urb_adr, target_size);
4838 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4839 /* write back the urb handle */
4840 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4843 return -TARGET_EFAULT;
4846 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4847 target_urb_adr = lurb->target_urb_adr;
4848 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4849 unlock_user(argptr, arg, target_size);
4856 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4857 uint8_t *buf_temp __attribute__((unused)),
4858 int fd, int cmd, abi_long arg)
4860 struct live_urb *lurb;
4862 /* map target address back to host URB with metadata. */
4863 lurb = urb_hashtable_lookup(arg);
4865 return -TARGET_EFAULT;
4867 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4871 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4872 int fd, int cmd, abi_long arg)
4874 const argtype *arg_type = ie->arg_type;
4879 struct live_urb *lurb;
4882 * each submitted URB needs to map to a unique ID for the
4883 * kernel, and that unique ID needs to be a pointer to
4884 * host memory. hence, we need to malloc for each URB.
4885 * isochronous transfers have a variable length struct.
4888 target_size = thunk_type_size(arg_type, THUNK_TARGET);
4890 /* construct host copy of urb and metadata */
4891 lurb = g_try_malloc0(sizeof(struct live_urb));
4893 return -TARGET_ENOMEM;
4896 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4899 return -TARGET_EFAULT;
4901 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4902 unlock_user(argptr, arg, 0);
4904 lurb->target_urb_adr = arg;
4905 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4907 /* buffer space used depends on endpoint type so lock the entire buffer */
4908 /* control type urbs should check the buffer contents for true direction */
4909 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4910 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4911 lurb->host_urb.buffer_length, 1);
4912 if (lurb->target_buf_ptr == NULL) {
4914 return -TARGET_EFAULT;
4917 /* update buffer pointer in host copy */
4918 lurb->host_urb.buffer = lurb->target_buf_ptr;
4920 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4921 if (is_error(ret)) {
4922 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4925 urb_hashtable_insert(lurb);
4930 #endif /* CONFIG_USBFS */
4932 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4933 int cmd, abi_long arg)
4936 struct dm_ioctl *host_dm;
4937 abi_long guest_data;
4938 uint32_t guest_data_size;
4940 const argtype *arg_type = ie->arg_type;
4942 void *big_buf = NULL;
4946 target_size = thunk_type_size(arg_type, 0);
4947 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4949 ret = -TARGET_EFAULT;
4952 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4953 unlock_user(argptr, arg, 0);
4955 /* buf_temp is too small, so fetch things into a bigger buffer */
4956 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4957 memcpy(big_buf, buf_temp, target_size);
4961 guest_data = arg + host_dm->data_start;
4962 if ((guest_data - arg) < 0) {
4963 ret = -TARGET_EINVAL;
4966 guest_data_size = host_dm->data_size - host_dm->data_start;
4967 host_data = (char*)host_dm + host_dm->data_start;
4969 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4971 ret = -TARGET_EFAULT;
4975 switch (ie->host_cmd) {
4977 case DM_LIST_DEVICES:
4980 case DM_DEV_SUSPEND:
4983 case DM_TABLE_STATUS:
4984 case DM_TABLE_CLEAR:
4986 case DM_LIST_VERSIONS:
4990 case DM_DEV_SET_GEOMETRY:
4991 /* data contains only strings */
4992 memcpy(host_data, argptr, guest_data_size);
4995 memcpy(host_data, argptr, guest_data_size);
4996 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5000 void *gspec = argptr;
5001 void *cur_data = host_data;
5002 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5003 int spec_size = thunk_type_size(arg_type, 0);
5006 for (i = 0; i < host_dm->target_count; i++) {
5007 struct dm_target_spec *spec = cur_data;
5011 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5012 slen = strlen((char*)gspec + spec_size) + 1;
5014 spec->next = sizeof(*spec) + slen;
5015 strcpy((char*)&spec[1], gspec + spec_size);
5017 cur_data += spec->next;
5022 ret = -TARGET_EINVAL;
5023 unlock_user(argptr, guest_data, 0);
5026 unlock_user(argptr, guest_data, 0);
5028 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5029 if (!is_error(ret)) {
5030 guest_data = arg + host_dm->data_start;
5031 guest_data_size = host_dm->data_size - host_dm->data_start;
5032 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5033 switch (ie->host_cmd) {
5038 case DM_DEV_SUSPEND:
5041 case DM_TABLE_CLEAR:
5043 case DM_DEV_SET_GEOMETRY:
5044 /* no return data */
5046 case DM_LIST_DEVICES:
5048 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5049 uint32_t remaining_data = guest_data_size;
5050 void *cur_data = argptr;
5051 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5052 int nl_size = 12; /* can't use thunk_size due to alignment */
5055 uint32_t next = nl->next;
5057 nl->next = nl_size + (strlen(nl->name) + 1);
5059 if (remaining_data < nl->next) {
5060 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5063 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5064 strcpy(cur_data + nl_size, nl->name);
5065 cur_data += nl->next;
5066 remaining_data -= nl->next;
5070 nl = (void*)nl + next;
5075 case DM_TABLE_STATUS:
5077 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5078 void *cur_data = argptr;
5079 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5080 int spec_size = thunk_type_size(arg_type, 0);
5083 for (i = 0; i < host_dm->target_count; i++) {
5084 uint32_t next = spec->next;
5085 int slen = strlen((char*)&spec[1]) + 1;
5086 spec->next = (cur_data - argptr) + spec_size + slen;
5087 if (guest_data_size < spec->next) {
5088 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5091 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5092 strcpy(cur_data + spec_size, (char*)&spec[1]);
5093 cur_data = argptr + spec->next;
5094 spec = (void*)host_dm + host_dm->data_start + next;
5100 void *hdata = (void*)host_dm + host_dm->data_start;
5101 int count = *(uint32_t*)hdata;
5102 uint64_t *hdev = hdata + 8;
5103 uint64_t *gdev = argptr + 8;
5106 *(uint32_t*)argptr = tswap32(count);
5107 for (i = 0; i < count; i++) {
5108 *gdev = tswap64(*hdev);
5114 case DM_LIST_VERSIONS:
5116 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5117 uint32_t remaining_data = guest_data_size;
5118 void *cur_data = argptr;
5119 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5120 int vers_size = thunk_type_size(arg_type, 0);
5123 uint32_t next = vers->next;
5125 vers->next = vers_size + (strlen(vers->name) + 1);
5127 if (remaining_data < vers->next) {
5128 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5131 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5132 strcpy(cur_data + vers_size, vers->name);
5133 cur_data += vers->next;
5134 remaining_data -= vers->next;
5138 vers = (void*)vers + next;
5143 unlock_user(argptr, guest_data, 0);
5144 ret = -TARGET_EINVAL;
5147 unlock_user(argptr, guest_data, guest_data_size);
5149 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5151 ret = -TARGET_EFAULT;
5154 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5155 unlock_user(argptr, arg, target_size);
5162 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5163 int cmd, abi_long arg)
5167 const argtype *arg_type = ie->arg_type;
5168 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5171 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5172 struct blkpg_partition host_part;
5174 /* Read and convert blkpg */
5176 target_size = thunk_type_size(arg_type, 0);
5177 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5179 ret = -TARGET_EFAULT;
5182 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5183 unlock_user(argptr, arg, 0);
5185 switch (host_blkpg->op) {
5186 case BLKPG_ADD_PARTITION:
5187 case BLKPG_DEL_PARTITION:
5188 /* payload is struct blkpg_partition */
5191 /* Unknown opcode */
5192 ret = -TARGET_EINVAL;
5196 /* Read and convert blkpg->data */
5197 arg = (abi_long)(uintptr_t)host_blkpg->data;
5198 target_size = thunk_type_size(part_arg_type, 0);
5199 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5201 ret = -TARGET_EFAULT;
5204 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5205 unlock_user(argptr, arg, 0);
5207 /* Swizzle the data pointer to our local copy and call! */
5208 host_blkpg->data = &host_part;
5209 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5215 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5216 int fd, int cmd, abi_long arg)
5218 const argtype *arg_type = ie->arg_type;
5219 const StructEntry *se;
5220 const argtype *field_types;
5221 const int *dst_offsets, *src_offsets;
5224 abi_ulong *target_rt_dev_ptr = NULL;
5225 unsigned long *host_rt_dev_ptr = NULL;
5229 assert(ie->access == IOC_W);
5230 assert(*arg_type == TYPE_PTR);
5232 assert(*arg_type == TYPE_STRUCT);
5233 target_size = thunk_type_size(arg_type, 0);
5234 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5236 return -TARGET_EFAULT;
5239 assert(*arg_type == (int)STRUCT_rtentry);
5240 se = struct_entries + *arg_type++;
5241 assert(se->convert[0] == NULL);
5242 /* convert struct here to be able to catch rt_dev string */
5243 field_types = se->field_types;
5244 dst_offsets = se->field_offsets[THUNK_HOST];
5245 src_offsets = se->field_offsets[THUNK_TARGET];
5246 for (i = 0; i < se->nb_fields; i++) {
5247 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5248 assert(*field_types == TYPE_PTRVOID);
5249 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5250 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5251 if (*target_rt_dev_ptr != 0) {
5252 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5253 tswapal(*target_rt_dev_ptr));
5254 if (!*host_rt_dev_ptr) {
5255 unlock_user(argptr, arg, 0);
5256 return -TARGET_EFAULT;
5259 *host_rt_dev_ptr = 0;
5264 field_types = thunk_convert(buf_temp + dst_offsets[i],
5265 argptr + src_offsets[i],
5266 field_types, THUNK_HOST);
5268 unlock_user(argptr, arg, 0);
5270 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5272 assert(host_rt_dev_ptr != NULL);
5273 assert(target_rt_dev_ptr != NULL);
5274 if (*host_rt_dev_ptr != 0) {
5275 unlock_user((void *)*host_rt_dev_ptr,
5276 *target_rt_dev_ptr, 0);
5281 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5282 int fd, int cmd, abi_long arg)
5284 int sig = target_to_host_signal(arg);
5285 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5288 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5289 int fd, int cmd, abi_long arg)
5294 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5295 if (is_error(ret)) {
5299 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5300 if (copy_to_user_timeval(arg, &tv)) {
5301 return -TARGET_EFAULT;
5304 if (copy_to_user_timeval64(arg, &tv)) {
5305 return -TARGET_EFAULT;
5312 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5313 int fd, int cmd, abi_long arg)
5318 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5319 if (is_error(ret)) {
5323 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5324 if (host_to_target_timespec(arg, &ts)) {
5325 return -TARGET_EFAULT;
5328 if (host_to_target_timespec64(arg, &ts)) {
5329 return -TARGET_EFAULT;
5337 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5338 int fd, int cmd, abi_long arg)
5340 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5341 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5347 static void unlock_drm_version(struct drm_version *host_ver,
5348 struct target_drm_version *target_ver,
5351 unlock_user(host_ver->name, target_ver->name,
5352 copy ? host_ver->name_len : 0);
5353 unlock_user(host_ver->date, target_ver->date,
5354 copy ? host_ver->date_len : 0);
5355 unlock_user(host_ver->desc, target_ver->desc,
5356 copy ? host_ver->desc_len : 0);
5359 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5360 struct target_drm_version *target_ver)
5362 memset(host_ver, 0, sizeof(*host_ver));
5364 __get_user(host_ver->name_len, &target_ver->name_len);
5365 if (host_ver->name_len) {
5366 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5367 target_ver->name_len, 0);
5368 if (!host_ver->name) {
5373 __get_user(host_ver->date_len, &target_ver->date_len);
5374 if (host_ver->date_len) {
5375 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5376 target_ver->date_len, 0);
5377 if (!host_ver->date) {
5382 __get_user(host_ver->desc_len, &target_ver->desc_len);
5383 if (host_ver->desc_len) {
5384 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5385 target_ver->desc_len, 0);
5386 if (!host_ver->desc) {
5393 unlock_drm_version(host_ver, target_ver, false);
5397 static inline void host_to_target_drmversion(
5398 struct target_drm_version *target_ver,
5399 struct drm_version *host_ver)
5401 __put_user(host_ver->version_major, &target_ver->version_major);
5402 __put_user(host_ver->version_minor, &target_ver->version_minor);
5403 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5404 __put_user(host_ver->name_len, &target_ver->name_len);
5405 __put_user(host_ver->date_len, &target_ver->date_len);
5406 __put_user(host_ver->desc_len, &target_ver->desc_len);
5407 unlock_drm_version(host_ver, target_ver, true);
5410 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5411 int fd, int cmd, abi_long arg)
5413 struct drm_version *ver;
5414 struct target_drm_version *target_ver;
5417 switch (ie->host_cmd) {
5418 case DRM_IOCTL_VERSION:
5419 if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5420 return -TARGET_EFAULT;
5422 ver = (struct drm_version *)buf_temp;
5423 ret = target_to_host_drmversion(ver, target_ver);
5424 if (!is_error(ret)) {
5425 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5426 if (is_error(ret)) {
5427 unlock_drm_version(ver, target_ver, false);
5429 host_to_target_drmversion(target_ver, ver);
5432 unlock_user_struct(target_ver, arg, 0);
5435 return -TARGET_ENOSYS;
5438 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5439 struct drm_i915_getparam *gparam,
5440 int fd, abi_long arg)
5444 struct target_drm_i915_getparam *target_gparam;
5446 if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5447 return -TARGET_EFAULT;
5450 __get_user(gparam->param, &target_gparam->param);
5451 gparam->value = &value;
5452 ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5453 put_user_s32(value, target_gparam->value);
5455 unlock_user_struct(target_gparam, arg, 0);
5459 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5460 int fd, int cmd, abi_long arg)
5462 switch (ie->host_cmd) {
5463 case DRM_IOCTL_I915_GETPARAM:
5464 return do_ioctl_drm_i915_getparam(ie,
5465 (struct drm_i915_getparam *)buf_temp,
5468 return -TARGET_ENOSYS;
5474 IOCTLEntry ioctl_entries[] = {
5475 #define IOCTL(cmd, access, ...) \
5476 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5477 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5478 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5479 #define IOCTL_IGNORE(cmd) \
5480 { TARGET_ ## cmd, 0, #cmd },
5485 /* ??? Implement proper locking for ioctls. */
5486 /* do_ioctl() Must return target values and target errnos. */
5487 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5489 const IOCTLEntry *ie;
5490 const argtype *arg_type;
5492 uint8_t buf_temp[MAX_STRUCT_SIZE];
5498 if (ie->target_cmd == 0) {
5500 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5501 return -TARGET_ENOSYS;
5503 if (ie->target_cmd == cmd)
5507 arg_type = ie->arg_type;
5509 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5510 } else if (!ie->host_cmd) {
5511 /* Some architectures define BSD ioctls in their headers
5512 that are not implemented in Linux. */
5513 return -TARGET_ENOSYS;
5516 switch(arg_type[0]) {
5519 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5525 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5529 target_size = thunk_type_size(arg_type, 0);
5530 switch(ie->access) {
5532 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5533 if (!is_error(ret)) {
5534 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5536 return -TARGET_EFAULT;
5537 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5538 unlock_user(argptr, arg, target_size);
5542 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5544 return -TARGET_EFAULT;
5545 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5546 unlock_user(argptr, arg, 0);
5547 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5551 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5553 return -TARGET_EFAULT;
5554 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5555 unlock_user(argptr, arg, 0);
5556 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5557 if (!is_error(ret)) {
5558 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5560 return -TARGET_EFAULT;
5561 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5562 unlock_user(argptr, arg, target_size);
5568 qemu_log_mask(LOG_UNIMP,
5569 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5570 (long)cmd, arg_type[0]);
5571 ret = -TARGET_ENOSYS;
5577 static const bitmask_transtbl iflag_tbl[] = {
5578 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5579 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5580 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5581 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5582 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5583 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5584 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5585 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5586 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5587 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5588 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5589 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5590 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5591 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5592 { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5596 static const bitmask_transtbl oflag_tbl[] = {
5597 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5598 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5599 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5600 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5601 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5602 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5603 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5604 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5605 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5606 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5607 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5608 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5609 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5610 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5611 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5612 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5613 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5614 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5615 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5616 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5617 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5618 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5619 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5620 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5624 static const bitmask_transtbl cflag_tbl[] = {
5625 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5626 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5627 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5628 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5629 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5630 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5631 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5632 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5633 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5634 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5635 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5636 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5637 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5638 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5639 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5640 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5641 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5642 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5643 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5644 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5645 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5646 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5647 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5648 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5649 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5650 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5651 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5652 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5653 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5654 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5655 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5659 static const bitmask_transtbl lflag_tbl[] = {
5660 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5661 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5662 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5663 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5664 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5665 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5666 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5667 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5668 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5669 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5670 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5671 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5672 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5673 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5674 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5675 { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5679 static void target_to_host_termios (void *dst, const void *src)
5681 struct host_termios *host = dst;
5682 const struct target_termios *target = src;
5685 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5687 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5689 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5691 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5692 host->c_line = target->c_line;
5694 memset(host->c_cc, 0, sizeof(host->c_cc));
5695 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5696 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5697 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5698 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5699 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5700 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5701 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5702 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5703 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5704 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5705 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5706 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5707 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5708 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5709 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5710 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5711 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5714 static void host_to_target_termios (void *dst, const void *src)
5716 struct target_termios *target = dst;
5717 const struct host_termios *host = src;
5720 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5722 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5724 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5726 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5727 target->c_line = host->c_line;
5729 memset(target->c_cc, 0, sizeof(target->c_cc));
5730 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5731 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5732 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5733 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5734 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5735 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5736 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5737 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5738 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5739 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5740 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5741 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5742 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5743 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5744 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5745 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5746 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5749 static const StructEntry struct_termios_def = {
5750 .convert = { host_to_target_termios, target_to_host_termios },
5751 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5752 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5753 .print = print_termios,
5756 static bitmask_transtbl mmap_flags_tbl[] = {
5757 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5758 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5759 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5760 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5761 MAP_ANONYMOUS, MAP_ANONYMOUS },
5762 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5763 MAP_GROWSDOWN, MAP_GROWSDOWN },
5764 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5765 MAP_DENYWRITE, MAP_DENYWRITE },
5766 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5767 MAP_EXECUTABLE, MAP_EXECUTABLE },
5768 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5769 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5770 MAP_NORESERVE, MAP_NORESERVE },
5771 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5772 /* MAP_STACK had been ignored by the kernel for quite some time.
5773 Recognize it for the target insofar as we do not want to pass
5774 it through to the host. */
5775 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5780 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5781 * TARGET_I386 is defined if TARGET_X86_64 is defined
5783 #if defined(TARGET_I386)
5785 /* NOTE: there is really one LDT for all the threads */
5786 static uint8_t *ldt_table;
5788 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5795 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5796 if (size > bytecount)
5798 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5800 return -TARGET_EFAULT;
5801 /* ??? Should this by byteswapped? */
5802 memcpy(p, ldt_table, size);
5803 unlock_user(p, ptr, size);
5807 /* XXX: add locking support */
5808 static abi_long write_ldt(CPUX86State *env,
5809 abi_ulong ptr, unsigned long bytecount, int oldmode)
5811 struct target_modify_ldt_ldt_s ldt_info;
5812 struct target_modify_ldt_ldt_s *target_ldt_info;
5813 int seg_32bit, contents, read_exec_only, limit_in_pages;
5814 int seg_not_present, useable, lm;
5815 uint32_t *lp, entry_1, entry_2;
5817 if (bytecount != sizeof(ldt_info))
5818 return -TARGET_EINVAL;
5819 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5820 return -TARGET_EFAULT;
5821 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5822 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5823 ldt_info.limit = tswap32(target_ldt_info->limit);
5824 ldt_info.flags = tswap32(target_ldt_info->flags);
5825 unlock_user_struct(target_ldt_info, ptr, 0);
5827 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5828 return -TARGET_EINVAL;
5829 seg_32bit = ldt_info.flags & 1;
5830 contents = (ldt_info.flags >> 1) & 3;
5831 read_exec_only = (ldt_info.flags >> 3) & 1;
5832 limit_in_pages = (ldt_info.flags >> 4) & 1;
5833 seg_not_present = (ldt_info.flags >> 5) & 1;
5834 useable = (ldt_info.flags >> 6) & 1;
5838 lm = (ldt_info.flags >> 7) & 1;
5840 if (contents == 3) {
5842 return -TARGET_EINVAL;
5843 if (seg_not_present == 0)
5844 return -TARGET_EINVAL;
5846 /* allocate the LDT */
5848 env->ldt.base = target_mmap(0,
5849 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5850 PROT_READ|PROT_WRITE,
5851 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5852 if (env->ldt.base == -1)
5853 return -TARGET_ENOMEM;
5854 memset(g2h(env->ldt.base), 0,
5855 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5856 env->ldt.limit = 0xffff;
5857 ldt_table = g2h(env->ldt.base);
5860 /* NOTE: same code as Linux kernel */
5861 /* Allow LDTs to be cleared by the user. */
5862 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5865 read_exec_only == 1 &&
5867 limit_in_pages == 0 &&
5868 seg_not_present == 1 &&
5876 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5877 (ldt_info.limit & 0x0ffff);
5878 entry_2 = (ldt_info.base_addr & 0xff000000) |
5879 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5880 (ldt_info.limit & 0xf0000) |
5881 ((read_exec_only ^ 1) << 9) |
5883 ((seg_not_present ^ 1) << 15) |
5885 (limit_in_pages << 23) |
5889 entry_2 |= (useable << 20);
5891 /* Install the new entry ... */
5893 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5894 lp[0] = tswap32(entry_1);
5895 lp[1] = tswap32(entry_2);
5899 /* specific and weird i386 syscalls */
5900 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5901 unsigned long bytecount)
5907 ret = read_ldt(ptr, bytecount);
5910 ret = write_ldt(env, ptr, bytecount, 1);
5913 ret = write_ldt(env, ptr, bytecount, 0);
5916 ret = -TARGET_ENOSYS;
5922 #if defined(TARGET_ABI32)
5923 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5925 uint64_t *gdt_table = g2h(env->gdt.base);
5926 struct target_modify_ldt_ldt_s ldt_info;
5927 struct target_modify_ldt_ldt_s *target_ldt_info;
5928 int seg_32bit, contents, read_exec_only, limit_in_pages;
5929 int seg_not_present, useable, lm;
5930 uint32_t *lp, entry_1, entry_2;
5933 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5934 if (!target_ldt_info)
5935 return -TARGET_EFAULT;
5936 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5937 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5938 ldt_info.limit = tswap32(target_ldt_info->limit);
5939 ldt_info.flags = tswap32(target_ldt_info->flags);
5940 if (ldt_info.entry_number == -1) {
5941 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5942 if (gdt_table[i] == 0) {
5943 ldt_info.entry_number = i;
5944 target_ldt_info->entry_number = tswap32(i);
5949 unlock_user_struct(target_ldt_info, ptr, 1);
5951 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5952 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5953 return -TARGET_EINVAL;
5954 seg_32bit = ldt_info.flags & 1;
5955 contents = (ldt_info.flags >> 1) & 3;
5956 read_exec_only = (ldt_info.flags >> 3) & 1;
5957 limit_in_pages = (ldt_info.flags >> 4) & 1;
5958 seg_not_present = (ldt_info.flags >> 5) & 1;
5959 useable = (ldt_info.flags >> 6) & 1;
5963 lm = (ldt_info.flags >> 7) & 1;
5966 if (contents == 3) {
5967 if (seg_not_present == 0)
5968 return -TARGET_EINVAL;
5971 /* NOTE: same code as Linux kernel */
5972 /* Allow LDTs to be cleared by the user. */
5973 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5974 if ((contents == 0 &&
5975 read_exec_only == 1 &&
5977 limit_in_pages == 0 &&
5978 seg_not_present == 1 &&
5986 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5987 (ldt_info.limit & 0x0ffff);
5988 entry_2 = (ldt_info.base_addr & 0xff000000) |
5989 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5990 (ldt_info.limit & 0xf0000) |
5991 ((read_exec_only ^ 1) << 9) |
5993 ((seg_not_present ^ 1) << 15) |
5995 (limit_in_pages << 23) |
6000 /* Install the new entry ... */
6002 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6003 lp[0] = tswap32(entry_1);
6004 lp[1] = tswap32(entry_2);
6008 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6010 struct target_modify_ldt_ldt_s *target_ldt_info;
6011 uint64_t *gdt_table = g2h(env->gdt.base);
6012 uint32_t base_addr, limit, flags;
6013 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6014 int seg_not_present, useable, lm;
6015 uint32_t *lp, entry_1, entry_2;
6017 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6018 if (!target_ldt_info)
6019 return -TARGET_EFAULT;
6020 idx = tswap32(target_ldt_info->entry_number);
6021 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6022 idx > TARGET_GDT_ENTRY_TLS_MAX) {
6023 unlock_user_struct(target_ldt_info, ptr, 1);
6024 return -TARGET_EINVAL;
6026 lp = (uint32_t *)(gdt_table + idx);
6027 entry_1 = tswap32(lp[0]);
6028 entry_2 = tswap32(lp[1]);
6030 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6031 contents = (entry_2 >> 10) & 3;
6032 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6033 seg_32bit = (entry_2 >> 22) & 1;
6034 limit_in_pages = (entry_2 >> 23) & 1;
6035 useable = (entry_2 >> 20) & 1;
6039 lm = (entry_2 >> 21) & 1;
6041 flags = (seg_32bit << 0) | (contents << 1) |
6042 (read_exec_only << 3) | (limit_in_pages << 4) |
6043 (seg_not_present << 5) | (useable << 6) | (lm << 7);
6044 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6045 base_addr = (entry_1 >> 16) |
6046 (entry_2 & 0xff000000) |
6047 ((entry_2 & 0xff) << 16);
6048 target_ldt_info->base_addr = tswapal(base_addr);
6049 target_ldt_info->limit = tswap32(limit);
6050 target_ldt_info->flags = tswap32(flags);
6051 unlock_user_struct(target_ldt_info, ptr, 1);
6055 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6057 return -TARGET_ENOSYS;
6060 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6067 case TARGET_ARCH_SET_GS:
6068 case TARGET_ARCH_SET_FS:
6069 if (code == TARGET_ARCH_SET_GS)
6073 cpu_x86_load_seg(env, idx, 0);
6074 env->segs[idx].base = addr;
6076 case TARGET_ARCH_GET_GS:
6077 case TARGET_ARCH_GET_FS:
6078 if (code == TARGET_ARCH_GET_GS)
6082 val = env->segs[idx].base;
6083 if (put_user(val, addr, abi_ulong))
6084 ret = -TARGET_EFAULT;
6087 ret = -TARGET_EINVAL;
6092 #endif /* defined(TARGET_ABI32 */
6094 #endif /* defined(TARGET_I386) */
6096 #define NEW_STACK_SIZE 0x40000
6099 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6102 pthread_mutex_t mutex;
6103 pthread_cond_t cond;
6106 abi_ulong child_tidptr;
6107 abi_ulong parent_tidptr;
6111 static void *clone_func(void *arg)
6113 new_thread_info *info = arg;
6118 rcu_register_thread();
6119 tcg_register_thread();
6123 ts = (TaskState *)cpu->opaque;
6124 info->tid = sys_gettid();
6126 if (info->child_tidptr)
6127 put_user_u32(info->tid, info->child_tidptr);
6128 if (info->parent_tidptr)
6129 put_user_u32(info->tid, info->parent_tidptr);
6130 qemu_guest_random_seed_thread_part2(cpu->random_seed);
6131 /* Enable signals. */
6132 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6133 /* Signal to the parent that we're ready. */
6134 pthread_mutex_lock(&info->mutex);
6135 pthread_cond_broadcast(&info->cond);
6136 pthread_mutex_unlock(&info->mutex);
6137 /* Wait until the parent has finished initializing the tls state. */
6138 pthread_mutex_lock(&clone_lock);
6139 pthread_mutex_unlock(&clone_lock);
6145 /* do_fork() Must return host values and target errnos (unlike most
6146 do_*() functions). */
6147 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6148 abi_ulong parent_tidptr, target_ulong newtls,
6149 abi_ulong child_tidptr)
6151 CPUState *cpu = env_cpu(env);
6155 CPUArchState *new_env;
6158 flags &= ~CLONE_IGNORED_FLAGS;
6160 /* Emulate vfork() with fork() */
6161 if (flags & CLONE_VFORK)
6162 flags &= ~(CLONE_VFORK | CLONE_VM);
6164 if (flags & CLONE_VM) {
6165 TaskState *parent_ts = (TaskState *)cpu->opaque;
6166 new_thread_info info;
6167 pthread_attr_t attr;
6169 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6170 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6171 return -TARGET_EINVAL;
6174 ts = g_new0(TaskState, 1);
6175 init_task_state(ts);
6177 /* Grab a mutex so that thread setup appears atomic. */
6178 pthread_mutex_lock(&clone_lock);
6180 /* we create a new CPU instance. */
6181 new_env = cpu_copy(env);
6182 /* Init regs that differ from the parent. */
6183 cpu_clone_regs_child(new_env, newsp, flags);
6184 cpu_clone_regs_parent(env, flags);
6185 new_cpu = env_cpu(new_env);
6186 new_cpu->opaque = ts;
6187 ts->bprm = parent_ts->bprm;
6188 ts->info = parent_ts->info;
6189 ts->signal_mask = parent_ts->signal_mask;
6191 if (flags & CLONE_CHILD_CLEARTID) {
6192 ts->child_tidptr = child_tidptr;
6195 if (flags & CLONE_SETTLS) {
6196 cpu_set_tls (new_env, newtls);
6199 memset(&info, 0, sizeof(info));
6200 pthread_mutex_init(&info.mutex, NULL);
6201 pthread_mutex_lock(&info.mutex);
6202 pthread_cond_init(&info.cond, NULL);
6204 if (flags & CLONE_CHILD_SETTID) {
6205 info.child_tidptr = child_tidptr;
6207 if (flags & CLONE_PARENT_SETTID) {
6208 info.parent_tidptr = parent_tidptr;
6211 ret = pthread_attr_init(&attr);
6212 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6213 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6214 /* It is not safe to deliver signals until the child has finished
6215 initializing, so temporarily block all signals. */
6216 sigfillset(&sigmask);
6217 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6218 cpu->random_seed = qemu_guest_random_seed_thread_part1();
6220 /* If this is our first additional thread, we need to ensure we
6221 * generate code for parallel execution and flush old translations.
6223 if (!parallel_cpus) {
6224 parallel_cpus = true;
6228 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6229 /* TODO: Free new CPU state if thread creation failed. */
6231 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6232 pthread_attr_destroy(&attr);
6234 /* Wait for the child to initialize. */
6235 pthread_cond_wait(&info.cond, &info.mutex);
6240 pthread_mutex_unlock(&info.mutex);
6241 pthread_cond_destroy(&info.cond);
6242 pthread_mutex_destroy(&info.mutex);
6243 pthread_mutex_unlock(&clone_lock);
6245 /* if no CLONE_VM, we consider it is a fork */
6246 if (flags & CLONE_INVALID_FORK_FLAGS) {
6247 return -TARGET_EINVAL;
6250 /* We can't support custom termination signals */
6251 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6252 return -TARGET_EINVAL;
6255 if (block_signals()) {
6256 return -TARGET_ERESTARTSYS;
6262 /* Child Process. */
6263 cpu_clone_regs_child(env, newsp, flags);
6265 /* There is a race condition here. The parent process could
6266 theoretically read the TID in the child process before the child
6267 tid is set. This would require using either ptrace
6268 (not implemented) or having *_tidptr to point at a shared memory
6269 mapping. We can't repeat the spinlock hack used above because
6270 the child process gets its own copy of the lock. */
6271 if (flags & CLONE_CHILD_SETTID)
6272 put_user_u32(sys_gettid(), child_tidptr);
6273 if (flags & CLONE_PARENT_SETTID)
6274 put_user_u32(sys_gettid(), parent_tidptr);
6275 ts = (TaskState *)cpu->opaque;
6276 if (flags & CLONE_SETTLS)
6277 cpu_set_tls (env, newtls);
6278 if (flags & CLONE_CHILD_CLEARTID)
6279 ts->child_tidptr = child_tidptr;
6281 cpu_clone_regs_parent(env, flags);
6288 /* warning : doesn't handle linux specific flags... */
6289 static int target_to_host_fcntl_cmd(int cmd)
6294 case TARGET_F_DUPFD:
6295 case TARGET_F_GETFD:
6296 case TARGET_F_SETFD:
6297 case TARGET_F_GETFL:
6298 case TARGET_F_SETFL:
6299 case TARGET_F_OFD_GETLK:
6300 case TARGET_F_OFD_SETLK:
6301 case TARGET_F_OFD_SETLKW:
6304 case TARGET_F_GETLK:
6307 case TARGET_F_SETLK:
6310 case TARGET_F_SETLKW:
6313 case TARGET_F_GETOWN:
6316 case TARGET_F_SETOWN:
6319 case TARGET_F_GETSIG:
6322 case TARGET_F_SETSIG:
6325 #if TARGET_ABI_BITS == 32
6326 case TARGET_F_GETLK64:
6329 case TARGET_F_SETLK64:
6332 case TARGET_F_SETLKW64:
6336 case TARGET_F_SETLEASE:
6339 case TARGET_F_GETLEASE:
6342 #ifdef F_DUPFD_CLOEXEC
6343 case TARGET_F_DUPFD_CLOEXEC:
6344 ret = F_DUPFD_CLOEXEC;
6347 case TARGET_F_NOTIFY:
6351 case TARGET_F_GETOWN_EX:
6356 case TARGET_F_SETOWN_EX:
6361 case TARGET_F_SETPIPE_SZ:
6364 case TARGET_F_GETPIPE_SZ:
6369 ret = -TARGET_EINVAL;
6373 #if defined(__powerpc64__)
6374 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6375 * is not supported by kernel. The glibc fcntl call actually adjusts
6376 * them to 5, 6 and 7 before making the syscall(). Since we make the
6377 * syscall directly, adjust to what is supported by the kernel.
6379 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6380 ret -= F_GETLK64 - 5;
6387 #define FLOCK_TRANSTBL \
6389 TRANSTBL_CONVERT(F_RDLCK); \
6390 TRANSTBL_CONVERT(F_WRLCK); \
6391 TRANSTBL_CONVERT(F_UNLCK); \
6392 TRANSTBL_CONVERT(F_EXLCK); \
6393 TRANSTBL_CONVERT(F_SHLCK); \
6396 static int target_to_host_flock(int type)
6398 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6400 #undef TRANSTBL_CONVERT
6401 return -TARGET_EINVAL;
6404 static int host_to_target_flock(int type)
6406 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6408 #undef TRANSTBL_CONVERT
6409 /* if we don't know how to convert the value coming
6410 * from the host we copy to the target field as-is
6415 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6416 abi_ulong target_flock_addr)
6418 struct target_flock *target_fl;
6421 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6422 return -TARGET_EFAULT;
6425 __get_user(l_type, &target_fl->l_type);
6426 l_type = target_to_host_flock(l_type);
6430 fl->l_type = l_type;
6431 __get_user(fl->l_whence, &target_fl->l_whence);
6432 __get_user(fl->l_start, &target_fl->l_start);
6433 __get_user(fl->l_len, &target_fl->l_len);
6434 __get_user(fl->l_pid, &target_fl->l_pid);
6435 unlock_user_struct(target_fl, target_flock_addr, 0);
6439 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6440 const struct flock64 *fl)
6442 struct target_flock *target_fl;
6445 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6446 return -TARGET_EFAULT;
6449 l_type = host_to_target_flock(fl->l_type);
6450 __put_user(l_type, &target_fl->l_type);
6451 __put_user(fl->l_whence, &target_fl->l_whence);
6452 __put_user(fl->l_start, &target_fl->l_start);
6453 __put_user(fl->l_len, &target_fl->l_len);
6454 __put_user(fl->l_pid, &target_fl->l_pid);
6455 unlock_user_struct(target_fl, target_flock_addr, 1);
6459 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6460 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6462 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6463 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6464 abi_ulong target_flock_addr)
6466 struct target_oabi_flock64 *target_fl;
6469 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6470 return -TARGET_EFAULT;
6473 __get_user(l_type, &target_fl->l_type);
6474 l_type = target_to_host_flock(l_type);
6478 fl->l_type = l_type;
6479 __get_user(fl->l_whence, &target_fl->l_whence);
6480 __get_user(fl->l_start, &target_fl->l_start);
6481 __get_user(fl->l_len, &target_fl->l_len);
6482 __get_user(fl->l_pid, &target_fl->l_pid);
6483 unlock_user_struct(target_fl, target_flock_addr, 0);
6487 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6488 const struct flock64 *fl)
6490 struct target_oabi_flock64 *target_fl;
6493 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6494 return -TARGET_EFAULT;
6497 l_type = host_to_target_flock(fl->l_type);
6498 __put_user(l_type, &target_fl->l_type);
6499 __put_user(fl->l_whence, &target_fl->l_whence);
6500 __put_user(fl->l_start, &target_fl->l_start);
6501 __put_user(fl->l_len, &target_fl->l_len);
6502 __put_user(fl->l_pid, &target_fl->l_pid);
6503 unlock_user_struct(target_fl, target_flock_addr, 1);
6508 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6509 abi_ulong target_flock_addr)
6511 struct target_flock64 *target_fl;
6514 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6515 return -TARGET_EFAULT;
6518 __get_user(l_type, &target_fl->l_type);
6519 l_type = target_to_host_flock(l_type);
6523 fl->l_type = l_type;
6524 __get_user(fl->l_whence, &target_fl->l_whence);
6525 __get_user(fl->l_start, &target_fl->l_start);
6526 __get_user(fl->l_len, &target_fl->l_len);
6527 __get_user(fl->l_pid, &target_fl->l_pid);
6528 unlock_user_struct(target_fl, target_flock_addr, 0);
6532 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6533 const struct flock64 *fl)
6535 struct target_flock64 *target_fl;
6538 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6539 return -TARGET_EFAULT;
6542 l_type = host_to_target_flock(fl->l_type);
6543 __put_user(l_type, &target_fl->l_type);
6544 __put_user(fl->l_whence, &target_fl->l_whence);
6545 __put_user(fl->l_start, &target_fl->l_start);
6546 __put_user(fl->l_len, &target_fl->l_len);
6547 __put_user(fl->l_pid, &target_fl->l_pid);
6548 unlock_user_struct(target_fl, target_flock_addr, 1);
6552 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6554 struct flock64 fl64;
6556 struct f_owner_ex fox;
6557 struct target_f_owner_ex *target_fox;
6560 int host_cmd = target_to_host_fcntl_cmd(cmd);
6562 if (host_cmd == -TARGET_EINVAL)
6566 case TARGET_F_GETLK:
6567 ret = copy_from_user_flock(&fl64, arg);
6571 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6573 ret = copy_to_user_flock(arg, &fl64);
6577 case TARGET_F_SETLK:
6578 case TARGET_F_SETLKW:
6579 ret = copy_from_user_flock(&fl64, arg);
6583 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6586 case TARGET_F_GETLK64:
6587 case TARGET_F_OFD_GETLK:
6588 ret = copy_from_user_flock64(&fl64, arg);
6592 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6594 ret = copy_to_user_flock64(arg, &fl64);
6597 case TARGET_F_SETLK64:
6598 case TARGET_F_SETLKW64:
6599 case TARGET_F_OFD_SETLK:
6600 case TARGET_F_OFD_SETLKW:
6601 ret = copy_from_user_flock64(&fl64, arg);
6605 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6608 case TARGET_F_GETFL:
6609 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6611 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6615 case TARGET_F_SETFL:
6616 ret = get_errno(safe_fcntl(fd, host_cmd,
6617 target_to_host_bitmask(arg,
6622 case TARGET_F_GETOWN_EX:
6623 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6625 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6626 return -TARGET_EFAULT;
6627 target_fox->type = tswap32(fox.type);
6628 target_fox->pid = tswap32(fox.pid);
6629 unlock_user_struct(target_fox, arg, 1);
6635 case TARGET_F_SETOWN_EX:
6636 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6637 return -TARGET_EFAULT;
6638 fox.type = tswap32(target_fox->type);
6639 fox.pid = tswap32(target_fox->pid);
6640 unlock_user_struct(target_fox, arg, 0);
6641 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6645 case TARGET_F_SETOWN:
6646 case TARGET_F_GETOWN:
6647 case TARGET_F_SETSIG:
6648 case TARGET_F_GETSIG:
6649 case TARGET_F_SETLEASE:
6650 case TARGET_F_GETLEASE:
6651 case TARGET_F_SETPIPE_SZ:
6652 case TARGET_F_GETPIPE_SZ:
6653 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6657 ret = get_errno(safe_fcntl(fd, cmd, arg));
6665 static inline int high2lowuid(int uid)
6673 static inline int high2lowgid(int gid)
6681 static inline int low2highuid(int uid)
6683 if ((int16_t)uid == -1)
6689 static inline int low2highgid(int gid)
6691 if ((int16_t)gid == -1)
6696 static inline int tswapid(int id)
6701 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6703 #else /* !USE_UID16 */
6704 static inline int high2lowuid(int uid)
6708 static inline int high2lowgid(int gid)
6712 static inline int low2highuid(int uid)
6716 static inline int low2highgid(int gid)
6720 static inline int tswapid(int id)
6725 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6727 #endif /* USE_UID16 */
6729 /* We must do direct syscalls for setting UID/GID, because we want to
6730 * implement the Linux system call semantics of "change only for this thread",
6731 * not the libc/POSIX semantics of "change for all threads in process".
6732 * (See http://ewontfix.com/17/ for more details.)
6733 * We use the 32-bit version of the syscalls if present; if it is not
6734 * then either the host architecture supports 32-bit UIDs natively with
6735 * the standard syscall, or the 16-bit UID is the best we can do.
6737 #ifdef __NR_setuid32
6738 #define __NR_sys_setuid __NR_setuid32
6740 #define __NR_sys_setuid __NR_setuid
6742 #ifdef __NR_setgid32
6743 #define __NR_sys_setgid __NR_setgid32
6745 #define __NR_sys_setgid __NR_setgid
6747 #ifdef __NR_setresuid32
6748 #define __NR_sys_setresuid __NR_setresuid32
6750 #define __NR_sys_setresuid __NR_setresuid
6752 #ifdef __NR_setresgid32
6753 #define __NR_sys_setresgid __NR_setresgid32
6755 #define __NR_sys_setresgid __NR_setresgid
6758 _syscall1(int, sys_setuid, uid_t, uid)
6759 _syscall1(int, sys_setgid, gid_t, gid)
6760 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6761 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6763 void syscall_init(void)
6766 const argtype *arg_type;
6770 thunk_init(STRUCT_MAX);
6772 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6773 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6774 #include "syscall_types.h"
6776 #undef STRUCT_SPECIAL
6778 /* Build target_to_host_errno_table[] table from
6779 * host_to_target_errno_table[]. */
6780 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6781 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6784 /* we patch the ioctl size if necessary. We rely on the fact that
6785 no ioctl has all the bits at '1' in the size field */
6787 while (ie->target_cmd != 0) {
6788 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6789 TARGET_IOC_SIZEMASK) {
6790 arg_type = ie->arg_type;
6791 if (arg_type[0] != TYPE_PTR) {
6792 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6797 size = thunk_type_size(arg_type, 0);
6798 ie->target_cmd = (ie->target_cmd &
6799 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6800 (size << TARGET_IOC_SIZESHIFT);
6803 /* automatic consistency check if same arch */
6804 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6805 (defined(__x86_64__) && defined(TARGET_X86_64))
6806 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6807 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6808 ie->name, ie->target_cmd, ie->host_cmd);
6815 #ifdef TARGET_NR_truncate64
6816 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6821 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6825 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6829 #ifdef TARGET_NR_ftruncate64
6830 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6835 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6839 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6843 #if defined(TARGET_NR_timer_settime) || \
6844 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6845 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
6846 abi_ulong target_addr)
6848 if (target_to_host_timespec(&host_its->it_interval, target_addr +
6849 offsetof(struct target_itimerspec,
6851 target_to_host_timespec(&host_its->it_value, target_addr +
6852 offsetof(struct target_itimerspec,
6854 return -TARGET_EFAULT;
6861 #if defined(TARGET_NR_timer_settime64) || \
6862 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
6863 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
6864 abi_ulong target_addr)
6866 if (target_to_host_timespec64(&host_its->it_interval, target_addr +
6867 offsetof(struct target__kernel_itimerspec,
6869 target_to_host_timespec64(&host_its->it_value, target_addr +
6870 offsetof(struct target__kernel_itimerspec,
6872 return -TARGET_EFAULT;
6879 #if ((defined(TARGET_NR_timerfd_gettime) || \
6880 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6881 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6882 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6883 struct itimerspec *host_its)
6885 if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6887 &host_its->it_interval) ||
6888 host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6890 &host_its->it_value)) {
6891 return -TARGET_EFAULT;
6897 #if ((defined(TARGET_NR_timerfd_gettime64) || \
6898 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
6899 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
6900 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
6901 struct itimerspec *host_its)
6903 if (host_to_target_timespec64(target_addr +
6904 offsetof(struct target__kernel_itimerspec,
6906 &host_its->it_interval) ||
6907 host_to_target_timespec64(target_addr +
6908 offsetof(struct target__kernel_itimerspec,
6910 &host_its->it_value)) {
6911 return -TARGET_EFAULT;
6917 #if defined(TARGET_NR_adjtimex) || \
6918 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6919 static inline abi_long target_to_host_timex(struct timex *host_tx,
6920 abi_long target_addr)
6922 struct target_timex *target_tx;
6924 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6925 return -TARGET_EFAULT;
6928 __get_user(host_tx->modes, &target_tx->modes);
6929 __get_user(host_tx->offset, &target_tx->offset);
6930 __get_user(host_tx->freq, &target_tx->freq);
6931 __get_user(host_tx->maxerror, &target_tx->maxerror);
6932 __get_user(host_tx->esterror, &target_tx->esterror);
6933 __get_user(host_tx->status, &target_tx->status);
6934 __get_user(host_tx->constant, &target_tx->constant);
6935 __get_user(host_tx->precision, &target_tx->precision);
6936 __get_user(host_tx->tolerance, &target_tx->tolerance);
6937 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6938 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6939 __get_user(host_tx->tick, &target_tx->tick);
6940 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6941 __get_user(host_tx->jitter, &target_tx->jitter);
6942 __get_user(host_tx->shift, &target_tx->shift);
6943 __get_user(host_tx->stabil, &target_tx->stabil);
6944 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6945 __get_user(host_tx->calcnt, &target_tx->calcnt);
6946 __get_user(host_tx->errcnt, &target_tx->errcnt);
6947 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6948 __get_user(host_tx->tai, &target_tx->tai);
6950 unlock_user_struct(target_tx, target_addr, 0);
6954 static inline abi_long host_to_target_timex(abi_long target_addr,
6955 struct timex *host_tx)
6957 struct target_timex *target_tx;
6959 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6960 return -TARGET_EFAULT;
6963 __put_user(host_tx->modes, &target_tx->modes);
6964 __put_user(host_tx->offset, &target_tx->offset);
6965 __put_user(host_tx->freq, &target_tx->freq);
6966 __put_user(host_tx->maxerror, &target_tx->maxerror);
6967 __put_user(host_tx->esterror, &target_tx->esterror);
6968 __put_user(host_tx->status, &target_tx->status);
6969 __put_user(host_tx->constant, &target_tx->constant);
6970 __put_user(host_tx->precision, &target_tx->precision);
6971 __put_user(host_tx->tolerance, &target_tx->tolerance);
6972 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6973 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6974 __put_user(host_tx->tick, &target_tx->tick);
6975 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6976 __put_user(host_tx->jitter, &target_tx->jitter);
6977 __put_user(host_tx->shift, &target_tx->shift);
6978 __put_user(host_tx->stabil, &target_tx->stabil);
6979 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6980 __put_user(host_tx->calcnt, &target_tx->calcnt);
6981 __put_user(host_tx->errcnt, &target_tx->errcnt);
6982 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6983 __put_user(host_tx->tai, &target_tx->tai);
6985 unlock_user_struct(target_tx, target_addr, 1);
6991 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
6992 static inline abi_long target_to_host_timex64(struct timex *host_tx,
6993 abi_long target_addr)
6995 struct target__kernel_timex *target_tx;
6997 if (copy_from_user_timeval64(&host_tx->time, target_addr +
6998 offsetof(struct target__kernel_timex,
7000 return -TARGET_EFAULT;
7003 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7004 return -TARGET_EFAULT;
7007 __get_user(host_tx->modes, &target_tx->modes);
7008 __get_user(host_tx->offset, &target_tx->offset);
7009 __get_user(host_tx->freq, &target_tx->freq);
7010 __get_user(host_tx->maxerror, &target_tx->maxerror);
7011 __get_user(host_tx->esterror, &target_tx->esterror);
7012 __get_user(host_tx->status, &target_tx->status);
7013 __get_user(host_tx->constant, &target_tx->constant);
7014 __get_user(host_tx->precision, &target_tx->precision);
7015 __get_user(host_tx->tolerance, &target_tx->tolerance);
7016 __get_user(host_tx->tick, &target_tx->tick);
7017 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7018 __get_user(host_tx->jitter, &target_tx->jitter);
7019 __get_user(host_tx->shift, &target_tx->shift);
7020 __get_user(host_tx->stabil, &target_tx->stabil);
7021 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7022 __get_user(host_tx->calcnt, &target_tx->calcnt);
7023 __get_user(host_tx->errcnt, &target_tx->errcnt);
7024 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7025 __get_user(host_tx->tai, &target_tx->tai);
7027 unlock_user_struct(target_tx, target_addr, 0);
7031 static inline abi_long host_to_target_timex64(abi_long target_addr,
7032 struct timex *host_tx)
7034 struct target__kernel_timex *target_tx;
7036 if (copy_to_user_timeval64(target_addr +
7037 offsetof(struct target__kernel_timex, time),
7039 return -TARGET_EFAULT;
7042 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7043 return -TARGET_EFAULT;
7046 __put_user(host_tx->modes, &target_tx->modes);
7047 __put_user(host_tx->offset, &target_tx->offset);
7048 __put_user(host_tx->freq, &target_tx->freq);
7049 __put_user(host_tx->maxerror, &target_tx->maxerror);
7050 __put_user(host_tx->esterror, &target_tx->esterror);
7051 __put_user(host_tx->status, &target_tx->status);
7052 __put_user(host_tx->constant, &target_tx->constant);
7053 __put_user(host_tx->precision, &target_tx->precision);
7054 __put_user(host_tx->tolerance, &target_tx->tolerance);
7055 __put_user(host_tx->tick, &target_tx->tick);
7056 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7057 __put_user(host_tx->jitter, &target_tx->jitter);
7058 __put_user(host_tx->shift, &target_tx->shift);
7059 __put_user(host_tx->stabil, &target_tx->stabil);
7060 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7061 __put_user(host_tx->calcnt, &target_tx->calcnt);
7062 __put_user(host_tx->errcnt, &target_tx->errcnt);
7063 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7064 __put_user(host_tx->tai, &target_tx->tai);
7066 unlock_user_struct(target_tx, target_addr, 1);
7071 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7072 abi_ulong target_addr)
7074 struct target_sigevent *target_sevp;
7076 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7077 return -TARGET_EFAULT;
7080 /* This union is awkward on 64 bit systems because it has a 32 bit
7081 * integer and a pointer in it; we follow the conversion approach
7082 * used for handling sigval types in signal.c so the guest should get
7083 * the correct value back even if we did a 64 bit byteswap and it's
7084 * using the 32 bit integer.
7086 host_sevp->sigev_value.sival_ptr =
7087 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7088 host_sevp->sigev_signo =
7089 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7090 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7091 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7093 unlock_user_struct(target_sevp, target_addr, 1);
7097 #if defined(TARGET_NR_mlockall)
7098 static inline int target_to_host_mlockall_arg(int arg)
7102 if (arg & TARGET_MCL_CURRENT) {
7103 result |= MCL_CURRENT;
7105 if (arg & TARGET_MCL_FUTURE) {
7106 result |= MCL_FUTURE;
7109 if (arg & TARGET_MCL_ONFAULT) {
7110 result |= MCL_ONFAULT;
7118 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7119 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7120 defined(TARGET_NR_newfstatat))
7121 static inline abi_long host_to_target_stat64(void *cpu_env,
7122 abi_ulong target_addr,
7123 struct stat *host_st)
7125 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7126 if (((CPUARMState *)cpu_env)->eabi) {
7127 struct target_eabi_stat64 *target_st;
7129 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7130 return -TARGET_EFAULT;
7131 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7132 __put_user(host_st->st_dev, &target_st->st_dev);
7133 __put_user(host_st->st_ino, &target_st->st_ino);
7134 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7135 __put_user(host_st->st_ino, &target_st->__st_ino);
7137 __put_user(host_st->st_mode, &target_st->st_mode);
7138 __put_user(host_st->st_nlink, &target_st->st_nlink);
7139 __put_user(host_st->st_uid, &target_st->st_uid);
7140 __put_user(host_st->st_gid, &target_st->st_gid);
7141 __put_user(host_st->st_rdev, &target_st->st_rdev);
7142 __put_user(host_st->st_size, &target_st->st_size);
7143 __put_user(host_st->st_blksize, &target_st->st_blksize);
7144 __put_user(host_st->st_blocks, &target_st->st_blocks);
7145 __put_user(host_st->st_atime, &target_st->target_st_atime);
7146 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7147 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7148 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7149 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7150 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7151 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7153 unlock_user_struct(target_st, target_addr, 1);
7157 #if defined(TARGET_HAS_STRUCT_STAT64)
7158 struct target_stat64 *target_st;
7160 struct target_stat *target_st;
7163 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7164 return -TARGET_EFAULT;
7165 memset(target_st, 0, sizeof(*target_st));
7166 __put_user(host_st->st_dev, &target_st->st_dev);
7167 __put_user(host_st->st_ino, &target_st->st_ino);
7168 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7169 __put_user(host_st->st_ino, &target_st->__st_ino);
7171 __put_user(host_st->st_mode, &target_st->st_mode);
7172 __put_user(host_st->st_nlink, &target_st->st_nlink);
7173 __put_user(host_st->st_uid, &target_st->st_uid);
7174 __put_user(host_st->st_gid, &target_st->st_gid);
7175 __put_user(host_st->st_rdev, &target_st->st_rdev);
7176 /* XXX: better use of kernel struct */
7177 __put_user(host_st->st_size, &target_st->st_size);
7178 __put_user(host_st->st_blksize, &target_st->st_blksize);
7179 __put_user(host_st->st_blocks, &target_st->st_blocks);
7180 __put_user(host_st->st_atime, &target_st->target_st_atime);
7181 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7182 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7183 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7184 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7185 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7186 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7188 unlock_user_struct(target_st, target_addr, 1);
7195 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7196 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7197 abi_ulong target_addr)
7199 struct target_statx *target_stx;
7201 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7202 return -TARGET_EFAULT;
7204 memset(target_stx, 0, sizeof(*target_stx));
7206 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7207 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7208 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7209 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7210 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7211 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7212 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7213 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7214 __put_user(host_stx->stx_size, &target_stx->stx_size);
7215 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7216 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7217 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7218 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7219 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7220 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7221 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7222 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7223 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7224 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7225 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7226 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7227 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7228 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7230 unlock_user_struct(target_stx, target_addr, 1);
7236 static int do_sys_futex(int *uaddr, int op, int val,
7237 const struct timespec *timeout, int *uaddr2,
7240 #if HOST_LONG_BITS == 64
7241 #if defined(__NR_futex)
7242 /* always a 64-bit time_t, it doesn't define _time64 version */
7243 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7246 #else /* HOST_LONG_BITS == 64 */
7247 #if defined(__NR_futex_time64)
7248 if (sizeof(timeout->tv_sec) == 8) {
7249 /* _time64 function on 32bit arch */
7250 return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7253 #if defined(__NR_futex)
7254 /* old function on 32bit arch */
7255 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7257 #endif /* HOST_LONG_BITS == 64 */
7258 g_assert_not_reached();
7261 static int do_safe_futex(int *uaddr, int op, int val,
7262 const struct timespec *timeout, int *uaddr2,
7265 #if HOST_LONG_BITS == 64
7266 #if defined(__NR_futex)
7267 /* always a 64-bit time_t, it doesn't define _time64 version */
7268 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7270 #else /* HOST_LONG_BITS == 64 */
7271 #if defined(__NR_futex_time64)
7272 if (sizeof(timeout->tv_sec) == 8) {
7273 /* _time64 function on 32bit arch */
7274 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7278 #if defined(__NR_futex)
7279 /* old function on 32bit arch */
7280 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7282 #endif /* HOST_LONG_BITS == 64 */
7283 return -TARGET_ENOSYS;
7286 /* ??? Using host futex calls even when target atomic operations
7287 are not really atomic probably breaks things. However implementing
7288 futexes locally would make futexes shared between multiple processes
7289 tricky. However they're probably useless because guest atomic
7290 operations won't work either. */
7291 #if defined(TARGET_NR_futex)
7292 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7293 target_ulong uaddr2, int val3)
7295 struct timespec ts, *pts;
7298 /* ??? We assume FUTEX_* constants are the same on both host
7300 #ifdef FUTEX_CMD_MASK
7301 base_op = op & FUTEX_CMD_MASK;
7307 case FUTEX_WAIT_BITSET:
7310 target_to_host_timespec(pts, timeout);
7314 return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7316 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7318 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7320 case FUTEX_CMP_REQUEUE:
7322 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7323 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7324 But the prototype takes a `struct timespec *'; insert casts
7325 to satisfy the compiler. We do not need to tswap TIMEOUT
7326 since it's not compared to guest memory. */
7327 pts = (struct timespec *)(uintptr_t) timeout;
7328 return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7329 (base_op == FUTEX_CMP_REQUEUE
7333 return -TARGET_ENOSYS;
7338 #if defined(TARGET_NR_futex_time64)
7339 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7340 target_ulong uaddr2, int val3)
7342 struct timespec ts, *pts;
7345 /* ??? We assume FUTEX_* constants are the same on both host
7347 #ifdef FUTEX_CMD_MASK
7348 base_op = op & FUTEX_CMD_MASK;
7354 case FUTEX_WAIT_BITSET:
7357 target_to_host_timespec64(pts, timeout);
7361 return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7363 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7365 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7367 case FUTEX_CMP_REQUEUE:
7369 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7370 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7371 But the prototype takes a `struct timespec *'; insert casts
7372 to satisfy the compiler. We do not need to tswap TIMEOUT
7373 since it's not compared to guest memory. */
7374 pts = (struct timespec *)(uintptr_t) timeout;
7375 return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7376 (base_op == FUTEX_CMP_REQUEUE
7380 return -TARGET_ENOSYS;
7385 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7386 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7387 abi_long handle, abi_long mount_id,
7390 struct file_handle *target_fh;
7391 struct file_handle *fh;
7395 unsigned int size, total_size;
7397 if (get_user_s32(size, handle)) {
7398 return -TARGET_EFAULT;
7401 name = lock_user_string(pathname);
7403 return -TARGET_EFAULT;
7406 total_size = sizeof(struct file_handle) + size;
7407 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7409 unlock_user(name, pathname, 0);
7410 return -TARGET_EFAULT;
7413 fh = g_malloc0(total_size);
7414 fh->handle_bytes = size;
7416 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7417 unlock_user(name, pathname, 0);
7419 /* man name_to_handle_at(2):
7420 * Other than the use of the handle_bytes field, the caller should treat
7421 * the file_handle structure as an opaque data type
7424 memcpy(target_fh, fh, total_size);
7425 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7426 target_fh->handle_type = tswap32(fh->handle_type);
7428 unlock_user(target_fh, handle, total_size);
7430 if (put_user_s32(mid, mount_id)) {
7431 return -TARGET_EFAULT;
7439 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7440 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7443 struct file_handle *target_fh;
7444 struct file_handle *fh;
7445 unsigned int size, total_size;
7448 if (get_user_s32(size, handle)) {
7449 return -TARGET_EFAULT;
7452 total_size = sizeof(struct file_handle) + size;
7453 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7455 return -TARGET_EFAULT;
7458 fh = g_memdup(target_fh, total_size);
7459 fh->handle_bytes = size;
7460 fh->handle_type = tswap32(target_fh->handle_type);
7462 ret = get_errno(open_by_handle_at(mount_fd, fh,
7463 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7467 unlock_user(target_fh, handle, total_size);
7473 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7475 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7478 target_sigset_t *target_mask;
7482 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7483 return -TARGET_EINVAL;
7485 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7486 return -TARGET_EFAULT;
7489 target_to_host_sigset(&host_mask, target_mask);
7491 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7493 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7495 fd_trans_register(ret, &target_signalfd_trans);
7498 unlock_user_struct(target_mask, mask, 0);
7504 /* Map host to target signal numbers for the wait family of syscalls.
7505 Assume all other status bits are the same. */
7506 int host_to_target_waitstatus(int status)
7508 if (WIFSIGNALED(status)) {
7509 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7511 if (WIFSTOPPED(status)) {
7512 return (host_to_target_signal(WSTOPSIG(status)) << 8)
7518 static int open_self_cmdline(void *cpu_env, int fd)
7520 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7521 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7524 for (i = 0; i < bprm->argc; i++) {
7525 size_t len = strlen(bprm->argv[i]) + 1;
7527 if (write(fd, bprm->argv[i], len) != len) {
7535 static int open_self_maps(void *cpu_env, int fd)
7537 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7538 TaskState *ts = cpu->opaque;
7539 GSList *map_info = read_self_maps();
7543 for (s = map_info; s; s = g_slist_next(s)) {
7544 MapInfo *e = (MapInfo *) s->data;
7546 if (h2g_valid(e->start)) {
7547 unsigned long min = e->start;
7548 unsigned long max = e->end;
7549 int flags = page_get_flags(h2g(min));
7552 max = h2g_valid(max - 1) ?
7553 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7555 if (page_check_range(h2g(min), max - min, flags) == -1) {
7559 if (h2g(min) == ts->info->stack_limit) {
7565 count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7566 " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7567 h2g(min), h2g(max - 1) + 1,
7568 e->is_read ? 'r' : '-',
7569 e->is_write ? 'w' : '-',
7570 e->is_exec ? 'x' : '-',
7571 e->is_priv ? 'p' : '-',
7572 (uint64_t) e->offset, e->dev, e->inode);
7574 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7581 free_self_maps(map_info);
7583 #ifdef TARGET_VSYSCALL_PAGE
7585 * We only support execution from the vsyscall page.
7586 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7588 count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7589 " --xp 00000000 00:00 0",
7590 TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7591 dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
7597 static int open_self_stat(void *cpu_env, int fd)
7599 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7600 TaskState *ts = cpu->opaque;
7601 g_autoptr(GString) buf = g_string_new(NULL);
7604 for (i = 0; i < 44; i++) {
7607 g_string_printf(buf, FMT_pid " ", getpid());
7608 } else if (i == 1) {
7610 gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7611 bin = bin ? bin + 1 : ts->bprm->argv[0];
7612 g_string_printf(buf, "(%.15s) ", bin);
7613 } else if (i == 27) {
7615 g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7617 /* for the rest, there is MasterCard */
7618 g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7621 if (write(fd, buf->str, buf->len) != buf->len) {
7629 static int open_self_auxv(void *cpu_env, int fd)
7631 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7632 TaskState *ts = cpu->opaque;
7633 abi_ulong auxv = ts->info->saved_auxv;
7634 abi_ulong len = ts->info->auxv_len;
7638 * Auxiliary vector is stored in target process stack.
7639 * read in whole auxv vector and copy it to file
7641 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7645 r = write(fd, ptr, len);
7652 lseek(fd, 0, SEEK_SET);
7653 unlock_user(ptr, auxv, len);
7659 static int is_proc_myself(const char *filename, const char *entry)
7661 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7662 filename += strlen("/proc/");
7663 if (!strncmp(filename, "self/", strlen("self/"))) {
7664 filename += strlen("self/");
7665 } else if (*filename >= '1' && *filename <= '9') {
7667 snprintf(myself, sizeof(myself), "%d/", getpid());
7668 if (!strncmp(filename, myself, strlen(myself))) {
7669 filename += strlen(myself);
7676 if (!strcmp(filename, entry)) {
7683 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7684 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7685 static int is_proc(const char *filename, const char *entry)
7687 return strcmp(filename, entry) == 0;
7691 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7692 static int open_net_route(void *cpu_env, int fd)
7699 fp = fopen("/proc/net/route", "r");
7706 read = getline(&line, &len, fp);
7707 dprintf(fd, "%s", line);
7711 while ((read = getline(&line, &len, fp)) != -1) {
7713 uint32_t dest, gw, mask;
7714 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7717 fields = sscanf(line,
7718 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7719 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7720 &mask, &mtu, &window, &irtt);
7724 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7725 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7726 metric, tswap32(mask), mtu, window, irtt);
7736 #if defined(TARGET_SPARC)
7737 static int open_cpuinfo(void *cpu_env, int fd)
7739 dprintf(fd, "type\t\t: sun4u\n");
7744 #if defined(TARGET_HPPA)
7745 static int open_cpuinfo(void *cpu_env, int fd)
7747 dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7748 dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7749 dprintf(fd, "capabilities\t: os32\n");
7750 dprintf(fd, "model\t\t: 9000/778/B160L\n");
7751 dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7756 #if defined(TARGET_M68K)
7757 static int open_hardware(void *cpu_env, int fd)
7759 dprintf(fd, "Model:\t\tqemu-m68k\n");
7764 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7767 const char *filename;
7768 int (*fill)(void *cpu_env, int fd);
7769 int (*cmp)(const char *s1, const char *s2);
7771 const struct fake_open *fake_open;
7772 static const struct fake_open fakes[] = {
7773 { "maps", open_self_maps, is_proc_myself },
7774 { "stat", open_self_stat, is_proc_myself },
7775 { "auxv", open_self_auxv, is_proc_myself },
7776 { "cmdline", open_self_cmdline, is_proc_myself },
7777 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7778 { "/proc/net/route", open_net_route, is_proc },
7780 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7781 { "/proc/cpuinfo", open_cpuinfo, is_proc },
7783 #if defined(TARGET_M68K)
7784 { "/proc/hardware", open_hardware, is_proc },
7786 { NULL, NULL, NULL }
7789 if (is_proc_myself(pathname, "exe")) {
7790 int execfd = qemu_getauxval(AT_EXECFD);
7791 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7794 for (fake_open = fakes; fake_open->filename; fake_open++) {
7795 if (fake_open->cmp(pathname, fake_open->filename)) {
7800 if (fake_open->filename) {
7802 char filename[PATH_MAX];
7805 /* create temporary file to map stat to */
7806 tmpdir = getenv("TMPDIR");
7809 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7810 fd = mkstemp(filename);
7816 if ((r = fake_open->fill(cpu_env, fd))) {
7822 lseek(fd, 0, SEEK_SET);
7827 return safe_openat(dirfd, path(pathname), flags, mode);
7830 #define TIMER_MAGIC 0x0caf0000
7831 #define TIMER_MAGIC_MASK 0xffff0000
7833 /* Convert QEMU provided timer ID back to internal 16bit index format */
7834 static target_timer_t get_timer_id(abi_long arg)
7836 target_timer_t timerid = arg;
7838 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7839 return -TARGET_EINVAL;
7844 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7845 return -TARGET_EINVAL;
7851 static int target_to_host_cpu_mask(unsigned long *host_mask,
7853 abi_ulong target_addr,
7856 unsigned target_bits = sizeof(abi_ulong) * 8;
7857 unsigned host_bits = sizeof(*host_mask) * 8;
7858 abi_ulong *target_mask;
7861 assert(host_size >= target_size);
7863 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7865 return -TARGET_EFAULT;
7867 memset(host_mask, 0, host_size);
7869 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7870 unsigned bit = i * target_bits;
7873 __get_user(val, &target_mask[i]);
7874 for (j = 0; j < target_bits; j++, bit++) {
7875 if (val & (1UL << j)) {
7876 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7881 unlock_user(target_mask, target_addr, 0);
7885 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7887 abi_ulong target_addr,
7890 unsigned target_bits = sizeof(abi_ulong) * 8;
7891 unsigned host_bits = sizeof(*host_mask) * 8;
7892 abi_ulong *target_mask;
7895 assert(host_size >= target_size);
7897 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7899 return -TARGET_EFAULT;
7902 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7903 unsigned bit = i * target_bits;
7906 for (j = 0; j < target_bits; j++, bit++) {
7907 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7911 __put_user(val, &target_mask[i]);
7914 unlock_user(target_mask, target_addr, target_size);
7918 /* This is an internal helper for do_syscall so that it is easier
7919 * to have a single return point, so that actions, such as logging
7920 * of syscall results, can be performed.
7921 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7923 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7924 abi_long arg2, abi_long arg3, abi_long arg4,
7925 abi_long arg5, abi_long arg6, abi_long arg7,
7928 CPUState *cpu = env_cpu(cpu_env);
7930 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7931 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7932 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7933 || defined(TARGET_NR_statx)
7936 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7937 || defined(TARGET_NR_fstatfs)
7943 case TARGET_NR_exit:
7944 /* In old applications this may be used to implement _exit(2).
7945 However in threaded applictions it is used for thread termination,
7946 and _exit_group is used for application termination.
7947 Do thread termination if we have more then one thread. */
7949 if (block_signals()) {
7950 return -TARGET_ERESTARTSYS;
7953 pthread_mutex_lock(&clone_lock);
7955 if (CPU_NEXT(first_cpu)) {
7956 TaskState *ts = cpu->opaque;
7958 object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
7959 object_unref(OBJECT(cpu));
7961 * At this point the CPU should be unrealized and removed
7962 * from cpu lists. We can clean-up the rest of the thread
7963 * data without the lock held.
7966 pthread_mutex_unlock(&clone_lock);
7968 if (ts->child_tidptr) {
7969 put_user_u32(0, ts->child_tidptr);
7970 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7975 rcu_unregister_thread();
7979 pthread_mutex_unlock(&clone_lock);
7980 preexit_cleanup(cpu_env, arg1);
7982 return 0; /* avoid warning */
7983 case TARGET_NR_read:
7984 if (arg2 == 0 && arg3 == 0) {
7985 return get_errno(safe_read(arg1, 0, 0));
7987 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7988 return -TARGET_EFAULT;
7989 ret = get_errno(safe_read(arg1, p, arg3));
7991 fd_trans_host_to_target_data(arg1)) {
7992 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7994 unlock_user(p, arg2, ret);
7997 case TARGET_NR_write:
7998 if (arg2 == 0 && arg3 == 0) {
7999 return get_errno(safe_write(arg1, 0, 0));
8001 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8002 return -TARGET_EFAULT;
8003 if (fd_trans_target_to_host_data(arg1)) {
8004 void *copy = g_malloc(arg3);
8005 memcpy(copy, p, arg3);
8006 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8008 ret = get_errno(safe_write(arg1, copy, ret));
8012 ret = get_errno(safe_write(arg1, p, arg3));
8014 unlock_user(p, arg2, 0);
8017 #ifdef TARGET_NR_open
8018 case TARGET_NR_open:
8019 if (!(p = lock_user_string(arg1)))
8020 return -TARGET_EFAULT;
8021 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8022 target_to_host_bitmask(arg2, fcntl_flags_tbl),
8024 fd_trans_unregister(ret);
8025 unlock_user(p, arg1, 0);
8028 case TARGET_NR_openat:
8029 if (!(p = lock_user_string(arg2)))
8030 return -TARGET_EFAULT;
8031 ret = get_errno(do_openat(cpu_env, arg1, p,
8032 target_to_host_bitmask(arg3, fcntl_flags_tbl),
8034 fd_trans_unregister(ret);
8035 unlock_user(p, arg2, 0);
8037 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8038 case TARGET_NR_name_to_handle_at:
8039 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8042 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8043 case TARGET_NR_open_by_handle_at:
8044 ret = do_open_by_handle_at(arg1, arg2, arg3);
8045 fd_trans_unregister(ret);
8048 case TARGET_NR_close:
8049 fd_trans_unregister(arg1);
8050 return get_errno(close(arg1));
8053 return do_brk(arg1);
8054 #ifdef TARGET_NR_fork
8055 case TARGET_NR_fork:
8056 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8058 #ifdef TARGET_NR_waitpid
8059 case TARGET_NR_waitpid:
8062 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8063 if (!is_error(ret) && arg2 && ret
8064 && put_user_s32(host_to_target_waitstatus(status), arg2))
8065 return -TARGET_EFAULT;
8069 #ifdef TARGET_NR_waitid
8070 case TARGET_NR_waitid:
8074 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8075 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8076 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8077 return -TARGET_EFAULT;
8078 host_to_target_siginfo(p, &info);
8079 unlock_user(p, arg3, sizeof(target_siginfo_t));
8084 #ifdef TARGET_NR_creat /* not on alpha */
8085 case TARGET_NR_creat:
8086 if (!(p = lock_user_string(arg1)))
8087 return -TARGET_EFAULT;
8088 ret = get_errno(creat(p, arg2));
8089 fd_trans_unregister(ret);
8090 unlock_user(p, arg1, 0);
8093 #ifdef TARGET_NR_link
8094 case TARGET_NR_link:
8097 p = lock_user_string(arg1);
8098 p2 = lock_user_string(arg2);
8100 ret = -TARGET_EFAULT;
8102 ret = get_errno(link(p, p2));
8103 unlock_user(p2, arg2, 0);
8104 unlock_user(p, arg1, 0);
8108 #if defined(TARGET_NR_linkat)
8109 case TARGET_NR_linkat:
8113 return -TARGET_EFAULT;
8114 p = lock_user_string(arg2);
8115 p2 = lock_user_string(arg4);
8117 ret = -TARGET_EFAULT;
8119 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8120 unlock_user(p, arg2, 0);
8121 unlock_user(p2, arg4, 0);
8125 #ifdef TARGET_NR_unlink
8126 case TARGET_NR_unlink:
8127 if (!(p = lock_user_string(arg1)))
8128 return -TARGET_EFAULT;
8129 ret = get_errno(unlink(p));
8130 unlock_user(p, arg1, 0);
8133 #if defined(TARGET_NR_unlinkat)
8134 case TARGET_NR_unlinkat:
8135 if (!(p = lock_user_string(arg2)))
8136 return -TARGET_EFAULT;
8137 ret = get_errno(unlinkat(arg1, p, arg3));
8138 unlock_user(p, arg2, 0);
8141 case TARGET_NR_execve:
8143 char **argp, **envp;
8146 abi_ulong guest_argp;
8147 abi_ulong guest_envp;
8154 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8155 if (get_user_ual(addr, gp))
8156 return -TARGET_EFAULT;
8163 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8164 if (get_user_ual(addr, gp))
8165 return -TARGET_EFAULT;
8171 argp = g_new0(char *, argc + 1);
8172 envp = g_new0(char *, envc + 1);
8174 for (gp = guest_argp, q = argp; gp;
8175 gp += sizeof(abi_ulong), q++) {
8176 if (get_user_ual(addr, gp))
8180 if (!(*q = lock_user_string(addr)))
8182 total_size += strlen(*q) + 1;
8186 for (gp = guest_envp, q = envp; gp;
8187 gp += sizeof(abi_ulong), q++) {
8188 if (get_user_ual(addr, gp))
8192 if (!(*q = lock_user_string(addr)))
8194 total_size += strlen(*q) + 1;
8198 if (!(p = lock_user_string(arg1)))
8200 /* Although execve() is not an interruptible syscall it is
8201 * a special case where we must use the safe_syscall wrapper:
8202 * if we allow a signal to happen before we make the host
8203 * syscall then we will 'lose' it, because at the point of
8204 * execve the process leaves QEMU's control. So we use the
8205 * safe syscall wrapper to ensure that we either take the
8206 * signal as a guest signal, or else it does not happen
8207 * before the execve completes and makes it the other
8208 * program's problem.
8210 ret = get_errno(safe_execve(p, argp, envp));
8211 unlock_user(p, arg1, 0);
8216 ret = -TARGET_EFAULT;
8219 for (gp = guest_argp, q = argp; *q;
8220 gp += sizeof(abi_ulong), q++) {
8221 if (get_user_ual(addr, gp)
8224 unlock_user(*q, addr, 0);
8226 for (gp = guest_envp, q = envp; *q;
8227 gp += sizeof(abi_ulong), q++) {
8228 if (get_user_ual(addr, gp)
8231 unlock_user(*q, addr, 0);
8238 case TARGET_NR_chdir:
8239 if (!(p = lock_user_string(arg1)))
8240 return -TARGET_EFAULT;
8241 ret = get_errno(chdir(p));
8242 unlock_user(p, arg1, 0);
8244 #ifdef TARGET_NR_time
8245 case TARGET_NR_time:
8248 ret = get_errno(time(&host_time));
8251 && put_user_sal(host_time, arg1))
8252 return -TARGET_EFAULT;
8256 #ifdef TARGET_NR_mknod
8257 case TARGET_NR_mknod:
8258 if (!(p = lock_user_string(arg1)))
8259 return -TARGET_EFAULT;
8260 ret = get_errno(mknod(p, arg2, arg3));
8261 unlock_user(p, arg1, 0);
8264 #if defined(TARGET_NR_mknodat)
8265 case TARGET_NR_mknodat:
8266 if (!(p = lock_user_string(arg2)))
8267 return -TARGET_EFAULT;
8268 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8269 unlock_user(p, arg2, 0);
8272 #ifdef TARGET_NR_chmod
8273 case TARGET_NR_chmod:
8274 if (!(p = lock_user_string(arg1)))
8275 return -TARGET_EFAULT;
8276 ret = get_errno(chmod(p, arg2));
8277 unlock_user(p, arg1, 0);
8280 #ifdef TARGET_NR_lseek
8281 case TARGET_NR_lseek:
8282 return get_errno(lseek(arg1, arg2, arg3));
8284 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8285 /* Alpha specific */
8286 case TARGET_NR_getxpid:
8287 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8288 return get_errno(getpid());
8290 #ifdef TARGET_NR_getpid
8291 case TARGET_NR_getpid:
8292 return get_errno(getpid());
8294 case TARGET_NR_mount:
8296 /* need to look at the data field */
8300 p = lock_user_string(arg1);
8302 return -TARGET_EFAULT;
8308 p2 = lock_user_string(arg2);
8311 unlock_user(p, arg1, 0);
8313 return -TARGET_EFAULT;
8317 p3 = lock_user_string(arg3);
8320 unlock_user(p, arg1, 0);
8322 unlock_user(p2, arg2, 0);
8323 return -TARGET_EFAULT;
8329 /* FIXME - arg5 should be locked, but it isn't clear how to
8330 * do that since it's not guaranteed to be a NULL-terminated
8334 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8336 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8338 ret = get_errno(ret);
8341 unlock_user(p, arg1, 0);
8343 unlock_user(p2, arg2, 0);
8345 unlock_user(p3, arg3, 0);
8349 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8350 #if defined(TARGET_NR_umount)
8351 case TARGET_NR_umount:
8353 #if defined(TARGET_NR_oldumount)
8354 case TARGET_NR_oldumount:
8356 if (!(p = lock_user_string(arg1)))
8357 return -TARGET_EFAULT;
8358 ret = get_errno(umount(p));
8359 unlock_user(p, arg1, 0);
8362 #ifdef TARGET_NR_stime /* not on alpha */
8363 case TARGET_NR_stime:
8367 if (get_user_sal(ts.tv_sec, arg1)) {
8368 return -TARGET_EFAULT;
8370 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8373 #ifdef TARGET_NR_alarm /* not on alpha */
8374 case TARGET_NR_alarm:
8377 #ifdef TARGET_NR_pause /* not on alpha */
8378 case TARGET_NR_pause:
8379 if (!block_signals()) {
8380 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8382 return -TARGET_EINTR;
8384 #ifdef TARGET_NR_utime
8385 case TARGET_NR_utime:
8387 struct utimbuf tbuf, *host_tbuf;
8388 struct target_utimbuf *target_tbuf;
8390 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8391 return -TARGET_EFAULT;
8392 tbuf.actime = tswapal(target_tbuf->actime);
8393 tbuf.modtime = tswapal(target_tbuf->modtime);
8394 unlock_user_struct(target_tbuf, arg2, 0);
8399 if (!(p = lock_user_string(arg1)))
8400 return -TARGET_EFAULT;
8401 ret = get_errno(utime(p, host_tbuf));
8402 unlock_user(p, arg1, 0);
8406 #ifdef TARGET_NR_utimes
8407 case TARGET_NR_utimes:
8409 struct timeval *tvp, tv[2];
8411 if (copy_from_user_timeval(&tv[0], arg2)
8412 || copy_from_user_timeval(&tv[1],
8413 arg2 + sizeof(struct target_timeval)))
8414 return -TARGET_EFAULT;
8419 if (!(p = lock_user_string(arg1)))
8420 return -TARGET_EFAULT;
8421 ret = get_errno(utimes(p, tvp));
8422 unlock_user(p, arg1, 0);
8426 #if defined(TARGET_NR_futimesat)
8427 case TARGET_NR_futimesat:
8429 struct timeval *tvp, tv[2];
8431 if (copy_from_user_timeval(&tv[0], arg3)
8432 || copy_from_user_timeval(&tv[1],
8433 arg3 + sizeof(struct target_timeval)))
8434 return -TARGET_EFAULT;
8439 if (!(p = lock_user_string(arg2))) {
8440 return -TARGET_EFAULT;
8442 ret = get_errno(futimesat(arg1, path(p), tvp));
8443 unlock_user(p, arg2, 0);
8447 #ifdef TARGET_NR_access
8448 case TARGET_NR_access:
8449 if (!(p = lock_user_string(arg1))) {
8450 return -TARGET_EFAULT;
8452 ret = get_errno(access(path(p), arg2));
8453 unlock_user(p, arg1, 0);
8456 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8457 case TARGET_NR_faccessat:
8458 if (!(p = lock_user_string(arg2))) {
8459 return -TARGET_EFAULT;
8461 ret = get_errno(faccessat(arg1, p, arg3, 0));
8462 unlock_user(p, arg2, 0);
8465 #ifdef TARGET_NR_nice /* not on alpha */
8466 case TARGET_NR_nice:
8467 return get_errno(nice(arg1));
8469 case TARGET_NR_sync:
8472 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8473 case TARGET_NR_syncfs:
8474 return get_errno(syncfs(arg1));
8476 case TARGET_NR_kill:
8477 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8478 #ifdef TARGET_NR_rename
8479 case TARGET_NR_rename:
8482 p = lock_user_string(arg1);
8483 p2 = lock_user_string(arg2);
8485 ret = -TARGET_EFAULT;
8487 ret = get_errno(rename(p, p2));
8488 unlock_user(p2, arg2, 0);
8489 unlock_user(p, arg1, 0);
8493 #if defined(TARGET_NR_renameat)
8494 case TARGET_NR_renameat:
8497 p = lock_user_string(arg2);
8498 p2 = lock_user_string(arg4);
8500 ret = -TARGET_EFAULT;
8502 ret = get_errno(renameat(arg1, p, arg3, p2));
8503 unlock_user(p2, arg4, 0);
8504 unlock_user(p, arg2, 0);
8508 #if defined(TARGET_NR_renameat2)
8509 case TARGET_NR_renameat2:
8512 p = lock_user_string(arg2);
8513 p2 = lock_user_string(arg4);
8515 ret = -TARGET_EFAULT;
8517 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8519 unlock_user(p2, arg4, 0);
8520 unlock_user(p, arg2, 0);
8524 #ifdef TARGET_NR_mkdir
8525 case TARGET_NR_mkdir:
8526 if (!(p = lock_user_string(arg1)))
8527 return -TARGET_EFAULT;
8528 ret = get_errno(mkdir(p, arg2));
8529 unlock_user(p, arg1, 0);
8532 #if defined(TARGET_NR_mkdirat)
8533 case TARGET_NR_mkdirat:
8534 if (!(p = lock_user_string(arg2)))
8535 return -TARGET_EFAULT;
8536 ret = get_errno(mkdirat(arg1, p, arg3));
8537 unlock_user(p, arg2, 0);
8540 #ifdef TARGET_NR_rmdir
8541 case TARGET_NR_rmdir:
8542 if (!(p = lock_user_string(arg1)))
8543 return -TARGET_EFAULT;
8544 ret = get_errno(rmdir(p));
8545 unlock_user(p, arg1, 0);
8549 ret = get_errno(dup(arg1));
8551 fd_trans_dup(arg1, ret);
8554 #ifdef TARGET_NR_pipe
8555 case TARGET_NR_pipe:
8556 return do_pipe(cpu_env, arg1, 0, 0);
8558 #ifdef TARGET_NR_pipe2
8559 case TARGET_NR_pipe2:
8560 return do_pipe(cpu_env, arg1,
8561 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8563 case TARGET_NR_times:
8565 struct target_tms *tmsp;
8567 ret = get_errno(times(&tms));
8569 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8571 return -TARGET_EFAULT;
8572 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8573 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8574 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8575 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8578 ret = host_to_target_clock_t(ret);
8581 case TARGET_NR_acct:
8583 ret = get_errno(acct(NULL));
8585 if (!(p = lock_user_string(arg1))) {
8586 return -TARGET_EFAULT;
8588 ret = get_errno(acct(path(p)));
8589 unlock_user(p, arg1, 0);
8592 #ifdef TARGET_NR_umount2
8593 case TARGET_NR_umount2:
8594 if (!(p = lock_user_string(arg1)))
8595 return -TARGET_EFAULT;
8596 ret = get_errno(umount2(p, arg2));
8597 unlock_user(p, arg1, 0);
8600 case TARGET_NR_ioctl:
8601 return do_ioctl(arg1, arg2, arg3);
8602 #ifdef TARGET_NR_fcntl
8603 case TARGET_NR_fcntl:
8604 return do_fcntl(arg1, arg2, arg3);
8606 case TARGET_NR_setpgid:
8607 return get_errno(setpgid(arg1, arg2));
8608 case TARGET_NR_umask:
8609 return get_errno(umask(arg1));
8610 case TARGET_NR_chroot:
8611 if (!(p = lock_user_string(arg1)))
8612 return -TARGET_EFAULT;
8613 ret = get_errno(chroot(p));
8614 unlock_user(p, arg1, 0);
8616 #ifdef TARGET_NR_dup2
8617 case TARGET_NR_dup2:
8618 ret = get_errno(dup2(arg1, arg2));
8620 fd_trans_dup(arg1, arg2);
8624 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8625 case TARGET_NR_dup3:
8629 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8632 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8633 ret = get_errno(dup3(arg1, arg2, host_flags));
8635 fd_trans_dup(arg1, arg2);
8640 #ifdef TARGET_NR_getppid /* not on alpha */
8641 case TARGET_NR_getppid:
8642 return get_errno(getppid());
8644 #ifdef TARGET_NR_getpgrp
8645 case TARGET_NR_getpgrp:
8646 return get_errno(getpgrp());
8648 case TARGET_NR_setsid:
8649 return get_errno(setsid());
8650 #ifdef TARGET_NR_sigaction
8651 case TARGET_NR_sigaction:
8653 #if defined(TARGET_ALPHA)
8654 struct target_sigaction act, oact, *pact = 0;
8655 struct target_old_sigaction *old_act;
8657 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8658 return -TARGET_EFAULT;
8659 act._sa_handler = old_act->_sa_handler;
8660 target_siginitset(&act.sa_mask, old_act->sa_mask);
8661 act.sa_flags = old_act->sa_flags;
8662 act.sa_restorer = 0;
8663 unlock_user_struct(old_act, arg2, 0);
8666 ret = get_errno(do_sigaction(arg1, pact, &oact));
8667 if (!is_error(ret) && arg3) {
8668 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8669 return -TARGET_EFAULT;
8670 old_act->_sa_handler = oact._sa_handler;
8671 old_act->sa_mask = oact.sa_mask.sig[0];
8672 old_act->sa_flags = oact.sa_flags;
8673 unlock_user_struct(old_act, arg3, 1);
8675 #elif defined(TARGET_MIPS)
8676 struct target_sigaction act, oact, *pact, *old_act;
8679 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8680 return -TARGET_EFAULT;
8681 act._sa_handler = old_act->_sa_handler;
8682 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8683 act.sa_flags = old_act->sa_flags;
8684 unlock_user_struct(old_act, arg2, 0);
8690 ret = get_errno(do_sigaction(arg1, pact, &oact));
8692 if (!is_error(ret) && arg3) {
8693 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8694 return -TARGET_EFAULT;
8695 old_act->_sa_handler = oact._sa_handler;
8696 old_act->sa_flags = oact.sa_flags;
8697 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8698 old_act->sa_mask.sig[1] = 0;
8699 old_act->sa_mask.sig[2] = 0;
8700 old_act->sa_mask.sig[3] = 0;
8701 unlock_user_struct(old_act, arg3, 1);
8704 struct target_old_sigaction *old_act;
8705 struct target_sigaction act, oact, *pact;
8707 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8708 return -TARGET_EFAULT;
8709 act._sa_handler = old_act->_sa_handler;
8710 target_siginitset(&act.sa_mask, old_act->sa_mask);
8711 act.sa_flags = old_act->sa_flags;
8712 act.sa_restorer = old_act->sa_restorer;
8713 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8714 act.ka_restorer = 0;
8716 unlock_user_struct(old_act, arg2, 0);
8721 ret = get_errno(do_sigaction(arg1, pact, &oact));
8722 if (!is_error(ret) && arg3) {
8723 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8724 return -TARGET_EFAULT;
8725 old_act->_sa_handler = oact._sa_handler;
8726 old_act->sa_mask = oact.sa_mask.sig[0];
8727 old_act->sa_flags = oact.sa_flags;
8728 old_act->sa_restorer = oact.sa_restorer;
8729 unlock_user_struct(old_act, arg3, 1);
8735 case TARGET_NR_rt_sigaction:
8737 #if defined(TARGET_ALPHA)
8738 /* For Alpha and SPARC this is a 5 argument syscall, with
8739 * a 'restorer' parameter which must be copied into the
8740 * sa_restorer field of the sigaction struct.
8741 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8742 * and arg5 is the sigsetsize.
8743 * Alpha also has a separate rt_sigaction struct that it uses
8744 * here; SPARC uses the usual sigaction struct.
8746 struct target_rt_sigaction *rt_act;
8747 struct target_sigaction act, oact, *pact = 0;
8749 if (arg4 != sizeof(target_sigset_t)) {
8750 return -TARGET_EINVAL;
8753 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8754 return -TARGET_EFAULT;
8755 act._sa_handler = rt_act->_sa_handler;
8756 act.sa_mask = rt_act->sa_mask;
8757 act.sa_flags = rt_act->sa_flags;
8758 act.sa_restorer = arg5;
8759 unlock_user_struct(rt_act, arg2, 0);
8762 ret = get_errno(do_sigaction(arg1, pact, &oact));
8763 if (!is_error(ret) && arg3) {
8764 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8765 return -TARGET_EFAULT;
8766 rt_act->_sa_handler = oact._sa_handler;
8767 rt_act->sa_mask = oact.sa_mask;
8768 rt_act->sa_flags = oact.sa_flags;
8769 unlock_user_struct(rt_act, arg3, 1);
8773 target_ulong restorer = arg4;
8774 target_ulong sigsetsize = arg5;
8776 target_ulong sigsetsize = arg4;
8778 struct target_sigaction *act;
8779 struct target_sigaction *oact;
8781 if (sigsetsize != sizeof(target_sigset_t)) {
8782 return -TARGET_EINVAL;
8785 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8786 return -TARGET_EFAULT;
8788 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8789 act->ka_restorer = restorer;
8795 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8796 ret = -TARGET_EFAULT;
8797 goto rt_sigaction_fail;
8801 ret = get_errno(do_sigaction(arg1, act, oact));
8804 unlock_user_struct(act, arg2, 0);
8806 unlock_user_struct(oact, arg3, 1);
8810 #ifdef TARGET_NR_sgetmask /* not on alpha */
8811 case TARGET_NR_sgetmask:
8814 abi_ulong target_set;
8815 ret = do_sigprocmask(0, NULL, &cur_set);
8817 host_to_target_old_sigset(&target_set, &cur_set);
8823 #ifdef TARGET_NR_ssetmask /* not on alpha */
8824 case TARGET_NR_ssetmask:
8827 abi_ulong target_set = arg1;
8828 target_to_host_old_sigset(&set, &target_set);
8829 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8831 host_to_target_old_sigset(&target_set, &oset);
8837 #ifdef TARGET_NR_sigprocmask
8838 case TARGET_NR_sigprocmask:
8840 #if defined(TARGET_ALPHA)
8841 sigset_t set, oldset;
8846 case TARGET_SIG_BLOCK:
8849 case TARGET_SIG_UNBLOCK:
8852 case TARGET_SIG_SETMASK:
8856 return -TARGET_EINVAL;
8859 target_to_host_old_sigset(&set, &mask);
8861 ret = do_sigprocmask(how, &set, &oldset);
8862 if (!is_error(ret)) {
8863 host_to_target_old_sigset(&mask, &oldset);
8865 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8868 sigset_t set, oldset, *set_ptr;
8873 case TARGET_SIG_BLOCK:
8876 case TARGET_SIG_UNBLOCK:
8879 case TARGET_SIG_SETMASK:
8883 return -TARGET_EINVAL;
8885 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8886 return -TARGET_EFAULT;
8887 target_to_host_old_sigset(&set, p);
8888 unlock_user(p, arg2, 0);
8894 ret = do_sigprocmask(how, set_ptr, &oldset);
8895 if (!is_error(ret) && arg3) {
8896 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8897 return -TARGET_EFAULT;
8898 host_to_target_old_sigset(p, &oldset);
8899 unlock_user(p, arg3, sizeof(target_sigset_t));
8905 case TARGET_NR_rt_sigprocmask:
8908 sigset_t set, oldset, *set_ptr;
8910 if (arg4 != sizeof(target_sigset_t)) {
8911 return -TARGET_EINVAL;
8916 case TARGET_SIG_BLOCK:
8919 case TARGET_SIG_UNBLOCK:
8922 case TARGET_SIG_SETMASK:
8926 return -TARGET_EINVAL;
8928 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8929 return -TARGET_EFAULT;
8930 target_to_host_sigset(&set, p);
8931 unlock_user(p, arg2, 0);
8937 ret = do_sigprocmask(how, set_ptr, &oldset);
8938 if (!is_error(ret) && arg3) {
8939 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8940 return -TARGET_EFAULT;
8941 host_to_target_sigset(p, &oldset);
8942 unlock_user(p, arg3, sizeof(target_sigset_t));
8946 #ifdef TARGET_NR_sigpending
8947 case TARGET_NR_sigpending:
8950 ret = get_errno(sigpending(&set));
8951 if (!is_error(ret)) {
8952 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8953 return -TARGET_EFAULT;
8954 host_to_target_old_sigset(p, &set);
8955 unlock_user(p, arg1, sizeof(target_sigset_t));
8960 case TARGET_NR_rt_sigpending:
8964 /* Yes, this check is >, not != like most. We follow the kernel's
8965 * logic and it does it like this because it implements
8966 * NR_sigpending through the same code path, and in that case
8967 * the old_sigset_t is smaller in size.
8969 if (arg2 > sizeof(target_sigset_t)) {
8970 return -TARGET_EINVAL;
8973 ret = get_errno(sigpending(&set));
8974 if (!is_error(ret)) {
8975 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8976 return -TARGET_EFAULT;
8977 host_to_target_sigset(p, &set);
8978 unlock_user(p, arg1, sizeof(target_sigset_t));
8982 #ifdef TARGET_NR_sigsuspend
8983 case TARGET_NR_sigsuspend:
8985 TaskState *ts = cpu->opaque;
8986 #if defined(TARGET_ALPHA)
8987 abi_ulong mask = arg1;
8988 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8990 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8991 return -TARGET_EFAULT;
8992 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8993 unlock_user(p, arg1, 0);
8995 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8997 if (ret != -TARGET_ERESTARTSYS) {
8998 ts->in_sigsuspend = 1;
9003 case TARGET_NR_rt_sigsuspend:
9005 TaskState *ts = cpu->opaque;
9007 if (arg2 != sizeof(target_sigset_t)) {
9008 return -TARGET_EINVAL;
9010 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9011 return -TARGET_EFAULT;
9012 target_to_host_sigset(&ts->sigsuspend_mask, p);
9013 unlock_user(p, arg1, 0);
9014 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9016 if (ret != -TARGET_ERESTARTSYS) {
9017 ts->in_sigsuspend = 1;
9021 #ifdef TARGET_NR_rt_sigtimedwait
9022 case TARGET_NR_rt_sigtimedwait:
9025 struct timespec uts, *puts;
9028 if (arg4 != sizeof(target_sigset_t)) {
9029 return -TARGET_EINVAL;
9032 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9033 return -TARGET_EFAULT;
9034 target_to_host_sigset(&set, p);
9035 unlock_user(p, arg1, 0);
9038 if (target_to_host_timespec(puts, arg3)) {
9039 return -TARGET_EFAULT;
9044 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9046 if (!is_error(ret)) {
9048 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9051 return -TARGET_EFAULT;
9053 host_to_target_siginfo(p, &uinfo);
9054 unlock_user(p, arg2, sizeof(target_siginfo_t));
9056 ret = host_to_target_signal(ret);
9061 #ifdef TARGET_NR_rt_sigtimedwait_time64
9062 case TARGET_NR_rt_sigtimedwait_time64:
9065 struct timespec uts, *puts;
9068 if (arg4 != sizeof(target_sigset_t)) {
9069 return -TARGET_EINVAL;
9072 p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9074 return -TARGET_EFAULT;
9076 target_to_host_sigset(&set, p);
9077 unlock_user(p, arg1, 0);
9080 if (target_to_host_timespec64(puts, arg3)) {
9081 return -TARGET_EFAULT;
9086 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9088 if (!is_error(ret)) {
9090 p = lock_user(VERIFY_WRITE, arg2,
9091 sizeof(target_siginfo_t), 0);
9093 return -TARGET_EFAULT;
9095 host_to_target_siginfo(p, &uinfo);
9096 unlock_user(p, arg2, sizeof(target_siginfo_t));
9098 ret = host_to_target_signal(ret);
9103 case TARGET_NR_rt_sigqueueinfo:
9107 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9109 return -TARGET_EFAULT;
9111 target_to_host_siginfo(&uinfo, p);
9112 unlock_user(p, arg3, 0);
9113 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9116 case TARGET_NR_rt_tgsigqueueinfo:
9120 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9122 return -TARGET_EFAULT;
9124 target_to_host_siginfo(&uinfo, p);
9125 unlock_user(p, arg4, 0);
9126 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9129 #ifdef TARGET_NR_sigreturn
9130 case TARGET_NR_sigreturn:
9131 if (block_signals()) {
9132 return -TARGET_ERESTARTSYS;
9134 return do_sigreturn(cpu_env);
9136 case TARGET_NR_rt_sigreturn:
9137 if (block_signals()) {
9138 return -TARGET_ERESTARTSYS;
9140 return do_rt_sigreturn(cpu_env);
9141 case TARGET_NR_sethostname:
9142 if (!(p = lock_user_string(arg1)))
9143 return -TARGET_EFAULT;
9144 ret = get_errno(sethostname(p, arg2));
9145 unlock_user(p, arg1, 0);
9147 #ifdef TARGET_NR_setrlimit
9148 case TARGET_NR_setrlimit:
9150 int resource = target_to_host_resource(arg1);
9151 struct target_rlimit *target_rlim;
9153 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9154 return -TARGET_EFAULT;
9155 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9156 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9157 unlock_user_struct(target_rlim, arg2, 0);
9159 * If we just passed through resource limit settings for memory then
9160 * they would also apply to QEMU's own allocations, and QEMU will
9161 * crash or hang or die if its allocations fail. Ideally we would
9162 * track the guest allocations in QEMU and apply the limits ourselves.
9163 * For now, just tell the guest the call succeeded but don't actually
9166 if (resource != RLIMIT_AS &&
9167 resource != RLIMIT_DATA &&
9168 resource != RLIMIT_STACK) {
9169 return get_errno(setrlimit(resource, &rlim));
9175 #ifdef TARGET_NR_getrlimit
9176 case TARGET_NR_getrlimit:
9178 int resource = target_to_host_resource(arg1);
9179 struct target_rlimit *target_rlim;
9182 ret = get_errno(getrlimit(resource, &rlim));
9183 if (!is_error(ret)) {
9184 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9185 return -TARGET_EFAULT;
9186 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9187 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9188 unlock_user_struct(target_rlim, arg2, 1);
9193 case TARGET_NR_getrusage:
9195 struct rusage rusage;
9196 ret = get_errno(getrusage(arg1, &rusage));
9197 if (!is_error(ret)) {
9198 ret = host_to_target_rusage(arg2, &rusage);
9202 #if defined(TARGET_NR_gettimeofday)
9203 case TARGET_NR_gettimeofday:
9208 ret = get_errno(gettimeofday(&tv, &tz));
9209 if (!is_error(ret)) {
9210 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9211 return -TARGET_EFAULT;
9213 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9214 return -TARGET_EFAULT;
9220 #if defined(TARGET_NR_settimeofday)
9221 case TARGET_NR_settimeofday:
9223 struct timeval tv, *ptv = NULL;
9224 struct timezone tz, *ptz = NULL;
9227 if (copy_from_user_timeval(&tv, arg1)) {
9228 return -TARGET_EFAULT;
9234 if (copy_from_user_timezone(&tz, arg2)) {
9235 return -TARGET_EFAULT;
9240 return get_errno(settimeofday(ptv, ptz));
9243 #if defined(TARGET_NR_select)
9244 case TARGET_NR_select:
9245 #if defined(TARGET_WANT_NI_OLD_SELECT)
9246 /* some architectures used to have old_select here
9247 * but now ENOSYS it.
9249 ret = -TARGET_ENOSYS;
9250 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9251 ret = do_old_select(arg1);
9253 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9257 #ifdef TARGET_NR_pselect6
9258 case TARGET_NR_pselect6:
9260 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9261 fd_set rfds, wfds, efds;
9262 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9263 struct timespec ts, *ts_ptr;
9266 * The 6th arg is actually two args smashed together,
9267 * so we cannot use the C library.
9275 abi_ulong arg_sigset, arg_sigsize, *arg7;
9276 target_sigset_t *target_sigset;
9284 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9288 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9292 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9298 * This takes a timespec, and not a timeval, so we cannot
9299 * use the do_select() helper ...
9302 if (target_to_host_timespec(&ts, ts_addr)) {
9303 return -TARGET_EFAULT;
9310 /* Extract the two packed args for the sigset */
9313 sig.size = SIGSET_T_SIZE;
9315 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9317 return -TARGET_EFAULT;
9319 arg_sigset = tswapal(arg7[0]);
9320 arg_sigsize = tswapal(arg7[1]);
9321 unlock_user(arg7, arg6, 0);
9325 if (arg_sigsize != sizeof(*target_sigset)) {
9326 /* Like the kernel, we enforce correct size sigsets */
9327 return -TARGET_EINVAL;
9329 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9330 sizeof(*target_sigset), 1);
9331 if (!target_sigset) {
9332 return -TARGET_EFAULT;
9334 target_to_host_sigset(&set, target_sigset);
9335 unlock_user(target_sigset, arg_sigset, 0);
9343 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9346 if (!is_error(ret)) {
9347 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9348 return -TARGET_EFAULT;
9349 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9350 return -TARGET_EFAULT;
9351 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9352 return -TARGET_EFAULT;
9354 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9355 return -TARGET_EFAULT;
9360 #ifdef TARGET_NR_symlink
9361 case TARGET_NR_symlink:
9364 p = lock_user_string(arg1);
9365 p2 = lock_user_string(arg2);
9367 ret = -TARGET_EFAULT;
9369 ret = get_errno(symlink(p, p2));
9370 unlock_user(p2, arg2, 0);
9371 unlock_user(p, arg1, 0);
9375 #if defined(TARGET_NR_symlinkat)
9376 case TARGET_NR_symlinkat:
9379 p = lock_user_string(arg1);
9380 p2 = lock_user_string(arg3);
9382 ret = -TARGET_EFAULT;
9384 ret = get_errno(symlinkat(p, arg2, p2));
9385 unlock_user(p2, arg3, 0);
9386 unlock_user(p, arg1, 0);
9390 #ifdef TARGET_NR_readlink
9391 case TARGET_NR_readlink:
9394 p = lock_user_string(arg1);
9395 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9397 ret = -TARGET_EFAULT;
9399 /* Short circuit this for the magic exe check. */
9400 ret = -TARGET_EINVAL;
9401 } else if (is_proc_myself((const char *)p, "exe")) {
9402 char real[PATH_MAX], *temp;
9403 temp = realpath(exec_path, real);
9404 /* Return value is # of bytes that we wrote to the buffer. */
9406 ret = get_errno(-1);
9408 /* Don't worry about sign mismatch as earlier mapping
9409 * logic would have thrown a bad address error. */
9410 ret = MIN(strlen(real), arg3);
9411 /* We cannot NUL terminate the string. */
9412 memcpy(p2, real, ret);
9415 ret = get_errno(readlink(path(p), p2, arg3));
9417 unlock_user(p2, arg2, ret);
9418 unlock_user(p, arg1, 0);
9422 #if defined(TARGET_NR_readlinkat)
9423 case TARGET_NR_readlinkat:
9426 p = lock_user_string(arg2);
9427 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9429 ret = -TARGET_EFAULT;
9430 } else if (is_proc_myself((const char *)p, "exe")) {
9431 char real[PATH_MAX], *temp;
9432 temp = realpath(exec_path, real);
9433 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9434 snprintf((char *)p2, arg4, "%s", real);
9436 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9438 unlock_user(p2, arg3, ret);
9439 unlock_user(p, arg2, 0);
9443 #ifdef TARGET_NR_swapon
9444 case TARGET_NR_swapon:
9445 if (!(p = lock_user_string(arg1)))
9446 return -TARGET_EFAULT;
9447 ret = get_errno(swapon(p, arg2));
9448 unlock_user(p, arg1, 0);
9451 case TARGET_NR_reboot:
9452 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9453 /* arg4 must be ignored in all other cases */
9454 p = lock_user_string(arg4);
9456 return -TARGET_EFAULT;
9458 ret = get_errno(reboot(arg1, arg2, arg3, p));
9459 unlock_user(p, arg4, 0);
9461 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9464 #ifdef TARGET_NR_mmap
9465 case TARGET_NR_mmap:
9466 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9467 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9468 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9469 || defined(TARGET_S390X)
9472 abi_ulong v1, v2, v3, v4, v5, v6;
9473 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9474 return -TARGET_EFAULT;
9481 unlock_user(v, arg1, 0);
9482 ret = get_errno(target_mmap(v1, v2, v3,
9483 target_to_host_bitmask(v4, mmap_flags_tbl),
9487 ret = get_errno(target_mmap(arg1, arg2, arg3,
9488 target_to_host_bitmask(arg4, mmap_flags_tbl),
9494 #ifdef TARGET_NR_mmap2
9495 case TARGET_NR_mmap2:
9497 #define MMAP_SHIFT 12
9499 ret = target_mmap(arg1, arg2, arg3,
9500 target_to_host_bitmask(arg4, mmap_flags_tbl),
9501 arg5, arg6 << MMAP_SHIFT);
9502 return get_errno(ret);
9504 case TARGET_NR_munmap:
9505 return get_errno(target_munmap(arg1, arg2));
9506 case TARGET_NR_mprotect:
9508 TaskState *ts = cpu->opaque;
9509 /* Special hack to detect libc making the stack executable. */
9510 if ((arg3 & PROT_GROWSDOWN)
9511 && arg1 >= ts->info->stack_limit
9512 && arg1 <= ts->info->start_stack) {
9513 arg3 &= ~PROT_GROWSDOWN;
9514 arg2 = arg2 + arg1 - ts->info->stack_limit;
9515 arg1 = ts->info->stack_limit;
9518 return get_errno(target_mprotect(arg1, arg2, arg3));
9519 #ifdef TARGET_NR_mremap
9520 case TARGET_NR_mremap:
9521 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9523 /* ??? msync/mlock/munlock are broken for softmmu. */
9524 #ifdef TARGET_NR_msync
9525 case TARGET_NR_msync:
9526 return get_errno(msync(g2h(arg1), arg2, arg3));
9528 #ifdef TARGET_NR_mlock
9529 case TARGET_NR_mlock:
9530 return get_errno(mlock(g2h(arg1), arg2));
9532 #ifdef TARGET_NR_munlock
9533 case TARGET_NR_munlock:
9534 return get_errno(munlock(g2h(arg1), arg2));
9536 #ifdef TARGET_NR_mlockall
9537 case TARGET_NR_mlockall:
9538 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9540 #ifdef TARGET_NR_munlockall
9541 case TARGET_NR_munlockall:
9542 return get_errno(munlockall());
9544 #ifdef TARGET_NR_truncate
9545 case TARGET_NR_truncate:
9546 if (!(p = lock_user_string(arg1)))
9547 return -TARGET_EFAULT;
9548 ret = get_errno(truncate(p, arg2));
9549 unlock_user(p, arg1, 0);
9552 #ifdef TARGET_NR_ftruncate
9553 case TARGET_NR_ftruncate:
9554 return get_errno(ftruncate(arg1, arg2));
9556 case TARGET_NR_fchmod:
9557 return get_errno(fchmod(arg1, arg2));
9558 #if defined(TARGET_NR_fchmodat)
9559 case TARGET_NR_fchmodat:
9560 if (!(p = lock_user_string(arg2)))
9561 return -TARGET_EFAULT;
9562 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9563 unlock_user(p, arg2, 0);
9566 case TARGET_NR_getpriority:
9567 /* Note that negative values are valid for getpriority, so we must
9568 differentiate based on errno settings. */
9570 ret = getpriority(arg1, arg2);
9571 if (ret == -1 && errno != 0) {
9572 return -host_to_target_errno(errno);
9575 /* Return value is the unbiased priority. Signal no error. */
9576 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9578 /* Return value is a biased priority to avoid negative numbers. */
9582 case TARGET_NR_setpriority:
9583 return get_errno(setpriority(arg1, arg2, arg3));
9584 #ifdef TARGET_NR_statfs
9585 case TARGET_NR_statfs:
9586 if (!(p = lock_user_string(arg1))) {
9587 return -TARGET_EFAULT;
9589 ret = get_errno(statfs(path(p), &stfs));
9590 unlock_user(p, arg1, 0);
9592 if (!is_error(ret)) {
9593 struct target_statfs *target_stfs;
9595 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9596 return -TARGET_EFAULT;
9597 __put_user(stfs.f_type, &target_stfs->f_type);
9598 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9599 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9600 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9601 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9602 __put_user(stfs.f_files, &target_stfs->f_files);
9603 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9604 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9605 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9606 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9607 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9608 #ifdef _STATFS_F_FLAGS
9609 __put_user(stfs.f_flags, &target_stfs->f_flags);
9611 __put_user(0, &target_stfs->f_flags);
9613 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9614 unlock_user_struct(target_stfs, arg2, 1);
9618 #ifdef TARGET_NR_fstatfs
9619 case TARGET_NR_fstatfs:
9620 ret = get_errno(fstatfs(arg1, &stfs));
9621 goto convert_statfs;
9623 #ifdef TARGET_NR_statfs64
9624 case TARGET_NR_statfs64:
9625 if (!(p = lock_user_string(arg1))) {
9626 return -TARGET_EFAULT;
9628 ret = get_errno(statfs(path(p), &stfs));
9629 unlock_user(p, arg1, 0);
9631 if (!is_error(ret)) {
9632 struct target_statfs64 *target_stfs;
9634 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9635 return -TARGET_EFAULT;
9636 __put_user(stfs.f_type, &target_stfs->f_type);
9637 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9638 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9639 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9640 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9641 __put_user(stfs.f_files, &target_stfs->f_files);
9642 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9643 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9644 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9645 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9646 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9647 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9648 unlock_user_struct(target_stfs, arg3, 1);
9651 case TARGET_NR_fstatfs64:
9652 ret = get_errno(fstatfs(arg1, &stfs));
9653 goto convert_statfs64;
9655 #ifdef TARGET_NR_socketcall
9656 case TARGET_NR_socketcall:
9657 return do_socketcall(arg1, arg2);
9659 #ifdef TARGET_NR_accept
9660 case TARGET_NR_accept:
9661 return do_accept4(arg1, arg2, arg3, 0);
9663 #ifdef TARGET_NR_accept4
9664 case TARGET_NR_accept4:
9665 return do_accept4(arg1, arg2, arg3, arg4);
9667 #ifdef TARGET_NR_bind
9668 case TARGET_NR_bind:
9669 return do_bind(arg1, arg2, arg3);
9671 #ifdef TARGET_NR_connect
9672 case TARGET_NR_connect:
9673 return do_connect(arg1, arg2, arg3);
9675 #ifdef TARGET_NR_getpeername
9676 case TARGET_NR_getpeername:
9677 return do_getpeername(arg1, arg2, arg3);
9679 #ifdef TARGET_NR_getsockname
9680 case TARGET_NR_getsockname:
9681 return do_getsockname(arg1, arg2, arg3);
9683 #ifdef TARGET_NR_getsockopt
9684 case TARGET_NR_getsockopt:
9685 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9687 #ifdef TARGET_NR_listen
9688 case TARGET_NR_listen:
9689 return get_errno(listen(arg1, arg2));
9691 #ifdef TARGET_NR_recv
9692 case TARGET_NR_recv:
9693 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9695 #ifdef TARGET_NR_recvfrom
9696 case TARGET_NR_recvfrom:
9697 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9699 #ifdef TARGET_NR_recvmsg
9700 case TARGET_NR_recvmsg:
9701 return do_sendrecvmsg(arg1, arg2, arg3, 0);
9703 #ifdef TARGET_NR_send
9704 case TARGET_NR_send:
9705 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9707 #ifdef TARGET_NR_sendmsg
9708 case TARGET_NR_sendmsg:
9709 return do_sendrecvmsg(arg1, arg2, arg3, 1);
9711 #ifdef TARGET_NR_sendmmsg
9712 case TARGET_NR_sendmmsg:
9713 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9715 #ifdef TARGET_NR_recvmmsg
9716 case TARGET_NR_recvmmsg:
9717 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9719 #ifdef TARGET_NR_sendto
9720 case TARGET_NR_sendto:
9721 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9723 #ifdef TARGET_NR_shutdown
9724 case TARGET_NR_shutdown:
9725 return get_errno(shutdown(arg1, arg2));
9727 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9728 case TARGET_NR_getrandom:
9729 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9731 return -TARGET_EFAULT;
9733 ret = get_errno(getrandom(p, arg2, arg3));
9734 unlock_user(p, arg1, ret);
9737 #ifdef TARGET_NR_socket
9738 case TARGET_NR_socket:
9739 return do_socket(arg1, arg2, arg3);
9741 #ifdef TARGET_NR_socketpair
9742 case TARGET_NR_socketpair:
9743 return do_socketpair(arg1, arg2, arg3, arg4);
9745 #ifdef TARGET_NR_setsockopt
9746 case TARGET_NR_setsockopt:
9747 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9749 #if defined(TARGET_NR_syslog)
9750 case TARGET_NR_syslog:
9755 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9756 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9757 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9758 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9759 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9760 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9761 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9762 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9763 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9764 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9765 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9766 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9769 return -TARGET_EINVAL;
9774 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9776 return -TARGET_EFAULT;
9778 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9779 unlock_user(p, arg2, arg3);
9783 return -TARGET_EINVAL;
9788 case TARGET_NR_setitimer:
9790 struct itimerval value, ovalue, *pvalue;
9794 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9795 || copy_from_user_timeval(&pvalue->it_value,
9796 arg2 + sizeof(struct target_timeval)))
9797 return -TARGET_EFAULT;
9801 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9802 if (!is_error(ret) && arg3) {
9803 if (copy_to_user_timeval(arg3,
9804 &ovalue.it_interval)
9805 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9807 return -TARGET_EFAULT;
9811 case TARGET_NR_getitimer:
9813 struct itimerval value;
9815 ret = get_errno(getitimer(arg1, &value));
9816 if (!is_error(ret) && arg2) {
9817 if (copy_to_user_timeval(arg2,
9819 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9821 return -TARGET_EFAULT;
9825 #ifdef TARGET_NR_stat
9826 case TARGET_NR_stat:
9827 if (!(p = lock_user_string(arg1))) {
9828 return -TARGET_EFAULT;
9830 ret = get_errno(stat(path(p), &st));
9831 unlock_user(p, arg1, 0);
9834 #ifdef TARGET_NR_lstat
9835 case TARGET_NR_lstat:
9836 if (!(p = lock_user_string(arg1))) {
9837 return -TARGET_EFAULT;
9839 ret = get_errno(lstat(path(p), &st));
9840 unlock_user(p, arg1, 0);
9843 #ifdef TARGET_NR_fstat
9844 case TARGET_NR_fstat:
9846 ret = get_errno(fstat(arg1, &st));
9847 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9850 if (!is_error(ret)) {
9851 struct target_stat *target_st;
9853 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9854 return -TARGET_EFAULT;
9855 memset(target_st, 0, sizeof(*target_st));
9856 __put_user(st.st_dev, &target_st->st_dev);
9857 __put_user(st.st_ino, &target_st->st_ino);
9858 __put_user(st.st_mode, &target_st->st_mode);
9859 __put_user(st.st_uid, &target_st->st_uid);
9860 __put_user(st.st_gid, &target_st->st_gid);
9861 __put_user(st.st_nlink, &target_st->st_nlink);
9862 __put_user(st.st_rdev, &target_st->st_rdev);
9863 __put_user(st.st_size, &target_st->st_size);
9864 __put_user(st.st_blksize, &target_st->st_blksize);
9865 __put_user(st.st_blocks, &target_st->st_blocks);
9866 __put_user(st.st_atime, &target_st->target_st_atime);
9867 __put_user(st.st_mtime, &target_st->target_st_mtime);
9868 __put_user(st.st_ctime, &target_st->target_st_ctime);
9869 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9870 defined(TARGET_STAT_HAVE_NSEC)
9871 __put_user(st.st_atim.tv_nsec,
9872 &target_st->target_st_atime_nsec);
9873 __put_user(st.st_mtim.tv_nsec,
9874 &target_st->target_st_mtime_nsec);
9875 __put_user(st.st_ctim.tv_nsec,
9876 &target_st->target_st_ctime_nsec);
9878 unlock_user_struct(target_st, arg2, 1);
9883 case TARGET_NR_vhangup:
9884 return get_errno(vhangup());
9885 #ifdef TARGET_NR_syscall
9886 case TARGET_NR_syscall:
9887 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9888 arg6, arg7, arg8, 0);
9890 #if defined(TARGET_NR_wait4)
9891 case TARGET_NR_wait4:
9894 abi_long status_ptr = arg2;
9895 struct rusage rusage, *rusage_ptr;
9896 abi_ulong target_rusage = arg4;
9897 abi_long rusage_err;
9899 rusage_ptr = &rusage;
9902 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9903 if (!is_error(ret)) {
9904 if (status_ptr && ret) {
9905 status = host_to_target_waitstatus(status);
9906 if (put_user_s32(status, status_ptr))
9907 return -TARGET_EFAULT;
9909 if (target_rusage) {
9910 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9919 #ifdef TARGET_NR_swapoff
9920 case TARGET_NR_swapoff:
9921 if (!(p = lock_user_string(arg1)))
9922 return -TARGET_EFAULT;
9923 ret = get_errno(swapoff(p));
9924 unlock_user(p, arg1, 0);
9927 case TARGET_NR_sysinfo:
9929 struct target_sysinfo *target_value;
9930 struct sysinfo value;
9931 ret = get_errno(sysinfo(&value));
9932 if (!is_error(ret) && arg1)
9934 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9935 return -TARGET_EFAULT;
9936 __put_user(value.uptime, &target_value->uptime);
9937 __put_user(value.loads[0], &target_value->loads[0]);
9938 __put_user(value.loads[1], &target_value->loads[1]);
9939 __put_user(value.loads[2], &target_value->loads[2]);
9940 __put_user(value.totalram, &target_value->totalram);
9941 __put_user(value.freeram, &target_value->freeram);
9942 __put_user(value.sharedram, &target_value->sharedram);
9943 __put_user(value.bufferram, &target_value->bufferram);
9944 __put_user(value.totalswap, &target_value->totalswap);
9945 __put_user(value.freeswap, &target_value->freeswap);
9946 __put_user(value.procs, &target_value->procs);
9947 __put_user(value.totalhigh, &target_value->totalhigh);
9948 __put_user(value.freehigh, &target_value->freehigh);
9949 __put_user(value.mem_unit, &target_value->mem_unit);
9950 unlock_user_struct(target_value, arg1, 1);
9954 #ifdef TARGET_NR_ipc
9956 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9958 #ifdef TARGET_NR_semget
9959 case TARGET_NR_semget:
9960 return get_errno(semget(arg1, arg2, arg3));
9962 #ifdef TARGET_NR_semop
9963 case TARGET_NR_semop:
9964 return do_semtimedop(arg1, arg2, arg3, 0, false);
9966 #ifdef TARGET_NR_semtimedop
9967 case TARGET_NR_semtimedop:
9968 return do_semtimedop(arg1, arg2, arg3, arg4, false);
9970 #ifdef TARGET_NR_semtimedop_time64
9971 case TARGET_NR_semtimedop_time64:
9972 return do_semtimedop(arg1, arg2, arg3, arg4, true);
9974 #ifdef TARGET_NR_semctl
9975 case TARGET_NR_semctl:
9976 return do_semctl(arg1, arg2, arg3, arg4);
9978 #ifdef TARGET_NR_msgctl
9979 case TARGET_NR_msgctl:
9980 return do_msgctl(arg1, arg2, arg3);
9982 #ifdef TARGET_NR_msgget
9983 case TARGET_NR_msgget:
9984 return get_errno(msgget(arg1, arg2));
9986 #ifdef TARGET_NR_msgrcv
9987 case TARGET_NR_msgrcv:
9988 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9990 #ifdef TARGET_NR_msgsnd
9991 case TARGET_NR_msgsnd:
9992 return do_msgsnd(arg1, arg2, arg3, arg4);
9994 #ifdef TARGET_NR_shmget
9995 case TARGET_NR_shmget:
9996 return get_errno(shmget(arg1, arg2, arg3));
9998 #ifdef TARGET_NR_shmctl
9999 case TARGET_NR_shmctl:
10000 return do_shmctl(arg1, arg2, arg3);
10002 #ifdef TARGET_NR_shmat
10003 case TARGET_NR_shmat:
10004 return do_shmat(cpu_env, arg1, arg2, arg3);
10006 #ifdef TARGET_NR_shmdt
10007 case TARGET_NR_shmdt:
10008 return do_shmdt(arg1);
10010 case TARGET_NR_fsync:
10011 return get_errno(fsync(arg1));
10012 case TARGET_NR_clone:
10013 /* Linux manages to have three different orderings for its
10014 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10015 * match the kernel's CONFIG_CLONE_* settings.
10016 * Microblaze is further special in that it uses a sixth
10017 * implicit argument to clone for the TLS pointer.
10019 #if defined(TARGET_MICROBLAZE)
10020 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10021 #elif defined(TARGET_CLONE_BACKWARDS)
10022 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10023 #elif defined(TARGET_CLONE_BACKWARDS2)
10024 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10026 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10029 #ifdef __NR_exit_group
10030 /* new thread calls */
10031 case TARGET_NR_exit_group:
10032 preexit_cleanup(cpu_env, arg1);
10033 return get_errno(exit_group(arg1));
10035 case TARGET_NR_setdomainname:
10036 if (!(p = lock_user_string(arg1)))
10037 return -TARGET_EFAULT;
10038 ret = get_errno(setdomainname(p, arg2));
10039 unlock_user(p, arg1, 0);
10041 case TARGET_NR_uname:
10042 /* no need to transcode because we use the linux syscall */
10044 struct new_utsname * buf;
10046 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10047 return -TARGET_EFAULT;
10048 ret = get_errno(sys_uname(buf));
10049 if (!is_error(ret)) {
10050 /* Overwrite the native machine name with whatever is being
10052 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10053 sizeof(buf->machine));
10054 /* Allow the user to override the reported release. */
10055 if (qemu_uname_release && *qemu_uname_release) {
10056 g_strlcpy(buf->release, qemu_uname_release,
10057 sizeof(buf->release));
10060 unlock_user_struct(buf, arg1, 1);
10064 case TARGET_NR_modify_ldt:
10065 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10066 #if !defined(TARGET_X86_64)
10067 case TARGET_NR_vm86:
10068 return do_vm86(cpu_env, arg1, arg2);
10071 #if defined(TARGET_NR_adjtimex)
10072 case TARGET_NR_adjtimex:
10074 struct timex host_buf;
10076 if (target_to_host_timex(&host_buf, arg1) != 0) {
10077 return -TARGET_EFAULT;
10079 ret = get_errno(adjtimex(&host_buf));
10080 if (!is_error(ret)) {
10081 if (host_to_target_timex(arg1, &host_buf) != 0) {
10082 return -TARGET_EFAULT;
10088 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10089 case TARGET_NR_clock_adjtime:
10091 struct timex htx, *phtx = &htx;
10093 if (target_to_host_timex(phtx, arg2) != 0) {
10094 return -TARGET_EFAULT;
10096 ret = get_errno(clock_adjtime(arg1, phtx));
10097 if (!is_error(ret) && phtx) {
10098 if (host_to_target_timex(arg2, phtx) != 0) {
10099 return -TARGET_EFAULT;
10105 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10106 case TARGET_NR_clock_adjtime64:
10110 if (target_to_host_timex64(&htx, arg2) != 0) {
10111 return -TARGET_EFAULT;
10113 ret = get_errno(clock_adjtime(arg1, &htx));
10114 if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10115 return -TARGET_EFAULT;
10120 case TARGET_NR_getpgid:
10121 return get_errno(getpgid(arg1));
10122 case TARGET_NR_fchdir:
10123 return get_errno(fchdir(arg1));
10124 case TARGET_NR_personality:
10125 return get_errno(personality(arg1));
10126 #ifdef TARGET_NR__llseek /* Not on alpha */
10127 case TARGET_NR__llseek:
10130 #if !defined(__NR_llseek)
10131 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10133 ret = get_errno(res);
10138 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10140 if ((ret == 0) && put_user_s64(res, arg4)) {
10141 return -TARGET_EFAULT;
10146 #ifdef TARGET_NR_getdents
10147 case TARGET_NR_getdents:
10148 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10149 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10151 struct target_dirent *target_dirp;
10152 struct linux_dirent *dirp;
10153 abi_long count = arg3;
10155 dirp = g_try_malloc(count);
10157 return -TARGET_ENOMEM;
10160 ret = get_errno(sys_getdents(arg1, dirp, count));
10161 if (!is_error(ret)) {
10162 struct linux_dirent *de;
10163 struct target_dirent *tde;
10165 int reclen, treclen;
10166 int count1, tnamelen;
10170 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10171 return -TARGET_EFAULT;
10174 reclen = de->d_reclen;
10175 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10176 assert(tnamelen >= 0);
10177 treclen = tnamelen + offsetof(struct target_dirent, d_name);
10178 assert(count1 + treclen <= count);
10179 tde->d_reclen = tswap16(treclen);
10180 tde->d_ino = tswapal(de->d_ino);
10181 tde->d_off = tswapal(de->d_off);
10182 memcpy(tde->d_name, de->d_name, tnamelen);
10183 de = (struct linux_dirent *)((char *)de + reclen);
10185 tde = (struct target_dirent *)((char *)tde + treclen);
10189 unlock_user(target_dirp, arg2, ret);
10195 struct linux_dirent *dirp;
10196 abi_long count = arg3;
10198 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10199 return -TARGET_EFAULT;
10200 ret = get_errno(sys_getdents(arg1, dirp, count));
10201 if (!is_error(ret)) {
10202 struct linux_dirent *de;
10207 reclen = de->d_reclen;
10210 de->d_reclen = tswap16(reclen);
10211 tswapls(&de->d_ino);
10212 tswapls(&de->d_off);
10213 de = (struct linux_dirent *)((char *)de + reclen);
10217 unlock_user(dirp, arg2, ret);
10221 /* Implement getdents in terms of getdents64 */
10223 struct linux_dirent64 *dirp;
10224 abi_long count = arg3;
10226 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10228 return -TARGET_EFAULT;
10230 ret = get_errno(sys_getdents64(arg1, dirp, count));
10231 if (!is_error(ret)) {
10232 /* Convert the dirent64 structs to target dirent. We do this
10233 * in-place, since we can guarantee that a target_dirent is no
10234 * larger than a dirent64; however this means we have to be
10235 * careful to read everything before writing in the new format.
10237 struct linux_dirent64 *de;
10238 struct target_dirent *tde;
10243 tde = (struct target_dirent *)dirp;
10245 int namelen, treclen;
10246 int reclen = de->d_reclen;
10247 uint64_t ino = de->d_ino;
10248 int64_t off = de->d_off;
10249 uint8_t type = de->d_type;
10251 namelen = strlen(de->d_name);
10252 treclen = offsetof(struct target_dirent, d_name)
10254 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10256 memmove(tde->d_name, de->d_name, namelen + 1);
10257 tde->d_ino = tswapal(ino);
10258 tde->d_off = tswapal(off);
10259 tde->d_reclen = tswap16(treclen);
10260 /* The target_dirent type is in what was formerly a padding
10261 * byte at the end of the structure:
10263 *(((char *)tde) + treclen - 1) = type;
10265 de = (struct linux_dirent64 *)((char *)de + reclen);
10266 tde = (struct target_dirent *)((char *)tde + treclen);
10272 unlock_user(dirp, arg2, ret);
10276 #endif /* TARGET_NR_getdents */
10277 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10278 case TARGET_NR_getdents64:
10280 struct linux_dirent64 *dirp;
10281 abi_long count = arg3;
10282 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10283 return -TARGET_EFAULT;
10284 ret = get_errno(sys_getdents64(arg1, dirp, count));
10285 if (!is_error(ret)) {
10286 struct linux_dirent64 *de;
10291 reclen = de->d_reclen;
10294 de->d_reclen = tswap16(reclen);
10295 tswap64s((uint64_t *)&de->d_ino);
10296 tswap64s((uint64_t *)&de->d_off);
10297 de = (struct linux_dirent64 *)((char *)de + reclen);
10301 unlock_user(dirp, arg2, ret);
10304 #endif /* TARGET_NR_getdents64 */
10305 #if defined(TARGET_NR__newselect)
10306 case TARGET_NR__newselect:
10307 return do_select(arg1, arg2, arg3, arg4, arg5);
10309 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10310 # ifdef TARGET_NR_poll
10311 case TARGET_NR_poll:
10313 # ifdef TARGET_NR_ppoll
10314 case TARGET_NR_ppoll:
10317 struct target_pollfd *target_pfd;
10318 unsigned int nfds = arg2;
10319 struct pollfd *pfd;
10325 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10326 return -TARGET_EINVAL;
10329 target_pfd = lock_user(VERIFY_WRITE, arg1,
10330 sizeof(struct target_pollfd) * nfds, 1);
10332 return -TARGET_EFAULT;
10335 pfd = alloca(sizeof(struct pollfd) * nfds);
10336 for (i = 0; i < nfds; i++) {
10337 pfd[i].fd = tswap32(target_pfd[i].fd);
10338 pfd[i].events = tswap16(target_pfd[i].events);
10343 # ifdef TARGET_NR_ppoll
10344 case TARGET_NR_ppoll:
10346 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10347 target_sigset_t *target_set;
10348 sigset_t _set, *set = &_set;
10351 if (target_to_host_timespec(timeout_ts, arg3)) {
10352 unlock_user(target_pfd, arg1, 0);
10353 return -TARGET_EFAULT;
10360 if (arg5 != sizeof(target_sigset_t)) {
10361 unlock_user(target_pfd, arg1, 0);
10362 return -TARGET_EINVAL;
10365 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10367 unlock_user(target_pfd, arg1, 0);
10368 return -TARGET_EFAULT;
10370 target_to_host_sigset(set, target_set);
10375 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10376 set, SIGSET_T_SIZE));
10378 if (!is_error(ret) && arg3) {
10379 host_to_target_timespec(arg3, timeout_ts);
10382 unlock_user(target_set, arg4, 0);
10387 # ifdef TARGET_NR_poll
10388 case TARGET_NR_poll:
10390 struct timespec ts, *pts;
10393 /* Convert ms to secs, ns */
10394 ts.tv_sec = arg3 / 1000;
10395 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10398 /* -ve poll() timeout means "infinite" */
10401 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10406 g_assert_not_reached();
10409 if (!is_error(ret)) {
10410 for(i = 0; i < nfds; i++) {
10411 target_pfd[i].revents = tswap16(pfd[i].revents);
10414 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10418 case TARGET_NR_flock:
10419 /* NOTE: the flock constant seems to be the same for every
10421 return get_errno(safe_flock(arg1, arg2));
10422 case TARGET_NR_readv:
10424 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10426 ret = get_errno(safe_readv(arg1, vec, arg3));
10427 unlock_iovec(vec, arg2, arg3, 1);
10429 ret = -host_to_target_errno(errno);
10433 case TARGET_NR_writev:
10435 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10437 ret = get_errno(safe_writev(arg1, vec, arg3));
10438 unlock_iovec(vec, arg2, arg3, 0);
10440 ret = -host_to_target_errno(errno);
10444 #if defined(TARGET_NR_preadv)
10445 case TARGET_NR_preadv:
10447 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10449 unsigned long low, high;
10451 target_to_host_low_high(arg4, arg5, &low, &high);
10452 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10453 unlock_iovec(vec, arg2, arg3, 1);
10455 ret = -host_to_target_errno(errno);
10460 #if defined(TARGET_NR_pwritev)
10461 case TARGET_NR_pwritev:
10463 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10465 unsigned long low, high;
10467 target_to_host_low_high(arg4, arg5, &low, &high);
10468 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10469 unlock_iovec(vec, arg2, arg3, 0);
10471 ret = -host_to_target_errno(errno);
10476 case TARGET_NR_getsid:
10477 return get_errno(getsid(arg1));
10478 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10479 case TARGET_NR_fdatasync:
10480 return get_errno(fdatasync(arg1));
10482 #ifdef TARGET_NR__sysctl
10483 case TARGET_NR__sysctl:
10484 /* We don't implement this, but ENOTDIR is always a safe
10486 return -TARGET_ENOTDIR;
10488 case TARGET_NR_sched_getaffinity:
10490 unsigned int mask_size;
10491 unsigned long *mask;
10494 * sched_getaffinity needs multiples of ulong, so need to take
10495 * care of mismatches between target ulong and host ulong sizes.
10497 if (arg2 & (sizeof(abi_ulong) - 1)) {
10498 return -TARGET_EINVAL;
10500 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10502 mask = alloca(mask_size);
10503 memset(mask, 0, mask_size);
10504 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10506 if (!is_error(ret)) {
10508 /* More data returned than the caller's buffer will fit.
10509 * This only happens if sizeof(abi_long) < sizeof(long)
10510 * and the caller passed us a buffer holding an odd number
10511 * of abi_longs. If the host kernel is actually using the
10512 * extra 4 bytes then fail EINVAL; otherwise we can just
10513 * ignore them and only copy the interesting part.
10515 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10516 if (numcpus > arg2 * 8) {
10517 return -TARGET_EINVAL;
10522 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10523 return -TARGET_EFAULT;
10528 case TARGET_NR_sched_setaffinity:
10530 unsigned int mask_size;
10531 unsigned long *mask;
10534 * sched_setaffinity needs multiples of ulong, so need to take
10535 * care of mismatches between target ulong and host ulong sizes.
10537 if (arg2 & (sizeof(abi_ulong) - 1)) {
10538 return -TARGET_EINVAL;
10540 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10541 mask = alloca(mask_size);
10543 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10548 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10550 case TARGET_NR_getcpu:
10552 unsigned cpu, node;
10553 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10554 arg2 ? &node : NULL,
10556 if (is_error(ret)) {
10559 if (arg1 && put_user_u32(cpu, arg1)) {
10560 return -TARGET_EFAULT;
10562 if (arg2 && put_user_u32(node, arg2)) {
10563 return -TARGET_EFAULT;
10567 case TARGET_NR_sched_setparam:
10569 struct sched_param *target_schp;
10570 struct sched_param schp;
10573 return -TARGET_EINVAL;
10575 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10576 return -TARGET_EFAULT;
10577 schp.sched_priority = tswap32(target_schp->sched_priority);
10578 unlock_user_struct(target_schp, arg2, 0);
10579 return get_errno(sched_setparam(arg1, &schp));
10581 case TARGET_NR_sched_getparam:
10583 struct sched_param *target_schp;
10584 struct sched_param schp;
10587 return -TARGET_EINVAL;
10589 ret = get_errno(sched_getparam(arg1, &schp));
10590 if (!is_error(ret)) {
10591 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10592 return -TARGET_EFAULT;
10593 target_schp->sched_priority = tswap32(schp.sched_priority);
10594 unlock_user_struct(target_schp, arg2, 1);
10598 case TARGET_NR_sched_setscheduler:
10600 struct sched_param *target_schp;
10601 struct sched_param schp;
10603 return -TARGET_EINVAL;
10605 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10606 return -TARGET_EFAULT;
10607 schp.sched_priority = tswap32(target_schp->sched_priority);
10608 unlock_user_struct(target_schp, arg3, 0);
10609 return get_errno(sched_setscheduler(arg1, arg2, &schp));
10611 case TARGET_NR_sched_getscheduler:
10612 return get_errno(sched_getscheduler(arg1));
10613 case TARGET_NR_sched_yield:
10614 return get_errno(sched_yield());
10615 case TARGET_NR_sched_get_priority_max:
10616 return get_errno(sched_get_priority_max(arg1));
10617 case TARGET_NR_sched_get_priority_min:
10618 return get_errno(sched_get_priority_min(arg1));
10619 #ifdef TARGET_NR_sched_rr_get_interval
10620 case TARGET_NR_sched_rr_get_interval:
10622 struct timespec ts;
10623 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10624 if (!is_error(ret)) {
10625 ret = host_to_target_timespec(arg2, &ts);
10630 #ifdef TARGET_NR_sched_rr_get_interval_time64
10631 case TARGET_NR_sched_rr_get_interval_time64:
10633 struct timespec ts;
10634 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10635 if (!is_error(ret)) {
10636 ret = host_to_target_timespec64(arg2, &ts);
10641 #if defined(TARGET_NR_nanosleep)
10642 case TARGET_NR_nanosleep:
10644 struct timespec req, rem;
10645 target_to_host_timespec(&req, arg1);
10646 ret = get_errno(safe_nanosleep(&req, &rem));
10647 if (is_error(ret) && arg2) {
10648 host_to_target_timespec(arg2, &rem);
10653 case TARGET_NR_prctl:
10655 case PR_GET_PDEATHSIG:
10658 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10659 if (!is_error(ret) && arg2
10660 && put_user_ual(deathsig, arg2)) {
10661 return -TARGET_EFAULT;
10668 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10670 return -TARGET_EFAULT;
10672 ret = get_errno(prctl(arg1, (unsigned long)name,
10673 arg3, arg4, arg5));
10674 unlock_user(name, arg2, 16);
10679 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10681 return -TARGET_EFAULT;
10683 ret = get_errno(prctl(arg1, (unsigned long)name,
10684 arg3, arg4, arg5));
10685 unlock_user(name, arg2, 0);
10690 case TARGET_PR_GET_FP_MODE:
10692 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10694 if (env->CP0_Status & (1 << CP0St_FR)) {
10695 ret |= TARGET_PR_FP_MODE_FR;
10697 if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10698 ret |= TARGET_PR_FP_MODE_FRE;
10702 case TARGET_PR_SET_FP_MODE:
10704 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10705 bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10706 bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10707 bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10708 bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10710 const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10711 TARGET_PR_FP_MODE_FRE;
10713 /* If nothing to change, return right away, successfully. */
10714 if (old_fr == new_fr && old_fre == new_fre) {
10717 /* Check the value is valid */
10718 if (arg2 & ~known_bits) {
10719 return -TARGET_EOPNOTSUPP;
10721 /* Setting FRE without FR is not supported. */
10722 if (new_fre && !new_fr) {
10723 return -TARGET_EOPNOTSUPP;
10725 if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10726 /* FR1 is not supported */
10727 return -TARGET_EOPNOTSUPP;
10729 if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10730 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10731 /* cannot set FR=0 */
10732 return -TARGET_EOPNOTSUPP;
10734 if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10735 /* Cannot set FRE=1 */
10736 return -TARGET_EOPNOTSUPP;
10740 fpr_t *fpr = env->active_fpu.fpr;
10741 for (i = 0; i < 32 ; i += 2) {
10742 if (!old_fr && new_fr) {
10743 fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10744 } else if (old_fr && !new_fr) {
10745 fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10750 env->CP0_Status |= (1 << CP0St_FR);
10751 env->hflags |= MIPS_HFLAG_F64;
10753 env->CP0_Status &= ~(1 << CP0St_FR);
10754 env->hflags &= ~MIPS_HFLAG_F64;
10757 env->CP0_Config5 |= (1 << CP0C5_FRE);
10758 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10759 env->hflags |= MIPS_HFLAG_FRE;
10762 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10763 env->hflags &= ~MIPS_HFLAG_FRE;
10769 #ifdef TARGET_AARCH64
10770 case TARGET_PR_SVE_SET_VL:
10772 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10773 * PR_SVE_VL_INHERIT. Note the kernel definition
10774 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10775 * even though the current architectural maximum is VQ=16.
10777 ret = -TARGET_EINVAL;
10778 if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10779 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10780 CPUARMState *env = cpu_env;
10781 ARMCPU *cpu = env_archcpu(env);
10782 uint32_t vq, old_vq;
10784 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10785 vq = MAX(arg2 / 16, 1);
10786 vq = MIN(vq, cpu->sve_max_vq);
10789 aarch64_sve_narrow_vq(env, vq);
10791 env->vfp.zcr_el[1] = vq - 1;
10792 arm_rebuild_hflags(env);
10796 case TARGET_PR_SVE_GET_VL:
10797 ret = -TARGET_EINVAL;
10799 ARMCPU *cpu = env_archcpu(cpu_env);
10800 if (cpu_isar_feature(aa64_sve, cpu)) {
10801 ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10805 case TARGET_PR_PAC_RESET_KEYS:
10807 CPUARMState *env = cpu_env;
10808 ARMCPU *cpu = env_archcpu(env);
10810 if (arg3 || arg4 || arg5) {
10811 return -TARGET_EINVAL;
10813 if (cpu_isar_feature(aa64_pauth, cpu)) {
10814 int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10815 TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10816 TARGET_PR_PAC_APGAKEY);
10822 } else if (arg2 & ~all) {
10823 return -TARGET_EINVAL;
10825 if (arg2 & TARGET_PR_PAC_APIAKEY) {
10826 ret |= qemu_guest_getrandom(&env->keys.apia,
10827 sizeof(ARMPACKey), &err);
10829 if (arg2 & TARGET_PR_PAC_APIBKEY) {
10830 ret |= qemu_guest_getrandom(&env->keys.apib,
10831 sizeof(ARMPACKey), &err);
10833 if (arg2 & TARGET_PR_PAC_APDAKEY) {
10834 ret |= qemu_guest_getrandom(&env->keys.apda,
10835 sizeof(ARMPACKey), &err);
10837 if (arg2 & TARGET_PR_PAC_APDBKEY) {
10838 ret |= qemu_guest_getrandom(&env->keys.apdb,
10839 sizeof(ARMPACKey), &err);
10841 if (arg2 & TARGET_PR_PAC_APGAKEY) {
10842 ret |= qemu_guest_getrandom(&env->keys.apga,
10843 sizeof(ARMPACKey), &err);
10847 * Some unknown failure in the crypto. The best
10848 * we can do is log it and fail the syscall.
10849 * The real syscall cannot fail this way.
10851 qemu_log_mask(LOG_UNIMP,
10852 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10853 error_get_pretty(err));
10855 return -TARGET_EIO;
10860 return -TARGET_EINVAL;
10861 #endif /* AARCH64 */
10862 case PR_GET_SECCOMP:
10863 case PR_SET_SECCOMP:
10864 /* Disable seccomp to prevent the target disabling syscalls we
10866 return -TARGET_EINVAL;
10868 /* Most prctl options have no pointer arguments */
10869 return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10872 #ifdef TARGET_NR_arch_prctl
10873 case TARGET_NR_arch_prctl:
10874 return do_arch_prctl(cpu_env, arg1, arg2);
10876 #ifdef TARGET_NR_pread64
10877 case TARGET_NR_pread64:
10878 if (regpairs_aligned(cpu_env, num)) {
10882 if (arg2 == 0 && arg3 == 0) {
10883 /* Special-case NULL buffer and zero length, which should succeed */
10886 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10888 return -TARGET_EFAULT;
10891 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10892 unlock_user(p, arg2, ret);
10894 case TARGET_NR_pwrite64:
10895 if (regpairs_aligned(cpu_env, num)) {
10899 if (arg2 == 0 && arg3 == 0) {
10900 /* Special-case NULL buffer and zero length, which should succeed */
10903 p = lock_user(VERIFY_READ, arg2, arg3, 1);
10905 return -TARGET_EFAULT;
10908 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10909 unlock_user(p, arg2, 0);
10912 case TARGET_NR_getcwd:
10913 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10914 return -TARGET_EFAULT;
10915 ret = get_errno(sys_getcwd1(p, arg2));
10916 unlock_user(p, arg1, ret);
10918 case TARGET_NR_capget:
10919 case TARGET_NR_capset:
10921 struct target_user_cap_header *target_header;
10922 struct target_user_cap_data *target_data = NULL;
10923 struct __user_cap_header_struct header;
10924 struct __user_cap_data_struct data[2];
10925 struct __user_cap_data_struct *dataptr = NULL;
10926 int i, target_datalen;
10927 int data_items = 1;
10929 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10930 return -TARGET_EFAULT;
10932 header.version = tswap32(target_header->version);
10933 header.pid = tswap32(target_header->pid);
10935 if (header.version != _LINUX_CAPABILITY_VERSION) {
10936 /* Version 2 and up takes pointer to two user_data structs */
10940 target_datalen = sizeof(*target_data) * data_items;
10943 if (num == TARGET_NR_capget) {
10944 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10946 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10948 if (!target_data) {
10949 unlock_user_struct(target_header, arg1, 0);
10950 return -TARGET_EFAULT;
10953 if (num == TARGET_NR_capset) {
10954 for (i = 0; i < data_items; i++) {
10955 data[i].effective = tswap32(target_data[i].effective);
10956 data[i].permitted = tswap32(target_data[i].permitted);
10957 data[i].inheritable = tswap32(target_data[i].inheritable);
10964 if (num == TARGET_NR_capget) {
10965 ret = get_errno(capget(&header, dataptr));
10967 ret = get_errno(capset(&header, dataptr));
10970 /* The kernel always updates version for both capget and capset */
10971 target_header->version = tswap32(header.version);
10972 unlock_user_struct(target_header, arg1, 1);
10975 if (num == TARGET_NR_capget) {
10976 for (i = 0; i < data_items; i++) {
10977 target_data[i].effective = tswap32(data[i].effective);
10978 target_data[i].permitted = tswap32(data[i].permitted);
10979 target_data[i].inheritable = tswap32(data[i].inheritable);
10981 unlock_user(target_data, arg2, target_datalen);
10983 unlock_user(target_data, arg2, 0);
10988 case TARGET_NR_sigaltstack:
10989 return do_sigaltstack(arg1, arg2,
10990 get_sp_from_cpustate((CPUArchState *)cpu_env));
10992 #ifdef CONFIG_SENDFILE
10993 #ifdef TARGET_NR_sendfile
10994 case TARGET_NR_sendfile:
10996 off_t *offp = NULL;
10999 ret = get_user_sal(off, arg3);
11000 if (is_error(ret)) {
11005 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11006 if (!is_error(ret) && arg3) {
11007 abi_long ret2 = put_user_sal(off, arg3);
11008 if (is_error(ret2)) {
11015 #ifdef TARGET_NR_sendfile64
11016 case TARGET_NR_sendfile64:
11018 off_t *offp = NULL;
11021 ret = get_user_s64(off, arg3);
11022 if (is_error(ret)) {
11027 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11028 if (!is_error(ret) && arg3) {
11029 abi_long ret2 = put_user_s64(off, arg3);
11030 if (is_error(ret2)) {
11038 #ifdef TARGET_NR_vfork
11039 case TARGET_NR_vfork:
11040 return get_errno(do_fork(cpu_env,
11041 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11044 #ifdef TARGET_NR_ugetrlimit
11045 case TARGET_NR_ugetrlimit:
11047 struct rlimit rlim;
11048 int resource = target_to_host_resource(arg1);
11049 ret = get_errno(getrlimit(resource, &rlim));
11050 if (!is_error(ret)) {
11051 struct target_rlimit *target_rlim;
11052 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11053 return -TARGET_EFAULT;
11054 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11055 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11056 unlock_user_struct(target_rlim, arg2, 1);
11061 #ifdef TARGET_NR_truncate64
11062 case TARGET_NR_truncate64:
11063 if (!(p = lock_user_string(arg1)))
11064 return -TARGET_EFAULT;
11065 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11066 unlock_user(p, arg1, 0);
11069 #ifdef TARGET_NR_ftruncate64
11070 case TARGET_NR_ftruncate64:
11071 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11073 #ifdef TARGET_NR_stat64
11074 case TARGET_NR_stat64:
11075 if (!(p = lock_user_string(arg1))) {
11076 return -TARGET_EFAULT;
11078 ret = get_errno(stat(path(p), &st));
11079 unlock_user(p, arg1, 0);
11080 if (!is_error(ret))
11081 ret = host_to_target_stat64(cpu_env, arg2, &st);
11084 #ifdef TARGET_NR_lstat64
11085 case TARGET_NR_lstat64:
11086 if (!(p = lock_user_string(arg1))) {
11087 return -TARGET_EFAULT;
11089 ret = get_errno(lstat(path(p), &st));
11090 unlock_user(p, arg1, 0);
11091 if (!is_error(ret))
11092 ret = host_to_target_stat64(cpu_env, arg2, &st);
11095 #ifdef TARGET_NR_fstat64
11096 case TARGET_NR_fstat64:
11097 ret = get_errno(fstat(arg1, &st));
11098 if (!is_error(ret))
11099 ret = host_to_target_stat64(cpu_env, arg2, &st);
11102 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11103 #ifdef TARGET_NR_fstatat64
11104 case TARGET_NR_fstatat64:
11106 #ifdef TARGET_NR_newfstatat
11107 case TARGET_NR_newfstatat:
11109 if (!(p = lock_user_string(arg2))) {
11110 return -TARGET_EFAULT;
11112 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11113 unlock_user(p, arg2, 0);
11114 if (!is_error(ret))
11115 ret = host_to_target_stat64(cpu_env, arg3, &st);
11118 #if defined(TARGET_NR_statx)
11119 case TARGET_NR_statx:
11121 struct target_statx *target_stx;
11125 p = lock_user_string(arg2);
11127 return -TARGET_EFAULT;
11129 #if defined(__NR_statx)
11132 * It is assumed that struct statx is architecture independent.
11134 struct target_statx host_stx;
11137 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11138 if (!is_error(ret)) {
11139 if (host_to_target_statx(&host_stx, arg5) != 0) {
11140 unlock_user(p, arg2, 0);
11141 return -TARGET_EFAULT;
11145 if (ret != -TARGET_ENOSYS) {
11146 unlock_user(p, arg2, 0);
11151 ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11152 unlock_user(p, arg2, 0);
11154 if (!is_error(ret)) {
11155 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11156 return -TARGET_EFAULT;
11158 memset(target_stx, 0, sizeof(*target_stx));
11159 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11160 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11161 __put_user(st.st_ino, &target_stx->stx_ino);
11162 __put_user(st.st_mode, &target_stx->stx_mode);
11163 __put_user(st.st_uid, &target_stx->stx_uid);
11164 __put_user(st.st_gid, &target_stx->stx_gid);
11165 __put_user(st.st_nlink, &target_stx->stx_nlink);
11166 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11167 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11168 __put_user(st.st_size, &target_stx->stx_size);
11169 __put_user(st.st_blksize, &target_stx->stx_blksize);
11170 __put_user(st.st_blocks, &target_stx->stx_blocks);
11171 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11172 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11173 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11174 unlock_user_struct(target_stx, arg5, 1);
11179 #ifdef TARGET_NR_lchown
11180 case TARGET_NR_lchown:
11181 if (!(p = lock_user_string(arg1)))
11182 return -TARGET_EFAULT;
11183 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11184 unlock_user(p, arg1, 0);
11187 #ifdef TARGET_NR_getuid
11188 case TARGET_NR_getuid:
11189 return get_errno(high2lowuid(getuid()));
11191 #ifdef TARGET_NR_getgid
11192 case TARGET_NR_getgid:
11193 return get_errno(high2lowgid(getgid()));
11195 #ifdef TARGET_NR_geteuid
11196 case TARGET_NR_geteuid:
11197 return get_errno(high2lowuid(geteuid()));
11199 #ifdef TARGET_NR_getegid
11200 case TARGET_NR_getegid:
11201 return get_errno(high2lowgid(getegid()));
11203 case TARGET_NR_setreuid:
11204 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11205 case TARGET_NR_setregid:
11206 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11207 case TARGET_NR_getgroups:
11209 int gidsetsize = arg1;
11210 target_id *target_grouplist;
11214 grouplist = alloca(gidsetsize * sizeof(gid_t));
11215 ret = get_errno(getgroups(gidsetsize, grouplist));
11216 if (gidsetsize == 0)
11218 if (!is_error(ret)) {
11219 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11220 if (!target_grouplist)
11221 return -TARGET_EFAULT;
11222 for(i = 0;i < ret; i++)
11223 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11224 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11228 case TARGET_NR_setgroups:
11230 int gidsetsize = arg1;
11231 target_id *target_grouplist;
11232 gid_t *grouplist = NULL;
11235 grouplist = alloca(gidsetsize * sizeof(gid_t));
11236 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11237 if (!target_grouplist) {
11238 return -TARGET_EFAULT;
11240 for (i = 0; i < gidsetsize; i++) {
11241 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11243 unlock_user(target_grouplist, arg2, 0);
11245 return get_errno(setgroups(gidsetsize, grouplist));
11247 case TARGET_NR_fchown:
11248 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11249 #if defined(TARGET_NR_fchownat)
11250 case TARGET_NR_fchownat:
11251 if (!(p = lock_user_string(arg2)))
11252 return -TARGET_EFAULT;
11253 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11254 low2highgid(arg4), arg5));
11255 unlock_user(p, arg2, 0);
11258 #ifdef TARGET_NR_setresuid
11259 case TARGET_NR_setresuid:
11260 return get_errno(sys_setresuid(low2highuid(arg1),
11262 low2highuid(arg3)));
11264 #ifdef TARGET_NR_getresuid
11265 case TARGET_NR_getresuid:
11267 uid_t ruid, euid, suid;
11268 ret = get_errno(getresuid(&ruid, &euid, &suid));
11269 if (!is_error(ret)) {
11270 if (put_user_id(high2lowuid(ruid), arg1)
11271 || put_user_id(high2lowuid(euid), arg2)
11272 || put_user_id(high2lowuid(suid), arg3))
11273 return -TARGET_EFAULT;
11278 #ifdef TARGET_NR_getresgid
11279 case TARGET_NR_setresgid:
11280 return get_errno(sys_setresgid(low2highgid(arg1),
11282 low2highgid(arg3)));
11284 #ifdef TARGET_NR_getresgid
11285 case TARGET_NR_getresgid:
11287 gid_t rgid, egid, sgid;
11288 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11289 if (!is_error(ret)) {
11290 if (put_user_id(high2lowgid(rgid), arg1)
11291 || put_user_id(high2lowgid(egid), arg2)
11292 || put_user_id(high2lowgid(sgid), arg3))
11293 return -TARGET_EFAULT;
11298 #ifdef TARGET_NR_chown
11299 case TARGET_NR_chown:
11300 if (!(p = lock_user_string(arg1)))
11301 return -TARGET_EFAULT;
11302 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11303 unlock_user(p, arg1, 0);
11306 case TARGET_NR_setuid:
11307 return get_errno(sys_setuid(low2highuid(arg1)));
11308 case TARGET_NR_setgid:
11309 return get_errno(sys_setgid(low2highgid(arg1)));
11310 case TARGET_NR_setfsuid:
11311 return get_errno(setfsuid(arg1));
11312 case TARGET_NR_setfsgid:
11313 return get_errno(setfsgid(arg1));
11315 #ifdef TARGET_NR_lchown32
11316 case TARGET_NR_lchown32:
11317 if (!(p = lock_user_string(arg1)))
11318 return -TARGET_EFAULT;
11319 ret = get_errno(lchown(p, arg2, arg3));
11320 unlock_user(p, arg1, 0);
11323 #ifdef TARGET_NR_getuid32
11324 case TARGET_NR_getuid32:
11325 return get_errno(getuid());
11328 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11329 /* Alpha specific */
11330 case TARGET_NR_getxuid:
11334 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11336 return get_errno(getuid());
11338 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11339 /* Alpha specific */
11340 case TARGET_NR_getxgid:
11344 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11346 return get_errno(getgid());
11348 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11349 /* Alpha specific */
11350 case TARGET_NR_osf_getsysinfo:
11351 ret = -TARGET_EOPNOTSUPP;
11353 case TARGET_GSI_IEEE_FP_CONTROL:
11355 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11356 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11358 swcr &= ~SWCR_STATUS_MASK;
11359 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11361 if (put_user_u64 (swcr, arg2))
11362 return -TARGET_EFAULT;
11367 /* case GSI_IEEE_STATE_AT_SIGNAL:
11368 -- Not implemented in linux kernel.
11370 -- Retrieves current unaligned access state; not much used.
11371 case GSI_PROC_TYPE:
11372 -- Retrieves implver information; surely not used.
11373 case GSI_GET_HWRPB:
11374 -- Grabs a copy of the HWRPB; surely not used.
11379 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11380 /* Alpha specific */
11381 case TARGET_NR_osf_setsysinfo:
11382 ret = -TARGET_EOPNOTSUPP;
11384 case TARGET_SSI_IEEE_FP_CONTROL:
11386 uint64_t swcr, fpcr;
11388 if (get_user_u64 (swcr, arg2)) {
11389 return -TARGET_EFAULT;
11393 * The kernel calls swcr_update_status to update the
11394 * status bits from the fpcr at every point that it
11395 * could be queried. Therefore, we store the status
11396 * bits only in FPCR.
11398 ((CPUAlphaState *)cpu_env)->swcr
11399 = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11401 fpcr = cpu_alpha_load_fpcr(cpu_env);
11402 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11403 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11404 cpu_alpha_store_fpcr(cpu_env, fpcr);
11409 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11411 uint64_t exc, fpcr, fex;
11413 if (get_user_u64(exc, arg2)) {
11414 return -TARGET_EFAULT;
11416 exc &= SWCR_STATUS_MASK;
11417 fpcr = cpu_alpha_load_fpcr(cpu_env);
11419 /* Old exceptions are not signaled. */
11420 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11422 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11423 fex &= ((CPUArchState *)cpu_env)->swcr;
11425 /* Update the hardware fpcr. */
11426 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11427 cpu_alpha_store_fpcr(cpu_env, fpcr);
11430 int si_code = TARGET_FPE_FLTUNK;
11431 target_siginfo_t info;
11433 if (fex & SWCR_TRAP_ENABLE_DNO) {
11434 si_code = TARGET_FPE_FLTUND;
11436 if (fex & SWCR_TRAP_ENABLE_INE) {
11437 si_code = TARGET_FPE_FLTRES;
11439 if (fex & SWCR_TRAP_ENABLE_UNF) {
11440 si_code = TARGET_FPE_FLTUND;
11442 if (fex & SWCR_TRAP_ENABLE_OVF) {
11443 si_code = TARGET_FPE_FLTOVF;
11445 if (fex & SWCR_TRAP_ENABLE_DZE) {
11446 si_code = TARGET_FPE_FLTDIV;
11448 if (fex & SWCR_TRAP_ENABLE_INV) {
11449 si_code = TARGET_FPE_FLTINV;
11452 info.si_signo = SIGFPE;
11454 info.si_code = si_code;
11455 info._sifields._sigfault._addr
11456 = ((CPUArchState *)cpu_env)->pc;
11457 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11458 QEMU_SI_FAULT, &info);
11464 /* case SSI_NVPAIRS:
11465 -- Used with SSIN_UACPROC to enable unaligned accesses.
11466 case SSI_IEEE_STATE_AT_SIGNAL:
11467 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11468 -- Not implemented in linux kernel
11473 #ifdef TARGET_NR_osf_sigprocmask
11474 /* Alpha specific. */
11475 case TARGET_NR_osf_sigprocmask:
11479 sigset_t set, oldset;
11482 case TARGET_SIG_BLOCK:
11485 case TARGET_SIG_UNBLOCK:
11488 case TARGET_SIG_SETMASK:
11492 return -TARGET_EINVAL;
11495 target_to_host_old_sigset(&set, &mask);
11496 ret = do_sigprocmask(how, &set, &oldset);
11498 host_to_target_old_sigset(&mask, &oldset);
11505 #ifdef TARGET_NR_getgid32
11506 case TARGET_NR_getgid32:
11507 return get_errno(getgid());
11509 #ifdef TARGET_NR_geteuid32
11510 case TARGET_NR_geteuid32:
11511 return get_errno(geteuid());
11513 #ifdef TARGET_NR_getegid32
11514 case TARGET_NR_getegid32:
11515 return get_errno(getegid());
11517 #ifdef TARGET_NR_setreuid32
11518 case TARGET_NR_setreuid32:
11519 return get_errno(setreuid(arg1, arg2));
11521 #ifdef TARGET_NR_setregid32
11522 case TARGET_NR_setregid32:
11523 return get_errno(setregid(arg1, arg2));
11525 #ifdef TARGET_NR_getgroups32
11526 case TARGET_NR_getgroups32:
11528 int gidsetsize = arg1;
11529 uint32_t *target_grouplist;
11533 grouplist = alloca(gidsetsize * sizeof(gid_t));
11534 ret = get_errno(getgroups(gidsetsize, grouplist));
11535 if (gidsetsize == 0)
11537 if (!is_error(ret)) {
11538 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11539 if (!target_grouplist) {
11540 return -TARGET_EFAULT;
11542 for(i = 0;i < ret; i++)
11543 target_grouplist[i] = tswap32(grouplist[i]);
11544 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11549 #ifdef TARGET_NR_setgroups32
11550 case TARGET_NR_setgroups32:
11552 int gidsetsize = arg1;
11553 uint32_t *target_grouplist;
11557 grouplist = alloca(gidsetsize * sizeof(gid_t));
11558 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11559 if (!target_grouplist) {
11560 return -TARGET_EFAULT;
11562 for(i = 0;i < gidsetsize; i++)
11563 grouplist[i] = tswap32(target_grouplist[i]);
11564 unlock_user(target_grouplist, arg2, 0);
11565 return get_errno(setgroups(gidsetsize, grouplist));
11568 #ifdef TARGET_NR_fchown32
11569 case TARGET_NR_fchown32:
11570 return get_errno(fchown(arg1, arg2, arg3));
11572 #ifdef TARGET_NR_setresuid32
11573 case TARGET_NR_setresuid32:
11574 return get_errno(sys_setresuid(arg1, arg2, arg3));
11576 #ifdef TARGET_NR_getresuid32
11577 case TARGET_NR_getresuid32:
11579 uid_t ruid, euid, suid;
11580 ret = get_errno(getresuid(&ruid, &euid, &suid));
11581 if (!is_error(ret)) {
11582 if (put_user_u32(ruid, arg1)
11583 || put_user_u32(euid, arg2)
11584 || put_user_u32(suid, arg3))
11585 return -TARGET_EFAULT;
11590 #ifdef TARGET_NR_setresgid32
11591 case TARGET_NR_setresgid32:
11592 return get_errno(sys_setresgid(arg1, arg2, arg3));
11594 #ifdef TARGET_NR_getresgid32
11595 case TARGET_NR_getresgid32:
11597 gid_t rgid, egid, sgid;
11598 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11599 if (!is_error(ret)) {
11600 if (put_user_u32(rgid, arg1)
11601 || put_user_u32(egid, arg2)
11602 || put_user_u32(sgid, arg3))
11603 return -TARGET_EFAULT;
11608 #ifdef TARGET_NR_chown32
11609 case TARGET_NR_chown32:
11610 if (!(p = lock_user_string(arg1)))
11611 return -TARGET_EFAULT;
11612 ret = get_errno(chown(p, arg2, arg3));
11613 unlock_user(p, arg1, 0);
11616 #ifdef TARGET_NR_setuid32
11617 case TARGET_NR_setuid32:
11618 return get_errno(sys_setuid(arg1));
11620 #ifdef TARGET_NR_setgid32
11621 case TARGET_NR_setgid32:
11622 return get_errno(sys_setgid(arg1));
11624 #ifdef TARGET_NR_setfsuid32
11625 case TARGET_NR_setfsuid32:
11626 return get_errno(setfsuid(arg1));
11628 #ifdef TARGET_NR_setfsgid32
11629 case TARGET_NR_setfsgid32:
11630 return get_errno(setfsgid(arg1));
11632 #ifdef TARGET_NR_mincore
11633 case TARGET_NR_mincore:
11635 void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11637 return -TARGET_ENOMEM;
11639 p = lock_user_string(arg3);
11641 ret = -TARGET_EFAULT;
11643 ret = get_errno(mincore(a, arg2, p));
11644 unlock_user(p, arg3, ret);
11646 unlock_user(a, arg1, 0);
11650 #ifdef TARGET_NR_arm_fadvise64_64
11651 case TARGET_NR_arm_fadvise64_64:
11652 /* arm_fadvise64_64 looks like fadvise64_64 but
11653 * with different argument order: fd, advice, offset, len
11654 * rather than the usual fd, offset, len, advice.
11655 * Note that offset and len are both 64-bit so appear as
11656 * pairs of 32-bit registers.
11658 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11659 target_offset64(arg5, arg6), arg2);
11660 return -host_to_target_errno(ret);
11663 #if TARGET_ABI_BITS == 32
11665 #ifdef TARGET_NR_fadvise64_64
11666 case TARGET_NR_fadvise64_64:
11667 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11668 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11676 /* 6 args: fd, offset (high, low), len (high, low), advice */
11677 if (regpairs_aligned(cpu_env, num)) {
11678 /* offset is in (3,4), len in (5,6) and advice in 7 */
11686 ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11687 target_offset64(arg4, arg5), arg6);
11688 return -host_to_target_errno(ret);
11691 #ifdef TARGET_NR_fadvise64
11692 case TARGET_NR_fadvise64:
11693 /* 5 args: fd, offset (high, low), len, advice */
11694 if (regpairs_aligned(cpu_env, num)) {
11695 /* offset is in (3,4), len in 5 and advice in 6 */
11701 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11702 return -host_to_target_errno(ret);
11705 #else /* not a 32-bit ABI */
11706 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11707 #ifdef TARGET_NR_fadvise64_64
11708 case TARGET_NR_fadvise64_64:
11710 #ifdef TARGET_NR_fadvise64
11711 case TARGET_NR_fadvise64:
11713 #ifdef TARGET_S390X
11715 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11716 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11717 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11718 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11722 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11724 #endif /* end of 64-bit ABI fadvise handling */
11726 #ifdef TARGET_NR_madvise
11727 case TARGET_NR_madvise:
11728 /* A straight passthrough may not be safe because qemu sometimes
11729 turns private file-backed mappings into anonymous mappings.
11730 This will break MADV_DONTNEED.
11731 This is a hint, so ignoring and returning success is ok. */
11734 #ifdef TARGET_NR_fcntl64
11735 case TARGET_NR_fcntl64:
11739 from_flock64_fn *copyfrom = copy_from_user_flock64;
11740 to_flock64_fn *copyto = copy_to_user_flock64;
11743 if (!((CPUARMState *)cpu_env)->eabi) {
11744 copyfrom = copy_from_user_oabi_flock64;
11745 copyto = copy_to_user_oabi_flock64;
11749 cmd = target_to_host_fcntl_cmd(arg2);
11750 if (cmd == -TARGET_EINVAL) {
11755 case TARGET_F_GETLK64:
11756 ret = copyfrom(&fl, arg3);
11760 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11762 ret = copyto(arg3, &fl);
11766 case TARGET_F_SETLK64:
11767 case TARGET_F_SETLKW64:
11768 ret = copyfrom(&fl, arg3);
11772 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11775 ret = do_fcntl(arg1, arg2, arg3);
11781 #ifdef TARGET_NR_cacheflush
11782 case TARGET_NR_cacheflush:
11783 /* self-modifying code is handled automatically, so nothing needed */
11786 #ifdef TARGET_NR_getpagesize
11787 case TARGET_NR_getpagesize:
11788 return TARGET_PAGE_SIZE;
11790 case TARGET_NR_gettid:
11791 return get_errno(sys_gettid());
11792 #ifdef TARGET_NR_readahead
11793 case TARGET_NR_readahead:
11794 #if TARGET_ABI_BITS == 32
11795 if (regpairs_aligned(cpu_env, num)) {
11800 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11802 ret = get_errno(readahead(arg1, arg2, arg3));
11807 #ifdef TARGET_NR_setxattr
11808 case TARGET_NR_listxattr:
11809 case TARGET_NR_llistxattr:
11813 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11815 return -TARGET_EFAULT;
11818 p = lock_user_string(arg1);
11820 if (num == TARGET_NR_listxattr) {
11821 ret = get_errno(listxattr(p, b, arg3));
11823 ret = get_errno(llistxattr(p, b, arg3));
11826 ret = -TARGET_EFAULT;
11828 unlock_user(p, arg1, 0);
11829 unlock_user(b, arg2, arg3);
11832 case TARGET_NR_flistxattr:
11836 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11838 return -TARGET_EFAULT;
11841 ret = get_errno(flistxattr(arg1, b, arg3));
11842 unlock_user(b, arg2, arg3);
11845 case TARGET_NR_setxattr:
11846 case TARGET_NR_lsetxattr:
11848 void *p, *n, *v = 0;
11850 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11852 return -TARGET_EFAULT;
11855 p = lock_user_string(arg1);
11856 n = lock_user_string(arg2);
11858 if (num == TARGET_NR_setxattr) {
11859 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11861 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11864 ret = -TARGET_EFAULT;
11866 unlock_user(p, arg1, 0);
11867 unlock_user(n, arg2, 0);
11868 unlock_user(v, arg3, 0);
11871 case TARGET_NR_fsetxattr:
11875 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11877 return -TARGET_EFAULT;
11880 n = lock_user_string(arg2);
11882 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11884 ret = -TARGET_EFAULT;
11886 unlock_user(n, arg2, 0);
11887 unlock_user(v, arg3, 0);
11890 case TARGET_NR_getxattr:
11891 case TARGET_NR_lgetxattr:
11893 void *p, *n, *v = 0;
11895 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11897 return -TARGET_EFAULT;
11900 p = lock_user_string(arg1);
11901 n = lock_user_string(arg2);
11903 if (num == TARGET_NR_getxattr) {
11904 ret = get_errno(getxattr(p, n, v, arg4));
11906 ret = get_errno(lgetxattr(p, n, v, arg4));
11909 ret = -TARGET_EFAULT;
11911 unlock_user(p, arg1, 0);
11912 unlock_user(n, arg2, 0);
11913 unlock_user(v, arg3, arg4);
11916 case TARGET_NR_fgetxattr:
11920 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11922 return -TARGET_EFAULT;
11925 n = lock_user_string(arg2);
11927 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11929 ret = -TARGET_EFAULT;
11931 unlock_user(n, arg2, 0);
11932 unlock_user(v, arg3, arg4);
11935 case TARGET_NR_removexattr:
11936 case TARGET_NR_lremovexattr:
11939 p = lock_user_string(arg1);
11940 n = lock_user_string(arg2);
11942 if (num == TARGET_NR_removexattr) {
11943 ret = get_errno(removexattr(p, n));
11945 ret = get_errno(lremovexattr(p, n));
11948 ret = -TARGET_EFAULT;
11950 unlock_user(p, arg1, 0);
11951 unlock_user(n, arg2, 0);
11954 case TARGET_NR_fremovexattr:
11957 n = lock_user_string(arg2);
11959 ret = get_errno(fremovexattr(arg1, n));
11961 ret = -TARGET_EFAULT;
11963 unlock_user(n, arg2, 0);
11967 #endif /* CONFIG_ATTR */
11968 #ifdef TARGET_NR_set_thread_area
11969 case TARGET_NR_set_thread_area:
11970 #if defined(TARGET_MIPS)
11971 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11973 #elif defined(TARGET_CRIS)
11975 ret = -TARGET_EINVAL;
11977 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11981 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11982 return do_set_thread_area(cpu_env, arg1);
11983 #elif defined(TARGET_M68K)
11985 TaskState *ts = cpu->opaque;
11986 ts->tp_value = arg1;
11990 return -TARGET_ENOSYS;
11993 #ifdef TARGET_NR_get_thread_area
11994 case TARGET_NR_get_thread_area:
11995 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11996 return do_get_thread_area(cpu_env, arg1);
11997 #elif defined(TARGET_M68K)
11999 TaskState *ts = cpu->opaque;
12000 return ts->tp_value;
12003 return -TARGET_ENOSYS;
12006 #ifdef TARGET_NR_getdomainname
12007 case TARGET_NR_getdomainname:
12008 return -TARGET_ENOSYS;
12011 #ifdef TARGET_NR_clock_settime
12012 case TARGET_NR_clock_settime:
12014 struct timespec ts;
12016 ret = target_to_host_timespec(&ts, arg2);
12017 if (!is_error(ret)) {
12018 ret = get_errno(clock_settime(arg1, &ts));
12023 #ifdef TARGET_NR_clock_settime64
12024 case TARGET_NR_clock_settime64:
12026 struct timespec ts;
12028 ret = target_to_host_timespec64(&ts, arg2);
12029 if (!is_error(ret)) {
12030 ret = get_errno(clock_settime(arg1, &ts));
12035 #ifdef TARGET_NR_clock_gettime
12036 case TARGET_NR_clock_gettime:
12038 struct timespec ts;
12039 ret = get_errno(clock_gettime(arg1, &ts));
12040 if (!is_error(ret)) {
12041 ret = host_to_target_timespec(arg2, &ts);
12046 #ifdef TARGET_NR_clock_gettime64
12047 case TARGET_NR_clock_gettime64:
12049 struct timespec ts;
12050 ret = get_errno(clock_gettime(arg1, &ts));
12051 if (!is_error(ret)) {
12052 ret = host_to_target_timespec64(arg2, &ts);
12057 #ifdef TARGET_NR_clock_getres
12058 case TARGET_NR_clock_getres:
12060 struct timespec ts;
12061 ret = get_errno(clock_getres(arg1, &ts));
12062 if (!is_error(ret)) {
12063 host_to_target_timespec(arg2, &ts);
12068 #ifdef TARGET_NR_clock_getres_time64
12069 case TARGET_NR_clock_getres_time64:
12071 struct timespec ts;
12072 ret = get_errno(clock_getres(arg1, &ts));
12073 if (!is_error(ret)) {
12074 host_to_target_timespec64(arg2, &ts);
12079 #ifdef TARGET_NR_clock_nanosleep
12080 case TARGET_NR_clock_nanosleep:
12082 struct timespec ts;
12083 if (target_to_host_timespec(&ts, arg3)) {
12084 return -TARGET_EFAULT;
12086 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12087 &ts, arg4 ? &ts : NULL));
12089 * if the call is interrupted by a signal handler, it fails
12090 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12091 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12093 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12094 host_to_target_timespec(arg4, &ts)) {
12095 return -TARGET_EFAULT;
12101 #ifdef TARGET_NR_clock_nanosleep_time64
12102 case TARGET_NR_clock_nanosleep_time64:
12104 struct timespec ts;
12106 if (target_to_host_timespec64(&ts, arg3)) {
12107 return -TARGET_EFAULT;
12110 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12111 &ts, arg4 ? &ts : NULL));
12113 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12114 host_to_target_timespec64(arg4, &ts)) {
12115 return -TARGET_EFAULT;
12121 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12122 case TARGET_NR_set_tid_address:
12123 return get_errno(set_tid_address((int *)g2h(arg1)));
12126 case TARGET_NR_tkill:
12127 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12129 case TARGET_NR_tgkill:
12130 return get_errno(safe_tgkill((int)arg1, (int)arg2,
12131 target_to_host_signal(arg3)));
12133 #ifdef TARGET_NR_set_robust_list
12134 case TARGET_NR_set_robust_list:
12135 case TARGET_NR_get_robust_list:
12136 /* The ABI for supporting robust futexes has userspace pass
12137 * the kernel a pointer to a linked list which is updated by
12138 * userspace after the syscall; the list is walked by the kernel
12139 * when the thread exits. Since the linked list in QEMU guest
12140 * memory isn't a valid linked list for the host and we have
12141 * no way to reliably intercept the thread-death event, we can't
12142 * support these. Silently return ENOSYS so that guest userspace
12143 * falls back to a non-robust futex implementation (which should
12144 * be OK except in the corner case of the guest crashing while
12145 * holding a mutex that is shared with another process via
12148 return -TARGET_ENOSYS;
12151 #if defined(TARGET_NR_utimensat)
12152 case TARGET_NR_utimensat:
12154 struct timespec *tsp, ts[2];
12158 if (target_to_host_timespec(ts, arg3)) {
12159 return -TARGET_EFAULT;
12161 if (target_to_host_timespec(ts + 1, arg3 +
12162 sizeof(struct target_timespec))) {
12163 return -TARGET_EFAULT;
12168 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12170 if (!(p = lock_user_string(arg2))) {
12171 return -TARGET_EFAULT;
12173 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12174 unlock_user(p, arg2, 0);
12179 #ifdef TARGET_NR_utimensat_time64
12180 case TARGET_NR_utimensat_time64:
12182 struct timespec *tsp, ts[2];
12186 if (target_to_host_timespec64(ts, arg3)) {
12187 return -TARGET_EFAULT;
12189 if (target_to_host_timespec64(ts + 1, arg3 +
12190 sizeof(struct target__kernel_timespec))) {
12191 return -TARGET_EFAULT;
12196 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12198 p = lock_user_string(arg2);
12200 return -TARGET_EFAULT;
12202 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12203 unlock_user(p, arg2, 0);
12208 #ifdef TARGET_NR_futex
12209 case TARGET_NR_futex:
12210 return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12212 #ifdef TARGET_NR_futex_time64
12213 case TARGET_NR_futex_time64:
12214 return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
12216 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12217 case TARGET_NR_inotify_init:
12218 ret = get_errno(sys_inotify_init());
12220 fd_trans_register(ret, &target_inotify_trans);
12224 #ifdef CONFIG_INOTIFY1
12225 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12226 case TARGET_NR_inotify_init1:
12227 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12228 fcntl_flags_tbl)));
12230 fd_trans_register(ret, &target_inotify_trans);
12235 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12236 case TARGET_NR_inotify_add_watch:
12237 p = lock_user_string(arg2);
12238 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12239 unlock_user(p, arg2, 0);
12242 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12243 case TARGET_NR_inotify_rm_watch:
12244 return get_errno(sys_inotify_rm_watch(arg1, arg2));
12247 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12248 case TARGET_NR_mq_open:
12250 struct mq_attr posix_mq_attr;
12251 struct mq_attr *pposix_mq_attr;
12254 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12255 pposix_mq_attr = NULL;
12257 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12258 return -TARGET_EFAULT;
12260 pposix_mq_attr = &posix_mq_attr;
12262 p = lock_user_string(arg1 - 1);
12264 return -TARGET_EFAULT;
12266 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12267 unlock_user (p, arg1, 0);
12271 case TARGET_NR_mq_unlink:
12272 p = lock_user_string(arg1 - 1);
12274 return -TARGET_EFAULT;
12276 ret = get_errno(mq_unlink(p));
12277 unlock_user (p, arg1, 0);
12280 #ifdef TARGET_NR_mq_timedsend
12281 case TARGET_NR_mq_timedsend:
12283 struct timespec ts;
12285 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12287 if (target_to_host_timespec(&ts, arg5)) {
12288 return -TARGET_EFAULT;
12290 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12291 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12292 return -TARGET_EFAULT;
12295 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12297 unlock_user (p, arg2, arg3);
12301 #ifdef TARGET_NR_mq_timedsend_time64
12302 case TARGET_NR_mq_timedsend_time64:
12304 struct timespec ts;
12306 p = lock_user(VERIFY_READ, arg2, arg3, 1);
12308 if (target_to_host_timespec64(&ts, arg5)) {
12309 return -TARGET_EFAULT;
12311 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12312 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12313 return -TARGET_EFAULT;
12316 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12318 unlock_user(p, arg2, arg3);
12323 #ifdef TARGET_NR_mq_timedreceive
12324 case TARGET_NR_mq_timedreceive:
12326 struct timespec ts;
12329 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12331 if (target_to_host_timespec(&ts, arg5)) {
12332 return -TARGET_EFAULT;
12334 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12336 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12337 return -TARGET_EFAULT;
12340 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12343 unlock_user (p, arg2, arg3);
12345 put_user_u32(prio, arg4);
12349 #ifdef TARGET_NR_mq_timedreceive_time64
12350 case TARGET_NR_mq_timedreceive_time64:
12352 struct timespec ts;
12355 p = lock_user(VERIFY_READ, arg2, arg3, 1);
12357 if (target_to_host_timespec64(&ts, arg5)) {
12358 return -TARGET_EFAULT;
12360 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12362 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12363 return -TARGET_EFAULT;
12366 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12369 unlock_user(p, arg2, arg3);
12371 put_user_u32(prio, arg4);
12377 /* Not implemented for now... */
12378 /* case TARGET_NR_mq_notify: */
12381 case TARGET_NR_mq_getsetattr:
12383 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12386 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12387 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12388 &posix_mq_attr_out));
12389 } else if (arg3 != 0) {
12390 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12392 if (ret == 0 && arg3 != 0) {
12393 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12399 #ifdef CONFIG_SPLICE
12400 #ifdef TARGET_NR_tee
12401 case TARGET_NR_tee:
12403 ret = get_errno(tee(arg1,arg2,arg3,arg4));
12407 #ifdef TARGET_NR_splice
12408 case TARGET_NR_splice:
12410 loff_t loff_in, loff_out;
12411 loff_t *ploff_in = NULL, *ploff_out = NULL;
12413 if (get_user_u64(loff_in, arg2)) {
12414 return -TARGET_EFAULT;
12416 ploff_in = &loff_in;
12419 if (get_user_u64(loff_out, arg4)) {
12420 return -TARGET_EFAULT;
12422 ploff_out = &loff_out;
12424 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12426 if (put_user_u64(loff_in, arg2)) {
12427 return -TARGET_EFAULT;
12431 if (put_user_u64(loff_out, arg4)) {
12432 return -TARGET_EFAULT;
12438 #ifdef TARGET_NR_vmsplice
12439 case TARGET_NR_vmsplice:
12441 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12443 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12444 unlock_iovec(vec, arg2, arg3, 0);
12446 ret = -host_to_target_errno(errno);
12451 #endif /* CONFIG_SPLICE */
12452 #ifdef CONFIG_EVENTFD
12453 #if defined(TARGET_NR_eventfd)
12454 case TARGET_NR_eventfd:
12455 ret = get_errno(eventfd(arg1, 0));
12457 fd_trans_register(ret, &target_eventfd_trans);
12461 #if defined(TARGET_NR_eventfd2)
12462 case TARGET_NR_eventfd2:
12464 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12465 if (arg2 & TARGET_O_NONBLOCK) {
12466 host_flags |= O_NONBLOCK;
12468 if (arg2 & TARGET_O_CLOEXEC) {
12469 host_flags |= O_CLOEXEC;
12471 ret = get_errno(eventfd(arg1, host_flags));
12473 fd_trans_register(ret, &target_eventfd_trans);
12478 #endif /* CONFIG_EVENTFD */
12479 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12480 case TARGET_NR_fallocate:
12481 #if TARGET_ABI_BITS == 32
12482 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12483 target_offset64(arg5, arg6)));
12485 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12489 #if defined(CONFIG_SYNC_FILE_RANGE)
12490 #if defined(TARGET_NR_sync_file_range)
12491 case TARGET_NR_sync_file_range:
12492 #if TARGET_ABI_BITS == 32
12493 #if defined(TARGET_MIPS)
12494 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12495 target_offset64(arg5, arg6), arg7));
12497 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12498 target_offset64(arg4, arg5), arg6));
12499 #endif /* !TARGET_MIPS */
12501 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12505 #if defined(TARGET_NR_sync_file_range2) || \
12506 defined(TARGET_NR_arm_sync_file_range)
12507 #if defined(TARGET_NR_sync_file_range2)
12508 case TARGET_NR_sync_file_range2:
12510 #if defined(TARGET_NR_arm_sync_file_range)
12511 case TARGET_NR_arm_sync_file_range:
12513 /* This is like sync_file_range but the arguments are reordered */
12514 #if TARGET_ABI_BITS == 32
12515 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12516 target_offset64(arg5, arg6), arg2));
12518 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12523 #if defined(TARGET_NR_signalfd4)
12524 case TARGET_NR_signalfd4:
12525 return do_signalfd4(arg1, arg2, arg4);
12527 #if defined(TARGET_NR_signalfd)
12528 case TARGET_NR_signalfd:
12529 return do_signalfd4(arg1, arg2, 0);
12531 #if defined(CONFIG_EPOLL)
12532 #if defined(TARGET_NR_epoll_create)
12533 case TARGET_NR_epoll_create:
12534 return get_errno(epoll_create(arg1));
12536 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12537 case TARGET_NR_epoll_create1:
12538 return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12540 #if defined(TARGET_NR_epoll_ctl)
12541 case TARGET_NR_epoll_ctl:
12543 struct epoll_event ep;
12544 struct epoll_event *epp = 0;
12546 struct target_epoll_event *target_ep;
12547 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12548 return -TARGET_EFAULT;
12550 ep.events = tswap32(target_ep->events);
12551 /* The epoll_data_t union is just opaque data to the kernel,
12552 * so we transfer all 64 bits across and need not worry what
12553 * actual data type it is.
12555 ep.data.u64 = tswap64(target_ep->data.u64);
12556 unlock_user_struct(target_ep, arg4, 0);
12559 return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12563 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12564 #if defined(TARGET_NR_epoll_wait)
12565 case TARGET_NR_epoll_wait:
12567 #if defined(TARGET_NR_epoll_pwait)
12568 case TARGET_NR_epoll_pwait:
12571 struct target_epoll_event *target_ep;
12572 struct epoll_event *ep;
12574 int maxevents = arg3;
12575 int timeout = arg4;
12577 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12578 return -TARGET_EINVAL;
12581 target_ep = lock_user(VERIFY_WRITE, arg2,
12582 maxevents * sizeof(struct target_epoll_event), 1);
12584 return -TARGET_EFAULT;
12587 ep = g_try_new(struct epoll_event, maxevents);
12589 unlock_user(target_ep, arg2, 0);
12590 return -TARGET_ENOMEM;
12594 #if defined(TARGET_NR_epoll_pwait)
12595 case TARGET_NR_epoll_pwait:
12597 target_sigset_t *target_set;
12598 sigset_t _set, *set = &_set;
12601 if (arg6 != sizeof(target_sigset_t)) {
12602 ret = -TARGET_EINVAL;
12606 target_set = lock_user(VERIFY_READ, arg5,
12607 sizeof(target_sigset_t), 1);
12609 ret = -TARGET_EFAULT;
12612 target_to_host_sigset(set, target_set);
12613 unlock_user(target_set, arg5, 0);
12618 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12619 set, SIGSET_T_SIZE));
12623 #if defined(TARGET_NR_epoll_wait)
12624 case TARGET_NR_epoll_wait:
12625 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12630 ret = -TARGET_ENOSYS;
12632 if (!is_error(ret)) {
12634 for (i = 0; i < ret; i++) {
12635 target_ep[i].events = tswap32(ep[i].events);
12636 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12638 unlock_user(target_ep, arg2,
12639 ret * sizeof(struct target_epoll_event));
12641 unlock_user(target_ep, arg2, 0);
12648 #ifdef TARGET_NR_prlimit64
12649 case TARGET_NR_prlimit64:
12651 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12652 struct target_rlimit64 *target_rnew, *target_rold;
12653 struct host_rlimit64 rnew, rold, *rnewp = 0;
12654 int resource = target_to_host_resource(arg2);
12656 if (arg3 && (resource != RLIMIT_AS &&
12657 resource != RLIMIT_DATA &&
12658 resource != RLIMIT_STACK)) {
12659 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12660 return -TARGET_EFAULT;
12662 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12663 rnew.rlim_max = tswap64(target_rnew->rlim_max);
12664 unlock_user_struct(target_rnew, arg3, 0);
12668 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12669 if (!is_error(ret) && arg4) {
12670 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12671 return -TARGET_EFAULT;
12673 target_rold->rlim_cur = tswap64(rold.rlim_cur);
12674 target_rold->rlim_max = tswap64(rold.rlim_max);
12675 unlock_user_struct(target_rold, arg4, 1);
12680 #ifdef TARGET_NR_gethostname
12681 case TARGET_NR_gethostname:
12683 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12685 ret = get_errno(gethostname(name, arg2));
12686 unlock_user(name, arg1, arg2);
12688 ret = -TARGET_EFAULT;
12693 #ifdef TARGET_NR_atomic_cmpxchg_32
12694 case TARGET_NR_atomic_cmpxchg_32:
12696 /* should use start_exclusive from main.c */
12697 abi_ulong mem_value;
12698 if (get_user_u32(mem_value, arg6)) {
12699 target_siginfo_t info;
12700 info.si_signo = SIGSEGV;
12702 info.si_code = TARGET_SEGV_MAPERR;
12703 info._sifields._sigfault._addr = arg6;
12704 queue_signal((CPUArchState *)cpu_env, info.si_signo,
12705 QEMU_SI_FAULT, &info);
12709 if (mem_value == arg2)
12710 put_user_u32(arg1, arg6);
12714 #ifdef TARGET_NR_atomic_barrier
12715 case TARGET_NR_atomic_barrier:
12716 /* Like the kernel implementation and the
12717 qemu arm barrier, no-op this? */
12721 #ifdef TARGET_NR_timer_create
12722 case TARGET_NR_timer_create:
12724 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12726 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12729 int timer_index = next_free_host_timer();
12731 if (timer_index < 0) {
12732 ret = -TARGET_EAGAIN;
12734 timer_t *phtimer = g_posix_timers + timer_index;
12737 phost_sevp = &host_sevp;
12738 ret = target_to_host_sigevent(phost_sevp, arg2);
12744 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12748 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12749 return -TARGET_EFAULT;
12757 #ifdef TARGET_NR_timer_settime
12758 case TARGET_NR_timer_settime:
12760 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12761 * struct itimerspec * old_value */
12762 target_timer_t timerid = get_timer_id(arg1);
12766 } else if (arg3 == 0) {
12767 ret = -TARGET_EINVAL;
12769 timer_t htimer = g_posix_timers[timerid];
12770 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12772 if (target_to_host_itimerspec(&hspec_new, arg3)) {
12773 return -TARGET_EFAULT;
12776 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12777 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12778 return -TARGET_EFAULT;
12785 #ifdef TARGET_NR_timer_settime64
12786 case TARGET_NR_timer_settime64:
12788 target_timer_t timerid = get_timer_id(arg1);
12792 } else if (arg3 == 0) {
12793 ret = -TARGET_EINVAL;
12795 timer_t htimer = g_posix_timers[timerid];
12796 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12798 if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12799 return -TARGET_EFAULT;
12802 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12803 if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12804 return -TARGET_EFAULT;
12811 #ifdef TARGET_NR_timer_gettime
12812 case TARGET_NR_timer_gettime:
12814 /* args: timer_t timerid, struct itimerspec *curr_value */
12815 target_timer_t timerid = get_timer_id(arg1);
12819 } else if (!arg2) {
12820 ret = -TARGET_EFAULT;
12822 timer_t htimer = g_posix_timers[timerid];
12823 struct itimerspec hspec;
12824 ret = get_errno(timer_gettime(htimer, &hspec));
12826 if (host_to_target_itimerspec(arg2, &hspec)) {
12827 ret = -TARGET_EFAULT;
12834 #ifdef TARGET_NR_timer_gettime64
12835 case TARGET_NR_timer_gettime64:
12837 /* args: timer_t timerid, struct itimerspec64 *curr_value */
12838 target_timer_t timerid = get_timer_id(arg1);
12842 } else if (!arg2) {
12843 ret = -TARGET_EFAULT;
12845 timer_t htimer = g_posix_timers[timerid];
12846 struct itimerspec hspec;
12847 ret = get_errno(timer_gettime(htimer, &hspec));
12849 if (host_to_target_itimerspec64(arg2, &hspec)) {
12850 ret = -TARGET_EFAULT;
12857 #ifdef TARGET_NR_timer_getoverrun
12858 case TARGET_NR_timer_getoverrun:
12860 /* args: timer_t timerid */
12861 target_timer_t timerid = get_timer_id(arg1);
12866 timer_t htimer = g_posix_timers[timerid];
12867 ret = get_errno(timer_getoverrun(htimer));
12873 #ifdef TARGET_NR_timer_delete
12874 case TARGET_NR_timer_delete:
12876 /* args: timer_t timerid */
12877 target_timer_t timerid = get_timer_id(arg1);
12882 timer_t htimer = g_posix_timers[timerid];
12883 ret = get_errno(timer_delete(htimer));
12884 g_posix_timers[timerid] = 0;
12890 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12891 case TARGET_NR_timerfd_create:
12892 return get_errno(timerfd_create(arg1,
12893 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12896 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12897 case TARGET_NR_timerfd_gettime:
12899 struct itimerspec its_curr;
12901 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12903 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12904 return -TARGET_EFAULT;
12910 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12911 case TARGET_NR_timerfd_gettime64:
12913 struct itimerspec its_curr;
12915 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12917 if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12918 return -TARGET_EFAULT;
12924 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12925 case TARGET_NR_timerfd_settime:
12927 struct itimerspec its_new, its_old, *p_new;
12930 if (target_to_host_itimerspec(&its_new, arg3)) {
12931 return -TARGET_EFAULT;
12938 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12940 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12941 return -TARGET_EFAULT;
12947 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
12948 case TARGET_NR_timerfd_settime64:
12950 struct itimerspec its_new, its_old, *p_new;
12953 if (target_to_host_itimerspec64(&its_new, arg3)) {
12954 return -TARGET_EFAULT;
12961 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12963 if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
12964 return -TARGET_EFAULT;
12970 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12971 case TARGET_NR_ioprio_get:
12972 return get_errno(ioprio_get(arg1, arg2));
12975 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12976 case TARGET_NR_ioprio_set:
12977 return get_errno(ioprio_set(arg1, arg2, arg3));
12980 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12981 case TARGET_NR_setns:
12982 return get_errno(setns(arg1, arg2));
12984 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12985 case TARGET_NR_unshare:
12986 return get_errno(unshare(arg1));
12988 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12989 case TARGET_NR_kcmp:
12990 return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12992 #ifdef TARGET_NR_swapcontext
12993 case TARGET_NR_swapcontext:
12994 /* PowerPC specific. */
12995 return do_swapcontext(cpu_env, arg1, arg2, arg3);
12997 #ifdef TARGET_NR_memfd_create
12998 case TARGET_NR_memfd_create:
12999 p = lock_user_string(arg1);
13001 return -TARGET_EFAULT;
13003 ret = get_errno(memfd_create(p, arg2));
13004 fd_trans_unregister(ret);
13005 unlock_user(p, arg1, 0);
13008 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13009 case TARGET_NR_membarrier:
13010 return get_errno(membarrier(arg1, arg2));
13014 qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13015 return -TARGET_ENOSYS;
13020 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13021 abi_long arg2, abi_long arg3, abi_long arg4,
13022 abi_long arg5, abi_long arg6, abi_long arg7,
13025 CPUState *cpu = env_cpu(cpu_env);
13028 #ifdef DEBUG_ERESTARTSYS
13029 /* Debug-only code for exercising the syscall-restart code paths
13030 * in the per-architecture cpu main loops: restart every syscall
13031 * the guest makes once before letting it through.
13037 return -TARGET_ERESTARTSYS;
13042 record_syscall_start(cpu, num, arg1,
13043 arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13045 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13046 print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13049 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13050 arg5, arg6, arg7, arg8);
13052 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13053 print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13054 arg3, arg4, arg5, arg6);
13057 record_syscall_return(cpu, num, ret);