4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
32 #include <sys/types.h>
38 #include <sys/mount.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
46 #include <linux/capability.h>
50 int __clone2(int (*fn)(void *), void *child_stack_base,
51 size_t stack_size, int flags, void *arg, ...);
53 #include <sys/socket.h>
57 #include <sys/times.h>
60 #include <sys/statfs.h>
62 #include <sys/sysinfo.h>
63 #include <sys/utsname.h>
64 //#include <sys/user.h>
65 #include <netinet/ip.h>
66 #include <netinet/tcp.h>
67 #include <linux/wireless.h>
68 #include <linux/icmp.h>
69 #include "qemu-common.h"
74 #include <sys/eventfd.h>
77 #include <sys/epoll.h>
80 #include "qemu/xattr.h"
82 #ifdef CONFIG_SENDFILE
83 #include <sys/sendfile.h>
86 #define termios host_termios
87 #define winsize host_winsize
88 #define termio host_termio
89 #define sgttyb host_sgttyb /* same as target */
90 #define tchars host_tchars /* same as target */
91 #define ltchars host_ltchars /* same as target */
93 #include <linux/termios.h>
94 #include <linux/unistd.h>
95 #include <linux/utsname.h>
96 #include <linux/cdrom.h>
97 #include <linux/hdreg.h>
98 #include <linux/soundcard.h>
100 #include <linux/mtio.h>
101 #include <linux/fs.h>
102 #if defined(CONFIG_FIEMAP)
103 #include <linux/fiemap.h>
105 #include <linux/fb.h>
106 #include <linux/vt.h>
107 #include <linux/dm-ioctl.h>
108 #include <linux/reboot.h>
109 #include <linux/route.h>
110 #include <linux/filter.h>
111 #include <linux/blkpg.h>
112 #include "linux_loop.h"
113 #include "cpu-uname.h"
117 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
118 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
122 //#include <linux/msdos_fs.h>
123 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
124 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* _syscallN(type, name, ...): generate a static C wrapper "name" that
 * invokes the raw host system call __NR_<name> via syscall(2) with N
 * arguments.  Used below for syscalls glibc does not (or did not)
 * expose directly.
 * NOTE(review): the brace/body lines of these macros are elided in this
 * excerpt; each expansion is a one-statement function body. */
135 #define _syscall0(type,name) \
136 static type name (void) \
138 return syscall(__NR_##name); \
141 #define _syscall1(type,name,type1,arg1) \
142 static type name (type1 arg1) \
144 return syscall(__NR_##name, arg1); \
147 #define _syscall2(type,name,type1,arg1,type2,arg2) \
148 static type name (type1 arg1,type2 arg2) \
150 return syscall(__NR_##name, arg1, arg2); \
153 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
154 static type name (type1 arg1,type2 arg2,type3 arg3) \
156 return syscall(__NR_##name, arg1, arg2, arg3); \
159 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
160 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
162 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
165 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
167 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
169 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
173 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
174 type5,arg5,type6,arg6) \
175 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
178 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* Alias each sys_<name> wrapper to the real host syscall number so the
 * _syscallN macros above resolve __NR_sys_<name> correctly. */
182 #define __NR_sys_uname __NR_uname
183 #define __NR_sys_getcwd1 __NR_getcwd
184 #define __NR_sys_getdents __NR_getdents
185 #define __NR_sys_getdents64 __NR_getdents64
186 #define __NR_sys_getpriority __NR_getpriority
187 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
188 #define __NR_sys_syslog __NR_syslog
189 #define __NR_sys_tgkill __NR_tgkill
190 #define __NR_sys_tkill __NR_tkill
191 #define __NR_sys_futex __NR_futex
192 #define __NR_sys_inotify_init __NR_inotify_init
193 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
194 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
/* 64-bit hosts have plain lseek; map _llseek onto it.
 * NOTE(review): the condition continues on an elided line. */
196 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
198 #define __NR__llseek __NR_lseek
201 /* Newer kernel ports have llseek() instead of _llseek() */
202 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
203 #define TARGET_NR__llseek TARGET_NR_llseek
/* Instantiate raw-syscall wrappers for everything the emulation layer
 * needs to call directly on the host.  Each wrapper is only generated
 * when both the target needs it (TARGET_NR_*) and the host provides it
 * (__NR_*).  NOTE(review): matching #else/#endif lines are elided in
 * this excerpt. */
207 _syscall0(int, gettid)
209 /* This is a replacement for the host gettid() and must return a host
211 static int gettid(void) {
216 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
218 #if !defined(__NR_getdents) || \
219 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
220 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
222 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
223 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
224 loff_t *, res, uint, wh);
226 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
227 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
228 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
229 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
231 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
232 _syscall2(int,sys_tkill,int,tid,int,sig)
234 #ifdef __NR_exit_group
235 _syscall1(int,exit_group,int,error_code)
237 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
238 _syscall1(int,set_tid_address,int *,tidptr)
240 #if defined(TARGET_NR_futex) && defined(__NR_futex)
241 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
242 const struct timespec *,timeout,int *,uaddr2,int,val3)
244 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
245 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
246 unsigned long *, user_mask_ptr);
247 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
248 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
249 unsigned long *, user_mask_ptr);
250 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
252 _syscall2(int, capget, struct __user_cap_header_struct *, header,
253 struct __user_cap_data_struct *, data);
254 _syscall2(int, capset, struct __user_cap_header_struct *, header,
255 struct __user_cap_data_struct *, data);
/* Translation table between target and host open(2)/fcntl(2) flag
 * encodings.  Each row is { target_mask, target_bits, host_mask,
 * host_bits }; flags whose host value may not exist are guarded.
 * NOTE(review): the terminating { 0, 0, 0, 0 } row and closing brace
 * are elided in this excerpt. */
257 static bitmask_transtbl fcntl_flags_tbl[] = {
258 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
259 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
260 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
261 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
262 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
263 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
264 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
265 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
266 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
267 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
268 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
269 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
270 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
271 #if defined(O_DIRECT)
272 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
274 #if defined(O_NOATIME)
275 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
277 #if defined(O_CLOEXEC)
278 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
281 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
283 /* Don't terminate the list prematurely on 64-bit host+guest. */
284 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
285 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
/* Copy one utsname string field into a fixed __NEW_UTS_LEN + 1 byte
 * destination, always NUL-terminating (strncpy alone does not
 * guarantee termination).  Wrapped in do { } while (0) so the
 * multi-statement body is safe inside unbraced if/else. */
#define COPY_UTSNAME_FIELD(dest, src) \
    do { \
        /* __NEW_UTS_LEN doesn't include terminating null */ \
        (void) strncpy((dest), (src), __NEW_UTS_LEN); \
        (dest)[__NEW_UTS_LEN] = '\0'; \
    } while (0)

/* Fill a Linux "struct new_utsname" (the layout the guest-visible
 * uname syscall ABI uses) from the host's struct utsname.
 *
 * Returns 0 on success, -1 on failure (errno is set by uname()). */
static int sys_uname(struct new_utsname *buf)
{
    struct utsname uts_buf;

    if (uname(&uts_buf) < 0)
        return (-1);

    /*
     * Just in case these have some differences, we
     * translate utsname to new_utsname (which is the
     * struct linux kernel uses).
     */

    /* Zero first so any bytes past the copied strings are defined. */
    memset(buf, 0, sizeof(*buf));
    COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
    COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
    COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
    COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
    COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
    /* struct utsname only has a domainname member under _GNU_SOURCE. */
    COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
    return (0);
}

#undef COPY_UTSNAME_FIELD
/* getcwd() variant that follows the kernel getcwd syscall convention:
 * on success, return the number of bytes stored in buf *including* the
 * trailing NUL; on failure, return -1 (errno is set by getcwd()).
 * Without the explicit -1 the error path would fall through and run
 * strlen() on an indeterminate buffer. */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}
/* Host openat() wrapper: only forwards 'mode' when O_CREAT is set,
 * since open(2)/openat(2) take the extra mode argument only then. */
333 #ifdef TARGET_NR_openat
334 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
337 * open(2) has extra parameter 'mode' when called with
340 if ((flags & O_CREAT) != 0) {
341 return (openat(dirfd, pathname, flags, mode));
343 return (openat(dirfd, pathname, flags));
/* utimensat support: prefer the libc wrapper when configure found one
 * (CONFIG_UTIMENSAT); otherwise fall back to a raw-syscall wrapper, or
 * (in an elided #else branch) an ENOSYS stub when the host has neither.
 * A NULL pathname means "operate on dirfd itself", hence futimens(). */
347 #ifdef TARGET_NR_utimensat
348 #ifdef CONFIG_UTIMENSAT
349 static int sys_utimensat(int dirfd, const char *pathname,
350 const struct timespec times[2], int flags)
352 if (pathname == NULL)
353 return futimens(dirfd, times);
355 return utimensat(dirfd, pathname, times, flags);
357 #elif defined(__NR_utimensat)
358 #define __NR_sys_utimensat __NR_utimensat
359 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
360 const struct timespec *,tsp,int,flags)
362 static int sys_utimensat(int dirfd, const char *pathname,
363 const struct timespec times[2], int flags)
369 #endif /* TARGET_NR_utimensat */
/* inotify support: thin host wrappers when both target and host have
 * the syscalls.  If CONFIG_INOTIFY is absent, the TARGET_NR_inotify_*
 * macros are #undef'd so the big syscall switch reports ENOSYS --
 * userspace can usually survive without inotify. */
371 #ifdef CONFIG_INOTIFY
372 #include <sys/inotify.h>
374 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
375 static int sys_inotify_init(void)
377 return (inotify_init());
380 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
381 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
383 return (inotify_add_watch(fd, pathname, mask));
386 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
387 static int sys_inotify_rm_watch(int fd, int32_t wd)
389 return (inotify_rm_watch(fd, wd));
392 #ifdef CONFIG_INOTIFY1
393 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
394 static int sys_inotify_init1(int flags)
396 return (inotify_init1(flags));
401 /* Userspace can usually survive runtime without inotify */
402 #undef TARGET_NR_inotify_init
403 #undef TARGET_NR_inotify_init1
404 #undef TARGET_NR_inotify_add_watch
405 #undef TARGET_NR_inotify_rm_watch
406 #endif /* CONFIG_INOTIFY */
/* Raw-syscall wrappers for ppoll, pselect6 and prlimit64.  When the
 * host headers predate the syscall, __NR_* is defined to -1 so the
 * wrapper compiles and the call fails cleanly with ENOSYS at runtime. */
408 #if defined(TARGET_NR_ppoll)
410 # define __NR_ppoll -1
412 #define __NR_sys_ppoll __NR_ppoll
413 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
414 struct timespec *, timeout, const sigset_t *, sigmask,
418 #if defined(TARGET_NR_pselect6)
419 #ifndef __NR_pselect6
420 # define __NR_pselect6 -1
422 #define __NR_sys_pselect6 __NR_pselect6
423 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
424 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
427 #if defined(TARGET_NR_prlimit64)
428 #ifndef __NR_prlimit64
429 # define __NR_prlimit64 -1
431 #define __NR_sys_prlimit64 __NR_prlimit64
432 /* The glibc rlimit structure may not be that used by the underlying syscall */
433 struct host_rlimit64 {
437 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
438 const struct host_rlimit64 *, new_limit,
439 struct host_rlimit64 *, old_limit)
/* POSIX timer slot table: guest timer ids index into g_posix_timers;
 * a zero entry is free.  next_free_host_timer() claims a slot by
 * writing a placeholder (timer_t)1 and returns its index (the elided
 * tail returns -1 when all 32 slots are in use). */
443 #if defined(TARGET_NR_timer_create)
444 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
445 static timer_t g_posix_timers[32] = { 0, } ;
447 static inline int next_free_host_timer(void)
450 /* FIXME: Does finding the next free slot require a lock? */
451 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
452 if (g_posix_timers[k] == 0) {
453 g_posix_timers[k] = (timer_t) 1;
/* Per-arch predicate: does this target ABI require 64-bit syscall
 * arguments to start on an even register (ARM EABI, MIPS, PPC32 SysV)? */
461 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
463 static inline int regpairs_aligned(void *cpu_env) {
464 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
466 #elif defined(TARGET_MIPS)
467 static inline int regpairs_aligned(void *cpu_env) { return 1; }
468 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
469 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
470 * of registers which translates to the same as ARM/MIPS, because we start with
472 static inline int regpairs_aligned(void *cpu_env) { return 1; }
474 static inline int regpairs_aligned(void *cpu_env) { return 0; }
/* errno translation between host and target.  host_to_target_errno_table
 * lists only the errnos whose numeric values differ across Linux archs;
 * unlisted entries stay 0 and the helpers below fall back to returning
 * the errno unchanged.  The reverse table is built at startup in
 * syscall_init(). */
477 #define ERRNO_TABLE_SIZE 1200
479 /* target_to_host_errno_table[] is initialized from
480 * host_to_target_errno_table[] in syscall_init(). */
481 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
485 * This list is the union of errno values overridden in asm-<arch>/errno.h
486 * minus the errnos that are not actually generic to all archs.
488 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
489 [EIDRM] = TARGET_EIDRM,
490 [ECHRNG] = TARGET_ECHRNG,
491 [EL2NSYNC] = TARGET_EL2NSYNC,
492 [EL3HLT] = TARGET_EL3HLT,
493 [EL3RST] = TARGET_EL3RST,
494 [ELNRNG] = TARGET_ELNRNG,
495 [EUNATCH] = TARGET_EUNATCH,
496 [ENOCSI] = TARGET_ENOCSI,
497 [EL2HLT] = TARGET_EL2HLT,
498 [EDEADLK] = TARGET_EDEADLK,
499 [ENOLCK] = TARGET_ENOLCK,
500 [EBADE] = TARGET_EBADE,
501 [EBADR] = TARGET_EBADR,
502 [EXFULL] = TARGET_EXFULL,
503 [ENOANO] = TARGET_ENOANO,
504 [EBADRQC] = TARGET_EBADRQC,
505 [EBADSLT] = TARGET_EBADSLT,
506 [EBFONT] = TARGET_EBFONT,
507 [ENOSTR] = TARGET_ENOSTR,
508 [ENODATA] = TARGET_ENODATA,
509 [ETIME] = TARGET_ETIME,
510 [ENOSR] = TARGET_ENOSR,
511 [ENONET] = TARGET_ENONET,
512 [ENOPKG] = TARGET_ENOPKG,
513 [EREMOTE] = TARGET_EREMOTE,
514 [ENOLINK] = TARGET_ENOLINK,
515 [EADV] = TARGET_EADV,
516 [ESRMNT] = TARGET_ESRMNT,
517 [ECOMM] = TARGET_ECOMM,
518 [EPROTO] = TARGET_EPROTO,
519 [EDOTDOT] = TARGET_EDOTDOT,
520 [EMULTIHOP] = TARGET_EMULTIHOP,
521 [EBADMSG] = TARGET_EBADMSG,
522 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
523 [EOVERFLOW] = TARGET_EOVERFLOW,
524 [ENOTUNIQ] = TARGET_ENOTUNIQ,
525 [EBADFD] = TARGET_EBADFD,
526 [EREMCHG] = TARGET_EREMCHG,
527 [ELIBACC] = TARGET_ELIBACC,
528 [ELIBBAD] = TARGET_ELIBBAD,
529 [ELIBSCN] = TARGET_ELIBSCN,
530 [ELIBMAX] = TARGET_ELIBMAX,
531 [ELIBEXEC] = TARGET_ELIBEXEC,
532 [EILSEQ] = TARGET_EILSEQ,
533 [ENOSYS] = TARGET_ENOSYS,
534 [ELOOP] = TARGET_ELOOP,
535 [ERESTART] = TARGET_ERESTART,
536 [ESTRPIPE] = TARGET_ESTRPIPE,
537 [ENOTEMPTY] = TARGET_ENOTEMPTY,
538 [EUSERS] = TARGET_EUSERS,
539 [ENOTSOCK] = TARGET_ENOTSOCK,
540 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
541 [EMSGSIZE] = TARGET_EMSGSIZE,
542 [EPROTOTYPE] = TARGET_EPROTOTYPE,
543 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
544 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
545 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
546 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
547 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
548 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
549 [EADDRINUSE] = TARGET_EADDRINUSE,
550 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
551 [ENETDOWN] = TARGET_ENETDOWN,
552 [ENETUNREACH] = TARGET_ENETUNREACH,
553 [ENETRESET] = TARGET_ENETRESET,
554 [ECONNABORTED] = TARGET_ECONNABORTED,
555 [ECONNRESET] = TARGET_ECONNRESET,
556 [ENOBUFS] = TARGET_ENOBUFS,
557 [EISCONN] = TARGET_EISCONN,
558 [ENOTCONN] = TARGET_ENOTCONN,
559 [EUCLEAN] = TARGET_EUCLEAN,
560 [ENOTNAM] = TARGET_ENOTNAM,
561 [ENAVAIL] = TARGET_ENAVAIL,
562 [EISNAM] = TARGET_EISNAM,
563 [EREMOTEIO] = TARGET_EREMOTEIO,
564 [ESHUTDOWN] = TARGET_ESHUTDOWN,
565 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
566 [ETIMEDOUT] = TARGET_ETIMEDOUT,
567 [ECONNREFUSED] = TARGET_ECONNREFUSED,
568 [EHOSTDOWN] = TARGET_EHOSTDOWN,
569 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
570 [EALREADY] = TARGET_EALREADY,
571 [EINPROGRESS] = TARGET_EINPROGRESS,
572 [ESTALE] = TARGET_ESTALE,
573 [ECANCELED] = TARGET_ECANCELED,
574 [ENOMEDIUM] = TARGET_ENOMEDIUM,
575 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
577 [ENOKEY] = TARGET_ENOKEY,
580 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
583 [EKEYREVOKED] = TARGET_EKEYREVOKED,
586 [EKEYREJECTED] = TARGET_EKEYREJECTED,
589 [EOWNERDEAD] = TARGET_EOWNERDEAD,
591 #ifdef ENOTRECOVERABLE
592 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
/* Map a host errno to the target's value; identity for unlisted errnos
 * (elided fallback "return err").  Callers must pass 0 <= err <
 * ERRNO_TABLE_SIZE. */
596 static inline int host_to_target_errno(int err)
598 if(host_to_target_errno_table[err])
599 return host_to_target_errno_table[err];
/* Inverse mapping, using the table built in syscall_init(). */
603 static inline int target_to_host_errno(int err)
605 if (target_to_host_errno_table[err])
606 return target_to_host_errno_table[err];
/* Wrap a host syscall result: on failure (elided check of ret) return
 * the negated, target-encoded errno. */
610 static inline abi_long get_errno(abi_long ret)
613 return -host_to_target_errno(errno);
/* Linux syscall convention: values in [-4095, -1] are error codes. */
618 static inline int is_error(abi_long ret)
620 return (abi_ulong)ret >= (abi_ulong)(-4096);
/* strerror() for a *target* errno; out-of-range values fall into the
 * elided "Unknown errno" branch. */
623 char *target_strerror(int err)
625 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
628 return strerror(target_to_host_errno(err));
/* Guest heap state: target_brk is the current break, target_original_brk
 * the value at program load, brk_page the top of the host pages already
 * reserved for the heap. */
631 static abi_ulong target_brk;
632 static abi_ulong target_original_brk;
633 static abi_ulong brk_page;
635 void target_set_brk(abi_ulong new_brk)
637 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
638 brk_page = HOST_PAGE_ALIGN(target_brk);
641 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
642 #define DEBUGF_BRK(message, args...)
644 /* do_brk() must return target values and target errnos. */
645 abi_long do_brk(abi_ulong new_brk)
647 abi_long mapped_addr;
650 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
/* Querying (new_brk == 0) or shrinking below the original break just
 * reports the current break (elided returns follow the DEBUGF_BRK
 * lines). */
653 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
656 if (new_brk < target_original_brk) {
657 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
662 /* If the new brk is less than the highest page reserved to the
663 * target heap allocation, set it and we're almost done... */
664 if (new_brk <= brk_page) {
665 /* Heap contents are initialized to zero, as for anonymous
667 if (new_brk > target_brk) {
668 memset(g2h(target_brk), 0, new_brk - target_brk);
670 target_brk = new_brk;
671 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
675 /* We need to allocate more memory after the brk... Note that
676 * we don't use MAP_FIXED because that will map over the top of
677 * any existing mapping (like the one with the host libc or qemu
678 * itself); instead we treat "mapped but at wrong address" as
679 * a failure and unmap again.
681 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
682 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
683 PROT_READ|PROT_WRITE,
684 MAP_ANON|MAP_PRIVATE, 0, 0));
686 if (mapped_addr == brk_page) {
687 /* Heap contents are initialized to zero, as for anonymous
688 * mapped pages. Technically the new pages are already
689 * initialized to zero since they *are* anonymous mapped
690 * pages, however we have to take care with the contents that
691 * come from the remaining part of the previous page: it may
692 * contains garbage data due to a previous heap usage (grown
694 memset(g2h(target_brk), 0, brk_page - target_brk);
696 target_brk = new_brk;
697 brk_page = HOST_PAGE_ALIGN(target_brk);
698 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
701 } else if (mapped_addr != -1) {
702 /* Mapped but at wrong address, meaning there wasn't actually
703 * enough space for this brk.
705 target_munmap(mapped_addr, new_alloc_size);
707 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
710 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
713 #if defined(TARGET_ALPHA)
714 /* We (partially) emulate OSF/1 on Alpha, which requires we
715 return a proper errno, not an unchanged brk value. */
716 return -TARGET_ENOMEM;
718 /* For everything else, return the previous break. */
/* Convert a guest fd_set (an array of abi_ulong words in guest memory,
 * possibly byte-swapped) into a host fd_set.  Returns 0 or
 * -TARGET_EFAULT if the guest range cannot be locked. */
722 static inline abi_long copy_from_user_fdset(fd_set *fds,
723 abi_ulong target_fds_addr,
727 abi_ulong b, *target_fds;
/* nw = number of abi_ulong words needed for n fd bits. */
729 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
730 if (!(target_fds = lock_user(VERIFY_READ,
732 sizeof(abi_ulong) * nw,
734 return -TARGET_EFAULT;
738 for (i = 0; i < nw; i++) {
739 /* grab the abi_ulong */
740 __get_user(b, &target_fds[i]);
741 for (j = 0; j < TARGET_ABI_BITS; j++) {
742 /* check the bit inside the abi_ulong */
749 unlock_user(target_fds, target_fds_addr, 0);
/* NULL-address variant: a zero guest pointer means "no set"; *fds_ptr
 * is pointed at fds only when an address was supplied (elided else
 * branch sets it to NULL). */
754 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
755 abi_ulong target_fds_addr,
758 if (target_fds_addr) {
759 if (copy_from_user_fdset(fds, target_fds_addr, n))
760 return -TARGET_EFAULT;
/* Reverse direction: pack a host fd_set back into guest memory word by
 * word via __put_user (which handles byte order). */
768 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
774 abi_ulong *target_fds;
776 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
777 if (!(target_fds = lock_user(VERIFY_WRITE,
779 sizeof(abi_ulong) * nw,
781 return -TARGET_EFAULT;
784 for (i = 0; i < nw; i++) {
786 for (j = 0; j < TARGET_ABI_BITS; j++) {
787 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
790 __put_user(v, &target_fds[i]);
793 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
/* Rescale a clock_t tick count from the host's HZ to the target's HZ;
 * a no-op (elided "return ticks") when the rates match.  The __alpha__
 * branch contents are elided in this excerpt. */
798 #if defined(__alpha__)
804 static inline abi_long host_to_target_clock_t(long ticks)
806 #if HOST_HZ == TARGET_HZ
809 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
/* Copy a host struct rusage into guest memory, byte-swapping every
 * field with tswapal.  Returns 0 or -TARGET_EFAULT. */
813 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
814 const struct rusage *rusage)
816 struct target_rusage *target_rusage;
818 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
819 return -TARGET_EFAULT;
820 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
821 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
822 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
823 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
824 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
825 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
826 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
827 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
828 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
829 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
830 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
831 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
832 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
833 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
834 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
835 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
836 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
837 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
838 unlock_user_struct(target_rusage, target_addr, 1);
/* Convert a guest rlimit value to the host's rlim_t; the guest's
 * RLIM_INFINITY sentinel, or any value that does not survive the
 * round-trip into rlim_t, maps to host RLIM_INFINITY. */
843 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
845 abi_ulong target_rlim_swap;
848 target_rlim_swap = tswapal(target_rlim);
849 if (target_rlim_swap == TARGET_RLIM_INFINITY)
850 return RLIM_INFINITY;
852 result = target_rlim_swap;
853 if (target_rlim_swap != (rlim_t)result)
854 return RLIM_INFINITY;
/* Reverse conversion: host infinity, or anything unrepresentable in
 * the guest's abi_long, becomes TARGET_RLIM_INFINITY. */
859 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
861 abi_ulong target_rlim_swap;
864 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
865 target_rlim_swap = TARGET_RLIM_INFINITY;
867 target_rlim_swap = rlim;
868 result = tswapal(target_rlim_swap);
/* Map a guest RLIMIT_* resource code to the host constant (numeric
 * values differ across architectures).  The elided default returns the
 * code unchanged. */
873 static inline int target_to_host_resource(int code)
876 case TARGET_RLIMIT_AS:
878 case TARGET_RLIMIT_CORE:
880 case TARGET_RLIMIT_CPU:
882 case TARGET_RLIMIT_DATA:
884 case TARGET_RLIMIT_FSIZE:
886 case TARGET_RLIMIT_LOCKS:
888 case TARGET_RLIMIT_MEMLOCK:
889 return RLIMIT_MEMLOCK;
890 case TARGET_RLIMIT_MSGQUEUE:
891 return RLIMIT_MSGQUEUE;
892 case TARGET_RLIMIT_NICE:
894 case TARGET_RLIMIT_NOFILE:
895 return RLIMIT_NOFILE;
896 case TARGET_RLIMIT_NPROC:
898 case TARGET_RLIMIT_RSS:
900 case TARGET_RLIMIT_RTPRIO:
901 return RLIMIT_RTPRIO;
902 case TARGET_RLIMIT_SIGPENDING:
903 return RLIMIT_SIGPENDING;
904 case TARGET_RLIMIT_STACK:
/* Copy a struct timeval from guest memory into a host struct; the
 * __get_user accessors handle guest byte order.  Returns 0 or
 * -TARGET_EFAULT. */
911 static inline abi_long copy_from_user_timeval(struct timeval *tv,
912 abi_ulong target_tv_addr)
914 struct target_timeval *target_tv;
916 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
917 return -TARGET_EFAULT;
919 __get_user(tv->tv_sec, &target_tv->tv_sec);
920 __get_user(tv->tv_usec, &target_tv->tv_usec);
922 unlock_user_struct(target_tv, target_tv_addr, 0);
/* Mirror of the above, writing a host timeval back to guest memory. */
927 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
928 const struct timeval *tv)
930 struct target_timeval *target_tv;
932 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
933 return -TARGET_EFAULT;
935 __put_user(tv->tv_sec, &target_tv->tv_sec);
936 __put_user(tv->tv_usec, &target_tv->tv_usec);
938 unlock_user_struct(target_tv, target_tv_addr, 1);
/* POSIX message queue attribute marshalling (only built when both
 * guest and host support mq_open). */
943 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/* Guest -> host struct mq_attr; returns 0 or -TARGET_EFAULT. */
946 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
947 abi_ulong target_mq_attr_addr)
949 struct target_mq_attr *target_mq_attr;
951 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
952 target_mq_attr_addr, 1))
953 return -TARGET_EFAULT;
955 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
956 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
957 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
958 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
960 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
/* Host -> guest struct mq_attr; returns 0 or -TARGET_EFAULT. */
965 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
966 const struct mq_attr *attr)
968 struct target_mq_attr *target_mq_attr;
970 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
971 target_mq_attr_addr, 0))
972 return -TARGET_EFAULT;
974 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
975 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
976 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
977 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
979 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
985 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
986 /* do_select() must return target values and target errnos. */
/* Marshal the three fd sets and the optional timeout in from guest
 * memory, run host select(), then copy the (kernel-modified) sets and
 * timeout back out.  A zero guest address means "set not supplied". */
987 static abi_long do_select(int n,
988 abi_ulong rfd_addr, abi_ulong wfd_addr,
989 abi_ulong efd_addr, abi_ulong target_tv_addr)
991 fd_set rfds, wfds, efds;
992 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
993 struct timeval tv, *tv_ptr;
996 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1000 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1004 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1009 if (target_tv_addr) {
1010 if (copy_from_user_timeval(&tv, target_tv_addr))
1011 return -TARGET_EFAULT;
1017 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1019 if (!is_error(ret)) {
1020 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1021 return -TARGET_EFAULT;
1022 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1023 return -TARGET_EFAULT;
1024 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1025 return -TARGET_EFAULT;
/* Linux select() updates the timeout with the time remaining; pass
 * that back to the guest too. */
1027 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1028 return -TARGET_EFAULT;
/* pipe2() passthrough; the elided #else branch returns -ENOSYS when
 * the host lacks CONFIG_PIPE2. */
1035 static abi_long do_pipe2(int host_pipe[], int flags)
1038 return pipe2(host_pipe, flags);
/* Create a pipe on the host, then deliver the two fds to the guest.
 * is_pipe2 distinguishes the guest's pipe2 syscall from plain pipe. */
1044 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1045 int flags, int is_pipe2)
1049 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1052 return get_errno(ret);
1054 /* Several targets have special calling conventions for the original
1055 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1057 #if defined(TARGET_ALPHA)
/* These ABIs return fd[0] in the normal result register and fd[1] in a
 * second register, instead of writing both through the pointer. */
1058 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1059 return host_pipe[0];
1060 #elif defined(TARGET_MIPS)
1061 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1062 return host_pipe[0];
1063 #elif defined(TARGET_SH4)
1064 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1065 return host_pipe[0];
1066 #elif defined(TARGET_SPARC)
1067 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1068 return host_pipe[0];
/* Default path: store both fds into the guest's int[2] array. */
1072 if (put_user_s32(host_pipe[0], pipedes)
1073 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1074 return -TARGET_EFAULT;
1075 return get_errno(ret);
/* Convert a guest ip_mreq/ip_mreqn (multicast membership request) to
 * the host structure; the ifindex field only exists in the longer
 * ip_mreqn layout, hence the length check. */
1078 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1079 abi_ulong target_addr,
1082 struct target_ip_mreqn *target_smreqn;
1084 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1086 return -TARGET_EFAULT;
/* s_addr fields are kept in network byte order on both sides, so no
 * swap is needed for them. */
1087 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1088 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1089 if (len == sizeof(struct target_ip_mreqn))
1090 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1091 unlock_user(target_smreqn, target_addr, 0);
/* Copy a guest sockaddr to the host, fixing byte order of sa_family
 * and normalizing AF_UNIX sun_path lengths. */
1096 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1097 abi_ulong target_addr,
1100 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1101 sa_family_t sa_family;
1102 struct target_sockaddr *target_saddr;
1104 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1106 return -TARGET_EFAULT;
1108 sa_family = tswap16(target_saddr->sa_family);
1110 /* Oops. The caller might send a incomplete sun_path; sun_path
1111 * must be terminated by \0 (see the manual page), but
1112 * unfortunately it is quite common to specify sockaddr_un
1113 * length as "strlen(x->sun_path)" while it should be
1114 * "strlen(...) + 1". We'll fix that here if needed.
1115 * Linux kernel has a similar feature.
1118 if (sa_family == AF_UNIX) {
1119 if (len < unix_maxlen && len > 0) {
1120 char *cp = (char*)target_saddr;
/* If the last supplied byte is non-NUL but the next one is NUL, the
 * caller forgot the terminator: the elided body extends len by one. */
1122 if ( cp[len-1] && !cp[len] )
1125 if (len > unix_maxlen)
1129 memcpy(addr, target_saddr, len);
1130 addr->sa_family = sa_family;
1131 unlock_user(target_saddr, target_addr, 0);
/* Reverse direction: copy a host sockaddr into guest memory, swapping
 * sa_family back to guest byte order. */
1136 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1137 struct sockaddr *addr,
1140 struct target_sockaddr *target_saddr;
1142 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1144 return -TARGET_EFAULT;
1145 memcpy(target_saddr, addr, len);
1146 target_saddr->sa_family = tswap16(addr->sa_family);
1147 unlock_user(target_saddr, target_addr, len);
/* Convert the ancillary-data (control message) chain of a guest msghdr
 * into host format.  SCM_RIGHTS fd payloads are swapped element-wise;
 * any other cmsg type is passed through as raw bytes with a warning. */
1152 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1153 struct target_msghdr *target_msgh)
1155 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1156 abi_long msg_controllen;
1157 abi_ulong target_cmsg_addr;
1158 struct target_cmsghdr *target_cmsg;
1159 socklen_t space = 0;
1161 msg_controllen = tswapal(target_msgh->msg_controllen);
/* Too short to hold even one header: nothing to convert (elided goto
 * to the common exit). */
1162 if (msg_controllen < sizeof (struct target_cmsghdr))
1164 target_cmsg_addr = tswapal(target_msgh->msg_control);
1165 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1167 return -TARGET_EFAULT;
1169 while (cmsg && target_cmsg) {
1170 void *data = CMSG_DATA(cmsg);
1171 void *target_data = TARGET_CMSG_DATA(target_cmsg);
/* Payload length = total cmsg_len minus the (aligned) header size. */
1173 int len = tswapal(target_cmsg->cmsg_len)
1174 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1176 space += CMSG_SPACE(len);
1177 if (space > msgh->msg_controllen) {
1178 space -= CMSG_SPACE(len);
1179 gemu_log("Host cmsg overflow\n");
1183 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1184 cmsg->cmsg_level = SOL_SOCKET;
1186 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1188 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1189 cmsg->cmsg_len = CMSG_LEN(len);
1191 if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1192 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1193 memcpy(data, target_data, len);
1195 int *fd = (int *)data;
1196 int *target_fd = (int *)target_data;
1197 int i, numfds = len / sizeof(int);
1199 for (i = 0; i < numfds; i++)
1200 fd[i] = tswap32(target_fd[i]);
1203 cmsg = CMSG_NXTHDR(msgh, cmsg);
1204 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1206 unlock_user(target_cmsg, target_cmsg_addr, 0);
/* Report the converted total back to the host msghdr. */
1208 msgh->msg_controllen = space;
/*
 * Convert host ancillary (cmsg) data back into the target's control
 * buffer after a receive.  SCM_RIGHTS (fds), SO_TIMESTAMP
 * (struct timeval) and SCM_CREDENTIALS (struct ucred) are converted
 * field-by-field; other types are byte-copied with a warning.
 * NOTE(review): some lines (braces, case labels, early return) are not
 * visible in this chunk; comments cover only the code shown.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;
    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* No room for even one target header: nothing to deliver. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
        return -TARGET_EFAULT;
    /* Walk both cmsg chains in lock-step. */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);
        /* Payload length = total host length minus the host header. */
        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        /* Guard against overrunning the guest's control buffer. */
        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
        switch (cmsg->cmsg_level) {
        switch (cmsg->cmsg_type) {
            /* SCM_RIGHTS: byte-swap each passed file descriptor. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);
            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
            /* SO_TIMESTAMP: payload is a struct timeval. */
            struct timeval *tv = (struct timeval *)data;
            struct target_timeval *target_tv =
                (struct target_timeval *)target_data;
            if (len != sizeof(struct timeval))
            /* copy struct timeval to target */
            target_tv->tv_sec = tswapal(tv->tv_sec);
            target_tv->tv_usec = tswapal(tv->tv_usec);
        case SCM_CREDENTIALS:
            /* Sender credentials: pid/uid/gid triple. */
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;
            __put_user(cred->pid, &target_cred->pid);
            __put_user(cred->uid, &target_cred->uid);
            __put_user(cred->gid, &target_cred->gid);
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    unlock_user(target_cmsg, target_cmsg_addr, space);
    /* Report back how much control data the guest actually received. */
    target_msgh->msg_controllen = tswapal(space);
/* do_setsockopt() Must return target values and target errnos. */
/*
 * Emulate setsockopt(2): translate the target-level option constants,
 * fetch the option value from guest memory in the right width, and
 * forward to the host call.
 * NOTE(review): many case labels, braces and declarations are missing
 * from this view; comments describe only the visible code.
 */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;
        /* TCP options all take an 'int' value. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;
        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
    case IP_ROUTER_ALERT:
    case IP_MTU_DISCOVER:
    case IP_MULTICAST_TTL:
    case IP_MULTICAST_LOOP:
        /* These IP options accept either a full int or a single byte. */
        if (optlen >= sizeof(uint32_t)) {
            if (get_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else if (optlen >= 1) {
            if (get_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
    case IP_ADD_MEMBERSHIP:
    case IP_DROP_MEMBERSHIP:
        /* Accept either the short ip_mreq or the full ip_mreqn form. */
        if (optlen < sizeof (struct target_ip_mreq) ||
            optlen > sizeof (struct target_ip_mreqn))
            return -TARGET_EINVAL;
        ip_mreq = (struct ip_mreqn *) alloca(optlen);
        target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
        ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
    case IP_BLOCK_SOURCE:
    case IP_UNBLOCK_SOURCE:
    case IP_ADD_SOURCE_MEMBERSHIP:
    case IP_DROP_SOURCE_MEMBERSHIP:
        if (optlen != sizeof (struct target_ip_mreq_source))
            return -TARGET_EINVAL;
        /* Struct is all in_addr fields, so guest memory is passed as-is. */
        ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
        ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
        unlock_user (ip_mreq_source, optval_addr, 0);
    case IPV6_MTU_DISCOVER:
    case IPV6_RECVPKTINFO:
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname,
                                   &val, sizeof(val)));
        /* struct icmp_filter takes an u32 value */
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname,
                                   &val, sizeof(val)));
    case TARGET_SOL_SOCKET:
        case TARGET_SO_RCVTIMEO:
                /* Timeout options carry a struct timeval, not an int. */
                optname = SO_RCVTIMEO;
                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
        case TARGET_SO_ATTACH_FILTER:
            /* BPF socket filter: deep-copy and byte-swap the program. */
            struct target_sock_fprog *tfprog;
            struct target_sock_filter *tfilter;
            struct sock_fprog fprog;
            struct sock_filter *filter;
            if (optlen != sizeof(*tfprog)) {
                return -TARGET_EINVAL;
            if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;
            fprog.len = tswap16(tfprog->len);
            filter = malloc(fprog.len * sizeof(*filter));
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            /* Swap each BPF instruction's multi-byte fields. */
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
                filter[i].jt = tfilter[i].jt;
                filter[i].jf = tfilter[i].jf;
                filter[i].k = tswap32(tfilter[i].k);
            fprog.filter = filter;
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                            SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
            unlock_user_struct(tfilter, tfprog->filter, 1);
            unlock_user_struct(tfprog, optval_addr, 1);
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
        case TARGET_SO_TYPE:
        case TARGET_SO_ERROR:
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
        /* Common tail for all plain-int SOL_SOCKET options. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;
        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
/* do_getsockopt() Must return target values and target errnos. */
/*
 * Emulate getsockopt(2): translate target option constants, call the
 * host, and write the result back to guest memory in the width the
 * guest asked for.
 * NOTE(review): many case labels, braces and declarations are missing
 * from this view; comments describe only the visible code.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
    case TARGET_SOL_SOCKET:
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
        case TARGET_SO_PEERCRED: {
            /* Fetch the host ucred, then convert into the guest struct. */
            struct target_ucred *tcr;
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
                return -TARGET_EINVAL;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
        case TARGET_SO_TYPE:
        case TARGET_SO_ERROR:
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
        /* TCP options all take an 'int' value. */
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
            return -TARGET_EINVAL;
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            /* Guest asked for a full int: write all 4 bytes. */
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
            /* Guest supplied a short buffer: write a single byte. */
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
    case IP_ROUTER_ALERT:
    case IP_MTU_DISCOVER:
    case IP_MULTICAST_TTL:
    case IP_MULTICAST_LOOP:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
            return -TARGET_EINVAL;
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        /* Byte-sized write is only safe when the value fits in a byte. */
        if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
            if (put_user_u32(len, optlen)
                || put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
            if (len > sizeof(int))
            if (put_user_u32(len, optlen)
                || put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        ret = -TARGET_ENOPROTOOPT;
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
        ret = -TARGET_EOPNOTSUPP;
/*
 * Read a target iovec array and lock each referenced guest buffer into
 * host memory, returning a calloc'd host iovec array.  The caller must
 * release it with unlock_iovec().
 * NOTE(review): error paths (errno setting, cleanup loop, returns) are
 * not visible in this chunk; comments cover only the code shown.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
    struct target_iovec *target_vec;
    abi_ulong total_len, max_len;
    /* Reject out-of-range iovec counts up front. */
    if (count < 0 || count > IOV_MAX) {
    vec = calloc(count, sizeof(struct iovec));
    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
            vec[i].iov_base = lock_user(type, base, len, copy);
            if (!vec[i].iov_base) {
            /* Clamp the running total so it never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            vec[i].iov_len = len;
    unlock_user(target_vec, target_addr, 0);
    unlock_user(target_vec, target_addr, 0);
1811 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1812 int count, int copy)
1814 struct target_iovec *target_vec;
1817 target_vec = lock_user(VERIFY_READ, target_addr,
1818 count * sizeof(struct target_iovec), 1);
1820 for (i = 0; i < count; i++) {
1821 abi_ulong base = tswapal(target_vec[i].iov_base);
1822 abi_long len = tswapal(target_vec[i].iov_base);
1826 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1828 unlock_user(target_vec, target_addr, 0);
/*
 * Translate a target socket type value (base type plus the
 * TARGET_SOCK_CLOEXEC / TARGET_SOCK_NONBLOCK flags) into the host
 * encoding, updating *type in place.  Returns -TARGET_EINVAL when a
 * requested flag has no host representation at all.
 * NOTE(review): break/return lines are missing from this view.
 */
static inline int target_to_host_sock_type(int *type)
    int target_type = *type;
    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        /* Other type values are assumed to match the host numbering. */
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
        return -TARGET_EINVAL;
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        /* No SOCK_NONBLOCK and no fcntl fallback: cannot honor the flag. */
        return -TARGET_EINVAL;
/* Try to emulate socket type flags after socket creation. */
/* When the host lacks SOCK_NONBLOCK, approximate it with
   fcntl(O_NONBLOCK) on the freshly created descriptor. */
static int sock_flags_fixup(int fd, int target_type)
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            return -TARGET_EINVAL;
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
    /* Keep the original type so flag emulation can inspect it later. */
    int target_type = type;
    ret = target_to_host_sock_type(&type);
    /* Netlink sockets are not emulated; refuse them up front. */
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */
    ret = get_errno(socket(domain, type, protocol));
        ret = sock_flags_fixup(ret, target_type);
/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    /* NOTE(review): the +1 presumably leaves room for a NUL appended by
       target_to_host_sockaddr (e.g. unix-domain paths) — confirm. */
    addr = alloca(addrlen+1);
    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    return get_errno(bind(sockfd, addr, addrlen));
/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    /* Convert the guest sockaddr on the stack, then forward. */
    addr = alloca(addrlen);
    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    return get_errno(connect(sockfd, addr, addrlen));
/* do_sendrecvmsg_locked() Must return target values and target errnos. */
/*
 * Shared worker for sendmsg/recvmsg on an already-locked target
 * msghdr: converts name, iovec array and control data to host form,
 * performs the call, and converts results back for receives.
 * NOTE(review): several lines (declarations, braces, goto cleanup) are
 * missing from this view; comments cover only the code shown.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
    abi_ulong target_vec;
    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen);
        ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    /* NOTE(review): the 2x over-allocation presumably absorbs host cmsg
       headers being larger than target ones — confirm. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);
    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);
    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
        ret = -host_to_target_errno(errno);
    msg.msg_iovlen = count;
        /* Sending: convert guest ancillary data first. */
        ret = target_to_host_cmsg(&msg, msgp);
        ret = get_errno(sendmsg(fd, &msg, flags));
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            /* Receiving: convert ancillary data and peer name back. */
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name, msg.msg_namelen);
    unlock_iovec(vec, target_vec, count, !send);
/* Lock the guest msghdr (read-only for send, writable for receive)
   and delegate to do_sendrecvmsg_locked(). */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
    struct target_msghdr *msgp;
    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
        return -TARGET_EFAULT;
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    /* Only a receive modified the struct, so only then copy it back. */
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
#ifdef TARGET_NR_sendmmsg
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000

/* Emulate sendmmsg/recvmmsg by looping over the single-message worker,
   recording each per-message byte count in msg_len.
   NOTE(review): declarations, braces and the final return are missing
   from this view. */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
    struct target_mmsghdr *mmsgp;
    if (vlen > UIO_MAXIOV) {
    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
        return -TARGET_EFAULT;
    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
    /* Copy back only the entries actually processed. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
    /* flags is deliberately ignored; callers guarantee it is zero. */
    return accept(sockfd, addr, addrlen);
/* do_accept4() Must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
    /* A NULL guest addr means the caller does not want the peer address. */
    if (target_addr == 0) {
        return get_errno(accept4(fd, NULL, NULL, host_flags));
    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;
    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;
    addr = alloca(addrlen);
    ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
    if (!is_error(ret)) {
        /* Write the (possibly truncated) peer address back to the guest. */
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;
    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;
    addr = alloca(addrlen);
    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        /* Convert the host sockaddr and report the updated length. */
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;
    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;
    addr = alloca(addrlen);
    ret = get_errno(getsockname(fd, addr, &addrlen));
    if (!is_error(ret)) {
        /* Convert the host sockaddr and report the updated length. */
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
2184 /* do_socketpair() Must return target values and target errnos. */
2185 static abi_long do_socketpair(int domain, int type, int protocol,
2186 abi_ulong target_tab_addr)
2191 target_to_host_sock_type(&type);
2193 ret = get_errno(socketpair(domain, type, protocol, tab));
2194 if (!is_error(ret)) {
2195 if (put_user_s32(tab[0], target_tab_addr)
2196 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2197 ret = -TARGET_EFAULT;
/* do_sendto() Must return target values and target errnos. */
/* Handles both sendto() (target_addr != 0) and plain send(). */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    host_msg = lock_user(VERIFY_READ, msg, len, 1);
        return -TARGET_EFAULT;
        /* Destination supplied: convert it and use sendto(). */
        addr = alloca(addrlen);
        ret = target_to_host_sockaddr(addr, target_addr, addrlen);
            unlock_user(host_msg, msg, 0);
        ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
        ret = get_errno(send(fd, host_msg, len, flags));
    unlock_user(host_msg, msg, 0);
/* do_recvfrom() Must return target values and target errnos. */
/* Handles both recvfrom() (target_addr != 0) and plain recv().
   NOTE(review): branch/brace lines and goto labels are missing from
   this view. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        return -TARGET_EFAULT;
        /* Caller wants the sender's address: fetch and validate addrlen. */
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
        addr = NULL; /* To keep compiler quiet. */
        ret = get_errno(qemu_recv(fd, host_msg, len, flags));
    if (!is_error(ret)) {
            /* Report the sender's address back to the guest. */
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
        /* Success: copy the received bytes back to guest memory. */
        unlock_user(host_msg, msg, len);
        unlock_user(host_msg, msg, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
/*
 * Demultiplex the legacy socketcall(2) syscall: read the per-call
 * argument count from ac[], fetch that many abi_longs from guest
 * memory at vptr, then dispatch to the matching do_* helper.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    abi_long a[6]; /* max 6 args */
    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
    /* now when we have the args, actually handle the call */
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
/* Fixed-size table tracking guest shmat() mappings. */
#define N_SHM_REGIONS 32

/* Book-keeping entry for one guest shm mapping.
   NOTE(review): the member declarations are not visible in this chunk. */
static struct shm_region {
} shm_regions[N_SHM_REGIONS];

/* Guest-ABI layout of semid_ds; the __unusedN fields are padding that
   keeps the structure matching the target kernel ABI. */
struct target_semid_ds
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
    abi_ulong __unused1;
    abi_ulong sem_ctime;
    abi_ulong __unused2;
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
/* Convert the guest ipc_perm embedded at the start of a semid_ds at
   target_addr into the host's struct ipc_perm. */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;
    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* mode is 32-bit on these targets, 16-bit elsewhere. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
    host_ip->mode = tswap16(target_ip->mode);
    /* __seq width likewise differs per target ABI. */
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
    host_ip->__seq = tswap16(target_ip->__seq);
    unlock_user_struct(target_sd, target_addr, 0);
/* Mirror of target_to_host_ipc_perm(): copy a host ipc_perm into the
   guest semid_ds at target_addr. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;
    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* mode is 32-bit on these targets, 16-bit elsewhere. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
    target_ip->mode = tswap16(host_ip->mode);
    /* __seq width likewise differs per target ABI. */
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
    target_ip->__seq = tswap16(host_ip->__seq);
    unlock_user_struct(target_sd, target_addr, 1);
/* Convert a guest semid_ds (perm + counters/timestamps) to host layout. */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
    struct target_semid_ds *target_sd;
    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    /* The embedded ipc_perm is converted by its own helper. */
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
/* Mirror of target_to_host_semid_ds(): copy a host semid_ds back into
   guest memory. */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
    struct target_semid_ds *target_sd;
    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    /* The embedded ipc_perm is converted by its own helper. */
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
/* Guest-ABI layout of struct seminfo.
   NOTE(review): the member declarations are not visible in this chunk. */
struct target_seminfo {

/* Copy a host seminfo (IPC_INFO/SEM_INFO result) field-by-field into
   guest memory. */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    /* NOTE(review): these are the members of the host "union semun"
       (required by semctl on systems that do not declare it); the
       union's opening line is not visible in this chunk. */
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;

/* Guest-ABI counterpart of union semun (members not visible here). */
union target_semun {
/*
 * For SETALL: determine the semaphore count via host IPC_STAT, then
 * copy the guest's value array into a freshly malloc'd host array.
 * The caller owns *host_array and must free() it.
 * NOTE(review): some cleanup/return lines are missing from this view.
 */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
    unsigned short *array;
    struct semid_ds semid_ds;
    semun.buf = &semid_ds;
    /* Ask the host how many semaphores the set contains. */
    ret = semctl(semid, 0, IPC_STAT, semun);
        return get_errno(ret);
    nsems = semid_ds.sem_nsems;
    *host_array = malloc(nsems*sizeof(unsigned short));
        return -TARGET_ENOMEM;
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
        return -TARGET_EFAULT;
    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    unlock_user(array, target_addr, 0);
/*
 * For GETALL: determine the semaphore count via host IPC_STAT, then
 * copy the host value array in *host_array back into guest memory.
 * NOTE(review): some cleanup/return lines are missing from this view.
 */
static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
    unsigned short *array;
    struct semid_ds semid_ds;
    semun.buf = &semid_ds;
    /* Ask the host how many semaphores the set contains. */
    ret = semctl(semid, 0, IPC_STAT, semun);
        return get_errno(ret);
    nsems = semid_ds.sem_nsems;
    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
        return -TARGET_EFAULT;
    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    unlock_user(array, target_addr, 1);
/*
 * Emulate semctl(2): dispatch on cmd and convert the semun argument in
 * the appropriate direction for each command class.
 * NOTE(review): the case labels and error handling between the visible
 * lines are missing from this view.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
        /* GETVAL/SETVAL: the union carries a plain int. */
        arg.val = tswap32(target_su.val);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        target_su.val = tswap32(arg.val);
        /* GETALL/SETALL: the union carries the whole value array. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        /* IPC_STAT/IPC_SET: the union carries a semid_ds pointer. */
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        /* IPC_INFO/SEM_INFO: the union carries a seminfo pointer. */
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        /* Remaining commands take no argument. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
/* Guest-ABI layout of struct sembuf.
   NOTE(review): the sem_op/sem_flg member lines are not visible here. */
struct target_sembuf {
    unsigned short sem_num;

/* Copy an array of nsops guest sembufs into the host array, swapping
   each field individually. */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
    struct target_sembuf *target_sembuf;
    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
        return -TARGET_EFAULT;
    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    unlock_user(target_sembuf, target_addr, 0);
/* Emulate semop(2): convert the guest sembuf array and forward. */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
    /* NOTE(review): VLA sized by guest-controlled nsops — a huge value
       could overflow the host stack; confirm callers bound nsops. */
    struct sembuf sops[nsops];
    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;
    return get_errno(semop(semid, sops, nsops));
/* Guest-ABI layout of msqid_ds.  On 32-bit target ABIs each time field
   is followed by a padding word so the layout matches the 64-bit
   time_t slots of the target kernel ABI. */
struct target_msqid_ds
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
    abi_ulong __msg_cbytes;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
/* Convert a guest struct msqid_ds at target_addr into host_md,
 * byte-swapping every field.  Returns 0 or -TARGET_EFAULT. */
2702 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2703 abi_ulong target_addr)
2705 struct target_msqid_ds *target_md;
2707 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2708 return -TARGET_EFAULT;
/* NOTE(review): on this failure path target_md is never unlocked —
 * lock_user_struct leak; consider unlocking before returning. */
2709 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2710 return -TARGET_EFAULT;
2711 host_md->msg_stime = tswapal(target_md->msg_stime);
2712 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2713 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2714 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2715 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2716 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2717 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2718 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2719 unlock_user_struct(target_md, target_addr, 0);
/* Convert host_md into the guest struct msqid_ds at target_addr,
 * byte-swapping every field.  Returns 0 or -TARGET_EFAULT. */
2723 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2724 struct msqid_ds *host_md)
2726 struct target_msqid_ds *target_md;
2728 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2729 return -TARGET_EFAULT;
/* NOTE(review): on this failure path target_md is never unlocked —
 * same leak pattern as target_to_host_msqid_ds. */
2730 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2731 return -TARGET_EFAULT;
2732 target_md->msg_stime = tswapal(host_md->msg_stime);
2733 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2734 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2735 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2736 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2737 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2738 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2739 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
/* Length 1: copy the filled struct back to guest memory. */
2740 unlock_user_struct(target_md, target_addr, 1);
/* Guest-layout view of struct msginfo (msgctl IPC_INFO/MSG_INFO result). */
2744 struct target_msginfo {
2752 unsigned short int msgseg;
/* Copy a host struct msginfo out to guest memory at target_addr,
 * byte-swapping each field via __put_user.  Returns 0 or -TARGET_EFAULT. */
2755 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2756 struct msginfo *host_msginfo)
2758 struct target_msginfo *target_msginfo;
2759 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2760 return -TARGET_EFAULT;
2761 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2762 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2763 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2764 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2765 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2766 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2767 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2768 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2769 unlock_user_struct(target_msginfo, target_addr, 1);
/* Emulate msgctl(2) for the guest: convert the argument pointed to by
 * ptr per cmd, call the host msgctl, and convert results back.
 * Returns the host result or -TARGET_Exxx. */
2773 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2775 struct msqid_ds dsarg;
2776 struct msginfo msginfo;
2777 abi_long ret = -TARGET_EINVAL;
/* IPC_STAT/IPC_SET: round-trip struct msqid_ds through the host call. */
2785 if (target_to_host_msqid_ds(&dsarg,ptr))
2786 return -TARGET_EFAULT;
2787 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2788 if (host_to_target_msqid_ds(ptr,&dsarg))
2789 return -TARGET_EFAULT;
/* Commands with no data argument (e.g. IPC_RMID). */
2792 ret = get_errno(msgctl(msgid, cmd, NULL));
/* IPC_INFO/MSG_INFO: kernel writes a struct msginfo through the
 * msqid_ds pointer, hence the cast. */
2796 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2797 if (host_to_target_msginfo(ptr, &msginfo))
2798 return -TARGET_EFAULT;
2805 struct target_msgbuf {
2810 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2811 unsigned int msgsz, int msgflg)
2813 struct target_msgbuf *target_mb;
2814 struct msgbuf *host_mb;
2817 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2818 return -TARGET_EFAULT;
2819 host_mb = malloc(msgsz+sizeof(long));
2820 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2821 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2822 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2824 unlock_user_struct(target_mb, msgp, 0);
/* Emulate msgrcv(2): receive into a host msgbuf, then copy mtype and
 * the received mtext (ret bytes) back out to the guest target_msgbuf
 * at msgp.  Returns the number of bytes received or -TARGET_Exxx. */
2829 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2830 unsigned int msgsz, abi_long msgtyp,
2833 struct target_msgbuf *target_mb;
2835 struct msgbuf *host_mb;
2838 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2839 return -TARGET_EFAULT;
2841 host_mb = g_malloc(msgsz+sizeof(long));
2842 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
/* On success, lock the guest mtext area (just past mtype) and copy
 * the ret bytes actually received. */
2845 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2846 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2847 if (!target_mtext) {
2848 ret = -TARGET_EFAULT;
2851 memcpy(target_mb->mtext, host_mb->mtext, ret);
2852 unlock_user(target_mtext, target_mtext_addr, ret);
/* mtype is always copied back, byte-swapped for the guest. */
2855 target_mb->mtype = tswapal(host_mb->mtype);
2859 unlock_user_struct(target_mb, msgp, 1);
/* Convert a guest struct shmid_ds at target_addr into host_sd,
 * byte-swapping each field.  Returns 0 or -TARGET_EFAULT. */
2864 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2865 abi_ulong target_addr)
2867 struct target_shmid_ds *target_sd;
2869 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2870 return -TARGET_EFAULT;
/* NOTE(review): target_sd stays locked on this failure path. */
2871 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2872 return -TARGET_EFAULT;
2873 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2874 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2875 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2876 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2877 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2878 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2879 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2880 unlock_user_struct(target_sd, target_addr, 0);
/* Convert host_sd into the guest struct shmid_ds at target_addr,
 * byte-swapping each field.  Returns 0 or -TARGET_EFAULT. */
2884 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2885 struct shmid_ds *host_sd)
2887 struct target_shmid_ds *target_sd;
2889 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2890 return -TARGET_EFAULT;
/* NOTE(review): target_sd stays locked on this failure path. */
2891 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2892 return -TARGET_EFAULT;
2893 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2894 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2895 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2896 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2897 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2898 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2899 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2900 unlock_user_struct(target_sd, target_addr, 1);
2904 struct target_shminfo {
/* Copy a host struct shminfo out to guest memory at target_addr.
 * Returns 0 or -TARGET_EFAULT. */
2912 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2913 struct shminfo *host_shminfo)
2915 struct target_shminfo *target_shminfo;
2916 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2917 return -TARGET_EFAULT;
2918 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2919 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2920 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2921 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2922 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2923 unlock_user_struct(target_shminfo, target_addr, 1);
/* Guest-layout view of struct shm_info (shmctl SHM_INFO result). */
2927 struct target_shm_info {
2932 abi_ulong swap_attempts;
2933 abi_ulong swap_successes;
/* Copy a host struct shm_info out to guest memory at target_addr.
 * Returns 0 or -TARGET_EFAULT. */
2936 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2937 struct shm_info *host_shm_info)
2939 struct target_shm_info *target_shm_info;
2940 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2941 return -TARGET_EFAULT;
2942 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2943 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2944 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2945 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2946 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2947 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2948 unlock_user_struct(target_shm_info, target_addr, 1);
/* Emulate shmctl(2): convert the buffer at guest address buf per cmd,
 * issue the host call, and convert results back.  Returns the host
 * result or -TARGET_Exxx. */
2952 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2954 struct shmid_ds dsarg;
2955 struct shminfo shminfo;
2956 struct shm_info shm_info;
2957 abi_long ret = -TARGET_EINVAL;
/* IPC_STAT/IPC_SET: round-trip struct shmid_ds through the host call. */
2965 if (target_to_host_shmid_ds(&dsarg, buf))
2966 return -TARGET_EFAULT;
2967 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2968 if (host_to_target_shmid_ds(buf, &dsarg))
2969 return -TARGET_EFAULT;
/* IPC_INFO: kernel writes a struct shminfo through the shmid_ds
 * pointer, hence the cast. */
2972 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2973 if (host_to_target_shminfo(buf, &shminfo))
2974 return -TARGET_EFAULT;
/* SHM_INFO: same pattern with struct shm_info. */
2977 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2978 if (host_to_target_shm_info(buf, &shm_info))
2979 return -TARGET_EFAULT;
/* Commands with no data argument (e.g. IPC_RMID/SHM_LOCK/SHM_UNLOCK). */
2984 ret = get_errno(shmctl(shmid, cmd, NULL));
/* Emulate shmat(2): attach the segment either at the guest-requested
 * address or at a hole found by mmap_find_vma, update QEMU's page
 * flags for the mapped range, and record the mapping in shm_regions[]
 * so do_shmdt can later clear it.  Returns the guest address of the
 * attachment or a -TARGET_Exxx errno. */
2991 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2995 struct shmid_ds shm_info;
2998 /* find out the length of the shared memory segment */
2999 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3000 if (is_error(ret)) {
3001 /* can't get length, bail out */
/* Guest supplied an address: translate it to host and attach there. */
3008 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3010 abi_ulong mmap_start;
/* No address given: pick a free guest VA range of the right size. */
3012 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3014 if (mmap_start == -1) {
3016 host_raddr = (void *)-1;
/* SHM_REMAP lets the attach replace the placeholder mapping. */
3018 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3021 if (host_raddr == (void *)-1) {
3023 return get_errno((long)host_raddr);
3025 raddr=h2g((unsigned long)host_raddr);
/* Mark the attached range valid/readable (writable unless RDONLY). */
3027 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3028 PAGE_VALID | PAGE_READ |
3029 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
/* Remember the mapping in the first free shm_regions slot. */
3031 for (i = 0; i < N_SHM_REGIONS; i++) {
3032 if (shm_regions[i].start == 0) {
3033 shm_regions[i].start = raddr;
3034 shm_regions[i].size = shm_info.shm_segsz;
/* Emulate shmdt(2): drop the recorded shm_regions[] entry for shmaddr
 * (clearing its page flags) and detach the host mapping. */
3044 static inline abi_long do_shmdt(abi_ulong shmaddr)
3048 for (i = 0; i < N_SHM_REGIONS; ++i) {
3049 if (shm_regions[i].start == shmaddr) {
3050 shm_regions[i].start = 0;
3051 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3056 return get_errno(shmdt(g2h(shmaddr)));
3059 #ifdef TARGET_NR_ipc
3060 /* ??? This only works with linear mappings. */
3061 /* do_ipc() must return target values and target errnos. */
/* Multiplexed SysV IPC entry point (the old ipc(2) syscall used by
 * several 32-bit ABIs): decode 'call' and forward to the matching
 * do_sem*/ /* do_msg* / do_shm* helper or host syscall. */
3062 static abi_long do_ipc(unsigned int call, int first,
3063 int second, int third,
3064 abi_long ptr, abi_long fifth)
/* High 16 bits of 'call' carry the interface version (affects MSGRCV). */
3069 version = call >> 16;
3074 ret = do_semop(first, ptr, second);
3078 ret = get_errno(semget(first, second, third));
3082 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3086 ret = get_errno(msgget(first, second));
3090 ret = do_msgsnd(first, ptr, second, third);
3094 ret = do_msgctl(first, second, ptr);
/* Old-style MSGRCV (version 0): ptr points at a kludge struct that
 * bundles the real msgp and msgtyp. */
3101 struct target_ipc_kludge {
3106 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3107 ret = -TARGET_EFAULT;
3111 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3113 unlock_user_struct(tmp, ptr, 0);
/* New-style MSGRCV: msgtyp arrives directly in 'fifth'. */
3117 ret = do_msgrcv(first, ptr, second, fifth, third);
/* SHMAT: attach, then store the resulting guest address through
 * the user pointer in 'third'. */
3126 raddr = do_shmat(first, ptr, second);
3127 if (is_error(raddr))
3128 return get_errno(raddr);
3129 if (put_user_ual(raddr, third))
3130 return -TARGET_EFAULT;
3134 ret = -TARGET_EINVAL;
3139 ret = do_shmdt(ptr);
3143 /* IPC_* flag values are the same on all linux platforms */
3144 ret = get_errno(shmget(first, second, third));
3147 /* IPC_* and SHM_* command values are the same on all linux platforms */
3149 ret = do_shmctl(first, second, ptr);
3152 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3153 ret = -TARGET_ENOSYS;
3160 /* kernel structure types definitions */
/* First expansion of syscall_types.h: each STRUCT()/STRUCT_SPECIAL()
 * entry becomes a STRUCT_<name> enumerator (thunk type id). */
3162 #define STRUCT(name, ...) STRUCT_ ## name,
3163 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3165 #include "syscall_types.h"
3168 #undef STRUCT_SPECIAL
/* Second expansion: each STRUCT() entry becomes a field-type table
 * (struct_<name>_def) used by the thunk converter; SPECIAL entries
 * expand to nothing because they have hand-written converters. */
3170 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3171 #define STRUCT_SPECIAL(name)
3172 #include "syscall_types.h"
3174 #undef STRUCT_SPECIAL
3176 typedef struct IOCTLEntry IOCTLEntry;
/* Signature of a custom per-ioctl handler (used when generic thunk
 * conversion is insufficient, e.g. FS_IOC_FIEMAP, SIOCGIFCONF). */
3178 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3179 int fd, abi_long cmd, abi_long arg);
/* One ioctl translation table entry: guest cmd, host cmd, access
 * direction, optional custom handler, and the argument type layout. */
3182 unsigned int target_cmd;
3183 unsigned int host_cmd;
3186 do_ioctl_fn *do_ioctl;
3187 const argtype arg_type[5];
/* Access-direction flags for IOCTLEntry.access. */
3190 #define IOC_R 0x0001
3191 #define IOC_W 0x0002
3192 #define IOC_RW (IOC_R | IOC_W)
/* Size of the fixed on-stack conversion buffer used by do_ioctl(). */
3194 #define MAX_STRUCT_SIZE 4096
3196 #ifdef CONFIG_FIEMAP
3197 /* So fiemap access checks don't overflow on 32 bit systems.
3198 * This is very slightly smaller than the limit imposed by
3199 * the underlying kernel.
3201 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3202 / sizeof(struct fiemap_extent))
/* Custom handler for FS_IOC_FIEMAP: the argument is a struct fiemap
 * followed by fm_extent_count fiemap_extents filled in by the kernel,
 * so the generic fixed-size thunk conversion cannot handle it. */
3204 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3205 int fd, abi_long cmd, abi_long arg)
3207 /* The parameter for this ioctl is a struct fiemap followed
3208 * by an array of struct fiemap_extent whose size is set
3209 * in fiemap->fm_extent_count. The array is filled in by the
3212 int target_size_in, target_size_out;
3214 const argtype *arg_type = ie->arg_type;
3215 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3218 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3222 assert(arg_type[0] == TYPE_PTR);
3223 assert(ie->access == IOC_RW);
/* Convert the guest header into buf_temp. */
3225 target_size_in = thunk_type_size(arg_type, 0);
3226 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3228 return -TARGET_EFAULT;
3230 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3231 unlock_user(argptr, arg, 0);
3232 fm = (struct fiemap *)buf_temp;
/* Bound the guest-supplied extent count before computing sizes. */
3233 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3234 return -TARGET_EINVAL;
3237 outbufsz = sizeof (*fm) +
3238 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3240 if (outbufsz > MAX_STRUCT_SIZE) {
3241 /* We can't fit all the extents into the fixed size buffer.
3242 * Allocate one that is large enough and use it instead.
3244 fm = malloc(outbufsz);
3246 return -TARGET_ENOMEM;
3248 memcpy(fm, buf_temp, sizeof(struct fiemap));
3251 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3252 if (!is_error(ret)) {
3253 target_size_out = target_size_in;
3254 /* An extent_count of 0 means we were only counting the extents
3255 * so there are no structs to copy
3257 if (fm->fm_extent_count != 0) {
3258 target_size_out += fm->fm_mapped_extents * extent_size;
3260 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3262 ret = -TARGET_EFAULT;
3264 /* Convert the struct fiemap */
3265 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3266 if (fm->fm_extent_count != 0) {
3267 p = argptr + target_size_in;
3268 /* ...and then all the struct fiemap_extents */
3269 for (i = 0; i < fm->fm_mapped_extents; i++) {
3270 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3275 unlock_user(argptr, arg, target_size_out);
/* Custom handler for SIOCGIFCONF: struct ifconf carries an embedded
 * pointer to an ifreq array in guest memory, so both the outer struct
 * and the array it points at must be converted by hand. */
3285 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3286 int fd, abi_long cmd, abi_long arg)
3288 const argtype *arg_type = ie->arg_type;
3292 struct ifconf *host_ifconf;
3294 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3295 int target_ifreq_size;
3300 abi_long target_ifc_buf;
3304 assert(arg_type[0] == TYPE_PTR);
3305 assert(ie->access == IOC_RW);
/* Convert the guest struct ifconf into buf_temp. */
3308 target_size = thunk_type_size(arg_type, 0);
3310 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3312 return -TARGET_EFAULT;
3313 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3314 unlock_user(argptr, arg, 0);
/* Stash the guest's ifc_len and ifc_buf pointer for later. */
3316 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3317 target_ifc_len = host_ifconf->ifc_len;
3318 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
/* The guest length counts target-sized ifreqs; recompute the host
 * length from the number of entries. */
3320 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3321 nb_ifreq = target_ifc_len / target_ifreq_size;
3322 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3324 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3325 if (outbufsz > MAX_STRUCT_SIZE) {
3326 /* We can't fit all the extents into the fixed size buffer.
3327 * Allocate one that is large enough and use it instead.
/* NOTE(review): malloc result checked only via the visible
 * -TARGET_ENOMEM return; no free is visible in this excerpt. */
3329 host_ifconf = malloc(outbufsz);
3331 return -TARGET_ENOMEM;
3333 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
/* Point the host ifconf at the scratch area right after it. */
3336 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3338 host_ifconf->ifc_len = host_ifc_len;
3339 host_ifconf->ifc_buf = host_ifc_buf;
3341 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3342 if (!is_error(ret)) {
3343 /* convert host ifc_len to target ifc_len */
3345 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3346 target_ifc_len = nb_ifreq * target_ifreq_size;
3347 host_ifconf->ifc_len = target_ifc_len;
3349 /* restore target ifc_buf */
3351 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3353 /* copy struct ifconf to target user */
3355 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3357 return -TARGET_EFAULT;
3358 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3359 unlock_user(argptr, arg, target_size);
3361 /* copy ifreq[] to target user */
3363 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3364 for (i = 0; i < nb_ifreq ; i++) {
3365 thunk_convert(argptr + i * target_ifreq_size,
3366 host_ifc_buf + i * sizeof(struct ifreq),
3367 ifreq_arg_type, THUNK_TARGET);
3369 unlock_user(argptr, target_ifc_buf, target_ifc_len);
/* Custom handler for device-mapper ioctls: struct dm_ioctl carries a
 * variable-length payload (at data_start, data_size bytes) whose layout
 * differs per command, so each command's payload is converted by hand
 * in both directions. */
3379 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3380 abi_long cmd, abi_long arg)
3383 struct dm_ioctl *host_dm;
3384 abi_long guest_data;
3385 uint32_t guest_data_size;
3387 const argtype *arg_type = ie->arg_type;
3389 void *big_buf = NULL;
/* Convert the fixed dm_ioctl header into buf_temp first. */
3393 target_size = thunk_type_size(arg_type, 0);
3394 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3396 ret = -TARGET_EFAULT;
3399 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3400 unlock_user(argptr, arg, 0);
3402 /* buf_temp is too small, so fetch things into a bigger buffer */
3403 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3404 memcpy(big_buf, buf_temp, target_size);
/* Locate the guest payload relative to data_start and sanity-check it. */
3408 guest_data = arg + host_dm->data_start;
3409 if ((guest_data - arg) < 0) {
3413 guest_data_size = host_dm->data_size - host_dm->data_start;
3414 host_data = (char*)host_dm + host_dm->data_start;
/* Inbound payload conversion, per command. */
3416 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3417 switch (ie->host_cmd) {
3419 case DM_LIST_DEVICES:
3422 case DM_DEV_SUSPEND:
3425 case DM_TABLE_STATUS:
3426 case DM_TABLE_CLEAR:
3428 case DM_LIST_VERSIONS:
3432 case DM_DEV_SET_GEOMETRY:
3433 /* data contains only strings */
3434 memcpy(host_data, argptr, guest_data_size);
/* (DM_DEV_RENAME-style payload: strings plus a leading 64-bit value.) */
3437 memcpy(host_data, argptr, guest_data_size);
3438 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
/* Table-load style payload: a sequence of dm_target_spec structs,
 * each followed by its parameter string; convert each in turn. */
3442 void *gspec = argptr;
3443 void *cur_data = host_data;
3444 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3445 int spec_size = thunk_type_size(arg_type, 0);
3448 for (i = 0; i < host_dm->target_count; i++) {
3449 struct dm_target_spec *spec = cur_data;
3453 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3454 slen = strlen((char*)gspec + spec_size) + 1;
3456 spec->next = sizeof(*spec) + slen;
3457 strcpy((char*)&spec[1], gspec + spec_size);
3459 cur_data += spec->next;
3464 ret = -TARGET_EINVAL;
3467 unlock_user(argptr, guest_data, 0);
3469 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* Outbound payload conversion, per command. */
3470 if (!is_error(ret)) {
3471 guest_data = arg + host_dm->data_start;
3472 guest_data_size = host_dm->data_size - host_dm->data_start;
3473 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3474 switch (ie->host_cmd) {
3479 case DM_DEV_SUSPEND:
3482 case DM_TABLE_CLEAR:
3484 case DM_DEV_SET_GEOMETRY:
3485 /* no return data */
3487 case DM_LIST_DEVICES:
/* Walk the variable-length dm_name_list chain, converting each
 * entry and recomputing 'next' offsets for the target layout. */
3489 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3490 uint32_t remaining_data = guest_data_size;
3491 void *cur_data = argptr;
3492 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3493 int nl_size = 12; /* can't use thunk_size due to alignment */
3496 uint32_t next = nl->next;
3498 nl->next = nl_size + (strlen(nl->name) + 1);
3500 if (remaining_data < nl->next) {
3501 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3504 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3505 strcpy(cur_data + nl_size, nl->name);
3506 cur_data += nl->next;
3507 remaining_data -= nl->next;
3511 nl = (void*)nl + next;
3516 case DM_TABLE_STATUS:
/* Same walk for dm_target_spec results (status strings follow
 * each spec). */
3518 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3519 void *cur_data = argptr;
3520 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3521 int spec_size = thunk_type_size(arg_type, 0);
3524 for (i = 0; i < host_dm->target_count; i++) {
3525 uint32_t next = spec->next;
3526 int slen = strlen((char*)&spec[1]) + 1;
3527 spec->next = (cur_data - argptr) + spec_size + slen;
3528 if (guest_data_size < spec->next) {
3529 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3532 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3533 strcpy(cur_data + spec_size, (char*)&spec[1]);
3534 cur_data = argptr + spec->next;
3535 spec = (void*)host_dm + host_dm->data_start + next;
/* (DM_TABLE_DEPS-style payload: a count followed by 64-bit dev_ts.) */
3541 void *hdata = (void*)host_dm + host_dm->data_start;
3542 int count = *(uint32_t*)hdata;
3543 uint64_t *hdev = hdata + 8;
3544 uint64_t *gdev = argptr + 8;
3547 *(uint32_t*)argptr = tswap32(count);
3548 for (i = 0; i < count; i++) {
3549 *gdev = tswap64(*hdev);
3555 case DM_LIST_VERSIONS:
/* Same walk for dm_target_versions entries. */
3557 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3558 uint32_t remaining_data = guest_data_size;
3559 void *cur_data = argptr;
3560 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3561 int vers_size = thunk_type_size(arg_type, 0);
3564 uint32_t next = vers->next;
3566 vers->next = vers_size + (strlen(vers->name) + 1);
3568 if (remaining_data < vers->next) {
3569 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3572 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3573 strcpy(cur_data + vers_size, vers->name);
3574 cur_data += vers->next;
3575 remaining_data -= vers->next;
3579 vers = (void*)vers + next;
3584 ret = -TARGET_EINVAL;
3587 unlock_user(argptr, guest_data, guest_data_size);
/* Finally convert the (possibly updated) dm_ioctl header back out. */
3589 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3591 ret = -TARGET_EFAULT;
3594 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3595 unlock_user(argptr, arg, target_size);
/* Custom handler for routing-table ioctls (SIOCADDRT/SIOCDELRT):
 * struct rtentry embeds a rt_dev string pointer that must be locked
 * and translated separately while the rest of the struct goes through
 * the generic field-by-field thunk conversion. */
3602 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3603 int fd, abi_long cmd, abi_long arg)
3605 const argtype *arg_type = ie->arg_type;
3606 const StructEntry *se;
3607 const argtype *field_types;
3608 const int *dst_offsets, *src_offsets;
3611 abi_ulong *target_rt_dev_ptr;
3612 unsigned long *host_rt_dev_ptr;
3616 assert(ie->access == IOC_W);
3617 assert(*arg_type == TYPE_PTR);
3619 assert(*arg_type == TYPE_STRUCT);
3620 target_size = thunk_type_size(arg_type, 0);
3621 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3623 return -TARGET_EFAULT;
3626 assert(*arg_type == (int)STRUCT_rtentry);
3627 se = struct_entries + *arg_type++;
3628 assert(se->convert[0] == NULL);
3629 /* convert struct here to be able to catch rt_dev string */
3630 field_types = se->field_types;
3631 dst_offsets = se->field_offsets[THUNK_HOST];
3632 src_offsets = se->field_offsets[THUNK_TARGET];
3633 for (i = 0; i < se->nb_fields; i++) {
/* The rt_dev field gets special treatment: lock the guest string
 * and store the host pointer; all other fields are thunk-converted. */
3634 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3635 assert(*field_types == TYPE_PTRVOID);
3636 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3637 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3638 if (*target_rt_dev_ptr != 0) {
3639 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3640 tswapal(*target_rt_dev_ptr));
3641 if (!*host_rt_dev_ptr) {
3642 unlock_user(argptr, arg, 0);
3643 return -TARGET_EFAULT;
3646 *host_rt_dev_ptr = 0;
3651 field_types = thunk_convert(buf_temp + dst_offsets[i],
3652 argptr + src_offsets[i],
3653 field_types, THUNK_HOST);
3655 unlock_user(argptr, arg, 0);
3657 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* Release the locked rt_dev string (if one was set) after the call. */
3658 if (*host_rt_dev_ptr != 0) {
3659 unlock_user((void *)*host_rt_dev_ptr,
3660 *target_rt_dev_ptr, 0);
/* Translation table mapping guest ioctl numbers to host numbers and
 * conversion info; populated from ioctls.h via the IOCTL macros below
 * (IOCTL_SPECIAL additionally supplies a custom handler). */
3665 static IOCTLEntry ioctl_entries[] = {
3666 #define IOCTL(cmd, access, ...) \
3667 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3668 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3669 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3674 /* ??? Implement proper locking for ioctls. */
3675 /* do_ioctl() Must return target values and target errnos. */
/* Generic ioctl dispatcher: look up cmd in ioctl_entries[], then either
 * delegate to a custom handler or perform the standard conversion
 * determined by the entry's arg_type and access direction. */
3676 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3678 const IOCTLEntry *ie;
3679 const argtype *arg_type;
3681 uint8_t buf_temp[MAX_STRUCT_SIZE];
/* Table is terminated by a zero target_cmd entry. */
3687 if (ie->target_cmd == 0) {
3688 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3689 return -TARGET_ENOSYS;
3691 if (ie->target_cmd == cmd)
3695 arg_type = ie->arg_type;
3697 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
/* Entries with a custom handler bypass the generic conversion. */
3700 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3703 switch(arg_type[0]) {
/* No argument. */
3706 ret = get_errno(ioctl(fd, ie->host_cmd));
/* Integer argument passed through unchanged. */
3711 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
/* Pointer-to-struct argument: convert via buf_temp according to
 * the access direction. */
3715 target_size = thunk_type_size(arg_type, 0);
3716 switch(ie->access) {
/* IOC_R: kernel writes; convert host->target after the call. */
3718 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3719 if (!is_error(ret)) {
3720 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3722 return -TARGET_EFAULT;
3723 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3724 unlock_user(argptr, arg, target_size);
/* IOC_W: kernel reads; convert target->host before the call. */
3728 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3730 return -TARGET_EFAULT;
3731 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3732 unlock_user(argptr, arg, 0);
3733 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* IOC_RW: convert in, call, then convert back out. */
3737 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3739 return -TARGET_EFAULT;
3740 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3741 unlock_user(argptr, arg, 0);
3742 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3743 if (!is_error(ret)) {
3744 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3746 return -TARGET_EFAULT;
3747 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3748 unlock_user(argptr, arg, target_size);
3754 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3755 (long)cmd, arg_type[0]);
3756 ret = -TARGET_ENOSYS;
/* termios c_iflag translation: target mask/bits <-> host mask/bits.
 * Entry layout: { target_mask, target_bits, host_mask, host_bits }. */
3762 static const bitmask_transtbl iflag_tbl[] = {
3763 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3764 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3765 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3766 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3767 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3768 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3769 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3770 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3771 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3772 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3773 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3774 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3775 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3776 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
/* termios c_oflag translation; multi-bit delay fields (NLDLY, CRDLY,
 * TABDLY, ...) get one entry per value within the field's mask. */
3780 static const bitmask_transtbl oflag_tbl[] = {
3781 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3782 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3783 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3784 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3785 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3786 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3787 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3788 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3789 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3790 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3791 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3792 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3793 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3794 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3795 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3796 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3797 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3798 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3799 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3800 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3801 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3802 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3803 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3804 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/* termios c_cflag translation; baud rates are values within the CBAUD
 * mask, character sizes within CSIZE. */
3808 static const bitmask_transtbl cflag_tbl[] = {
3809 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3810 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3811 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3812 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3813 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3814 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3815 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3816 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3817 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3818 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3819 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3820 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3821 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3822 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3823 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3824 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3825 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3826 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3827 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3828 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3829 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3830 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3831 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3832 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3833 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3834 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3835 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3836 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3837 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3838 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3839 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/* termios c_lflag translation (all single-bit flags). */
3843 static const bitmask_transtbl lflag_tbl[] = {
3844 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3845 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3846 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3847 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3848 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3849 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3850 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3851 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3852 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3853 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3854 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3855 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3856 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3857 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3858 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/* Thunk converter: guest struct target_termios -> host struct termios.
 * Mode flags go through the bitmask tables above; control characters
 * are remapped index-by-index (host c_cc cleared first so unmapped
 * slots end up zero). */
3862 static void target_to_host_termios (void *dst, const void *src)
3864 struct host_termios *host = dst;
3865 const struct target_termios *target = src;
3868 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3870 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3872 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3874 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3875 host->c_line = target->c_line;
3877 memset(host->c_cc, 0, sizeof(host->c_cc));
3878 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3879 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3880 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3881 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3882 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3883 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3884 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3885 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3886 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3887 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3888 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3889 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3890 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3891 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3892 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3893 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3894 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/* Thunk converter: host struct termios -> guest struct target_termios.
 * Exact mirror of target_to_host_termios. */
3897 static void host_to_target_termios (void *dst, const void *src)
3899 struct target_termios *target = dst;
3900 const struct host_termios *host = src;
3903 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3905 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3907 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3909 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3910 target->c_line = host->c_line;
3912 memset(target->c_cc, 0, sizeof(target->c_cc));
3913 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3914 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3915 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3916 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3917 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3918 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3919 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3920 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3921 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3922 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3923 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3924 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3925 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3926 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3927 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3928 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3929 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk descriptor for struct termios: registers the two conversion
 * callbacks plus size/alignment for both target and host layouts
 * (order: { host_to_target, target_to_host }). */
3932 static const StructEntry struct_termios_def = {
3933 .convert = { host_to_target_termios, target_to_host_termios },
3934 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3935 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/* Translation table for mmap(2) flag bits between target and host
 * encodings; each row is { target_mask, target_bits, host_mask,
 * host_bits }. */
3938 static bitmask_transtbl mmap_flags_tbl[] = {
3939 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3940 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3941 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3942 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3943 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3944 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3945 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3946 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3950 #if defined(TARGET_I386)
3952 /* NOTE: there is really one LDT for all the threads */
/* Backing store for the emulated i386 LDT, shared process-wide. */
3953 static uint8_t *ldt_table;
/* Copy up to 'bytecount' bytes of the emulated LDT into guest memory
 * at 'ptr' (the read half of modify_ldt(2)). Returns -TARGET_EFAULT
 * if the guest buffer cannot be locked. */
3955 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3962 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
/* Clamp to the caller's buffer size (clamping line elided in this
 * listing — TODO confirm). */
3963 if (size > bytecount)
3965 p = lock_user(VERIFY_WRITE, ptr, size, 0)
3967 return -TARGET_EFAULT;
3968 /* ??? Should this be byteswapped? */
3969 memcpy(p, ldt_table, size);
3970 unlock_user(p, ptr, size);
3974 /* XXX: add locking support */
/* Write half of modify_ldt(2): validate the guest's
 * target_modify_ldt_ldt_s descriptor, lazily allocate the emulated
 * LDT on first use, and encode the descriptor into the two 32-bit
 * words of an x86 segment descriptor.  'oldmode' selects the legacy
 * modify_ldt semantics.  Returns 0 on success or a -TARGET_* errno. */
3975 static abi_long write_ldt(CPUX86State *env,
3976 abi_ulong ptr, unsigned long bytecount, int oldmode)
3978 struct target_modify_ldt_ldt_s ldt_info;
3979 struct target_modify_ldt_ldt_s *target_ldt_info;
3980 int seg_32bit, contents, read_exec_only, limit_in_pages;
3981 int seg_not_present, useable, lm;
3982 uint32_t *lp, entry_1, entry_2;
/* The guest must pass exactly one descriptor. */
3984 if (bytecount != sizeof(ldt_info))
3985 return -TARGET_EINVAL;
3986 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3987 return -TARGET_EFAULT;
3988 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3989 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3990 ldt_info.limit = tswap32(target_ldt_info->limit);
3991 ldt_info.flags = tswap32(target_ldt_info->flags);
3992 unlock_user_struct(target_ldt_info, ptr, 0);
3994 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3995 return -TARGET_EINVAL;
/* Unpack the packed 'flags' field (same layout as the kernel's
 * user_desc flags). */
3996 seg_32bit = ldt_info.flags & 1;
3997 contents = (ldt_info.flags >> 1) & 3;
3998 read_exec_only = (ldt_info.flags >> 3) & 1;
3999 limit_in_pages = (ldt_info.flags >> 4) & 1;
4000 seg_not_present = (ldt_info.flags >> 5) & 1;
4001 useable = (ldt_info.flags >> 6) & 1;
4005 lm = (ldt_info.flags >> 7) & 1;
/* contents == 3 is invalid for LDT entries. */
4007 if (contents == 3) {
4009 return -TARGET_EINVAL;
4010 if (seg_not_present == 0)
4011 return -TARGET_EINVAL;
4013 /* allocate the LDT */
/* Lazily map the LDT backing pages on first write. */
4015 env->ldt.base = target_mmap(0,
4016 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4017 PROT_READ|PROT_WRITE,
4018 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4019 if (env->ldt.base == -1)
4020 return -TARGET_ENOMEM;
4021 memset(g2h(env->ldt.base), 0,
4022 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4023 env->ldt.limit = 0xffff;
4024 ldt_table = g2h(env->ldt.base);
4027 /* NOTE: same code as Linux kernel */
4028 /* Allow LDTs to be cleared by the user. */
4029 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4032 read_exec_only == 1 &&
4034 limit_in_pages == 0 &&
4035 seg_not_present == 1 &&
/* Encode base/limit/flags into the standard x86 descriptor words
 * (entry_1 = low dword, entry_2 = high dword). */
4043 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4044 (ldt_info.limit & 0x0ffff);
4045 entry_2 = (ldt_info.base_addr & 0xff000000) |
4046 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4047 (ldt_info.limit & 0xf0000) |
4048 ((read_exec_only ^ 1) << 9) |
4050 ((seg_not_present ^ 1) << 15) |
4052 (limit_in_pages << 23) |
/* The AVL ("useable") bit is only honoured in new mode. */
4056 entry_2 |= (useable << 20);
4058 /* Install the new entry ... */
/* Each descriptor is 8 bytes, hence entry_number << 3. */
4060 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4061 lp[0] = tswap32(entry_1);
4062 lp[1] = tswap32(entry_2);
4066 /* specific and weird i386 syscalls */
/* Dispatch modify_ldt(2): func 0 reads the LDT, func 1 writes in
 * old mode, func 0x11 (per Linux) writes in new mode; anything else
 * is -TARGET_ENOSYS.  (Case labels elided in this listing.) */
4067 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4068 unsigned long bytecount)
4074 ret = read_ldt(ptr, bytecount);
4077 ret = write_ldt(env, ptr, bytecount, 1);
4080 ret = write_ldt(env, ptr, bytecount, 0);
4083 ret = -TARGET_ENOSYS;
4089 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* Emulate set_thread_area(2): install a TLS descriptor into the
 * emulated GDT.  If the guest passes entry_number == -1, pick the
 * first free TLS slot and report it back.  Returns 0 on success or a
 * -TARGET_* errno. */
4090 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4092 uint64_t *gdt_table = g2h(env->gdt.base);
4093 struct target_modify_ldt_ldt_s ldt_info;
4094 struct target_modify_ldt_ldt_s *target_ldt_info;
4095 int seg_32bit, contents, read_exec_only, limit_in_pages;
4096 int seg_not_present, useable, lm;
4097 uint32_t *lp, entry_1, entry_2;
4100 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4101 if (!target_ldt_info)
4102 return -TARGET_EFAULT;
4103 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4104 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4105 ldt_info.limit = tswap32(target_ldt_info->limit);
4106 ldt_info.flags = tswap32(target_ldt_info->flags);
/* entry_number == -1: find a free TLS GDT slot and write the chosen
 * index back to the guest descriptor. */
4107 if (ldt_info.entry_number == -1) {
4108 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4109 if (gdt_table[i] == 0) {
4110 ldt_info.entry_number = i;
4111 target_ldt_info->entry_number = tswap32(i);
4116 unlock_user_struct(target_ldt_info, ptr, 1);
4118 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4119 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4120 return -TARGET_EINVAL;
/* Unpack the packed user_desc-style flags word. */
4121 seg_32bit = ldt_info.flags & 1;
4122 contents = (ldt_info.flags >> 1) & 3;
4123 read_exec_only = (ldt_info.flags >> 3) & 1;
4124 limit_in_pages = (ldt_info.flags >> 4) & 1;
4125 seg_not_present = (ldt_info.flags >> 5) & 1;
4126 useable = (ldt_info.flags >> 6) & 1;
4130 lm = (ldt_info.flags >> 7) & 1;
4133 if (contents == 3) {
4134 if (seg_not_present == 0)
4135 return -TARGET_EINVAL;
4138 /* NOTE: same code as Linux kernel */
4139 /* Allow LDTs to be cleared by the user. */
4140 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4141 if ((contents == 0 &&
4142 read_exec_only == 1 &&
4144 limit_in_pages == 0 &&
4145 seg_not_present == 1 &&
/* Encode into the two 32-bit words of an x86 segment descriptor. */
4153 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4154 (ldt_info.limit & 0x0ffff);
4155 entry_2 = (ldt_info.base_addr & 0xff000000) |
4156 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4157 (ldt_info.limit & 0xf0000) |
4158 ((read_exec_only ^ 1) << 9) |
4160 ((seg_not_present ^ 1) << 15) |
4162 (limit_in_pages << 23) |
4167 /* Install the new entry ... */
/* gdt_table is uint64_t*, so plain index addressing reaches the
 * 8-byte descriptor; lp views it as two 32-bit words. */
4169 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4170 lp[0] = tswap32(entry_1);
4171 lp[1] = tswap32(entry_2);
/* Emulate get_thread_area(2): decode the GDT TLS descriptor selected
 * by the guest's entry_number back into a target_modify_ldt_ldt_s
 * (base/limit/flags) and copy it out to guest memory. */
4175 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4177 struct target_modify_ldt_ldt_s *target_ldt_info;
4178 uint64_t *gdt_table = g2h(env->gdt.base);
4179 uint32_t base_addr, limit, flags;
4180 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4181 int seg_not_present, useable, lm;
4182 uint32_t *lp, entry_1, entry_2;
4184 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4185 if (!target_ldt_info)
4186 return -TARGET_EFAULT;
4187 idx = tswap32(target_ldt_info->entry_number);
/* Only the TLS slots of the GDT may be queried. */
4188 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4189 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4190 unlock_user_struct(target_ldt_info, ptr, 1);
4191 return -TARGET_EINVAL;
4193 lp = (uint32_t *)(gdt_table + idx);
4194 entry_1 = tswap32(lp[0]);
4195 entry_2 = tswap32(lp[1]);
/* Invert the encoding done by do_set_thread_area(): pull each field
 * out of the high descriptor word. */
4197 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4198 contents = (entry_2 >> 10) & 3;
4199 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4200 seg_32bit = (entry_2 >> 22) & 1;
4201 limit_in_pages = (entry_2 >> 23) & 1;
4202 useable = (entry_2 >> 20) & 1;
4206 lm = (entry_2 >> 21) & 1;
4208 flags = (seg_32bit << 0) | (contents << 1) |
4209 (read_exec_only << 3) | (limit_in_pages << 4) |
4210 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4211 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4212 base_addr = (entry_1 >> 16) |
4213 (entry_2 & 0xff000000) |
4214 ((entry_2 & 0xff) << 16);
4215 target_ldt_info->base_addr = tswapal(base_addr);
4216 target_ldt_info->limit = tswap32(limit);
4217 target_ldt_info->flags = tswap32(flags);
4218 unlock_user_struct(target_ldt_info, ptr, 1);
4221 #endif /* TARGET_I386 && TARGET_ABI32 */
4223 #ifndef TARGET_ABI32
/* Emulate x86-64 arch_prctl(2): ARCH_SET_{FS,GS} load a zero selector
 * and set the segment base directly; ARCH_GET_{FS,GS} copy the base
 * out to guest memory.  Unknown codes yield -TARGET_EINVAL. */
4224 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4231 case TARGET_ARCH_SET_GS:
4232 case TARGET_ARCH_SET_FS:
4233 if (code == TARGET_ARCH_SET_GS)
4237 cpu_x86_load_seg(env, idx, 0);
4238 env->segs[idx].base = addr;
4240 case TARGET_ARCH_GET_GS:
4241 case TARGET_ARCH_GET_FS:
4242 if (code == TARGET_ARCH_GET_GS)
4246 val = env->segs[idx].base;
4247 if (put_user(val, addr, abi_ulong))
4248 ret = -TARGET_EFAULT;
4251 ret = -TARGET_EINVAL;
/* Stack size for threads created via clone(CLONE_VM). */
4260 #define NEW_STACK_SIZE 0x40000
/* Serializes thread creation so parent/child setup appears atomic. */
4263 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake data passed from do_fork() to clone_func(): the mutex and
 * condvar implement the "child is ready" rendezvous, and the tid
 * pointers are the guest addresses for CLONE_*_SETTID. */
4266 pthread_mutex_t mutex;
4267 pthread_cond_t cond;
4270 abi_ulong child_tidptr;
4271 abi_ulong parent_tidptr;
/* Thread entry point for emulated clone(CLONE_VM): publish the new
 * tid to the CLONE_*_SETTID addresses, re-enable signals, signal the
 * parent that setup is done, then wait for the parent to release
 * clone_lock before running guest code. */
4275 static void *clone_func(void *arg)
4277 new_thread_info *info = arg;
4283 cpu = ENV_GET_CPU(env);
4285 ts = (TaskState *)cpu->opaque;
4286 info->tid = gettid();
4287 cpu->host_tid = info->tid;
4289 if (info->child_tidptr)
4290 put_user_u32(info->tid, info->child_tidptr);
4291 if (info->parent_tidptr)
4292 put_user_u32(info->tid, info->parent_tidptr);
4293 /* Enable signals. */
4294 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4295 /* Signal to the parent that we're ready. */
4296 pthread_mutex_lock(&info->mutex);
4297 pthread_cond_broadcast(&info->cond);
4298 pthread_mutex_unlock(&info->mutex);
4299 /* Wait until the parent has finished initializing the tls state. */
4300 pthread_mutex_lock(&clone_lock);
4301 pthread_mutex_unlock(&clone_lock);
4307 /* do_fork() Must return host values and target errnos (unlike most
4308 do_*() functions). */
/* Emulate clone(2)/fork(2)/vfork(2).  With CLONE_VM a host pthread is
 * spawned running a copied CPU state; otherwise a plain fork() is
 * performed (vfork is demoted to fork).  Returns the child tid/pid on
 * success, or a negative host errno. */
4309 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4310 abi_ulong parent_tidptr, target_ulong newtls,
4311 abi_ulong child_tidptr)
4313 CPUState *cpu = ENV_GET_CPU(env);
4317 CPUArchState *new_env;
4318 unsigned int nptl_flags;
4321 /* Emulate vfork() with fork() */
4322 if (flags & CLONE_VFORK)
4323 flags &= ~(CLONE_VFORK | CLONE_VM);
4325 if (flags & CLONE_VM) {
4326 TaskState *parent_ts = (TaskState *)cpu->opaque;
4327 new_thread_info info;
4328 pthread_attr_t attr;
4330 ts = g_malloc0(sizeof(TaskState));
4331 init_task_state(ts);
4332 /* we create a new CPU instance. */
4333 new_env = cpu_copy(env);
4334 /* Init regs that differ from the parent. */
4335 cpu_clone_regs(new_env, newsp);
4336 new_cpu = ENV_GET_CPU(new_env);
4337 new_cpu->opaque = ts;
4338 ts->bprm = parent_ts->bprm;
4339 ts->info = parent_ts->info;
/* NPTL-specific flags are handled here; strip them before they can
 * confuse the generic path. */
4341 flags &= ~CLONE_NPTL_FLAGS2;
4343 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4344 ts->child_tidptr = child_tidptr;
4347 if (nptl_flags & CLONE_SETTLS)
4348 cpu_set_tls (new_env, newtls);
4350 /* Grab a mutex so that thread setup appears atomic. */
4351 pthread_mutex_lock(&clone_lock);
4353 memset(&info, 0, sizeof(info));
4354 pthread_mutex_init(&info.mutex, NULL);
4355 pthread_mutex_lock(&info.mutex);
4356 pthread_cond_init(&info.cond, NULL);
4358 if (nptl_flags & CLONE_CHILD_SETTID)
4359 info.child_tidptr = child_tidptr;
4360 if (nptl_flags & CLONE_PARENT_SETTID)
4361 info.parent_tidptr = parent_tidptr;
/* NOTE(review): the return codes of the pthread_attr_* calls are
 * assigned to 'ret' but never checked before being overwritten. */
4363 ret = pthread_attr_init(&attr);
4364 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4365 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4366 /* It is not safe to deliver signals until the child has finished
4367 initializing, so temporarily block all signals. */
4368 sigfillset(&sigmask);
/* NOTE(review): sigprocmask in a multithreaded process is
 * unspecified by POSIX; pthread_sigmask would be the portable
 * call — confirm intent. */
4369 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4371 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4372 /* TODO: Free new CPU state if thread creation failed. */
4374 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4375 pthread_attr_destroy(&attr);
4377 /* Wait for the child to initialize. */
4378 pthread_cond_wait(&info.cond, &info.mutex);
4380 if (flags & CLONE_PARENT_SETTID)
4381 put_user_u32(ret, parent_tidptr);
4385 pthread_mutex_unlock(&info.mutex);
4386 pthread_cond_destroy(&info.cond);
4387 pthread_mutex_destroy(&info.mutex);
4388 pthread_mutex_unlock(&clone_lock);
4390 /* if no CLONE_VM, we consider it is a fork */
4391 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4396 /* Child Process. */
4397 cpu_clone_regs(env, newsp);
4399 /* There is a race condition here. The parent process could
4400 theoretically read the TID in the child process before the child
4401 tid is set. This would require using either ptrace
4402 (not implemented) or having *_tidptr to point at a shared memory
4403 mapping. We can't repeat the spinlock hack used above because
4404 the child process gets its own copy of the lock. */
4405 if (flags & CLONE_CHILD_SETTID)
4406 put_user_u32(gettid(), child_tidptr);
4407 if (flags & CLONE_PARENT_SETTID)
4408 put_user_u32(gettid(), parent_tidptr);
4409 ts = (TaskState *)cpu->opaque;
4410 if (flags & CLONE_SETTLS)
4411 cpu_set_tls (env, newtls);
4412 if (flags & CLONE_CHILD_CLEARTID)
4413 ts->child_tidptr = child_tidptr;
4421 /* warning : doesn't handle linux specific flags... */
/* Map a target fcntl(2) command constant to the host's constant.
 * Commands with identical numbering fall through to a shared return
 * (elided in this listing); unknown commands yield -TARGET_EINVAL. */
4422 static int target_to_host_fcntl_cmd(int cmd)
4425 case TARGET_F_DUPFD:
4426 case TARGET_F_GETFD:
4427 case TARGET_F_SETFD:
4428 case TARGET_F_GETFL:
4429 case TARGET_F_SETFL:
4431 case TARGET_F_GETLK:
4433 case TARGET_F_SETLK:
4435 case TARGET_F_SETLKW:
4437 case TARGET_F_GETOWN:
4439 case TARGET_F_SETOWN:
4441 case TARGET_F_GETSIG:
4443 case TARGET_F_SETSIG:
/* The 64-bit lock commands only exist as separate numbers on 32-bit
 * ABIs. */
4445 #if TARGET_ABI_BITS == 32
4446 case TARGET_F_GETLK64:
4448 case TARGET_F_SETLK64:
4450 case TARGET_F_SETLKW64:
4453 case TARGET_F_SETLEASE:
4455 case TARGET_F_GETLEASE:
4457 #ifdef F_DUPFD_CLOEXEC
4458 case TARGET_F_DUPFD_CLOEXEC:
4459 return F_DUPFD_CLOEXEC;
4461 case TARGET_F_NOTIFY:
4464 case TARGET_F_GETOWN_EX:
4468 case TARGET_F_SETOWN_EX:
4472 return -TARGET_EINVAL;
4474 return -TARGET_EINVAL;
/* Translation table for struct flock l_type values; -1 masks mean
 * "match exact value" rather than a bit mask. */
4477 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4478 static const bitmask_transtbl flock_tbl[] = {
4479 TRANSTBL_CONVERT(F_RDLCK),
4480 TRANSTBL_CONVERT(F_WRLCK),
4481 TRANSTBL_CONVERT(F_UNLCK),
4482 TRANSTBL_CONVERT(F_EXLCK),
4483 TRANSTBL_CONVERT(F_SHLCK),
/* Emulate fcntl(2): translate the command, convert struct flock /
 * flock64 / f_owner_ex arguments between guest and host layouts,
 * perform the host call, and convert results back.  Returns the host
 * result or a -TARGET_* errno. */
4487 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4490 struct target_flock *target_fl;
4491 struct flock64 fl64;
4492 struct target_flock64 *target_fl64;
4494 struct f_owner_ex fox;
4495 struct target_f_owner_ex *target_fox;
4498 int host_cmd = target_to_host_fcntl_cmd(cmd);
4500 if (host_cmd == -TARGET_EINVAL)
4504 case TARGET_F_GETLK:
4505 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4506 return -TARGET_EFAULT;
4508 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4509 fl.l_whence = tswap16(target_fl->l_whence);
4510 fl.l_start = tswapal(target_fl->l_start);
4511 fl.l_len = tswapal(target_fl->l_len);
4512 fl.l_pid = tswap32(target_fl->l_pid);
4513 unlock_user_struct(target_fl, arg, 0);
4514 ret = get_errno(fcntl(fd, host_cmd, &fl));
/* F_GETLK writes the conflicting lock back into the struct, so copy
 * the result out to the guest. */
4516 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4517 return -TARGET_EFAULT;
4519 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4520 target_fl->l_whence = tswap16(fl.l_whence);
4521 target_fl->l_start = tswapal(fl.l_start);
4522 target_fl->l_len = tswapal(fl.l_len);
4523 target_fl->l_pid = tswap32(fl.l_pid);
4524 unlock_user_struct(target_fl, arg, 1);
4528 case TARGET_F_SETLK:
4529 case TARGET_F_SETLKW:
4530 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4531 return -TARGET_EFAULT;
4533 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4534 fl.l_whence = tswap16(target_fl->l_whence);
4535 fl.l_start = tswapal(target_fl->l_start);
4536 fl.l_len = tswapal(target_fl->l_len);
4537 fl.l_pid = tswap32(target_fl->l_pid);
4538 unlock_user_struct(target_fl, arg, 0);
4539 ret = get_errno(fcntl(fd, host_cmd, &fl));
4542 case TARGET_F_GETLK64:
4543 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4544 return -TARGET_EFAULT;
/* NOTE(review): the ">> 1" applied to l_type here (and below) is a
 * target-specific encoding quirk — its guarding #ifdef appears to be
 * elided in this listing; confirm against the full source. */
4546 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4547 fl64.l_whence = tswap16(target_fl64->l_whence);
4548 fl64.l_start = tswap64(target_fl64->l_start);
4549 fl64.l_len = tswap64(target_fl64->l_len);
4550 fl64.l_pid = tswap32(target_fl64->l_pid);
4551 unlock_user_struct(target_fl64, arg, 0);
4552 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4554 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4555 return -TARGET_EFAULT;
4556 target_fl64->l_type =
4557 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4558 target_fl64->l_whence = tswap16(fl64.l_whence);
4559 target_fl64->l_start = tswap64(fl64.l_start);
4560 target_fl64->l_len = tswap64(fl64.l_len);
4561 target_fl64->l_pid = tswap32(fl64.l_pid);
4562 unlock_user_struct(target_fl64, arg, 1);
4565 case TARGET_F_SETLK64:
4566 case TARGET_F_SETLKW64:
4567 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4568 return -TARGET_EFAULT;
4570 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4571 fl64.l_whence = tswap16(target_fl64->l_whence);
4572 fl64.l_start = tswap64(target_fl64->l_start);
4573 fl64.l_len = tswap64(target_fl64->l_len);
4574 fl64.l_pid = tswap32(target_fl64->l_pid);
4575 unlock_user_struct(target_fl64, arg, 0);
4576 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4579 case TARGET_F_GETFL:
4580 ret = get_errno(fcntl(fd, host_cmd, arg));
/* Translate the returned O_* status flags back to target encoding. */
4582 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4586 case TARGET_F_SETFL:
4587 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4591 case TARGET_F_GETOWN_EX:
4592 ret = get_errno(fcntl(fd, host_cmd, &fox));
4594 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
4595 return -TARGET_EFAULT;
4596 target_fox->type = tswap32(fox.type);
4597 target_fox->pid = tswap32(fox.pid);
4598 unlock_user_struct(target_fox, arg, 1);
4604 case TARGET_F_SETOWN_EX:
4605 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
4606 return -TARGET_EFAULT;
4607 fox.type = tswap32(target_fox->type);
4608 fox.pid = tswap32(target_fox->pid);
4609 unlock_user_struct(target_fox, arg, 0);
4610 ret = get_errno(fcntl(fd, host_cmd, &fox));
/* These commands take a plain integer argument: pass through. */
4614 case TARGET_F_SETOWN:
4615 case TARGET_F_GETOWN:
4616 case TARGET_F_SETSIG:
4617 case TARGET_F_GETSIG:
4618 case TARGET_F_SETLEASE:
4619 case TARGET_F_GETLEASE:
4620 ret = get_errno(fcntl(fd, host_cmd, arg));
/* Default: forward the untranslated command as-is. */
4624 ret = get_errno(fcntl(fd, cmd, arg));
/* UID/GID width conversion helpers for the 16-bit uid syscall family
 * (USE_UID16 branch first, identity versions in the !USE_UID16
 * branch).  low2high* treat 16-bit -1 as the "no change" sentinel. */
4632 static inline int high2lowuid(int uid)
4640 static inline int high2lowgid(int gid)
4648 static inline int low2highuid(int uid)
4650 if ((int16_t)uid == -1)
4656 static inline int low2highgid(int gid)
4658 if ((int16_t)gid == -1)
4663 static inline int tswapid(int id)
/* 16-bit ids are stored with put_user_u16 in this configuration. */
4668 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
4670 #else /* !USE_UID16 */
4671 static inline int high2lowuid(int uid)
4675 static inline int high2lowgid(int gid)
4679 static inline int low2highuid(int uid)
4683 static inline int low2highgid(int gid)
4687 static inline int tswapid(int id)
4692 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
4694 #endif /* USE_UID16 */
/* One-time syscall-layer initialization: register all thunk struct
 * descriptors, build the target->host errno table, and patch ioctl
 * numbers whose size field was left as the all-ones sentinel with the
 * real thunk-computed size. */
4696 void syscall_init(void)
4699 const argtype *arg_type;
4703 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4704 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4705 #include "syscall_types.h"
4707 #undef STRUCT_SPECIAL
4709 /* Build target_to_host_errno_table[] table from
4710 * host_to_target_errno_table[]. */
4711 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4712 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4715 /* we patch the ioctl size if necessary. We rely on the fact that
4716 no ioctl has all the bits at '1' in the size field */
4718 while (ie->target_cmd != 0) {
4719 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4720 TARGET_IOC_SIZEMASK) {
4721 arg_type = ie->arg_type;
/* Size patching only makes sense for pointer-typed arguments. */
4722 if (arg_type[0] != TYPE_PTR) {
4723 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4728 size = thunk_type_size(arg_type, 0);
4729 ie->target_cmd = (ie->target_cmd &
4730 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4731 (size << TARGET_IOC_SIZESHIFT);
4734 /* automatic consistency check if same arch */
4735 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4736 (defined(__x86_64__) && defined(TARGET_X86_64))
4737 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4738 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4739 ie->name, ie->target_cmd, ie->host_cmd);
4746 #if TARGET_ABI_BITS == 32
/* Combine the two 32-bit halves of a 64-bit offset passed in a
 * register pair; which register holds the high word depends on target
 * endianness.  On 64-bit ABIs word0 already holds the full value. */
4747 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4749 #ifdef TARGET_WORDS_BIGENDIAN
4750 return ((uint64_t)word0 << 32) | word1;
4752 return ((uint64_t)word1 << 32) | word0;
4755 #else /* TARGET_ABI_BITS == 32 */
4756 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4760 #endif /* TARGET_ABI_BITS != 32 */
4762 #ifdef TARGET_NR_truncate64
/* truncate64: rebuild the 64-bit length from the register pair
 * (shifting the pair when the ABI requires aligned register pairs)
 * and call the host truncate64. */
4763 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4768 if (regpairs_aligned(cpu_env)) {
4772 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4776 #ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair handling as target_truncate64 but
 * operating on an fd. */
4777 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4782 if (regpairs_aligned(cpu_env)) {
4786 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/* Copy a guest struct timespec at target_addr into *host_ts,
 * byte-swapping both fields.  Returns -TARGET_EFAULT on bad guest
 * address. */
4790 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4791 abi_ulong target_addr)
4793 struct target_timespec *target_ts;
4795 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4796 return -TARGET_EFAULT;
4797 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4798 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4799 unlock_user_struct(target_ts, target_addr, 0);
/* Copy *host_ts out to the guest struct timespec at target_addr,
 * byte-swapping both fields.  Returns -TARGET_EFAULT on bad guest
 * address. */
4803 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4804 struct timespec *host_ts)
4806 struct target_timespec *target_ts;
4808 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4809 return -TARGET_EFAULT;
4810 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4811 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4812 unlock_user_struct(target_ts, target_addr, 1);
/* Copy a guest struct itimerspec into *host_itspec, byte-swapping all
 * four time fields. */
4816 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
4817 abi_ulong target_addr)
4819 struct target_itimerspec *target_itspec;
4821 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
4822 return -TARGET_EFAULT;
4825 host_itspec->it_interval.tv_sec =
4826 tswapal(target_itspec->it_interval.tv_sec);
4827 host_itspec->it_interval.tv_nsec =
4828 tswapal(target_itspec->it_interval.tv_nsec);
4829 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
4830 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
/* NOTE(review): unlocking a VERIFY_READ struct with copy=1 forces an
 * unnecessary copy-back; copy=0 would match the read-only usage —
 * confirm against the sibling timespec helpers. */
4832 unlock_user_struct(target_itspec, target_addr, 1);
/* Copy *host_its out to the guest struct itimerspec at target_addr,
 * byte-swapping all four time fields. */
4836 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
4837 struct itimerspec *host_its)
4839 struct target_itimerspec *target_itspec;
4841 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
4842 return -TARGET_EFAULT;
4845 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
4846 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
4848 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
4849 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
/* NOTE(review): copy=0 here discards the data just written on hosts
 * where lock_user uses a bounce buffer — this looks like it should be
 * copy=1 for a VERIFY_WRITE struct; confirm upstream. */
4851 unlock_user_struct(target_itspec, target_addr, 0);
4855 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Copy a host struct stat into the guest's 64-bit stat layout.  On
 * 32-bit ARM EABI a distinct target_eabi_stat64 layout is used;
 * otherwise target_stat64 (or plain target_stat) is filled.  Returns
 * -TARGET_EFAULT on bad guest address. */
4856 static inline abi_long host_to_target_stat64(void *cpu_env,
4857 abi_ulong target_addr,
4858 struct stat *host_st)
4860 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
4861 if (((CPUARMState *)cpu_env)->eabi) {
4862 struct target_eabi_stat64 *target_st;
4864 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4865 return -TARGET_EFAULT;
4866 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4867 __put_user(host_st->st_dev, &target_st->st_dev);
4868 __put_user(host_st->st_ino, &target_st->st_ino);
4869 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
/* Some targets carry the inode in a secondary __st_ino field. */
4870 __put_user(host_st->st_ino, &target_st->__st_ino);
4872 __put_user(host_st->st_mode, &target_st->st_mode);
4873 __put_user(host_st->st_nlink, &target_st->st_nlink);
4874 __put_user(host_st->st_uid, &target_st->st_uid);
4875 __put_user(host_st->st_gid, &target_st->st_gid);
4876 __put_user(host_st->st_rdev, &target_st->st_rdev);
4877 __put_user(host_st->st_size, &target_st->st_size);
4878 __put_user(host_st->st_blksize, &target_st->st_blksize);
4879 __put_user(host_st->st_blocks, &target_st->st_blocks);
4880 __put_user(host_st->st_atime, &target_st->target_st_atime);
4881 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4882 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4883 unlock_user_struct(target_st, target_addr, 1);
4887 #if defined(TARGET_HAS_STRUCT_STAT64)
4888 struct target_stat64 *target_st;
4890 struct target_stat *target_st;
4893 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4894 return -TARGET_EFAULT;
4895 memset(target_st, 0, sizeof(*target_st));
4896 __put_user(host_st->st_dev, &target_st->st_dev);
4897 __put_user(host_st->st_ino, &target_st->st_ino);
4898 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4899 __put_user(host_st->st_ino, &target_st->__st_ino);
4901 __put_user(host_st->st_mode, &target_st->st_mode);
4902 __put_user(host_st->st_nlink, &target_st->st_nlink);
4903 __put_user(host_st->st_uid, &target_st->st_uid);
4904 __put_user(host_st->st_gid, &target_st->st_gid);
4905 __put_user(host_st->st_rdev, &target_st->st_rdev);
4906 /* XXX: better use of kernel struct */
4907 __put_user(host_st->st_size, &target_st->st_size);
4908 __put_user(host_st->st_blksize, &target_st->st_blksize);
4909 __put_user(host_st->st_blocks, &target_st->st_blocks);
4910 __put_user(host_st->st_atime, &target_st->target_st_atime);
4911 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4912 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4913 unlock_user_struct(target_st, target_addr, 1);
4920 /* ??? Using host futex calls even when target atomic operations
4921 are not really atomic probably breaks things. However implementing
4922 futexes locally would make futexes shared between multiple processes
4923 tricky. However they're probably useless because guest atomic
4924 operations won't work either. */
/* Emulate futex(2): convert the optional timeout, byte-swap the
 * compared value for WAIT-style ops, and forward to the host futex
 * with guest addresses translated via g2h(). */
4925 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4926 target_ulong uaddr2, int val3)
4928 struct timespec ts, *pts;
4931 /* ??? We assume FUTEX_* constants are the same on both host
4933 #ifdef FUTEX_CMD_MASK
/* Strip FUTEX_PRIVATE_FLAG/CLOCK_REALTIME modifier bits to find the
 * base operation. */
4934 base_op = op & FUTEX_CMD_MASK;
4940 case FUTEX_WAIT_BITSET:
4943 target_to_host_timespec(pts, timeout);
/* The wait value lives in guest memory ordering, so swap it before
 * the host comparison. */
4947 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4950 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4952 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4954 case FUTEX_CMP_REQUEUE:
4956 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4957 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4958 But the prototype takes a `struct timespec *'; insert casts
4959 to satisfy the compiler. We do not need to tswap TIMEOUT
4960 since it's not compared to guest memory. */
4961 pts = (struct timespec *)(uintptr_t) timeout;
4962 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4964 (base_op == FUTEX_CMP_REQUEUE
4968 return -TARGET_ENOSYS;
4972 /* Map host to target signal numbers for the wait family of syscalls.
4973 Assume all other status bits are the same. */
/* Rewrites only the signal-number bits of a wait(2) status word:
 * the low 7 bits for a terminated process, bits 8-15 for a stopped
 * one; all other bits pass through unchanged. */
4974 int host_to_target_waitstatus(int status)
4976 if (WIFSIGNALED(status)) {
4977 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4979 if (WIFSTOPPED(status)) {
4980 return (host_to_target_signal(WSTOPSIG(status)) << 8)
/* Parse up to three dot-separated numeric components of a kernel
 * release string into one byte-per-component integer. */
4986 static int relstr_to_int(const char *s)
4988 /* Convert a uname release string like "2.6.18" to an integer
4989 * of the form 0x020612. (Beware that 0x020612 is *not* 2.6.12.)
4994 for (i = 0; i < 3; i++) {
4996 while (*s >= '0' && *s <= '9') {
/* Accumulate the current decimal component (digit-append line elided
 * in this listing). */
5001 tmp = (tmp << 8) + n;
/* Return the (possibly faked) kernel version as a packed integer,
 * caching the result.  Prefers the user-supplied qemu_uname_release
 * over the real uname() release string. */
5009 int get_osversion(void)
5011 static int osversion;
5012 struct new_utsname buf;
5017 if (qemu_uname_release && *qemu_uname_release) {
5018 s = qemu_uname_release;
5020 if (sys_uname(&buf))
5024 osversion = relstr_to_int(s);
/* If the real host kernel reports a release older than
 * UNAME_MINIMUM_RELEASE and the user didn't override it, fake the
 * minimum release so guest glibc version checks pass. */
5028 void init_qemu_uname_release(void)
5030 /* Initialize qemu_uname_release for later use.
5031 * If the host kernel is too old and the user hasn't asked for
5032 * a specific fake version number, we might want to fake a minimum
5033 * target kernel version.
5035 #ifdef UNAME_MINIMUM_RELEASE
5036 struct new_utsname buf;
/* User override takes precedence; nothing to do. */
5038 if (qemu_uname_release && *qemu_uname_release) {
5042 if (sys_uname(&buf)) {
5046 if (relstr_to_int(buf.release) < relstr_to_int(UNAME_MINIMUM_RELEASE)) {
5047 qemu_uname_release = UNAME_MINIMUM_RELEASE;
/* Emulate /proc/self/maps: parse the host's maps file, keep only
 * ranges that are valid guest addresses, rewrite them via h2g(), and
 * append a synthetic [stack] line on targets that need one. */
5052 static int open_self_maps(void *cpu_env, int fd)
5054 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5055 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5056 TaskState *ts = cpu->opaque;
5063 fp = fopen("/proc/self/maps", "r");
5068 while ((read = getline(&line, &len, fp)) != -1) {
5069 int fields, dev_maj, dev_min, inode;
5070 uint64_t min, max, offset;
5071 char flag_r, flag_w, flag_x, flag_p;
5072 char path[512] = "";
/* NOTE(review): "%512s" can store 512 chars + NUL into path[512] —
 * looks like an off-by-one; "%511s" would be safe. Confirm. */
5073 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5074 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5075 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
/* 10 fields = anonymous mapping, 11 = named; anything else is a
 * parse failure and the line is skipped. */
5077 if ((fields < 10) || (fields > 11)) {
5080 if (!strncmp(path, "[stack]", 7)) {
/* Only emit mappings that fall inside the guest address space. */
5083 if (h2g_valid(min) && h2g_valid(max)) {
5084 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5085 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
5086 h2g(min), h2g(max), flag_r, flag_w,
5087 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5088 path[0] ? " " : "", path);
5095 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
/* Synthesize the guest [stack] entry from the image info. */
5096 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
5097 (unsigned long long)ts->info->stack_limit,
5098 (unsigned long long)(ts->info->start_stack +
5099 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
5100 (unsigned long long)0);
/* Emulate /proc/self/stat: emit 44 space-separated fields, faking
 * only pid (field 0), comm (field 1) and startstack (field 27); every
 * other field is written as 0. */
5106 static int open_self_stat(void *cpu_env, int fd)
5108 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5109 TaskState *ts = cpu->opaque;
5110 abi_ulong start_stack = ts->info->start_stack;
5113 for (i = 0; i < 44; i++) {
5121 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5122 } else if (i == 1) {
/* comm: the guest binary's argv[0], parenthesized as the kernel
 * does. */
5124 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5125 } else if (i == 27) {
5128 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5130 /* for the rest, there is MasterCard */
5131 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5135 if (write(fd, buf, len) != len) {
/* Emulate /proc/self/auxv: copy the auxiliary vector saved on the
 * guest stack at exec time into the given fd, then rewind it. */
5143 static int open_self_auxv(void *cpu_env, int fd)
5145 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5146 TaskState *ts = cpu->opaque;
5147 abi_ulong auxv = ts->info->saved_auxv;
5148 abi_ulong len = ts->info->auxv_len;
5152 * Auxiliary vector is stored in target process stack.
5153 * read in whole auxv vector and copy it to file
5155 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5159 r = write(fd, ptr, len);
/* Leave the fd positioned at the start for the guest's first read. */
5166 lseek(fd, 0, SEEK_SET);
5167 unlock_user(ptr, auxv, len);
/*
 * Return nonzero when FILENAME refers to ENTRY inside this process's
 * own /proc directory, i.e. it is exactly "/proc/self/ENTRY" or
 * "/proc/<pid>/ENTRY" for our pid.  Used to decide which /proc opens
 * and readlinks must be emulated for the guest.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    static const char proc_prefix[] = "/proc/";
    static const char self_prefix[] = "self/";

    if (strncmp(filename, proc_prefix, sizeof(proc_prefix) - 1) != 0) {
        return 0;
    }
    filename += sizeof(proc_prefix) - 1;

    if (strncmp(filename, self_prefix, sizeof(self_prefix) - 1) == 0) {
        /* "/proc/self/..." always refers to ourselves. */
        filename += sizeof(self_prefix) - 1;
    } else if (*filename >= '1' && *filename <= '9') {
        /* A numeric entry only matches when it is our own pid. */
        char pid_prefix[80];
        size_t pid_len;

        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        pid_len = strlen(pid_prefix);
        if (strncmp(filename, pid_prefix, pid_len) != 0) {
            return 0;
        }
        filename += pid_len;
    } else {
        /* Some other /proc entry (e.g. /proc/version): not ours. */
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
5197 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/*
 * Whole-pathname comparison: matches a /proc entry given by its full
 * path (e.g. "/proc/net/route").  Counterpart to is_proc_myself() for
 * entries that are not pid-relative; used as a cmp callback in
 * do_open()'s fake_open table.
 */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
5203 static int open_net_route(void *cpu_env, int fd)
/*
 * Fake /proc/net/route for cross-endian guests: copy the host file,
 * byte-swapping (tswap32) the three 32-bit IPv4 columns — destination,
 * gateway, mask — so the guest sees them in its own byte order.  Only
 * compiled when host and target endianness differ.
 */
5210 fp = fopen("/proc/net/route", "r");
/* First line is the column header: pass it through unchanged. */
5217 read = getline(&line, &len, fp);
5218 dprintf(fd, "%s", line);
/* Remaining lines are routing entries; rewrite them one at a time. */
5222 while ((read = getline(&line, &len, fp)) != -1) {
5224 uint32_t dest, gw, mask;
5225 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
/* NOTE(review): sscanf result is unchecked; a malformed line would
 * leave some fields uninitialized before they are re-emitted. */
5226 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5227 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5228 &mask, &mtu, &window, &irtt);
5229 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5230 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5231 metric, tswap32(mask), mtu, window, irtt);
5241 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
/*
 * open(2) on behalf of the guest, intercepting /proc paths whose
 * contents must be faked: maps/stat/auxv of this process, /proc/self/exe,
 * and (on cross-endian builds) /proc/net/route.  Faked entries are
 * materialized into a temporary file whose rewound fd is returned;
 * anything else falls through to the host open().  Returns a host fd
 * or a -TARGET_errno value via get_errno().
 */
5244 const char *filename;
5245 int (*fill)(void *cpu_env, int fd);
5246 int (*cmp)(const char *s1, const char *s2);
5248 const struct fake_open *fake_open;
/* Table of emulated /proc entries; cmp decides how pathname matches
 * (pid-relative via is_proc_myself, or full path via is_proc). */
5249 static const struct fake_open fakes[] = {
5250 { "maps", open_self_maps, is_proc_myself },
5251 { "stat", open_self_stat, is_proc_myself },
5252 { "auxv", open_self_auxv, is_proc_myself },
5253 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5254 { "/proc/net/route", open_net_route, is_proc },
5256 { NULL, NULL, NULL }
/* /proc/self/exe: prefer the fd the loader passed via AT_EXECFD;
 * otherwise open the recorded executable path on the host. */
5259 if (is_proc_myself(pathname, "exe")) {
5260 int execfd = qemu_getauxval(AT_EXECFD);
5261 return execfd ? execfd : get_errno(open(exec_path, flags, mode));
5264 for (fake_open = fakes; fake_open->filename; fake_open++) {
5265 if (fake_open->cmp(pathname, fake_open->filename)) {
5270 if (fake_open->filename) {
5272 char filename[PATH_MAX];
5275 /* create temporary file to map stat to */
5276 tmpdir = getenv("TMPDIR");
5279 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5280 fd = mkstemp(filename);
/* Fill the temp file with the faked contents, then rewind so the
 * caller reads from the beginning (unlink/error paths elided here). */
5286 if ((r = fake_open->fill(cpu_env, fd))) {
5290 lseek(fd, 0, SEEK_SET);
/* Not a special path: plain host open; path() applies the guest
 * filesystem prefix if one is configured. */
5295 return get_errno(open(path(pathname), flags, mode));
5298 /* do_syscall() should always have a single exit point at the end so
5299 that actions, such as logging of syscall results, can be performed.
5300 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5301 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5302 abi_long arg2, abi_long arg3, abi_long arg4,
5303 abi_long arg5, abi_long arg6, abi_long arg7,
5306 CPUState *cpu = ENV_GET_CPU(cpu_env);
5313 gemu_log("syscall %d", num);
5316 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5319 case TARGET_NR_exit:
5320 /* In old applications this may be used to implement _exit(2).
5321 However in threaded applications it is used for thread termination,
5322 and _exit_group is used for application termination.
5323 Do thread termination if we have more than one thread. */
5324 /* FIXME: This probably breaks if a signal arrives. We should probably
5325 be disabling signals. */
5326 if (CPU_NEXT(first_cpu)) {
5330 /* Remove the CPU from the list. */
5331 QTAILQ_REMOVE(&cpus, cpu, node);
5334 if (ts->child_tidptr) {
5335 put_user_u32(0, ts->child_tidptr);
5336 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5340 object_unref(OBJECT(cpu));
5347 gdb_exit(cpu_env, arg1);
5349 ret = 0; /* avoid warning */
5351 case TARGET_NR_read:
5355 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5357 ret = get_errno(read(arg1, p, arg3));
5358 unlock_user(p, arg2, ret);
5361 case TARGET_NR_write:
5362 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5364 ret = get_errno(write(arg1, p, arg3));
5365 unlock_user(p, arg2, 0);
5367 case TARGET_NR_open:
5368 if (!(p = lock_user_string(arg1)))
5370 ret = get_errno(do_open(cpu_env, p,
5371 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5373 unlock_user(p, arg1, 0);
5375 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5376 case TARGET_NR_openat:
5377 if (!(p = lock_user_string(arg2)))
5379 ret = get_errno(sys_openat(arg1,
5381 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5383 unlock_user(p, arg2, 0);
5386 case TARGET_NR_close:
5387 ret = get_errno(close(arg1));
5392 case TARGET_NR_fork:
5393 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5395 #ifdef TARGET_NR_waitpid
5396 case TARGET_NR_waitpid:
5399 ret = get_errno(waitpid(arg1, &status, arg3));
5400 if (!is_error(ret) && arg2 && ret
5401 && put_user_s32(host_to_target_waitstatus(status), arg2))
5406 #ifdef TARGET_NR_waitid
5407 case TARGET_NR_waitid:
5411 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5412 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5413 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5415 host_to_target_siginfo(p, &info);
5416 unlock_user(p, arg3, sizeof(target_siginfo_t));
5421 #ifdef TARGET_NR_creat /* not on alpha */
5422 case TARGET_NR_creat:
5423 if (!(p = lock_user_string(arg1)))
5425 ret = get_errno(creat(p, arg2));
5426 unlock_user(p, arg1, 0);
5429 case TARGET_NR_link:
5432 p = lock_user_string(arg1);
5433 p2 = lock_user_string(arg2);
5435 ret = -TARGET_EFAULT;
5437 ret = get_errno(link(p, p2));
5438 unlock_user(p2, arg2, 0);
5439 unlock_user(p, arg1, 0);
5442 #if defined(TARGET_NR_linkat)
5443 case TARGET_NR_linkat:
5448 p = lock_user_string(arg2);
5449 p2 = lock_user_string(arg4);
5451 ret = -TARGET_EFAULT;
5453 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5454 unlock_user(p, arg2, 0);
5455 unlock_user(p2, arg4, 0);
5459 case TARGET_NR_unlink:
5460 if (!(p = lock_user_string(arg1)))
5462 ret = get_errno(unlink(p));
5463 unlock_user(p, arg1, 0);
5465 #if defined(TARGET_NR_unlinkat)
5466 case TARGET_NR_unlinkat:
5467 if (!(p = lock_user_string(arg2)))
5469 ret = get_errno(unlinkat(arg1, p, arg3));
5470 unlock_user(p, arg2, 0);
5473 case TARGET_NR_execve:
5475 char **argp, **envp;
5478 abi_ulong guest_argp;
5479 abi_ulong guest_envp;
5486 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5487 if (get_user_ual(addr, gp))
5495 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5496 if (get_user_ual(addr, gp))
5503 argp = alloca((argc + 1) * sizeof(void *));
5504 envp = alloca((envc + 1) * sizeof(void *));
5506 for (gp = guest_argp, q = argp; gp;
5507 gp += sizeof(abi_ulong), q++) {
5508 if (get_user_ual(addr, gp))
5512 if (!(*q = lock_user_string(addr)))
5514 total_size += strlen(*q) + 1;
5518 for (gp = guest_envp, q = envp; gp;
5519 gp += sizeof(abi_ulong), q++) {
5520 if (get_user_ual(addr, gp))
5524 if (!(*q = lock_user_string(addr)))
5526 total_size += strlen(*q) + 1;
5530 /* This case will not be caught by the host's execve() if its
5531 page size is bigger than the target's. */
5532 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5533 ret = -TARGET_E2BIG;
5536 if (!(p = lock_user_string(arg1)))
5538 ret = get_errno(execve(p, argp, envp));
5539 unlock_user(p, arg1, 0);
5544 ret = -TARGET_EFAULT;
5547 for (gp = guest_argp, q = argp; *q;
5548 gp += sizeof(abi_ulong), q++) {
5549 if (get_user_ual(addr, gp)
5552 unlock_user(*q, addr, 0);
5554 for (gp = guest_envp, q = envp; *q;
5555 gp += sizeof(abi_ulong), q++) {
5556 if (get_user_ual(addr, gp)
5559 unlock_user(*q, addr, 0);
5563 case TARGET_NR_chdir:
5564 if (!(p = lock_user_string(arg1)))
5566 ret = get_errno(chdir(p));
5567 unlock_user(p, arg1, 0);
5569 #ifdef TARGET_NR_time
5570 case TARGET_NR_time:
5573 ret = get_errno(time(&host_time));
5576 && put_user_sal(host_time, arg1))
5581 case TARGET_NR_mknod:
5582 if (!(p = lock_user_string(arg1)))
5584 ret = get_errno(mknod(p, arg2, arg3));
5585 unlock_user(p, arg1, 0);
5587 #if defined(TARGET_NR_mknodat)
5588 case TARGET_NR_mknodat:
5589 if (!(p = lock_user_string(arg2)))
5591 ret = get_errno(mknodat(arg1, p, arg3, arg4));
5592 unlock_user(p, arg2, 0);
5595 case TARGET_NR_chmod:
5596 if (!(p = lock_user_string(arg1)))
5598 ret = get_errno(chmod(p, arg2));
5599 unlock_user(p, arg1, 0);
5601 #ifdef TARGET_NR_break
5602 case TARGET_NR_break:
5605 #ifdef TARGET_NR_oldstat
5606 case TARGET_NR_oldstat:
5609 case TARGET_NR_lseek:
5610 ret = get_errno(lseek(arg1, arg2, arg3));
5612 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5613 /* Alpha specific */
5614 case TARGET_NR_getxpid:
5615 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5616 ret = get_errno(getpid());
5619 #ifdef TARGET_NR_getpid
5620 case TARGET_NR_getpid:
5621 ret = get_errno(getpid());
5624 case TARGET_NR_mount:
5626 /* need to look at the data field */
5628 p = lock_user_string(arg1);
5629 p2 = lock_user_string(arg2);
5630 p3 = lock_user_string(arg3);
5631 if (!p || !p2 || !p3)
5632 ret = -TARGET_EFAULT;
5634 /* FIXME - arg5 should be locked, but it isn't clear how to
5635 * do that since it's not guaranteed to be a NULL-terminated
5639 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5641 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5643 unlock_user(p, arg1, 0);
5644 unlock_user(p2, arg2, 0);
5645 unlock_user(p3, arg3, 0);
5648 #ifdef TARGET_NR_umount
5649 case TARGET_NR_umount:
5650 if (!(p = lock_user_string(arg1)))
5652 ret = get_errno(umount(p));
5653 unlock_user(p, arg1, 0);
5656 #ifdef TARGET_NR_stime /* not on alpha */
5657 case TARGET_NR_stime:
5660 if (get_user_sal(host_time, arg1))
5662 ret = get_errno(stime(&host_time));
5666 case TARGET_NR_ptrace:
5668 #ifdef TARGET_NR_alarm /* not on alpha */
5669 case TARGET_NR_alarm:
5673 #ifdef TARGET_NR_oldfstat
5674 case TARGET_NR_oldfstat:
5677 #ifdef TARGET_NR_pause /* not on alpha */
5678 case TARGET_NR_pause:
5679 ret = get_errno(pause());
5682 #ifdef TARGET_NR_utime
5683 case TARGET_NR_utime:
5685 struct utimbuf tbuf, *host_tbuf;
5686 struct target_utimbuf *target_tbuf;
5688 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5690 tbuf.actime = tswapal(target_tbuf->actime);
5691 tbuf.modtime = tswapal(target_tbuf->modtime);
5692 unlock_user_struct(target_tbuf, arg2, 0);
5697 if (!(p = lock_user_string(arg1)))
5699 ret = get_errno(utime(p, host_tbuf));
5700 unlock_user(p, arg1, 0);
5704 case TARGET_NR_utimes:
5706 struct timeval *tvp, tv[2];
5708 if (copy_from_user_timeval(&tv[0], arg2)
5709 || copy_from_user_timeval(&tv[1],
5710 arg2 + sizeof(struct target_timeval)))
5716 if (!(p = lock_user_string(arg1)))
5718 ret = get_errno(utimes(p, tvp));
5719 unlock_user(p, arg1, 0);
5722 #if defined(TARGET_NR_futimesat)
5723 case TARGET_NR_futimesat:
5725 struct timeval *tvp, tv[2];
5727 if (copy_from_user_timeval(&tv[0], arg3)
5728 || copy_from_user_timeval(&tv[1],
5729 arg3 + sizeof(struct target_timeval)))
5735 if (!(p = lock_user_string(arg2)))
5737 ret = get_errno(futimesat(arg1, path(p), tvp));
5738 unlock_user(p, arg2, 0);
5742 #ifdef TARGET_NR_stty
5743 case TARGET_NR_stty:
5746 #ifdef TARGET_NR_gtty
5747 case TARGET_NR_gtty:
5750 case TARGET_NR_access:
5751 if (!(p = lock_user_string(arg1)))
5753 ret = get_errno(access(path(p), arg2));
5754 unlock_user(p, arg1, 0);
5756 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5757 case TARGET_NR_faccessat:
5758 if (!(p = lock_user_string(arg2)))
5760 ret = get_errno(faccessat(arg1, p, arg3, 0));
5761 unlock_user(p, arg2, 0);
5764 #ifdef TARGET_NR_nice /* not on alpha */
5765 case TARGET_NR_nice:
5766 ret = get_errno(nice(arg1));
5769 #ifdef TARGET_NR_ftime
5770 case TARGET_NR_ftime:
5773 case TARGET_NR_sync:
5777 case TARGET_NR_kill:
5778 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5780 case TARGET_NR_rename:
5783 p = lock_user_string(arg1);
5784 p2 = lock_user_string(arg2);
5786 ret = -TARGET_EFAULT;
5788 ret = get_errno(rename(p, p2));
5789 unlock_user(p2, arg2, 0);
5790 unlock_user(p, arg1, 0);
5793 #if defined(TARGET_NR_renameat)
5794 case TARGET_NR_renameat:
5797 p = lock_user_string(arg2);
5798 p2 = lock_user_string(arg4);
5800 ret = -TARGET_EFAULT;
5802 ret = get_errno(renameat(arg1, p, arg3, p2));
5803 unlock_user(p2, arg4, 0);
5804 unlock_user(p, arg2, 0);
5808 case TARGET_NR_mkdir:
5809 if (!(p = lock_user_string(arg1)))
5811 ret = get_errno(mkdir(p, arg2));
5812 unlock_user(p, arg1, 0);
5814 #if defined(TARGET_NR_mkdirat)
5815 case TARGET_NR_mkdirat:
5816 if (!(p = lock_user_string(arg2)))
5818 ret = get_errno(mkdirat(arg1, p, arg3));
5819 unlock_user(p, arg2, 0);
5822 case TARGET_NR_rmdir:
5823 if (!(p = lock_user_string(arg1)))
5825 ret = get_errno(rmdir(p));
5826 unlock_user(p, arg1, 0);
5829 ret = get_errno(dup(arg1));
5831 case TARGET_NR_pipe:
5832 ret = do_pipe(cpu_env, arg1, 0, 0);
5834 #ifdef TARGET_NR_pipe2
5835 case TARGET_NR_pipe2:
5836 ret = do_pipe(cpu_env, arg1,
5837 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5840 case TARGET_NR_times:
5842 struct target_tms *tmsp;
5844 ret = get_errno(times(&tms));
5846 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5849 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5850 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5851 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5852 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5855 ret = host_to_target_clock_t(ret);
5858 #ifdef TARGET_NR_prof
5859 case TARGET_NR_prof:
5862 #ifdef TARGET_NR_signal
5863 case TARGET_NR_signal:
5866 case TARGET_NR_acct:
5868 ret = get_errno(acct(NULL));
5870 if (!(p = lock_user_string(arg1)))
5872 ret = get_errno(acct(path(p)));
5873 unlock_user(p, arg1, 0);
5876 #ifdef TARGET_NR_umount2
5877 case TARGET_NR_umount2:
5878 if (!(p = lock_user_string(arg1)))
5880 ret = get_errno(umount2(p, arg2));
5881 unlock_user(p, arg1, 0);
5884 #ifdef TARGET_NR_lock
5885 case TARGET_NR_lock:
5888 case TARGET_NR_ioctl:
5889 ret = do_ioctl(arg1, arg2, arg3);
5891 case TARGET_NR_fcntl:
5892 ret = do_fcntl(arg1, arg2, arg3);
5894 #ifdef TARGET_NR_mpx
5898 case TARGET_NR_setpgid:
5899 ret = get_errno(setpgid(arg1, arg2));
5901 #ifdef TARGET_NR_ulimit
5902 case TARGET_NR_ulimit:
5905 #ifdef TARGET_NR_oldolduname
5906 case TARGET_NR_oldolduname:
5909 case TARGET_NR_umask:
5910 ret = get_errno(umask(arg1));
5912 case TARGET_NR_chroot:
5913 if (!(p = lock_user_string(arg1)))
5915 ret = get_errno(chroot(p));
5916 unlock_user(p, arg1, 0);
5918 case TARGET_NR_ustat:
5920 case TARGET_NR_dup2:
5921 ret = get_errno(dup2(arg1, arg2));
5923 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5924 case TARGET_NR_dup3:
5925 ret = get_errno(dup3(arg1, arg2, arg3));
5928 #ifdef TARGET_NR_getppid /* not on alpha */
5929 case TARGET_NR_getppid:
5930 ret = get_errno(getppid());
5933 case TARGET_NR_getpgrp:
5934 ret = get_errno(getpgrp());
5936 case TARGET_NR_setsid:
5937 ret = get_errno(setsid());
5939 #ifdef TARGET_NR_sigaction
5940 case TARGET_NR_sigaction:
5942 #if defined(TARGET_ALPHA)
5943 struct target_sigaction act, oact, *pact = 0;
5944 struct target_old_sigaction *old_act;
5946 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5948 act._sa_handler = old_act->_sa_handler;
5949 target_siginitset(&act.sa_mask, old_act->sa_mask);
5950 act.sa_flags = old_act->sa_flags;
5951 act.sa_restorer = 0;
5952 unlock_user_struct(old_act, arg2, 0);
5955 ret = get_errno(do_sigaction(arg1, pact, &oact));
5956 if (!is_error(ret) && arg3) {
5957 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5959 old_act->_sa_handler = oact._sa_handler;
5960 old_act->sa_mask = oact.sa_mask.sig[0];
5961 old_act->sa_flags = oact.sa_flags;
5962 unlock_user_struct(old_act, arg3, 1);
5964 #elif defined(TARGET_MIPS)
5965 struct target_sigaction act, oact, *pact, *old_act;
5968 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5970 act._sa_handler = old_act->_sa_handler;
5971 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5972 act.sa_flags = old_act->sa_flags;
5973 unlock_user_struct(old_act, arg2, 0);
5979 ret = get_errno(do_sigaction(arg1, pact, &oact));
5981 if (!is_error(ret) && arg3) {
5982 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5984 old_act->_sa_handler = oact._sa_handler;
5985 old_act->sa_flags = oact.sa_flags;
5986 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5987 old_act->sa_mask.sig[1] = 0;
5988 old_act->sa_mask.sig[2] = 0;
5989 old_act->sa_mask.sig[3] = 0;
5990 unlock_user_struct(old_act, arg3, 1);
5993 struct target_old_sigaction *old_act;
5994 struct target_sigaction act, oact, *pact;
5996 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5998 act._sa_handler = old_act->_sa_handler;
5999 target_siginitset(&act.sa_mask, old_act->sa_mask);
6000 act.sa_flags = old_act->sa_flags;
6001 act.sa_restorer = old_act->sa_restorer;
6002 unlock_user_struct(old_act, arg2, 0);
6007 ret = get_errno(do_sigaction(arg1, pact, &oact));
6008 if (!is_error(ret) && arg3) {
6009 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6011 old_act->_sa_handler = oact._sa_handler;
6012 old_act->sa_mask = oact.sa_mask.sig[0];
6013 old_act->sa_flags = oact.sa_flags;
6014 old_act->sa_restorer = oact.sa_restorer;
6015 unlock_user_struct(old_act, arg3, 1);
6021 case TARGET_NR_rt_sigaction:
6023 #if defined(TARGET_ALPHA)
6024 struct target_sigaction act, oact, *pact = 0;
6025 struct target_rt_sigaction *rt_act;
6026 /* ??? arg4 == sizeof(sigset_t). */
6028 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
6030 act._sa_handler = rt_act->_sa_handler;
6031 act.sa_mask = rt_act->sa_mask;
6032 act.sa_flags = rt_act->sa_flags;
6033 act.sa_restorer = arg5;
6034 unlock_user_struct(rt_act, arg2, 0);
6037 ret = get_errno(do_sigaction(arg1, pact, &oact));
6038 if (!is_error(ret) && arg3) {
6039 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
6041 rt_act->_sa_handler = oact._sa_handler;
6042 rt_act->sa_mask = oact.sa_mask;
6043 rt_act->sa_flags = oact.sa_flags;
6044 unlock_user_struct(rt_act, arg3, 1);
6047 struct target_sigaction *act;
6048 struct target_sigaction *oact;
6051 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
6056 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
6057 ret = -TARGET_EFAULT;
6058 goto rt_sigaction_fail;
6062 ret = get_errno(do_sigaction(arg1, act, oact));
6065 unlock_user_struct(act, arg2, 0);
6067 unlock_user_struct(oact, arg3, 1);
6071 #ifdef TARGET_NR_sgetmask /* not on alpha */
6072 case TARGET_NR_sgetmask:
6075 abi_ulong target_set;
6076 do_sigprocmask(0, NULL, &cur_set);
6077 host_to_target_old_sigset(&target_set, &cur_set);
6082 #ifdef TARGET_NR_ssetmask /* not on alpha */
6083 case TARGET_NR_ssetmask:
6085 sigset_t set, oset, cur_set;
6086 abi_ulong target_set = arg1;
6087 do_sigprocmask(0, NULL, &cur_set);
6088 target_to_host_old_sigset(&set, &target_set);
6089 sigorset(&set, &set, &cur_set);
6090 do_sigprocmask(SIG_SETMASK, &set, &oset);
6091 host_to_target_old_sigset(&target_set, &oset);
6096 #ifdef TARGET_NR_sigprocmask
6097 case TARGET_NR_sigprocmask:
6099 #if defined(TARGET_ALPHA)
6100 sigset_t set, oldset;
6105 case TARGET_SIG_BLOCK:
6108 case TARGET_SIG_UNBLOCK:
6111 case TARGET_SIG_SETMASK:
6115 ret = -TARGET_EINVAL;
6119 target_to_host_old_sigset(&set, &mask);
6121 ret = get_errno(do_sigprocmask(how, &set, &oldset));
6122 if (!is_error(ret)) {
6123 host_to_target_old_sigset(&mask, &oldset);
6125 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6128 sigset_t set, oldset, *set_ptr;
6133 case TARGET_SIG_BLOCK:
6136 case TARGET_SIG_UNBLOCK:
6139 case TARGET_SIG_SETMASK:
6143 ret = -TARGET_EINVAL;
6146 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6148 target_to_host_old_sigset(&set, p);
6149 unlock_user(p, arg2, 0);
6155 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6156 if (!is_error(ret) && arg3) {
6157 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6159 host_to_target_old_sigset(p, &oldset);
6160 unlock_user(p, arg3, sizeof(target_sigset_t));
6166 case TARGET_NR_rt_sigprocmask:
6169 sigset_t set, oldset, *set_ptr;
6173 case TARGET_SIG_BLOCK:
6176 case TARGET_SIG_UNBLOCK:
6179 case TARGET_SIG_SETMASK:
6183 ret = -TARGET_EINVAL;
6186 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6188 target_to_host_sigset(&set, p);
6189 unlock_user(p, arg2, 0);
6195 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6196 if (!is_error(ret) && arg3) {
6197 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6199 host_to_target_sigset(p, &oldset);
6200 unlock_user(p, arg3, sizeof(target_sigset_t));
6204 #ifdef TARGET_NR_sigpending
6205 case TARGET_NR_sigpending:
6208 ret = get_errno(sigpending(&set));
6209 if (!is_error(ret)) {
6210 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6212 host_to_target_old_sigset(p, &set);
6213 unlock_user(p, arg1, sizeof(target_sigset_t));
6218 case TARGET_NR_rt_sigpending:
6221 ret = get_errno(sigpending(&set));
6222 if (!is_error(ret)) {
6223 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6225 host_to_target_sigset(p, &set);
6226 unlock_user(p, arg1, sizeof(target_sigset_t));
6230 #ifdef TARGET_NR_sigsuspend
6231 case TARGET_NR_sigsuspend:
6234 #if defined(TARGET_ALPHA)
6235 abi_ulong mask = arg1;
6236 target_to_host_old_sigset(&set, &mask);
6238 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6240 target_to_host_old_sigset(&set, p);
6241 unlock_user(p, arg1, 0);
6243 ret = get_errno(sigsuspend(&set));
6247 case TARGET_NR_rt_sigsuspend:
6250 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6252 target_to_host_sigset(&set, p);
6253 unlock_user(p, arg1, 0);
6254 ret = get_errno(sigsuspend(&set));
6257 case TARGET_NR_rt_sigtimedwait:
6260 struct timespec uts, *puts;
6263 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6265 target_to_host_sigset(&set, p);
6266 unlock_user(p, arg1, 0);
6269 target_to_host_timespec(puts, arg3);
6273 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6274 if (!is_error(ret)) {
6276 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
6281 host_to_target_siginfo(p, &uinfo);
6282 unlock_user(p, arg2, sizeof(target_siginfo_t));
6284 ret = host_to_target_signal(ret);
6288 case TARGET_NR_rt_sigqueueinfo:
6291 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
6293 target_to_host_siginfo(&uinfo, p);
6294 unlock_user(p, arg1, 0);
6295 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6298 #ifdef TARGET_NR_sigreturn
6299 case TARGET_NR_sigreturn:
6300 /* NOTE: ret is eax, so no transcoding needs to be done */
6301 ret = do_sigreturn(cpu_env);
6304 case TARGET_NR_rt_sigreturn:
6305 /* NOTE: ret is eax, so no transcoding needs to be done */
6306 ret = do_rt_sigreturn(cpu_env);
6308 case TARGET_NR_sethostname:
6309 if (!(p = lock_user_string(arg1)))
6311 ret = get_errno(sethostname(p, arg2));
6312 unlock_user(p, arg1, 0);
6314 case TARGET_NR_setrlimit:
6316 int resource = target_to_host_resource(arg1);
6317 struct target_rlimit *target_rlim;
6319 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6321 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6322 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6323 unlock_user_struct(target_rlim, arg2, 0);
6324 ret = get_errno(setrlimit(resource, &rlim));
6327 case TARGET_NR_getrlimit:
6329 int resource = target_to_host_resource(arg1);
6330 struct target_rlimit *target_rlim;
6333 ret = get_errno(getrlimit(resource, &rlim));
6334 if (!is_error(ret)) {
6335 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6337 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6338 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6339 unlock_user_struct(target_rlim, arg2, 1);
6343 case TARGET_NR_getrusage:
6345 struct rusage rusage;
6346 ret = get_errno(getrusage(arg1, &rusage));
6347 if (!is_error(ret)) {
6348 host_to_target_rusage(arg2, &rusage);
6352 case TARGET_NR_gettimeofday:
6355 ret = get_errno(gettimeofday(&tv, NULL));
6356 if (!is_error(ret)) {
6357 if (copy_to_user_timeval(arg1, &tv))
6362 case TARGET_NR_settimeofday:
6365 if (copy_from_user_timeval(&tv, arg1))
6367 ret = get_errno(settimeofday(&tv, NULL));
6370 #if defined(TARGET_NR_select)
6371 case TARGET_NR_select:
6372 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6373 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6376 struct target_sel_arg_struct *sel;
6377 abi_ulong inp, outp, exp, tvp;
6380 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6382 nsel = tswapal(sel->n);
6383 inp = tswapal(sel->inp);
6384 outp = tswapal(sel->outp);
6385 exp = tswapal(sel->exp);
6386 tvp = tswapal(sel->tvp);
6387 unlock_user_struct(sel, arg1, 0);
6388 ret = do_select(nsel, inp, outp, exp, tvp);
6393 #ifdef TARGET_NR_pselect6
6394 case TARGET_NR_pselect6:
6396 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6397 fd_set rfds, wfds, efds;
6398 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6399 struct timespec ts, *ts_ptr;
6402 * The 6th arg is actually two args smashed together,
6403 * so we cannot use the C library.
6411 abi_ulong arg_sigset, arg_sigsize, *arg7;
6412 target_sigset_t *target_sigset;
6420 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6424 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6428 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6434 * This takes a timespec, and not a timeval, so we cannot
6435 * use the do_select() helper ...
6438 if (target_to_host_timespec(&ts, ts_addr)) {
6446 /* Extract the two packed args for the sigset */
6449 sig.size = _NSIG / 8;
6451 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6455 arg_sigset = tswapal(arg7[0]);
6456 arg_sigsize = tswapal(arg7[1]);
6457 unlock_user(arg7, arg6, 0);
6461 if (arg_sigsize != sizeof(*target_sigset)) {
6462 /* Like the kernel, we enforce correct size sigsets */
6463 ret = -TARGET_EINVAL;
6466 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6467 sizeof(*target_sigset), 1);
6468 if (!target_sigset) {
6471 target_to_host_sigset(&set, target_sigset);
6472 unlock_user(target_sigset, arg_sigset, 0);
6480 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6483 if (!is_error(ret)) {
6484 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6486 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6488 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6491 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6497 case TARGET_NR_symlink:
6500 p = lock_user_string(arg1);
6501 p2 = lock_user_string(arg2);
6503 ret = -TARGET_EFAULT;
6505 ret = get_errno(symlink(p, p2));
6506 unlock_user(p2, arg2, 0);
6507 unlock_user(p, arg1, 0);
6510 #if defined(TARGET_NR_symlinkat)
6511 case TARGET_NR_symlinkat:
6514 p = lock_user_string(arg1);
6515 p2 = lock_user_string(arg3);
6517 ret = -TARGET_EFAULT;
6519 ret = get_errno(symlinkat(p, arg2, p2));
6520 unlock_user(p2, arg3, 0);
6521 unlock_user(p, arg1, 0);
6525 #ifdef TARGET_NR_oldlstat
6526 case TARGET_NR_oldlstat:
6529 case TARGET_NR_readlink:
6532 p = lock_user_string(arg1);
6533 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6535 ret = -TARGET_EFAULT;
6536 } else if (is_proc_myself((const char *)p, "exe")) {
6537 char real[PATH_MAX], *temp;
6538 temp = realpath(exec_path, real);
6539 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
6540 snprintf((char *)p2, arg3, "%s", real);
6542 ret = get_errno(readlink(path(p), p2, arg3));
6544 unlock_user(p2, arg2, ret);
6545 unlock_user(p, arg1, 0);
6548 #if defined(TARGET_NR_readlinkat)
6549 case TARGET_NR_readlinkat:
6552 p = lock_user_string(arg2);
6553 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6555 ret = -TARGET_EFAULT;
6556 } else if (is_proc_myself((const char *)p, "exe")) {
6557 char real[PATH_MAX], *temp;
6558 temp = realpath(exec_path, real);
6559 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
6560 snprintf((char *)p2, arg4, "%s", real);
6562 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
6564 unlock_user(p2, arg3, ret);
6565 unlock_user(p, arg2, 0);
6569 #ifdef TARGET_NR_uselib
6570 case TARGET_NR_uselib:
6573 #ifdef TARGET_NR_swapon
6574 case TARGET_NR_swapon:
6575 if (!(p = lock_user_string(arg1)))
6577 ret = get_errno(swapon(p, arg2));
6578 unlock_user(p, arg1, 0);
6581 case TARGET_NR_reboot:
6582 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6583 /* arg4 must be ignored in all other cases */
6584 p = lock_user_string(arg4);
6588 ret = get_errno(reboot(arg1, arg2, arg3, p));
6589 unlock_user(p, arg4, 0);
6591 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6594 #ifdef TARGET_NR_readdir
6595 case TARGET_NR_readdir:
6598 #ifdef TARGET_NR_mmap
6599 case TARGET_NR_mmap:
6600 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6601 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
6602 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6603 || defined(TARGET_S390X)
6606 abi_ulong v1, v2, v3, v4, v5, v6;
6607 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6615 unlock_user(v, arg1, 0);
6616 ret = get_errno(target_mmap(v1, v2, v3,
6617 target_to_host_bitmask(v4, mmap_flags_tbl),
6621 ret = get_errno(target_mmap(arg1, arg2, arg3,
6622 target_to_host_bitmask(arg4, mmap_flags_tbl),
6628 #ifdef TARGET_NR_mmap2
6629 case TARGET_NR_mmap2:
6631 #define MMAP_SHIFT 12
6633 ret = get_errno(target_mmap(arg1, arg2, arg3,
6634 target_to_host_bitmask(arg4, mmap_flags_tbl),
6636 arg6 << MMAP_SHIFT));
6639 case TARGET_NR_munmap:
6640 ret = get_errno(target_munmap(arg1, arg2));
6642 case TARGET_NR_mprotect:
6644 TaskState *ts = cpu->opaque;
6645 /* Special hack to detect libc making the stack executable. */
6646 if ((arg3 & PROT_GROWSDOWN)
6647 && arg1 >= ts->info->stack_limit
6648 && arg1 <= ts->info->start_stack) {
6649 arg3 &= ~PROT_GROWSDOWN;
6650 arg2 = arg2 + arg1 - ts->info->stack_limit;
6651 arg1 = ts->info->stack_limit;
6654 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6656 #ifdef TARGET_NR_mremap
6657 case TARGET_NR_mremap:
6658 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6661 /* ??? msync/mlock/munlock are broken for softmmu. */
6662 #ifdef TARGET_NR_msync
6663 case TARGET_NR_msync:
6664 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6667 #ifdef TARGET_NR_mlock
6668 case TARGET_NR_mlock:
6669 ret = get_errno(mlock(g2h(arg1), arg2));
6672 #ifdef TARGET_NR_munlock
6673 case TARGET_NR_munlock:
6674 ret = get_errno(munlock(g2h(arg1), arg2));
6677 #ifdef TARGET_NR_mlockall
6678 case TARGET_NR_mlockall:
6679 ret = get_errno(mlockall(arg1));
6682 #ifdef TARGET_NR_munlockall
6683 case TARGET_NR_munlockall:
6684 ret = get_errno(munlockall());
6687 case TARGET_NR_truncate:
6688 if (!(p = lock_user_string(arg1)))
6690 ret = get_errno(truncate(p, arg2));
6691 unlock_user(p, arg1, 0);
6693 case TARGET_NR_ftruncate:
6694 ret = get_errno(ftruncate(arg1, arg2));
6696 case TARGET_NR_fchmod:
6697 ret = get_errno(fchmod(arg1, arg2));
6699 #if defined(TARGET_NR_fchmodat)
6700 case TARGET_NR_fchmodat:
6701 if (!(p = lock_user_string(arg2)))
6703 ret = get_errno(fchmodat(arg1, p, arg3, 0));
6704 unlock_user(p, arg2, 0);
6707 case TARGET_NR_getpriority:
6708 /* Note that negative values are valid for getpriority, so we must
6709 differentiate based on errno settings. */
6711 ret = getpriority(arg1, arg2);
6712 if (ret == -1 && errno != 0) {
6713 ret = -host_to_target_errno(errno);
6717 /* Return value is the unbiased priority. Signal no error. */
6718 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6720 /* Return value is a biased priority to avoid negative numbers. */
6724 case TARGET_NR_setpriority:
6725 ret = get_errno(setpriority(arg1, arg2, arg3));
6727 #ifdef TARGET_NR_profil
6728 case TARGET_NR_profil:
6731 case TARGET_NR_statfs:
6732 if (!(p = lock_user_string(arg1)))
6734 ret = get_errno(statfs(path(p), &stfs));
6735 unlock_user(p, arg1, 0);
6737 if (!is_error(ret)) {
6738 struct target_statfs *target_stfs;
6740 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6742 __put_user(stfs.f_type, &target_stfs->f_type);
6743 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6744 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6745 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6746 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6747 __put_user(stfs.f_files, &target_stfs->f_files);
6748 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6749 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6750 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6751 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6752 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6753 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6754 unlock_user_struct(target_stfs, arg2, 1);
6757 case TARGET_NR_fstatfs:
6758 ret = get_errno(fstatfs(arg1, &stfs));
6759 goto convert_statfs;
6760 #ifdef TARGET_NR_statfs64
6761 case TARGET_NR_statfs64:
6762 if (!(p = lock_user_string(arg1)))
6764 ret = get_errno(statfs(path(p), &stfs));
6765 unlock_user(p, arg1, 0);
6767 if (!is_error(ret)) {
6768 struct target_statfs64 *target_stfs;
6770 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6772 __put_user(stfs.f_type, &target_stfs->f_type);
6773 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6774 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6775 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6776 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6777 __put_user(stfs.f_files, &target_stfs->f_files);
6778 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6779 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6780 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6781 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6782 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6783 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6784 unlock_user_struct(target_stfs, arg3, 1);
6787 case TARGET_NR_fstatfs64:
6788 ret = get_errno(fstatfs(arg1, &stfs));
6789 goto convert_statfs64;
6791 #ifdef TARGET_NR_ioperm
6792 case TARGET_NR_ioperm:
6795 #ifdef TARGET_NR_socketcall
6796 case TARGET_NR_socketcall:
6797 ret = do_socketcall(arg1, arg2);
6800 #ifdef TARGET_NR_accept
6801 case TARGET_NR_accept:
6802 ret = do_accept4(arg1, arg2, arg3, 0);
6805 #ifdef TARGET_NR_accept4
6806 case TARGET_NR_accept4:
6807 #ifdef CONFIG_ACCEPT4
6808 ret = do_accept4(arg1, arg2, arg3, arg4);
6814 #ifdef TARGET_NR_bind
6815 case TARGET_NR_bind:
6816 ret = do_bind(arg1, arg2, arg3);
6819 #ifdef TARGET_NR_connect
6820 case TARGET_NR_connect:
6821 ret = do_connect(arg1, arg2, arg3);
6824 #ifdef TARGET_NR_getpeername
6825 case TARGET_NR_getpeername:
6826 ret = do_getpeername(arg1, arg2, arg3);
6829 #ifdef TARGET_NR_getsockname
6830 case TARGET_NR_getsockname:
6831 ret = do_getsockname(arg1, arg2, arg3);
6834 #ifdef TARGET_NR_getsockopt
6835 case TARGET_NR_getsockopt:
6836 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6839 #ifdef TARGET_NR_listen
6840 case TARGET_NR_listen:
6841 ret = get_errno(listen(arg1, arg2));
6844 #ifdef TARGET_NR_recv
6845 case TARGET_NR_recv:
6846 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6849 #ifdef TARGET_NR_recvfrom
6850 case TARGET_NR_recvfrom:
6851 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6854 #ifdef TARGET_NR_recvmsg
6855 case TARGET_NR_recvmsg:
6856 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6859 #ifdef TARGET_NR_send
6860 case TARGET_NR_send:
6861 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6864 #ifdef TARGET_NR_sendmsg
6865 case TARGET_NR_sendmsg:
6866 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6869 #ifdef TARGET_NR_sendmmsg
6870 case TARGET_NR_sendmmsg:
6871 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
6873 case TARGET_NR_recvmmsg:
6874 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
6877 #ifdef TARGET_NR_sendto
6878 case TARGET_NR_sendto:
6879 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6882 #ifdef TARGET_NR_shutdown
6883 case TARGET_NR_shutdown:
6884 ret = get_errno(shutdown(arg1, arg2));
6887 #ifdef TARGET_NR_socket
6888 case TARGET_NR_socket:
6889 ret = do_socket(arg1, arg2, arg3);
6892 #ifdef TARGET_NR_socketpair
6893 case TARGET_NR_socketpair:
6894 ret = do_socketpair(arg1, arg2, arg3, arg4);
6897 #ifdef TARGET_NR_setsockopt
6898 case TARGET_NR_setsockopt:
6899 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6903 case TARGET_NR_syslog:
6904 if (!(p = lock_user_string(arg2)))
6906 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6907 unlock_user(p, arg2, 0);
6910 case TARGET_NR_setitimer:
6912 struct itimerval value, ovalue, *pvalue;
6916 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6917 || copy_from_user_timeval(&pvalue->it_value,
6918 arg2 + sizeof(struct target_timeval)))
6923 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6924 if (!is_error(ret) && arg3) {
6925 if (copy_to_user_timeval(arg3,
6926 &ovalue.it_interval)
6927 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6933 case TARGET_NR_getitimer:
6935 struct itimerval value;
6937 ret = get_errno(getitimer(arg1, &value));
6938 if (!is_error(ret) && arg2) {
6939 if (copy_to_user_timeval(arg2,
6941 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6947 case TARGET_NR_stat:
6948 if (!(p = lock_user_string(arg1)))
6950 ret = get_errno(stat(path(p), &st));
6951 unlock_user(p, arg1, 0);
6953 case TARGET_NR_lstat:
6954 if (!(p = lock_user_string(arg1)))
6956 ret = get_errno(lstat(path(p), &st));
6957 unlock_user(p, arg1, 0);
6959 case TARGET_NR_fstat:
6961 ret = get_errno(fstat(arg1, &st));
6963 if (!is_error(ret)) {
6964 struct target_stat *target_st;
6966 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6968 memset(target_st, 0, sizeof(*target_st));
6969 __put_user(st.st_dev, &target_st->st_dev);
6970 __put_user(st.st_ino, &target_st->st_ino);
6971 __put_user(st.st_mode, &target_st->st_mode);
6972 __put_user(st.st_uid, &target_st->st_uid);
6973 __put_user(st.st_gid, &target_st->st_gid);
6974 __put_user(st.st_nlink, &target_st->st_nlink);
6975 __put_user(st.st_rdev, &target_st->st_rdev);
6976 __put_user(st.st_size, &target_st->st_size);
6977 __put_user(st.st_blksize, &target_st->st_blksize);
6978 __put_user(st.st_blocks, &target_st->st_blocks);
6979 __put_user(st.st_atime, &target_st->target_st_atime);
6980 __put_user(st.st_mtime, &target_st->target_st_mtime);
6981 __put_user(st.st_ctime, &target_st->target_st_ctime);
6982 unlock_user_struct(target_st, arg2, 1);
6986 #ifdef TARGET_NR_olduname
6987 case TARGET_NR_olduname:
6990 #ifdef TARGET_NR_iopl
6991 case TARGET_NR_iopl:
6994 case TARGET_NR_vhangup:
6995 ret = get_errno(vhangup());
6997 #ifdef TARGET_NR_idle
6998 case TARGET_NR_idle:
7001 #ifdef TARGET_NR_syscall
7002 case TARGET_NR_syscall:
7003 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
7004 arg6, arg7, arg8, 0);
7007 case TARGET_NR_wait4:
7010 abi_long status_ptr = arg2;
7011 struct rusage rusage, *rusage_ptr;
7012 abi_ulong target_rusage = arg4;
7014 rusage_ptr = &rusage;
7017 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
7018 if (!is_error(ret)) {
7019 if (status_ptr && ret) {
7020 status = host_to_target_waitstatus(status);
7021 if (put_user_s32(status, status_ptr))
7025 host_to_target_rusage(target_rusage, &rusage);
7029 #ifdef TARGET_NR_swapoff
7030 case TARGET_NR_swapoff:
7031 if (!(p = lock_user_string(arg1)))
7033 ret = get_errno(swapoff(p));
7034 unlock_user(p, arg1, 0);
7037 case TARGET_NR_sysinfo:
7039 struct target_sysinfo *target_value;
7040 struct sysinfo value;
7041 ret = get_errno(sysinfo(&value));
7042 if (!is_error(ret) && arg1)
7044 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
7046 __put_user(value.uptime, &target_value->uptime);
7047 __put_user(value.loads[0], &target_value->loads[0]);
7048 __put_user(value.loads[1], &target_value->loads[1]);
7049 __put_user(value.loads[2], &target_value->loads[2]);
7050 __put_user(value.totalram, &target_value->totalram);
7051 __put_user(value.freeram, &target_value->freeram);
7052 __put_user(value.sharedram, &target_value->sharedram);
7053 __put_user(value.bufferram, &target_value->bufferram);
7054 __put_user(value.totalswap, &target_value->totalswap);
7055 __put_user(value.freeswap, &target_value->freeswap);
7056 __put_user(value.procs, &target_value->procs);
7057 __put_user(value.totalhigh, &target_value->totalhigh);
7058 __put_user(value.freehigh, &target_value->freehigh);
7059 __put_user(value.mem_unit, &target_value->mem_unit);
7060 unlock_user_struct(target_value, arg1, 1);
7064 #ifdef TARGET_NR_ipc
7066 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
7069 #ifdef TARGET_NR_semget
7070 case TARGET_NR_semget:
7071 ret = get_errno(semget(arg1, arg2, arg3));
7074 #ifdef TARGET_NR_semop
7075 case TARGET_NR_semop:
7076 ret = do_semop(arg1, arg2, arg3);
7079 #ifdef TARGET_NR_semctl
7080 case TARGET_NR_semctl:
7081 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
7084 #ifdef TARGET_NR_msgctl
7085 case TARGET_NR_msgctl:
7086 ret = do_msgctl(arg1, arg2, arg3);
7089 #ifdef TARGET_NR_msgget
7090 case TARGET_NR_msgget:
7091 ret = get_errno(msgget(arg1, arg2));
7094 #ifdef TARGET_NR_msgrcv
7095 case TARGET_NR_msgrcv:
7096 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
7099 #ifdef TARGET_NR_msgsnd
7100 case TARGET_NR_msgsnd:
7101 ret = do_msgsnd(arg1, arg2, arg3, arg4);
7104 #ifdef TARGET_NR_shmget
7105 case TARGET_NR_shmget:
7106 ret = get_errno(shmget(arg1, arg2, arg3));
7109 #ifdef TARGET_NR_shmctl
7110 case TARGET_NR_shmctl:
7111 ret = do_shmctl(arg1, arg2, arg3);
7114 #ifdef TARGET_NR_shmat
7115 case TARGET_NR_shmat:
7116 ret = do_shmat(arg1, arg2, arg3);
7119 #ifdef TARGET_NR_shmdt
7120 case TARGET_NR_shmdt:
7121 ret = do_shmdt(arg1);
7124 case TARGET_NR_fsync:
7125 ret = get_errno(fsync(arg1));
7127 case TARGET_NR_clone:
7128 /* Linux manages to have three different orderings for its
7129 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7130 * match the kernel's CONFIG_CLONE_* settings.
7131 * Microblaze is further special in that it uses a sixth
7132 * implicit argument to clone for the TLS pointer.
7134 #if defined(TARGET_MICROBLAZE)
7135 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
7136 #elif defined(TARGET_CLONE_BACKWARDS)
7137 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
7138 #elif defined(TARGET_CLONE_BACKWARDS2)
7139 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
7141 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7144 #ifdef __NR_exit_group
7145 /* new thread calls */
7146 case TARGET_NR_exit_group:
7150 gdb_exit(cpu_env, arg1);
7151 ret = get_errno(exit_group(arg1));
7154 case TARGET_NR_setdomainname:
7155 if (!(p = lock_user_string(arg1)))
7157 ret = get_errno(setdomainname(p, arg2));
7158 unlock_user(p, arg1, 0);
7160 case TARGET_NR_uname:
7161 /* no need to transcode because we use the linux syscall */
7163 struct new_utsname * buf;
7165 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7167 ret = get_errno(sys_uname(buf));
7168 if (!is_error(ret)) {
7169 /* Overwrite the native machine name with whatever is being
7171 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7172 /* Allow the user to override the reported release. */
7173 if (qemu_uname_release && *qemu_uname_release)
7174 strcpy (buf->release, qemu_uname_release);
7176 unlock_user_struct(buf, arg1, 1);
7180 case TARGET_NR_modify_ldt:
7181 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7183 #if !defined(TARGET_X86_64)
7184 case TARGET_NR_vm86old:
7186 case TARGET_NR_vm86:
7187 ret = do_vm86(cpu_env, arg1, arg2);
7191 case TARGET_NR_adjtimex:
7193 #ifdef TARGET_NR_create_module
7194 case TARGET_NR_create_module:
7196 case TARGET_NR_init_module:
7197 case TARGET_NR_delete_module:
7198 #ifdef TARGET_NR_get_kernel_syms
7199 case TARGET_NR_get_kernel_syms:
7202 case TARGET_NR_quotactl:
7204 case TARGET_NR_getpgid:
7205 ret = get_errno(getpgid(arg1));
7207 case TARGET_NR_fchdir:
7208 ret = get_errno(fchdir(arg1));
7210 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7211 case TARGET_NR_bdflush:
7214 #ifdef TARGET_NR_sysfs
7215 case TARGET_NR_sysfs:
7218 case TARGET_NR_personality:
7219 ret = get_errno(personality(arg1));
7221 #ifdef TARGET_NR_afs_syscall
7222 case TARGET_NR_afs_syscall:
7225 #ifdef TARGET_NR__llseek /* Not on alpha */
7226 case TARGET_NR__llseek:
7229 #if !defined(__NR_llseek)
7230 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7232 ret = get_errno(res);
7237 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7239 if ((ret == 0) && put_user_s64(res, arg4)) {
7245 case TARGET_NR_getdents:
7246 #ifdef __NR_getdents
7247 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7249 struct target_dirent *target_dirp;
7250 struct linux_dirent *dirp;
7251 abi_long count = arg3;
7253 dirp = malloc(count);
7255 ret = -TARGET_ENOMEM;
7259 ret = get_errno(sys_getdents(arg1, dirp, count));
7260 if (!is_error(ret)) {
7261 struct linux_dirent *de;
7262 struct target_dirent *tde;
7264 int reclen, treclen;
7265 int count1, tnamelen;
7269 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7273 reclen = de->d_reclen;
7274 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7275 assert(tnamelen >= 0);
7276 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7277 assert(count1 + treclen <= count);
7278 tde->d_reclen = tswap16(treclen);
7279 tde->d_ino = tswapal(de->d_ino);
7280 tde->d_off = tswapal(de->d_off);
7281 memcpy(tde->d_name, de->d_name, tnamelen);
7282 de = (struct linux_dirent *)((char *)de + reclen);
7284 tde = (struct target_dirent *)((char *)tde + treclen);
7288 unlock_user(target_dirp, arg2, ret);
7294 struct linux_dirent *dirp;
7295 abi_long count = arg3;
7297 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7299 ret = get_errno(sys_getdents(arg1, dirp, count));
7300 if (!is_error(ret)) {
7301 struct linux_dirent *de;
7306 reclen = de->d_reclen;
7309 de->d_reclen = tswap16(reclen);
7310 tswapls(&de->d_ino);
7311 tswapls(&de->d_off);
7312 de = (struct linux_dirent *)((char *)de + reclen);
7316 unlock_user(dirp, arg2, ret);
7320 /* Implement getdents in terms of getdents64 */
7322 struct linux_dirent64 *dirp;
7323 abi_long count = arg3;
7325 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
7329 ret = get_errno(sys_getdents64(arg1, dirp, count));
7330 if (!is_error(ret)) {
7331 /* Convert the dirent64 structs to target dirent. We do this
7332 * in-place, since we can guarantee that a target_dirent is no
7333 * larger than a dirent64; however this means we have to be
7334 * careful to read everything before writing in the new format.
7336 struct linux_dirent64 *de;
7337 struct target_dirent *tde;
7342 tde = (struct target_dirent *)dirp;
7344 int namelen, treclen;
7345 int reclen = de->d_reclen;
7346 uint64_t ino = de->d_ino;
7347 int64_t off = de->d_off;
7348 uint8_t type = de->d_type;
7350 namelen = strlen(de->d_name);
7351 treclen = offsetof(struct target_dirent, d_name)
7353 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
7355 memmove(tde->d_name, de->d_name, namelen + 1);
7356 tde->d_ino = tswapal(ino);
7357 tde->d_off = tswapal(off);
7358 tde->d_reclen = tswap16(treclen);
7359 /* The target_dirent type is in what was formerly a padding
7360 * byte at the end of the structure:
7362 *(((char *)tde) + treclen - 1) = type;
7364 de = (struct linux_dirent64 *)((char *)de + reclen);
7365 tde = (struct target_dirent *)((char *)tde + treclen);
7371 unlock_user(dirp, arg2, ret);
7375 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7376 case TARGET_NR_getdents64:
7378 struct linux_dirent64 *dirp;
7379 abi_long count = arg3;
7380 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7382 ret = get_errno(sys_getdents64(arg1, dirp, count));
7383 if (!is_error(ret)) {
7384 struct linux_dirent64 *de;
7389 reclen = de->d_reclen;
7392 de->d_reclen = tswap16(reclen);
7393 tswap64s((uint64_t *)&de->d_ino);
7394 tswap64s((uint64_t *)&de->d_off);
7395 de = (struct linux_dirent64 *)((char *)de + reclen);
7399 unlock_user(dirp, arg2, ret);
7402 #endif /* TARGET_NR_getdents64 */
7403 #if defined(TARGET_NR__newselect)
7404 case TARGET_NR__newselect:
7405 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7408 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7409 # ifdef TARGET_NR_poll
7410 case TARGET_NR_poll:
7412 # ifdef TARGET_NR_ppoll
7413 case TARGET_NR_ppoll:
7416 struct target_pollfd *target_pfd;
7417 unsigned int nfds = arg2;
7422 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7426 pfd = alloca(sizeof(struct pollfd) * nfds);
7427 for(i = 0; i < nfds; i++) {
7428 pfd[i].fd = tswap32(target_pfd[i].fd);
7429 pfd[i].events = tswap16(target_pfd[i].events);
7432 # ifdef TARGET_NR_ppoll
7433 if (num == TARGET_NR_ppoll) {
7434 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7435 target_sigset_t *target_set;
7436 sigset_t _set, *set = &_set;
7439 if (target_to_host_timespec(timeout_ts, arg3)) {
7440 unlock_user(target_pfd, arg1, 0);
7448 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7450 unlock_user(target_pfd, arg1, 0);
7453 target_to_host_sigset(set, target_set);
7458 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7460 if (!is_error(ret) && arg3) {
7461 host_to_target_timespec(arg3, timeout_ts);
7464 unlock_user(target_set, arg4, 0);
7468 ret = get_errno(poll(pfd, nfds, timeout));
7470 if (!is_error(ret)) {
7471 for(i = 0; i < nfds; i++) {
7472 target_pfd[i].revents = tswap16(pfd[i].revents);
7475 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7479 case TARGET_NR_flock:
7480 /* NOTE: the flock constant seems to be the same for every
7482 ret = get_errno(flock(arg1, arg2));
7484 case TARGET_NR_readv:
7486 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7488 ret = get_errno(readv(arg1, vec, arg3));
7489 unlock_iovec(vec, arg2, arg3, 1);
7491 ret = -host_to_target_errno(errno);
7495 case TARGET_NR_writev:
7497 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7499 ret = get_errno(writev(arg1, vec, arg3));
7500 unlock_iovec(vec, arg2, arg3, 0);
7502 ret = -host_to_target_errno(errno);
7506 case TARGET_NR_getsid:
7507 ret = get_errno(getsid(arg1));
7509 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7510 case TARGET_NR_fdatasync:
7511 ret = get_errno(fdatasync(arg1));
7514 case TARGET_NR__sysctl:
7515 /* We don't implement this, but ENOTDIR is always a safe
7517 ret = -TARGET_ENOTDIR;
7519 case TARGET_NR_sched_getaffinity:
7521 unsigned int mask_size;
7522 unsigned long *mask;
7525 * sched_getaffinity needs multiples of ulong, so need to take
7526 * care of mismatches between target ulong and host ulong sizes.
7528 if (arg2 & (sizeof(abi_ulong) - 1)) {
7529 ret = -TARGET_EINVAL;
7532 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7534 mask = alloca(mask_size);
7535 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7537 if (!is_error(ret)) {
7538 if (copy_to_user(arg3, mask, ret)) {
7544 case TARGET_NR_sched_setaffinity:
7546 unsigned int mask_size;
7547 unsigned long *mask;
7550 * sched_setaffinity needs multiples of ulong, so need to take
7551 * care of mismatches between target ulong and host ulong sizes.
7553 if (arg2 & (sizeof(abi_ulong) - 1)) {
7554 ret = -TARGET_EINVAL;
7557 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7559 mask = alloca(mask_size);
7560 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7563 memcpy(mask, p, arg2);
7564 unlock_user_struct(p, arg2, 0);
7566 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7569 case TARGET_NR_sched_setparam:
7571 struct sched_param *target_schp;
7572 struct sched_param schp;
7574 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7576 schp.sched_priority = tswap32(target_schp->sched_priority);
7577 unlock_user_struct(target_schp, arg2, 0);
7578 ret = get_errno(sched_setparam(arg1, &schp));
7581 case TARGET_NR_sched_getparam:
7583 struct sched_param *target_schp;
7584 struct sched_param schp;
7585 ret = get_errno(sched_getparam(arg1, &schp));
7586 if (!is_error(ret)) {
7587 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7589 target_schp->sched_priority = tswap32(schp.sched_priority);
7590 unlock_user_struct(target_schp, arg2, 1);
7594 case TARGET_NR_sched_setscheduler:
7596 struct sched_param *target_schp;
7597 struct sched_param schp;
7598 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7600 schp.sched_priority = tswap32(target_schp->sched_priority);
7601 unlock_user_struct(target_schp, arg3, 0);
7602 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7605 case TARGET_NR_sched_getscheduler:
7606 ret = get_errno(sched_getscheduler(arg1));
7608 case TARGET_NR_sched_yield:
7609 ret = get_errno(sched_yield());
7611 case TARGET_NR_sched_get_priority_max:
7612 ret = get_errno(sched_get_priority_max(arg1));
7614 case TARGET_NR_sched_get_priority_min:
7615 ret = get_errno(sched_get_priority_min(arg1));
7617 case TARGET_NR_sched_rr_get_interval:
7620 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7621 if (!is_error(ret)) {
7622 host_to_target_timespec(arg2, &ts);
7626 case TARGET_NR_nanosleep:
7628 struct timespec req, rem;
7629 target_to_host_timespec(&req, arg1);
7630 ret = get_errno(nanosleep(&req, &rem));
7631 if (is_error(ret) && arg2) {
7632 host_to_target_timespec(arg2, &rem);
7636 #ifdef TARGET_NR_query_module
7637 case TARGET_NR_query_module:
7640 #ifdef TARGET_NR_nfsservctl
7641 case TARGET_NR_nfsservctl:
7644 case TARGET_NR_prctl:
7646 case PR_GET_PDEATHSIG:
7649 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7650 if (!is_error(ret) && arg2
7651 && put_user_ual(deathsig, arg2)) {
7659 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7663 ret = get_errno(prctl(arg1, (unsigned long)name,
7665 unlock_user(name, arg2, 16);
7670 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7674 ret = get_errno(prctl(arg1, (unsigned long)name,
7676 unlock_user(name, arg2, 0);
7681 /* Most prctl options have no pointer arguments */
7682 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7686 #ifdef TARGET_NR_arch_prctl
7687 case TARGET_NR_arch_prctl:
7688 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7689 ret = do_arch_prctl(cpu_env, arg1, arg2);
7695 #ifdef TARGET_NR_pread64
7696 case TARGET_NR_pread64:
7697 if (regpairs_aligned(cpu_env)) {
7701 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7703 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7704 unlock_user(p, arg2, ret);
7706 case TARGET_NR_pwrite64:
7707 if (regpairs_aligned(cpu_env)) {
7711 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7713 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7714 unlock_user(p, arg2, 0);
7717 case TARGET_NR_getcwd:
7718 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7720 ret = get_errno(sys_getcwd1(p, arg2));
7721 unlock_user(p, arg1, ret);
7723 case TARGET_NR_capget:
7724 case TARGET_NR_capset:
7726 struct target_user_cap_header *target_header;
7727 struct target_user_cap_data *target_data = NULL;
7728 struct __user_cap_header_struct header;
7729 struct __user_cap_data_struct data[2];
7730 struct __user_cap_data_struct *dataptr = NULL;
7731 int i, target_datalen;
7734 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
7737 header.version = tswap32(target_header->version);
7738 header.pid = tswap32(target_header->pid);
7740 if (header.version != _LINUX_CAPABILITY_VERSION) {
7741 /* Version 2 and up takes pointer to two user_data structs */
7745 target_datalen = sizeof(*target_data) * data_items;
7748 if (num == TARGET_NR_capget) {
7749 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
7751 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
7754 unlock_user_struct(target_header, arg1, 0);
7758 if (num == TARGET_NR_capset) {
7759 for (i = 0; i < data_items; i++) {
7760 data[i].effective = tswap32(target_data[i].effective);
7761 data[i].permitted = tswap32(target_data[i].permitted);
7762 data[i].inheritable = tswap32(target_data[i].inheritable);
7769 if (num == TARGET_NR_capget) {
7770 ret = get_errno(capget(&header, dataptr));
7772 ret = get_errno(capset(&header, dataptr));
7775 /* The kernel always updates version for both capget and capset */
7776 target_header->version = tswap32(header.version);
7777 unlock_user_struct(target_header, arg1, 1);
7780 if (num == TARGET_NR_capget) {
7781 for (i = 0; i < data_items; i++) {
7782 target_data[i].effective = tswap32(data[i].effective);
7783 target_data[i].permitted = tswap32(data[i].permitted);
7784 target_data[i].inheritable = tswap32(data[i].inheritable);
7786 unlock_user(target_data, arg2, target_datalen);
7788 unlock_user(target_data, arg2, 0);
7793 case TARGET_NR_sigaltstack:
7794 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7795 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7796 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7797 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7803 #ifdef CONFIG_SENDFILE
7804 case TARGET_NR_sendfile:
7809 ret = get_user_sal(off, arg3);
7810 if (is_error(ret)) {
7815 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7816 if (!is_error(ret) && arg3) {
7817 abi_long ret2 = put_user_sal(off, arg3);
7818 if (is_error(ret2)) {
7824 #ifdef TARGET_NR_sendfile64
7825 case TARGET_NR_sendfile64:
7830 ret = get_user_s64(off, arg3);
7831 if (is_error(ret)) {
7836 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7837 if (!is_error(ret) && arg3) {
7838 abi_long ret2 = put_user_s64(off, arg3);
7839 if (is_error(ret2)) {
7847 case TARGET_NR_sendfile:
7848 #ifdef TARGET_NR_sendfile64
7849 case TARGET_NR_sendfile64:
7854 #ifdef TARGET_NR_getpmsg
7855 case TARGET_NR_getpmsg:
7858 #ifdef TARGET_NR_putpmsg
7859 case TARGET_NR_putpmsg:
7862 #ifdef TARGET_NR_vfork
7863 case TARGET_NR_vfork:
7864 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7868 #ifdef TARGET_NR_ugetrlimit
7869 case TARGET_NR_ugetrlimit:
7872 int resource = target_to_host_resource(arg1);
7873 ret = get_errno(getrlimit(resource, &rlim));
7874 if (!is_error(ret)) {
7875 struct target_rlimit *target_rlim;
7876 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7878 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7879 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7880 unlock_user_struct(target_rlim, arg2, 1);
7885 #ifdef TARGET_NR_truncate64
7886 case TARGET_NR_truncate64:
7887 if (!(p = lock_user_string(arg1)))
7889 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7890 unlock_user(p, arg1, 0);
7893 #ifdef TARGET_NR_ftruncate64
7894 case TARGET_NR_ftruncate64:
7895 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7898 #ifdef TARGET_NR_stat64
7899 case TARGET_NR_stat64:
7900 if (!(p = lock_user_string(arg1)))
7902 ret = get_errno(stat(path(p), &st));
7903 unlock_user(p, arg1, 0);
7905 ret = host_to_target_stat64(cpu_env, arg2, &st);
7908 #ifdef TARGET_NR_lstat64
7909 case TARGET_NR_lstat64:
7910 if (!(p = lock_user_string(arg1)))
7912 ret = get_errno(lstat(path(p), &st));
7913 unlock_user(p, arg1, 0);
7915 ret = host_to_target_stat64(cpu_env, arg2, &st);
7918 #ifdef TARGET_NR_fstat64
7919 case TARGET_NR_fstat64:
7920 ret = get_errno(fstat(arg1, &st));
7922 ret = host_to_target_stat64(cpu_env, arg2, &st);
7925 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
7926 #ifdef TARGET_NR_fstatat64
7927 case TARGET_NR_fstatat64:
7929 #ifdef TARGET_NR_newfstatat
7930 case TARGET_NR_newfstatat:
7932 if (!(p = lock_user_string(arg2)))
7934 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
7936 ret = host_to_target_stat64(cpu_env, arg3, &st);
7939 case TARGET_NR_lchown:
7940 if (!(p = lock_user_string(arg1)))
7942 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7943 unlock_user(p, arg1, 0);
7945 #ifdef TARGET_NR_getuid
7946 case TARGET_NR_getuid:
7947 ret = get_errno(high2lowuid(getuid()));
7950 #ifdef TARGET_NR_getgid
7951 case TARGET_NR_getgid:
7952 ret = get_errno(high2lowgid(getgid()));
7955 #ifdef TARGET_NR_geteuid
7956 case TARGET_NR_geteuid:
7957 ret = get_errno(high2lowuid(geteuid()));
7960 #ifdef TARGET_NR_getegid
7961 case TARGET_NR_getegid:
7962 ret = get_errno(high2lowgid(getegid()));
7965 case TARGET_NR_setreuid:
7966 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7968 case TARGET_NR_setregid:
7969 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7971 case TARGET_NR_getgroups:
7973 int gidsetsize = arg1;
7974 target_id *target_grouplist;
7978 grouplist = alloca(gidsetsize * sizeof(gid_t));
7979 ret = get_errno(getgroups(gidsetsize, grouplist));
7980 if (gidsetsize == 0)
7982 if (!is_error(ret)) {
7983 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
7984 if (!target_grouplist)
7986 for(i = 0;i < ret; i++)
7987 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7988 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
7992 case TARGET_NR_setgroups:
7994 int gidsetsize = arg1;
7995 target_id *target_grouplist;
7996 gid_t *grouplist = NULL;
7999 grouplist = alloca(gidsetsize * sizeof(gid_t));
8000 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
8001 if (!target_grouplist) {
8002 ret = -TARGET_EFAULT;
8005 for (i = 0; i < gidsetsize; i++) {
8006 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
8008 unlock_user(target_grouplist, arg2, 0);
8010 ret = get_errno(setgroups(gidsetsize, grouplist));
8013 case TARGET_NR_fchown:
8014 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
8016 #if defined(TARGET_NR_fchownat)
8017 case TARGET_NR_fchownat:
8018 if (!(p = lock_user_string(arg2)))
8020 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
8021 low2highgid(arg4), arg5));
8022 unlock_user(p, arg2, 0);
8025 #ifdef TARGET_NR_setresuid
8026 case TARGET_NR_setresuid:
8027 ret = get_errno(setresuid(low2highuid(arg1),
8029 low2highuid(arg3)));
8032 #ifdef TARGET_NR_getresuid
8033 case TARGET_NR_getresuid:
8035 uid_t ruid, euid, suid;
8036 ret = get_errno(getresuid(&ruid, &euid, &suid));
8037 if (!is_error(ret)) {
8038 if (put_user_id(high2lowuid(ruid), arg1)
8039 || put_user_id(high2lowuid(euid), arg2)
8040 || put_user_id(high2lowuid(suid), arg3))
8046 #ifdef TARGET_NR_getresgid
8047 case TARGET_NR_setresgid:
8048 ret = get_errno(setresgid(low2highgid(arg1),
8050 low2highgid(arg3)));
8053 #ifdef TARGET_NR_getresgid
8054 case TARGET_NR_getresgid:
8056 gid_t rgid, egid, sgid;
8057 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8058 if (!is_error(ret)) {
8059 if (put_user_id(high2lowgid(rgid), arg1)
8060 || put_user_id(high2lowgid(egid), arg2)
8061 || put_user_id(high2lowgid(sgid), arg3))
8067 case TARGET_NR_chown:
8068 if (!(p = lock_user_string(arg1)))
8070 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
8071 unlock_user(p, arg1, 0);
8073 case TARGET_NR_setuid:
8074 ret = get_errno(setuid(low2highuid(arg1)));
8076 case TARGET_NR_setgid:
8077 ret = get_errno(setgid(low2highgid(arg1)));
8079 case TARGET_NR_setfsuid:
8080 ret = get_errno(setfsuid(arg1));
8082 case TARGET_NR_setfsgid:
8083 ret = get_errno(setfsgid(arg1));
8086 #ifdef TARGET_NR_lchown32
8087 case TARGET_NR_lchown32:
8088 if (!(p = lock_user_string(arg1)))
8090 ret = get_errno(lchown(p, arg2, arg3));
8091 unlock_user(p, arg1, 0);
8094 #ifdef TARGET_NR_getuid32
8095 case TARGET_NR_getuid32:
8096 ret = get_errno(getuid());
8100 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8101 /* Alpha specific */
8102 case TARGET_NR_getxuid:
8106 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
8108 ret = get_errno(getuid());
8111 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8112 /* Alpha specific */
8113 case TARGET_NR_getxgid:
8117 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
8119 ret = get_errno(getgid());
8122 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8123 /* Alpha specific */
8124 case TARGET_NR_osf_getsysinfo:
8125 ret = -TARGET_EOPNOTSUPP;
8127 case TARGET_GSI_IEEE_FP_CONTROL:
8129 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
8131 /* Copied from linux ieee_fpcr_to_swcr. */
8132 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
8133 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
8134 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
8135 | SWCR_TRAP_ENABLE_DZE
8136 | SWCR_TRAP_ENABLE_OVF);
8137 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
8138 | SWCR_TRAP_ENABLE_INE);
8139 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
8140 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
8142 if (put_user_u64 (swcr, arg2))
8148 /* case GSI_IEEE_STATE_AT_SIGNAL:
8149 -- Not implemented in linux kernel.
8151 -- Retrieves current unaligned access state; not much used.
8153 -- Retrieves implver information; surely not used.
8155 -- Grabs a copy of the HWRPB; surely not used.
8160 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8161 /* Alpha specific */
8162 case TARGET_NR_osf_setsysinfo:
8163 ret = -TARGET_EOPNOTSUPP;
8165 case TARGET_SSI_IEEE_FP_CONTROL:
8167 uint64_t swcr, fpcr, orig_fpcr;
8169 if (get_user_u64 (swcr, arg2)) {
8172 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8173 fpcr = orig_fpcr & FPCR_DYN_MASK;
8175 /* Copied from linux ieee_swcr_to_fpcr. */
8176 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
8177 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
8178 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
8179 | SWCR_TRAP_ENABLE_DZE
8180 | SWCR_TRAP_ENABLE_OVF)) << 48;
8181 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
8182 | SWCR_TRAP_ENABLE_INE)) << 57;
8183 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
8184 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
8186 cpu_alpha_store_fpcr(cpu_env, fpcr);
8191 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
8193 uint64_t exc, fpcr, orig_fpcr;
8196 if (get_user_u64(exc, arg2)) {
8200 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8202 /* We only add to the exception status here. */
8203 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
8205 cpu_alpha_store_fpcr(cpu_env, fpcr);
8208 /* Old exceptions are not signaled. */
8209 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
8211 /* If any exceptions set by this call,
8212 and are unmasked, send a signal. */
8214 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
8215 si_code = TARGET_FPE_FLTRES;
8217 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
8218 si_code = TARGET_FPE_FLTUND;
8220 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
8221 si_code = TARGET_FPE_FLTOVF;
8223 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
8224 si_code = TARGET_FPE_FLTDIV;
8226 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
8227 si_code = TARGET_FPE_FLTINV;
8230 target_siginfo_t info;
8231 info.si_signo = SIGFPE;
8233 info.si_code = si_code;
8234 info._sifields._sigfault._addr
8235 = ((CPUArchState *)cpu_env)->pc;
8236 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
8241 /* case SSI_NVPAIRS:
8242 -- Used with SSIN_UACPROC to enable unaligned accesses.
8243 case SSI_IEEE_STATE_AT_SIGNAL:
8244 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8245 -- Not implemented in linux kernel
8250 #ifdef TARGET_NR_osf_sigprocmask
8251 /* Alpha specific. */
8252 case TARGET_NR_osf_sigprocmask:
8256 sigset_t set, oldset;
8259 case TARGET_SIG_BLOCK:
8262 case TARGET_SIG_UNBLOCK:
8265 case TARGET_SIG_SETMASK:
8269 ret = -TARGET_EINVAL;
8273 target_to_host_old_sigset(&set, &mask);
8274 do_sigprocmask(how, &set, &oldset);
8275 host_to_target_old_sigset(&mask, &oldset);
8281 #ifdef TARGET_NR_getgid32
8282 case TARGET_NR_getgid32:
8283 ret = get_errno(getgid());
8286 #ifdef TARGET_NR_geteuid32
8287 case TARGET_NR_geteuid32:
8288 ret = get_errno(geteuid());
8291 #ifdef TARGET_NR_getegid32
8292 case TARGET_NR_getegid32:
8293 ret = get_errno(getegid());
8296 #ifdef TARGET_NR_setreuid32
8297 case TARGET_NR_setreuid32:
8298 ret = get_errno(setreuid(arg1, arg2));
8301 #ifdef TARGET_NR_setregid32
8302 case TARGET_NR_setregid32:
8303 ret = get_errno(setregid(arg1, arg2));
8306 #ifdef TARGET_NR_getgroups32
8307 case TARGET_NR_getgroups32:
8309 int gidsetsize = arg1;
8310 uint32_t *target_grouplist;
8314 grouplist = alloca(gidsetsize * sizeof(gid_t));
8315 ret = get_errno(getgroups(gidsetsize, grouplist));
8316 if (gidsetsize == 0)
8318 if (!is_error(ret)) {
8319 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8320 if (!target_grouplist) {
8321 ret = -TARGET_EFAULT;
8324 for(i = 0;i < ret; i++)
8325 target_grouplist[i] = tswap32(grouplist[i]);
8326 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8331 #ifdef TARGET_NR_setgroups32
8332 case TARGET_NR_setgroups32:
8334 int gidsetsize = arg1;
8335 uint32_t *target_grouplist;
8339 grouplist = alloca(gidsetsize * sizeof(gid_t));
8340 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8341 if (!target_grouplist) {
8342 ret = -TARGET_EFAULT;
8345 for(i = 0;i < gidsetsize; i++)
8346 grouplist[i] = tswap32(target_grouplist[i]);
8347 unlock_user(target_grouplist, arg2, 0);
8348 ret = get_errno(setgroups(gidsetsize, grouplist));
8352 #ifdef TARGET_NR_fchown32
8353 case TARGET_NR_fchown32:
8354 ret = get_errno(fchown(arg1, arg2, arg3));
8357 #ifdef TARGET_NR_setresuid32
8358 case TARGET_NR_setresuid32:
8359 ret = get_errno(setresuid(arg1, arg2, arg3));
8362 #ifdef TARGET_NR_getresuid32
8363 case TARGET_NR_getresuid32:
8365 uid_t ruid, euid, suid;
8366 ret = get_errno(getresuid(&ruid, &euid, &suid));
8367 if (!is_error(ret)) {
8368 if (put_user_u32(ruid, arg1)
8369 || put_user_u32(euid, arg2)
8370 || put_user_u32(suid, arg3))
8376 #ifdef TARGET_NR_setresgid32
8377 case TARGET_NR_setresgid32:
8378 ret = get_errno(setresgid(arg1, arg2, arg3));
8381 #ifdef TARGET_NR_getresgid32
8382 case TARGET_NR_getresgid32:
8384 gid_t rgid, egid, sgid;
8385 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8386 if (!is_error(ret)) {
8387 if (put_user_u32(rgid, arg1)
8388 || put_user_u32(egid, arg2)
8389 || put_user_u32(sgid, arg3))
8395 #ifdef TARGET_NR_chown32
8396 case TARGET_NR_chown32:
8397 if (!(p = lock_user_string(arg1)))
8399 ret = get_errno(chown(p, arg2, arg3));
8400 unlock_user(p, arg1, 0);
8403 #ifdef TARGET_NR_setuid32
8404 case TARGET_NR_setuid32:
8405 ret = get_errno(setuid(arg1));
8408 #ifdef TARGET_NR_setgid32
8409 case TARGET_NR_setgid32:
8410 ret = get_errno(setgid(arg1));
8413 #ifdef TARGET_NR_setfsuid32
8414 case TARGET_NR_setfsuid32:
8415 ret = get_errno(setfsuid(arg1));
8418 #ifdef TARGET_NR_setfsgid32
8419 case TARGET_NR_setfsgid32:
8420 ret = get_errno(setfsgid(arg1));
8424 case TARGET_NR_pivot_root:
8426 #ifdef TARGET_NR_mincore
8427 case TARGET_NR_mincore:
8430 ret = -TARGET_EFAULT;
8431 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8433 if (!(p = lock_user_string(arg3)))
8435 ret = get_errno(mincore(a, arg2, p));
8436 unlock_user(p, arg3, ret);
8438 unlock_user(a, arg1, 0);
8442 #ifdef TARGET_NR_arm_fadvise64_64
8443 case TARGET_NR_arm_fadvise64_64:
8446 * arm_fadvise64_64 looks like fadvise64_64 but
8447 * with different argument order
8455 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8456 #ifdef TARGET_NR_fadvise64_64
8457 case TARGET_NR_fadvise64_64:
8459 #ifdef TARGET_NR_fadvise64
8460 case TARGET_NR_fadvise64:
8464 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8465 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8466 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8467 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8471 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8474 #ifdef TARGET_NR_madvise
8475 case TARGET_NR_madvise:
8476 /* A straight passthrough may not be safe because qemu sometimes
8477 turns private file-backed mappings into anonymous mappings.
8478 This will break MADV_DONTNEED.
8479 This is a hint, so ignoring and returning success is ok. */
8483 #if TARGET_ABI_BITS == 32
8484 case TARGET_NR_fcntl64:
8488 struct target_flock64 *target_fl;
8490 struct target_eabi_flock64 *target_efl;
8493 cmd = target_to_host_fcntl_cmd(arg2);
8494 if (cmd == -TARGET_EINVAL) {
8500 case TARGET_F_GETLK64:
8502 if (((CPUARMState *)cpu_env)->eabi) {
8503 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8505 fl.l_type = tswap16(target_efl->l_type);
8506 fl.l_whence = tswap16(target_efl->l_whence);
8507 fl.l_start = tswap64(target_efl->l_start);
8508 fl.l_len = tswap64(target_efl->l_len);
8509 fl.l_pid = tswap32(target_efl->l_pid);
8510 unlock_user_struct(target_efl, arg3, 0);
8514 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8516 fl.l_type = tswap16(target_fl->l_type);
8517 fl.l_whence = tswap16(target_fl->l_whence);
8518 fl.l_start = tswap64(target_fl->l_start);
8519 fl.l_len = tswap64(target_fl->l_len);
8520 fl.l_pid = tswap32(target_fl->l_pid);
8521 unlock_user_struct(target_fl, arg3, 0);
8523 ret = get_errno(fcntl(arg1, cmd, &fl));
8526 if (((CPUARMState *)cpu_env)->eabi) {
8527 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8529 target_efl->l_type = tswap16(fl.l_type);
8530 target_efl->l_whence = tswap16(fl.l_whence);
8531 target_efl->l_start = tswap64(fl.l_start);
8532 target_efl->l_len = tswap64(fl.l_len);
8533 target_efl->l_pid = tswap32(fl.l_pid);
8534 unlock_user_struct(target_efl, arg3, 1);
8538 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8540 target_fl->l_type = tswap16(fl.l_type);
8541 target_fl->l_whence = tswap16(fl.l_whence);
8542 target_fl->l_start = tswap64(fl.l_start);
8543 target_fl->l_len = tswap64(fl.l_len);
8544 target_fl->l_pid = tswap32(fl.l_pid);
8545 unlock_user_struct(target_fl, arg3, 1);
8550 case TARGET_F_SETLK64:
8551 case TARGET_F_SETLKW64:
8553 if (((CPUARMState *)cpu_env)->eabi) {
8554 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8556 fl.l_type = tswap16(target_efl->l_type);
8557 fl.l_whence = tswap16(target_efl->l_whence);
8558 fl.l_start = tswap64(target_efl->l_start);
8559 fl.l_len = tswap64(target_efl->l_len);
8560 fl.l_pid = tswap32(target_efl->l_pid);
8561 unlock_user_struct(target_efl, arg3, 0);
8565 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8567 fl.l_type = tswap16(target_fl->l_type);
8568 fl.l_whence = tswap16(target_fl->l_whence);
8569 fl.l_start = tswap64(target_fl->l_start);
8570 fl.l_len = tswap64(target_fl->l_len);
8571 fl.l_pid = tswap32(target_fl->l_pid);
8572 unlock_user_struct(target_fl, arg3, 0);
8574 ret = get_errno(fcntl(arg1, cmd, &fl));
8577 ret = do_fcntl(arg1, arg2, arg3);
8583 #ifdef TARGET_NR_cacheflush
8584 case TARGET_NR_cacheflush:
8585 /* self-modifying code is handled automatically, so nothing needed */
8589 #ifdef TARGET_NR_security
8590 case TARGET_NR_security:
8593 #ifdef TARGET_NR_getpagesize
8594 case TARGET_NR_getpagesize:
8595 ret = TARGET_PAGE_SIZE;
8598 case TARGET_NR_gettid:
8599 ret = get_errno(gettid());
8601 #ifdef TARGET_NR_readahead
8602 case TARGET_NR_readahead:
8603 #if TARGET_ABI_BITS == 32
8604 if (regpairs_aligned(cpu_env)) {
8609 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8611 ret = get_errno(readahead(arg1, arg2, arg3));
8616 #ifdef TARGET_NR_setxattr
8617 case TARGET_NR_listxattr:
8618 case TARGET_NR_llistxattr:
8622 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8624 ret = -TARGET_EFAULT;
8628 p = lock_user_string(arg1);
8630 if (num == TARGET_NR_listxattr) {
8631 ret = get_errno(listxattr(p, b, arg3));
8633 ret = get_errno(llistxattr(p, b, arg3));
8636 ret = -TARGET_EFAULT;
8638 unlock_user(p, arg1, 0);
8639 unlock_user(b, arg2, arg3);
8642 case TARGET_NR_flistxattr:
8646 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8648 ret = -TARGET_EFAULT;
8652 ret = get_errno(flistxattr(arg1, b, arg3));
8653 unlock_user(b, arg2, arg3);
8656 case TARGET_NR_setxattr:
8657 case TARGET_NR_lsetxattr:
8659 void *p, *n, *v = 0;
8661 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8663 ret = -TARGET_EFAULT;
8667 p = lock_user_string(arg1);
8668 n = lock_user_string(arg2);
8670 if (num == TARGET_NR_setxattr) {
8671 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8673 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8676 ret = -TARGET_EFAULT;
8678 unlock_user(p, arg1, 0);
8679 unlock_user(n, arg2, 0);
8680 unlock_user(v, arg3, 0);
8683 case TARGET_NR_fsetxattr:
8687 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8689 ret = -TARGET_EFAULT;
8693 n = lock_user_string(arg2);
8695 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8697 ret = -TARGET_EFAULT;
8699 unlock_user(n, arg2, 0);
8700 unlock_user(v, arg3, 0);
8703 case TARGET_NR_getxattr:
8704 case TARGET_NR_lgetxattr:
8706 void *p, *n, *v = 0;
8708 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8710 ret = -TARGET_EFAULT;
8714 p = lock_user_string(arg1);
8715 n = lock_user_string(arg2);
8717 if (num == TARGET_NR_getxattr) {
8718 ret = get_errno(getxattr(p, n, v, arg4));
8720 ret = get_errno(lgetxattr(p, n, v, arg4));
8723 ret = -TARGET_EFAULT;
8725 unlock_user(p, arg1, 0);
8726 unlock_user(n, arg2, 0);
8727 unlock_user(v, arg3, arg4);
8730 case TARGET_NR_fgetxattr:
8734 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8736 ret = -TARGET_EFAULT;
8740 n = lock_user_string(arg2);
8742 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8744 ret = -TARGET_EFAULT;
8746 unlock_user(n, arg2, 0);
8747 unlock_user(v, arg3, arg4);
8750 case TARGET_NR_removexattr:
8751 case TARGET_NR_lremovexattr:
8754 p = lock_user_string(arg1);
8755 n = lock_user_string(arg2);
8757 if (num == TARGET_NR_removexattr) {
8758 ret = get_errno(removexattr(p, n));
8760 ret = get_errno(lremovexattr(p, n));
8763 ret = -TARGET_EFAULT;
8765 unlock_user(p, arg1, 0);
8766 unlock_user(n, arg2, 0);
8769 case TARGET_NR_fremovexattr:
8772 n = lock_user_string(arg2);
8774 ret = get_errno(fremovexattr(arg1, n));
8776 ret = -TARGET_EFAULT;
8778 unlock_user(n, arg2, 0);
8782 #endif /* CONFIG_ATTR */
8783 #ifdef TARGET_NR_set_thread_area
8784 case TARGET_NR_set_thread_area:
8785 #if defined(TARGET_MIPS)
8786 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8789 #elif defined(TARGET_CRIS)
8791 ret = -TARGET_EINVAL;
8793 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8797 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8798 ret = do_set_thread_area(cpu_env, arg1);
8800 #elif defined(TARGET_M68K)
8802 TaskState *ts = cpu->opaque;
8803 ts->tp_value = arg1;
8808 goto unimplemented_nowarn;
8811 #ifdef TARGET_NR_get_thread_area
8812 case TARGET_NR_get_thread_area:
8813 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8814 ret = do_get_thread_area(cpu_env, arg1);
8816 #elif defined(TARGET_M68K)
8818 TaskState *ts = cpu->opaque;
8823 goto unimplemented_nowarn;
8826 #ifdef TARGET_NR_getdomainname
8827 case TARGET_NR_getdomainname:
8828 goto unimplemented_nowarn;
8831 #ifdef TARGET_NR_clock_gettime
8832 case TARGET_NR_clock_gettime:
8835 ret = get_errno(clock_gettime(arg1, &ts));
8836 if (!is_error(ret)) {
8837 host_to_target_timespec(arg2, &ts);
8842 #ifdef TARGET_NR_clock_getres
8843 case TARGET_NR_clock_getres:
8846 ret = get_errno(clock_getres(arg1, &ts));
8847 if (!is_error(ret)) {
8848 host_to_target_timespec(arg2, &ts);
8853 #ifdef TARGET_NR_clock_nanosleep
8854 case TARGET_NR_clock_nanosleep:
8857 target_to_host_timespec(&ts, arg3);
8858 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8860 host_to_target_timespec(arg4, &ts);
8865 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8866 case TARGET_NR_set_tid_address:
8867 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8871 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8872 case TARGET_NR_tkill:
8873 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8877 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8878 case TARGET_NR_tgkill:
8879 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8880 target_to_host_signal(arg3)));
8884 #ifdef TARGET_NR_set_robust_list
8885 case TARGET_NR_set_robust_list:
8886 case TARGET_NR_get_robust_list:
8887 /* The ABI for supporting robust futexes has userspace pass
8888 * the kernel a pointer to a linked list which is updated by
8889 * userspace after the syscall; the list is walked by the kernel
8890 * when the thread exits. Since the linked list in QEMU guest
8891 * memory isn't a valid linked list for the host and we have
8892 * no way to reliably intercept the thread-death event, we can't
8893 * support these. Silently return ENOSYS so that guest userspace
8894 * falls back to a non-robust futex implementation (which should
8895 * be OK except in the corner case of the guest crashing while
8896 * holding a mutex that is shared with another process via
8899 goto unimplemented_nowarn;
8902 #if defined(TARGET_NR_utimensat)
8903 case TARGET_NR_utimensat:
8905 struct timespec *tsp, ts[2];
8909 target_to_host_timespec(ts, arg3);
8910 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8914 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8916 if (!(p = lock_user_string(arg2))) {
8917 ret = -TARGET_EFAULT;
8920 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8921 unlock_user(p, arg2, 0);
8926 case TARGET_NR_futex:
8927 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8929 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8930 case TARGET_NR_inotify_init:
8931 ret = get_errno(sys_inotify_init());
8934 #ifdef CONFIG_INOTIFY1
8935 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8936 case TARGET_NR_inotify_init1:
8937 ret = get_errno(sys_inotify_init1(arg1));
8941 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8942 case TARGET_NR_inotify_add_watch:
8943 p = lock_user_string(arg2);
8944 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8945 unlock_user(p, arg2, 0);
8948 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8949 case TARGET_NR_inotify_rm_watch:
8950 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8954 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8955 case TARGET_NR_mq_open:
8957 struct mq_attr posix_mq_attr;
8959 p = lock_user_string(arg1 - 1);
8961 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8962 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
8963 unlock_user (p, arg1, 0);
8967 case TARGET_NR_mq_unlink:
8968 p = lock_user_string(arg1 - 1);
8969 ret = get_errno(mq_unlink(p));
8970 unlock_user (p, arg1, 0);
8973 case TARGET_NR_mq_timedsend:
8977 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8979 target_to_host_timespec(&ts, arg5);
8980 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8981 host_to_target_timespec(arg5, &ts);
8984 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8985 unlock_user (p, arg2, arg3);
8989 case TARGET_NR_mq_timedreceive:
8994 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8996 target_to_host_timespec(&ts, arg5);
8997 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8998 host_to_target_timespec(arg5, &ts);
9001 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
9002 unlock_user (p, arg2, arg3);
9004 put_user_u32(prio, arg4);
9008 /* Not implemented for now... */
9009 /* case TARGET_NR_mq_notify: */
9012 case TARGET_NR_mq_getsetattr:
9014 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
9017 ret = mq_getattr(arg1, &posix_mq_attr_out);
9018 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
9021 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
9022 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
9029 #ifdef CONFIG_SPLICE
9030 #ifdef TARGET_NR_tee
9033 ret = get_errno(tee(arg1,arg2,arg3,arg4));
9037 #ifdef TARGET_NR_splice
9038 case TARGET_NR_splice:
9040 loff_t loff_in, loff_out;
9041 loff_t *ploff_in = NULL, *ploff_out = NULL;
9043 get_user_u64(loff_in, arg2);
9044 ploff_in = &loff_in;
9047 get_user_u64(loff_out, arg2);
9048 ploff_out = &loff_out;
9050 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
9054 #ifdef TARGET_NR_vmsplice
9055 case TARGET_NR_vmsplice:
9057 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9059 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
9060 unlock_iovec(vec, arg2, arg3, 0);
9062 ret = -host_to_target_errno(errno);
9067 #endif /* CONFIG_SPLICE */
9068 #ifdef CONFIG_EVENTFD
9069 #if defined(TARGET_NR_eventfd)
9070 case TARGET_NR_eventfd:
9071 ret = get_errno(eventfd(arg1, 0));
9074 #if defined(TARGET_NR_eventfd2)
9075 case TARGET_NR_eventfd2:
9077 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
9078 if (arg2 & TARGET_O_NONBLOCK) {
9079 host_flags |= O_NONBLOCK;
9081 if (arg2 & TARGET_O_CLOEXEC) {
9082 host_flags |= O_CLOEXEC;
9084 ret = get_errno(eventfd(arg1, host_flags));
9088 #endif /* CONFIG_EVENTFD */
9089 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9090 case TARGET_NR_fallocate:
9091 #if TARGET_ABI_BITS == 32
9092 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
9093 target_offset64(arg5, arg6)));
9095 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
9099 #if defined(CONFIG_SYNC_FILE_RANGE)
9100 #if defined(TARGET_NR_sync_file_range)
9101 case TARGET_NR_sync_file_range:
9102 #if TARGET_ABI_BITS == 32
9103 #if defined(TARGET_MIPS)
9104 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9105 target_offset64(arg5, arg6), arg7));
9107 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
9108 target_offset64(arg4, arg5), arg6));
9109 #endif /* !TARGET_MIPS */
9111 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
9115 #if defined(TARGET_NR_sync_file_range2)
9116 case TARGET_NR_sync_file_range2:
9117 /* This is like sync_file_range but the arguments are reordered */
9118 #if TARGET_ABI_BITS == 32
9119 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9120 target_offset64(arg5, arg6), arg2));
9122 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
9127 #if defined(CONFIG_EPOLL)
9128 #if defined(TARGET_NR_epoll_create)
9129 case TARGET_NR_epoll_create:
9130 ret = get_errno(epoll_create(arg1));
9133 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9134 case TARGET_NR_epoll_create1:
9135 ret = get_errno(epoll_create1(arg1));
9138 #if defined(TARGET_NR_epoll_ctl)
9139 case TARGET_NR_epoll_ctl:
9141 struct epoll_event ep;
9142 struct epoll_event *epp = 0;
9144 struct target_epoll_event *target_ep;
9145 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
9148 ep.events = tswap32(target_ep->events);
9149 /* The epoll_data_t union is just opaque data to the kernel,
9150 * so we transfer all 64 bits across and need not worry what
9151 * actual data type it is.
9153 ep.data.u64 = tswap64(target_ep->data.u64);
9154 unlock_user_struct(target_ep, arg4, 0);
9157 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
9162 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9163 #define IMPLEMENT_EPOLL_PWAIT
9165 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9166 #if defined(TARGET_NR_epoll_wait)
9167 case TARGET_NR_epoll_wait:
9169 #if defined(IMPLEMENT_EPOLL_PWAIT)
9170 case TARGET_NR_epoll_pwait:
9173 struct target_epoll_event *target_ep;
9174 struct epoll_event *ep;
9176 int maxevents = arg3;
9179 target_ep = lock_user(VERIFY_WRITE, arg2,
9180 maxevents * sizeof(struct target_epoll_event), 1);
9185 ep = alloca(maxevents * sizeof(struct epoll_event));
9188 #if defined(IMPLEMENT_EPOLL_PWAIT)
9189 case TARGET_NR_epoll_pwait:
9191 target_sigset_t *target_set;
9192 sigset_t _set, *set = &_set;
9195 target_set = lock_user(VERIFY_READ, arg5,
9196 sizeof(target_sigset_t), 1);
9198 unlock_user(target_ep, arg2, 0);
9201 target_to_host_sigset(set, target_set);
9202 unlock_user(target_set, arg5, 0);
9207 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
9211 #if defined(TARGET_NR_epoll_wait)
9212 case TARGET_NR_epoll_wait:
9213 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
9217 ret = -TARGET_ENOSYS;
9219 if (!is_error(ret)) {
9221 for (i = 0; i < ret; i++) {
9222 target_ep[i].events = tswap32(ep[i].events);
9223 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
9226 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
9231 #ifdef TARGET_NR_prlimit64
9232 case TARGET_NR_prlimit64:
9234 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9235 struct target_rlimit64 *target_rnew, *target_rold;
9236 struct host_rlimit64 rnew, rold, *rnewp = 0;
9238 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
9241 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
9242 rnew.rlim_max = tswap64(target_rnew->rlim_max);
9243 unlock_user_struct(target_rnew, arg3, 0);
9247 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
9248 if (!is_error(ret) && arg4) {
9249 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
9252 target_rold->rlim_cur = tswap64(rold.rlim_cur);
9253 target_rold->rlim_max = tswap64(rold.rlim_max);
9254 unlock_user_struct(target_rold, arg4, 1);
9259 #ifdef TARGET_NR_gethostname
9260 case TARGET_NR_gethostname:
9262 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9264 ret = get_errno(gethostname(name, arg2));
9265 unlock_user(name, arg1, arg2);
9267 ret = -TARGET_EFAULT;
9272 #ifdef TARGET_NR_atomic_cmpxchg_32
9273 case TARGET_NR_atomic_cmpxchg_32:
9275 /* should use start_exclusive from main.c */
9276 abi_ulong mem_value;
9277 if (get_user_u32(mem_value, arg6)) {
9278 target_siginfo_t info;
9279 info.si_signo = SIGSEGV;
9281 info.si_code = TARGET_SEGV_MAPERR;
9282 info._sifields._sigfault._addr = arg6;
9283 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9287 if (mem_value == arg2)
9288 put_user_u32(arg1, arg6);
9293 #ifdef TARGET_NR_atomic_barrier
9294 case TARGET_NR_atomic_barrier:
9296 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
9302 #ifdef TARGET_NR_timer_create
9303 case TARGET_NR_timer_create:
9305 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
9307 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
9308 struct target_sigevent *ptarget_sevp;
9309 struct target_timer_t *ptarget_timer;
9312 int timer_index = next_free_host_timer();
9314 if (timer_index < 0) {
9315 ret = -TARGET_EAGAIN;
9317 timer_t *phtimer = g_posix_timers + timer_index;
9320 if (!lock_user_struct(VERIFY_READ, ptarget_sevp, arg2, 1)) {
9324 host_sevp.sigev_signo = tswap32(ptarget_sevp->sigev_signo);
9325 host_sevp.sigev_notify = tswap32(ptarget_sevp->sigev_notify);
9327 phost_sevp = &host_sevp;
9330 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
9334 if (!lock_user_struct(VERIFY_WRITE, ptarget_timer, arg3, 1)) {
9337 ptarget_timer->ptr = tswap32(0xcafe0000 | timer_index);
9338 unlock_user_struct(ptarget_timer, arg3, 1);
9345 #ifdef TARGET_NR_timer_settime
9346 case TARGET_NR_timer_settime:
9348 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
9349 * struct itimerspec * old_value */
9351 if (arg3 == 0 || arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9352 ret = -TARGET_EINVAL;
9354 timer_t htimer = g_posix_timers[arg1];
9355 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
9357 target_to_host_itimerspec(&hspec_new, arg3);
9359 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
9360 host_to_target_itimerspec(arg2, &hspec_old);
9366 #ifdef TARGET_NR_timer_gettime
9367 case TARGET_NR_timer_gettime:
9369 /* args: timer_t timerid, struct itimerspec *curr_value */
9372 return -TARGET_EFAULT;
9373 } else if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9374 ret = -TARGET_EINVAL;
9376 timer_t htimer = g_posix_timers[arg1];
9377 struct itimerspec hspec;
9378 ret = get_errno(timer_gettime(htimer, &hspec));
9380 if (host_to_target_itimerspec(arg2, &hspec)) {
9381 ret = -TARGET_EFAULT;
9388 #ifdef TARGET_NR_timer_getoverrun
9389 case TARGET_NR_timer_getoverrun:
9391 /* args: timer_t timerid */
9393 if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9394 ret = -TARGET_EINVAL;
9396 timer_t htimer = g_posix_timers[arg1];
9397 ret = get_errno(timer_getoverrun(htimer));
9403 #ifdef TARGET_NR_timer_delete
9404 case TARGET_NR_timer_delete:
9406 /* args: timer_t timerid */
9408 if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9409 ret = -TARGET_EINVAL;
9411 timer_t htimer = g_posix_timers[arg1];
9412 ret = get_errno(timer_delete(htimer));
9413 g_posix_timers[arg1] = 0;
9421 gemu_log("qemu: Unsupported syscall: %d\n", num);
9422 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9423 unimplemented_nowarn:
9425 ret = -TARGET_ENOSYS;
9430 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
9433 print_syscall_ret(num, ret);
9436 ret = -TARGET_EFAULT;