4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
31 #include <sys/types.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
45 int __clone2(int (*fn)(void *), void *child_stack_base,
46 size_t stack_size, int flags, void *arg, ...);
48 #include <sys/socket.h>
52 #include <sys/times.h>
55 #include <sys/statfs.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include <qemu-common.h>
68 #include <sys/eventfd.h>
71 #include <sys/epoll.h>
74 #include <attr/xattr.h>
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/utsname.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include "linux_loop.h"
99 #include "cpu-uname.h"
102 #include "qemu-common.h"
104 #if defined(CONFIG_USE_NPTL)
/* Thread-related clone(2) flags forwarded to the host when NPTL emulation
 * is enabled; the second definition (0) is the non-NPTL fallback branch. */
105 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
106 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
108 /* XXX: Hardcode the above values. */
109 #define CLONE_NPTL_FLAGS2 0
/* VFAT readdir ioctls defined locally rather than pulling in
 * <linux/msdos_fs.h> (see the commented-out include below). */
114 //#include <linux/msdos_fs.h>
115 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
116 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* _syscall0 .. _syscall6: generate a static wrapper function `name` that
 * invokes the raw host system call __NR_<name> via syscall(2) with 0..6
 * arguments.  Used below to call host syscalls that glibc does not expose
 * (or exposes with different semantics). */
127 #define _syscall0(type,name) \
128 static type name (void) \
130 return syscall(__NR_##name); \
133 #define _syscall1(type,name,type1,arg1) \
134 static type name (type1 arg1) \
136 return syscall(__NR_##name, arg1); \
139 #define _syscall2(type,name,type1,arg1,type2,arg2) \
140 static type name (type1 arg1,type2 arg2) \
142 return syscall(__NR_##name, arg1, arg2); \
145 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
146 static type name (type1 arg1,type2 arg2,type3 arg3) \
148 return syscall(__NR_##name, arg1, arg2, arg3); \
151 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
152 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
154 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
157 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
159 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
161 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
165 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
166 type5,arg5,type6,arg6) \
167 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* Alias host syscall numbers under __NR_sys_* names so the _syscallN macros
 * above can generate sys_* wrappers without colliding with libc symbols. */
174 #define __NR_sys_uname __NR_uname
175 #define __NR_sys_faccessat __NR_faccessat
176 #define __NR_sys_fchmodat __NR_fchmodat
177 #define __NR_sys_fchownat __NR_fchownat
178 #define __NR_sys_fstatat64 __NR_fstatat64
179 #define __NR_sys_futimesat __NR_futimesat
180 #define __NR_sys_getcwd1 __NR_getcwd
181 #define __NR_sys_getdents __NR_getdents
182 #define __NR_sys_getdents64 __NR_getdents64
183 #define __NR_sys_getpriority __NR_getpriority
184 #define __NR_sys_linkat __NR_linkat
185 #define __NR_sys_mkdirat __NR_mkdirat
186 #define __NR_sys_mknodat __NR_mknodat
187 #define __NR_sys_newfstatat __NR_newfstatat
188 #define __NR_sys_openat __NR_openat
189 #define __NR_sys_readlinkat __NR_readlinkat
190 #define __NR_sys_renameat __NR_renameat
191 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
192 #define __NR_sys_symlinkat __NR_symlinkat
193 #define __NR_sys_syslog __NR_syslog
194 #define __NR_sys_tgkill __NR_tgkill
195 #define __NR_sys_tkill __NR_tkill
196 #define __NR_sys_unlinkat __NR_unlinkat
197 #define __NR_sys_utimensat __NR_utimensat
198 #define __NR_sys_futex __NR_futex
199 #define __NR_sys_inotify_init __NR_inotify_init
200 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
201 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
/* 64-bit hosts have no separate _llseek; route it to plain lseek. */
203 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
205 #define __NR__llseek __NR_lseek
/* Direct host-syscall stubs generated with the _syscallN macros above.
 * Each is guarded (in the full file) by TARGET_NR_* / __NR_* availability
 * checks; several guards are elided in this extract. */
209 _syscall0(int, gettid)
211 /* This is a replacement for the host gettid() and must return a host
213 static int gettid(void) {
217 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
218 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
219 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
221 _syscall2(int, sys_getpriority, int, which, int, who);
222 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
223 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
224 loff_t *, res, uint, wh);
226 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
227 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
228 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
229 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
231 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
232 _syscall2(int,sys_tkill,int,tid,int,sig)
234 #ifdef __NR_exit_group
235 _syscall1(int,exit_group,int,error_code)
237 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
238 _syscall1(int,set_tid_address,int *,tidptr)
240 #if defined(CONFIG_USE_NPTL)
241 #if defined(TARGET_NR_futex) && defined(__NR_futex)
242 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
243 const struct timespec *,timeout,int *,uaddr2,int,val3)
/* Raw affinity syscalls: the glibc wrappers use cpu_set_t, but we need the
 * kernel's (len, mask) interface to translate target masks faithfully. */
246 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
247 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
248 unsigned long *, user_mask_ptr);
249 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
250 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
251 unsigned long *, user_mask_ptr);
/* Translation table between target and host open(2)/fcntl(2) flag bits.
 * Each row is { target_mask, target_bits, host_mask, host_bits }; consumed
 * by target_to_host_bitmask() and its inverse. */
253 static bitmask_transtbl fcntl_flags_tbl[] = {
254 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
255 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
256 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
257 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
258 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
259 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
260 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
261 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
262 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
263 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
264 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
265 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
266 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
267 #if defined(O_DIRECT)
268 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
/* Copy one utsname field, clamped to __NEW_UTS_LEN and always
 * NUL-terminated (strncpy alone does not guarantee termination). */
273 #define COPY_UTSNAME_FIELD(dest, src) \
275 /* __NEW_UTS_LEN doesn't include terminating null */ \
276 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
277 (dest)[__NEW_UTS_LEN] = '\0'; \
/* Fill a kernel-style struct new_utsname from the host's uname(2) data.
 * Returns < 0 on uname failure (errno set by uname). */
280 static int sys_uname(struct new_utsname *buf)
282 struct utsname uts_buf;
284 if (uname(&uts_buf) < 0)
288 * Just in case these have some differences, we
289 * translate utsname to new_utsname (which is the
290 * struct linux kernel uses).
293 memset(buf, 0, sizeof(*buf));
294 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
295 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
296 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
297 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
298 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
/* domainname is only copied when the host utsname provides it (guard
 * elided in this extract). */
300 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
304 #undef COPY_UTSNAME_FIELD
/* getcwd(2)-style wrapper: on success returns the length of the path
 * INCLUDING the trailing NUL (kernel getcwd semantics), not a pointer. */
307 static int sys_getcwd1(char *buf, size_t size)
309 if (getcwd(buf, size) == NULL) {
310 /* getcwd() sets errno */
313 return strlen(buf)+1;
318 * Host system seems to have atfile syscall stubs available. We
319 * now enable them one by one as specified by target syscall_nr.h.
/* CONFIG_ATFILE branch: the host libc provides the *at() calls, so each
 * sys_*at below is a thin wrapper over the libc function.  Each wrapper is
 * compiled only when the target actually has the corresponding syscall. */
322 #ifdef TARGET_NR_faccessat
323 static int sys_faccessat(int dirfd, const char *pathname, int mode)
325 return (faccessat(dirfd, pathname, mode, 0));
328 #ifdef TARGET_NR_fchmodat
329 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
331 return (fchmodat(dirfd, pathname, mode, 0));
334 #if defined(TARGET_NR_fchownat)
335 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
336 gid_t group, int flags)
338 return (fchownat(dirfd, pathname, owner, group, flags));
/* Both fstatat64 and newfstatat map onto the libc fstatat(). */
341 #ifdef __NR_fstatat64
342 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
345 return (fstatat(dirfd, pathname, buf, flags));
348 #ifdef __NR_newfstatat
349 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
352 return (fstatat(dirfd, pathname, buf, flags));
355 #ifdef TARGET_NR_futimesat
356 static int sys_futimesat(int dirfd, const char *pathname,
357 const struct timeval times[2])
359 return (futimesat(dirfd, pathname, times));
362 #ifdef TARGET_NR_linkat
363 static int sys_linkat(int olddirfd, const char *oldpath,
364 int newdirfd, const char *newpath, int flags)
366 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
369 #ifdef TARGET_NR_mkdirat
370 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
372 return (mkdirat(dirfd, pathname, mode));
375 #ifdef TARGET_NR_mknodat
376 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
379 return (mknodat(dirfd, pathname, mode, dev));
382 #ifdef TARGET_NR_openat
/* Variadic like open(2): the mode argument exists only with O_CREAT, and
 * is translated through fcntl_flags_tbl before reaching the host. */
383 static int sys_openat(int dirfd, const char *pathname, int flags, ...)
386 * open(2) has extra parameter 'mode' when called with
389 if ((flags & O_CREAT) != 0) {
394 * Get the 'mode' parameter and translate it to
398 mode = va_arg(ap, mode_t);
399 mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
402 return (openat(dirfd, pathname, flags, mode));
404 return (openat(dirfd, pathname, flags));
407 #ifdef TARGET_NR_readlinkat
408 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
410 return (readlinkat(dirfd, pathname, buf, bufsiz));
413 #ifdef TARGET_NR_renameat
414 static int sys_renameat(int olddirfd, const char *oldpath,
415 int newdirfd, const char *newpath)
417 return (renameat(olddirfd, oldpath, newdirfd, newpath));
420 #ifdef TARGET_NR_symlinkat
421 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
423 return (symlinkat(oldpath, newdirfd, newpath));
426 #ifdef TARGET_NR_unlinkat
427 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
429 return (unlinkat(dirfd, pathname, flags));
432 #else /* !CONFIG_ATFILE */
/* No libc *at() support: generate the same sys_*at entry points as raw
 * host syscalls via the _syscallN macros, guarded on both the target and
 * the host providing the syscall number. */
435 * Try direct syscalls instead
437 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
438 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
440 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
441 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
443 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
444 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
445 uid_t,owner,gid_t,group,int,flags)
447 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
448 defined(__NR_fstatat64)
449 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
450 struct stat *,buf,int,flags)
452 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
453 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
454 const struct timeval *,times)
456 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
457 defined(__NR_newfstatat)
458 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
459 struct stat *,buf,int,flags)
461 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
462 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
463 int,newdirfd,const char *,newpath,int,flags)
465 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
466 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
468 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
469 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
470 mode_t,mode,dev_t,dev)
472 #if defined(TARGET_NR_openat) && defined(__NR_openat)
473 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
475 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
476 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
477 char *,buf,size_t,bufsize)
479 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
480 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
481 int,newdirfd,const char *,newpath)
483 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
484 _syscall3(int,sys_symlinkat,const char *,oldpath,
485 int,newdirfd,const char *,newpath)
487 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
488 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
491 #endif /* CONFIG_ATFILE */
493 #ifdef CONFIG_UTIMENSAT
/* Libc branch: a NULL pathname means "operate on the fd itself", which
 * maps to futimens(); otherwise use utimensat() directly.  The fallback
 * branch below issues the raw syscall instead. */
494 static int sys_utimensat(int dirfd, const char *pathname,
495 const struct timespec times[2], int flags)
497 if (pathname == NULL)
498 return futimens(dirfd, times);
500 return utimensat(dirfd, pathname, times, flags);
503 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
504 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
505 const struct timespec *,tsp,int,flags)
507 #endif /* CONFIG_UTIMENSAT */
509 #ifdef CONFIG_INOTIFY
/* inotify wrappers over the libc functions; when CONFIG_INOTIFY is absent
 * the TARGET_NR_inotify_* numbers are #undef'd below so the syscall
 * dispatcher reports ENOSYS instead. */
510 #include <sys/inotify.h>
512 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
513 static int sys_inotify_init(void)
515 return (inotify_init());
518 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
519 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
521 return (inotify_add_watch(fd, pathname, mask));
524 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
525 static int sys_inotify_rm_watch(int fd, int32_t wd)
527 return (inotify_rm_watch(fd, wd));
530 #ifdef CONFIG_INOTIFY1
531 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
532 static int sys_inotify_init1(int flags)
534 return (inotify_init1(flags));
539 /* Userspace can usually survive runtime without inotify */
540 #undef TARGET_NR_inotify_init
541 #undef TARGET_NR_inotify_init1
542 #undef TARGET_NR_inotify_add_watch
543 #undef TARGET_NR_inotify_rm_watch
544 #endif /* CONFIG_INOTIFY */
546 #if defined(TARGET_NR_ppoll)
/* Defining __NR_* to -1 when the host lacks the syscall makes the stub
 * compile but fail at runtime with ENOSYS, which the dispatcher handles. */
548 # define __NR_ppoll -1
550 #define __NR_sys_ppoll __NR_ppoll
551 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
552 struct timespec *, timeout, const __sigset_t *, sigmask,
556 #if defined(TARGET_NR_pselect6)
557 #ifndef __NR_pselect6
558 # define __NR_pselect6 -1
560 #define __NR_sys_pselect6 __NR_pselect6
561 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
562 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
565 #if defined(TARGET_NR_prlimit64)
566 #ifndef __NR_prlimit64
567 # define __NR_prlimit64 -1
569 #define __NR_sys_prlimit64 __NR_prlimit64
570 /* The glibc rlimit structure may not be that used by the underlying syscall */
571 struct host_rlimit64 {
575 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
576 const struct host_rlimit64 *, new_limit,
577 struct host_rlimit64 *, old_limit)
/* Prototypes for host libc calls that some libcs do not declare. */
580 extern int personality(int);
581 extern int flock(int, int);
582 extern int setfsuid(int);
583 extern int setfsgid(int);
584 extern int setgroups(int, gid_t *);
586 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
/* regpairs_aligned(): 1 if the target ABI passes 64-bit syscall arguments
 * in aligned register pairs.  ARM depends on the runtime EABI flag; MIPS is
 * always aligned; everything else is not. */
588 static inline int regpairs_aligned(void *cpu_env) {
589 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
591 #elif defined(TARGET_MIPS)
592 static inline int regpairs_aligned(void *cpu_env) { return 1; }
594 static inline int regpairs_aligned(void *cpu_env) { return 0; }
597 #define ERRNO_TABLE_SIZE 1200
599 /* target_to_host_errno_table[] is initialized from
600 * host_to_target_errno_table[] in syscall_init(). */
601 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
/* Sparse table mapping host errno values to the target's numbering,
 * using designated initializers indexed by the host errno.  An entry of 0
 * (untouched slot) means "same value on both sides" — see
 * host_to_target_errno() below. */
605 * This list is the union of errno values overridden in asm-<arch>/errno.h
606 * minus the errnos that are not actually generic to all archs.
608 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
609 [EIDRM] = TARGET_EIDRM,
610 [ECHRNG] = TARGET_ECHRNG,
611 [EL2NSYNC] = TARGET_EL2NSYNC,
612 [EL3HLT] = TARGET_EL3HLT,
613 [EL3RST] = TARGET_EL3RST,
614 [ELNRNG] = TARGET_ELNRNG,
615 [EUNATCH] = TARGET_EUNATCH,
616 [ENOCSI] = TARGET_ENOCSI,
617 [EL2HLT] = TARGET_EL2HLT,
618 [EDEADLK] = TARGET_EDEADLK,
619 [ENOLCK] = TARGET_ENOLCK,
620 [EBADE] = TARGET_EBADE,
621 [EBADR] = TARGET_EBADR,
622 [EXFULL] = TARGET_EXFULL,
623 [ENOANO] = TARGET_ENOANO,
624 [EBADRQC] = TARGET_EBADRQC,
625 [EBADSLT] = TARGET_EBADSLT,
626 [EBFONT] = TARGET_EBFONT,
627 [ENOSTR] = TARGET_ENOSTR,
628 [ENODATA] = TARGET_ENODATA,
629 [ETIME] = TARGET_ETIME,
630 [ENOSR] = TARGET_ENOSR,
631 [ENONET] = TARGET_ENONET,
632 [ENOPKG] = TARGET_ENOPKG,
633 [EREMOTE] = TARGET_EREMOTE,
634 [ENOLINK] = TARGET_ENOLINK,
635 [EADV] = TARGET_EADV,
636 [ESRMNT] = TARGET_ESRMNT,
637 [ECOMM] = TARGET_ECOMM,
638 [EPROTO] = TARGET_EPROTO,
639 [EDOTDOT] = TARGET_EDOTDOT,
640 [EMULTIHOP] = TARGET_EMULTIHOP,
641 [EBADMSG] = TARGET_EBADMSG,
642 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
643 [EOVERFLOW] = TARGET_EOVERFLOW,
644 [ENOTUNIQ] = TARGET_ENOTUNIQ,
645 [EBADFD] = TARGET_EBADFD,
646 [EREMCHG] = TARGET_EREMCHG,
647 [ELIBACC] = TARGET_ELIBACC,
648 [ELIBBAD] = TARGET_ELIBBAD,
649 [ELIBSCN] = TARGET_ELIBSCN,
650 [ELIBMAX] = TARGET_ELIBMAX,
651 [ELIBEXEC] = TARGET_ELIBEXEC,
652 [EILSEQ] = TARGET_EILSEQ,
653 [ENOSYS] = TARGET_ENOSYS,
654 [ELOOP] = TARGET_ELOOP,
655 [ERESTART] = TARGET_ERESTART,
656 [ESTRPIPE] = TARGET_ESTRPIPE,
657 [ENOTEMPTY] = TARGET_ENOTEMPTY,
658 [EUSERS] = TARGET_EUSERS,
659 [ENOTSOCK] = TARGET_ENOTSOCK,
660 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
661 [EMSGSIZE] = TARGET_EMSGSIZE,
662 [EPROTOTYPE] = TARGET_EPROTOTYPE,
663 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
664 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
665 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
666 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
667 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
668 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
669 [EADDRINUSE] = TARGET_EADDRINUSE,
670 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
671 [ENETDOWN] = TARGET_ENETDOWN,
672 [ENETUNREACH] = TARGET_ENETUNREACH,
673 [ENETRESET] = TARGET_ENETRESET,
674 [ECONNABORTED] = TARGET_ECONNABORTED,
675 [ECONNRESET] = TARGET_ECONNRESET,
676 [ENOBUFS] = TARGET_ENOBUFS,
677 [EISCONN] = TARGET_EISCONN,
678 [ENOTCONN] = TARGET_ENOTCONN,
679 [EUCLEAN] = TARGET_EUCLEAN,
680 [ENOTNAM] = TARGET_ENOTNAM,
681 [ENAVAIL] = TARGET_ENAVAIL,
682 [EISNAM] = TARGET_EISNAM,
683 [EREMOTEIO] = TARGET_EREMOTEIO,
684 [ESHUTDOWN] = TARGET_ESHUTDOWN,
685 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
686 [ETIMEDOUT] = TARGET_ETIMEDOUT,
687 [ECONNREFUSED] = TARGET_ECONNREFUSED,
688 [EHOSTDOWN] = TARGET_EHOSTDOWN,
689 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
690 [EALREADY] = TARGET_EALREADY,
691 [EINPROGRESS] = TARGET_EINPROGRESS,
692 [ESTALE] = TARGET_ESTALE,
693 [ECANCELED] = TARGET_ECANCELED,
694 [ENOMEDIUM] = TARGET_ENOMEDIUM,
695 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
/* The remaining entries are individually #ifdef-guarded in the full file
 * because not every host libc defines them. */
697 [ENOKEY] = TARGET_ENOKEY,
700 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
703 [EKEYREVOKED] = TARGET_EKEYREVOKED,
706 [EKEYREJECTED] = TARGET_EKEYREJECTED,
709 [EOWNERDEAD] = TARGET_EOWNERDEAD,
711 #ifdef ENOTRECOVERABLE
712 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
/* Translate a host errno to the target's value; slots left at 0 in the
 * table mean the value is identical on both sides (fall through to err). */
716 static inline int host_to_target_errno(int err)
718 if(host_to_target_errno_table[err])
719 return host_to_target_errno_table[err];
/* Inverse translation, via the table built in syscall_init(). */
723 static inline int target_to_host_errno(int err)
725 if (target_to_host_errno_table[err])
726 return target_to_host_errno_table[err];
/* Fold a host return value + errno into the target convention of
 * returning a negative target errno on failure. */
730 static inline abi_long get_errno(abi_long ret)
733 return -host_to_target_errno(errno);
/* A return value in the top 4096 of the address space encodes an error,
 * matching the kernel's -errno return convention. */
738 static inline int is_error(abi_long ret)
740 return (abi_ulong)ret >= (abi_ulong)(-4096);
/* strerror() for a TARGET errno value. */
743 char *target_strerror(int err)
745 return strerror(target_to_host_errno(err));
/* Emulated program-break state: current brk, the initial brk (lower
 * bound), and the highest host page already mapped for the heap. */
748 static abi_ulong target_brk;
749 static abi_ulong target_original_brk;
750 static abi_ulong brk_page;
/* Record the initial break set up by the loader, page-aligned. */
752 void target_set_brk(abi_ulong new_brk)
754 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
755 brk_page = HOST_PAGE_ALIGN(target_brk);
758 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
759 #define DEBUGF_BRK(message, args...)
761 /* do_brk() must return target values and target errnos. */
/* Emulate brk(2) for the target: shrink/grow within already-mapped pages
 * when possible, otherwise mmap fresh anonymous pages just above brk_page.
 * On any failure the previous break is returned (except Alpha, below). */
762 abi_long do_brk(abi_ulong new_brk)
764 abi_long mapped_addr;
767 DEBUGF_BRK("do_brk(%#010x) -> ", new_brk);
770 DEBUGF_BRK("%#010x (!new_brk)\n", target_brk);
/* Never shrink below the initial break. */
773 if (new_brk < target_original_brk) {
774 DEBUGF_BRK("%#010x (new_brk < target_original_brk)\n", target_brk);
778 /* If the new brk is less than the highest page reserved to the
779 * target heap allocation, set it and we're almost done... */
780 if (new_brk <= brk_page) {
781 /* Heap contents are initialized to zero, as for anonymous
783 if (new_brk > target_brk) {
784 memset(g2h(target_brk), 0, new_brk - target_brk);
786 target_brk = new_brk;
787 DEBUGF_BRK("%#010x (new_brk <= brk_page)\n", target_brk);
791 /* We need to allocate more memory after the brk... Note that
792 * we don't use MAP_FIXED because that will map over the top of
793 * any existing mapping (like the one with the host libc or qemu
794 * itself); instead we treat "mapped but at wrong address" as
795 * a failure and unmap again.
797 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
798 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
799 PROT_READ|PROT_WRITE,
800 MAP_ANON|MAP_PRIVATE, 0, 0));
802 if (mapped_addr == brk_page) {
803 /* Heap contents are initialized to zero, as for anonymous
804 * mapped pages. Technically the new pages are already
805 * initialized to zero since they *are* anonymous mapped
806 * pages, however we have to take care with the contents that
807 * come from the remaining part of the previous page: it may
808 * contains garbage data due to a previous heap usage (grown
810 memset(g2h(target_brk), 0, brk_page - target_brk);
812 target_brk = new_brk;
813 brk_page = HOST_PAGE_ALIGN(target_brk);
814 DEBUGF_BRK("%#010x (mapped_addr == brk_page)\n", target_brk);
816 } else if (mapped_addr != -1) {
817 /* Mapped but at wrong address, meaning there wasn't actually
818 * enough space for this brk.
820 target_munmap(mapped_addr, new_alloc_size);
822 DEBUGF_BRK("%#010x (mapped_addr != -1)\n", target_brk);
825 DEBUGF_BRK("%#010x (otherwise)\n", target_brk);
828 #if defined(TARGET_ALPHA)
829 /* We (partially) emulate OSF/1 on Alpha, which requires we
830 return a proper errno, not an unchanged brk value. */
831 return -TARGET_ENOMEM;
833 /* For everything else, return the previous break. */
/* Copy a target fd_set (packed abi_ulong words, target byte order) from
 * guest memory into a host fd_set.  Returns -TARGET_EFAULT on bad addr. */
837 static inline abi_long copy_from_user_fdset(fd_set *fds,
838 abi_ulong target_fds_addr,
842 abi_ulong b, *target_fds;
844 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
845 if (!(target_fds = lock_user(VERIFY_READ,
847 sizeof(abi_ulong) * nw,
849 return -TARGET_EFAULT;
853 for (i = 0; i < nw; i++) {
854 /* grab the abi_ulong */
855 __get_user(b, &target_fds[i]);
856 for (j = 0; j < TARGET_ABI_BITS; j++) {
857 /* check the bit inside the abi_ulong */
864 unlock_user(target_fds, target_fds_addr, 0);
/* As above, but a NULL guest address yields a NULL fd_set pointer via
 * *fds_ptr, matching select(2)'s optional-set convention. */
869 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
870 abi_ulong target_fds_addr,
873 if (target_fds_addr) {
874 if (copy_from_user_fdset(fds, target_fds_addr, n))
875 return -TARGET_EFAULT;
/* Reverse direction: pack a host fd_set back into the guest's layout. */
883 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
889 abi_ulong *target_fds;
891 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
892 if (!(target_fds = lock_user(VERIFY_WRITE,
894 sizeof(abi_ulong) * nw,
896 return -TARGET_EFAULT;
899 for (i = 0; i < nw; i++) {
901 for (j = 0; j < TARGET_ABI_BITS; j++) {
902 v |= ((FD_ISSET(k, fds) != 0) << j);
905 __put_user(v, &target_fds[i]);
908 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
913 #if defined(__alpha__)
/* Rescale a clock_t tick count from the host HZ to the target HZ;
 * the identity case (HOST_HZ == TARGET_HZ) is handled in the elided
 * branch of the #if below. */
919 static inline abi_long host_to_target_clock_t(long ticks)
921 #if HOST_HZ == TARGET_HZ
924 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
/* Copy a host struct rusage to a guest struct target_rusage at
 * target_addr, byte-swapping each field.  -TARGET_EFAULT on bad addr. */
928 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
929 const struct rusage *rusage)
931 struct target_rusage *target_rusage;
933 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
934 return -TARGET_EFAULT;
935 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
936 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
937 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
938 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
939 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
940 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
941 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
942 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
943 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
944 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
945 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
946 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
947 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
948 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
949 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
950 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
951 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
952 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
953 unlock_user_struct(target_rusage, target_addr, 1);
/* Convert a (byte-swapped) target rlimit value to the host rlim_t.
 * TARGET_RLIM_INFINITY, or any value that does not survive the round-trip
 * through rlim_t, maps to RLIM_INFINITY. */
958 static inline rlim_t target_to_host_rlim(target_ulong target_rlim)
960 target_ulong target_rlim_swap;
963 target_rlim_swap = tswapl(target_rlim);
964 if (target_rlim_swap == TARGET_RLIM_INFINITY || target_rlim_swap != (rlim_t)target_rlim_swap)
965 result = RLIM_INFINITY;
967 result = target_rlim_swap;
/* Inverse conversion: host rlim_t to target representation, again
 * collapsing unrepresentable values to TARGET_RLIM_INFINITY. */
972 static inline target_ulong host_to_target_rlim(rlim_t rlim)
974 target_ulong target_rlim_swap;
977 if (rlim == RLIM_INFINITY || rlim != (target_long)rlim)
978 target_rlim_swap = TARGET_RLIM_INFINITY;
980 target_rlim_swap = rlim;
981 result = tswapl(target_rlim_swap);
/* Map a target RLIMIT_* resource identifier to the host's constant for
 * use with getrlimit/setrlimit; unknown codes fall through (default case
 * elided in this extract). */
986 static inline int target_to_host_resource(int code)
989 case TARGET_RLIMIT_AS:
991 case TARGET_RLIMIT_CORE:
993 case TARGET_RLIMIT_CPU:
995 case TARGET_RLIMIT_DATA:
997 case TARGET_RLIMIT_FSIZE:
999 case TARGET_RLIMIT_LOCKS:
1000 return RLIMIT_LOCKS;
1001 case TARGET_RLIMIT_MEMLOCK:
1002 return RLIMIT_MEMLOCK;
1003 case TARGET_RLIMIT_MSGQUEUE:
1004 return RLIMIT_MSGQUEUE;
1005 case TARGET_RLIMIT_NICE:
1007 case TARGET_RLIMIT_NOFILE:
1008 return RLIMIT_NOFILE;
1009 case TARGET_RLIMIT_NPROC:
1010 return RLIMIT_NPROC;
1011 case TARGET_RLIMIT_RSS:
1013 case TARGET_RLIMIT_RTPRIO:
1014 return RLIMIT_RTPRIO;
1015 case TARGET_RLIMIT_SIGPENDING:
1016 return RLIMIT_SIGPENDING;
1017 case TARGET_RLIMIT_STACK:
1018 return RLIMIT_STACK;
/* Read a struct timeval from guest memory into a host struct timeval,
 * handling byte order via __get_user.  -TARGET_EFAULT on bad address. */
1024 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1025 abi_ulong target_tv_addr)
1027 struct target_timeval *target_tv;
1029 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1030 return -TARGET_EFAULT;
1032 __get_user(tv->tv_sec, &target_tv->tv_sec);
1033 __get_user(tv->tv_usec, &target_tv->tv_usec);
1035 unlock_user_struct(target_tv, target_tv_addr, 0);
/* Write a host struct timeval back out to guest memory. */
1040 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1041 const struct timeval *tv)
1043 struct target_timeval *target_tv;
1045 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1046 return -TARGET_EFAULT;
1048 __put_user(tv->tv_sec, &target_tv->tv_sec);
1049 __put_user(tv->tv_usec, &target_tv->tv_usec);
1051 unlock_user_struct(target_tv, target_tv_addr, 1);
1056 #if defined(TARGET_NR_mq_open)
/* POSIX message-queue attribute marshalling: guest -> host. */
1059 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1060 abi_ulong target_mq_attr_addr)
1062 struct target_mq_attr *target_mq_attr;
1064 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1065 target_mq_attr_addr, 1))
1066 return -TARGET_EFAULT;
1068 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1069 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1070 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1071 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1073 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
/* POSIX message-queue attribute marshalling: host -> guest. */
1078 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1079 const struct mq_attr *attr)
1081 struct target_mq_attr *target_mq_attr;
1083 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1084 target_mq_attr_addr, 0))
1085 return -TARGET_EFAULT;
1087 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1088 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1089 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1090 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1092 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1098 #if defined(TARGET_NR_select)
1099 /* do_select() must return target values and target errnos. */
/* Emulate select(2): copy the three fd_sets and the optional timeout in
 * from guest memory, run the host select, then copy results (including the
 * possibly-updated timeout) back out. */
1100 static abi_long do_select(int n,
1101 abi_ulong rfd_addr, abi_ulong wfd_addr,
1102 abi_ulong efd_addr, abi_ulong target_tv_addr)
1104 fd_set rfds, wfds, efds;
1105 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1106 struct timeval tv, *tv_ptr;
1109 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1113 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1117 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
/* A zero guest address means a NULL timeout (block indefinitely). */
1122 if (target_tv_addr) {
1123 if (copy_from_user_timeval(&tv, target_tv_addr))
1124 return -TARGET_EFAULT;
1130 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1132 if (!is_error(ret)) {
1133 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1134 return -TARGET_EFAULT;
1135 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1136 return -TARGET_EFAULT;
1137 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1138 return -TARGET_EFAULT;
1140 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1141 return -TARGET_EFAULT;
/* pipe2(2) wrapper (this branch assumes host pipe2 support; the fallback
 * branch is elided in this extract). */
1148 static abi_long do_pipe2(int host_pipe[], int flags)
1151 return pipe2(host_pipe, flags);
/* Emulate pipe/pipe2 for the guest.  is_pipe2 distinguishes the newer
 * entry point, which must NOT use the per-arch register convention. */
1157 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1158 int flags, int is_pipe2)
1162 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1165 return get_errno(ret);
1167 /* Several targets have special calling conventions for the original
1168 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1170 #if defined(TARGET_ALPHA)
1171 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1172 return host_pipe[0];
1173 #elif defined(TARGET_MIPS)
1174 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1175 return host_pipe[0];
1176 #elif defined(TARGET_SH4)
1177 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1178 return host_pipe[0];
/* Generic path: write both descriptors into the guest int[2] array. */
1182 if (put_user_s32(host_pipe[0], pipedes)
1183 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1184 return -TARGET_EFAULT;
1185 return get_errno(ret);
/* Copy an IP multicast request from guest memory.  Addresses stay in
 * network byte order; only imr_ifindex needs swapping, and only when the
 * caller passed the larger ip_mreqn layout (len distinguishes the two). */
1188 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1189 abi_ulong target_addr,
1192 struct target_ip_mreqn *target_smreqn;
1194 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1196 return -TARGET_EFAULT;
1197 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1198 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1199 if (len == sizeof(struct target_ip_mreqn))
1200 mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
1201 unlock_user(target_smreqn, target_addr, 0);
/* Copy a guest sockaddr into the host buffer 'addr', byte-swapping the
 * family field.  For AF_UNIX, also repair a common guest bug where
 * sun_path is not counted with its NUL terminator. */
1206 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1207 abi_ulong target_addr,
1210 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1211 sa_family_t sa_family;
1212 struct target_sockaddr *target_saddr;
1214 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1216 return -TARGET_EFAULT;
1218 sa_family = tswap16(target_saddr->sa_family);
1220 /* Oops. The caller might send an incomplete sun_path; sun_path
1221 * must be terminated by \0 (see the manual page), but
1222 * unfortunately it is quite common to specify sockaddr_un
1223 * length as "strlen(x->sun_path)" while it should be
1224 * "strlen(...) + 1". We'll fix that here if needed.
1225 * The Linux kernel has a similar feature.
1228 if (sa_family == AF_UNIX) {
1229 if (len < unix_maxlen && len > 0) {
1230 char *cp = (char*)target_saddr;
/* Last counted byte non-NUL but the next byte is NUL: extend len by 1
 * so the terminator is included. */
1232 if ( cp[len-1] && !cp[len] )
1235 if (len > unix_maxlen)
1239 memcpy(addr, target_saddr, len);
1240 addr->sa_family = sa_family;
1241 unlock_user(target_saddr, target_addr, 0);
/* Copy a host sockaddr out to guest memory at 'target_addr', swapping the
 * family field to target byte order.  Inverse of target_to_host_sockaddr. */
1246 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1247 struct sockaddr *addr,
1250 struct target_sockaddr *target_saddr;
1252 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1254 return -TARGET_EFAULT;
1255 memcpy(target_saddr, addr, len);
1256 target_saddr->sa_family = tswap16(addr->sa_family);
1257 unlock_user(target_saddr, target_addr, len);
1262 /* ??? Should this also swap msgh->name? */
/* Translate the guest's ancillary-data (control message) chain into the
 * host msghdr for sendmsg.  Walks guest and host cmsg headers in lock
 * step, swapping level/type/len.  Only SCM_RIGHTS (fd passing) payloads
 * are translated element-wise; anything else is copied raw with a log
 * warning.  On return msgh->msg_controllen holds the host-side size. */
1263 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1264 struct target_msghdr *target_msgh)
1266 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1267 abi_long msg_controllen;
1268 abi_ulong target_cmsg_addr;
1269 struct target_cmsghdr *target_cmsg;
1270 socklen_t space = 0;
1272 msg_controllen = tswapl(target_msgh->msg_controllen);
1273 if (msg_controllen < sizeof (struct target_cmsghdr))
1275 target_cmsg_addr = tswapl(target_msgh->msg_control);
1276 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1278 return -TARGET_EFAULT;
1280 while (cmsg && target_cmsg) {
1281 void *data = CMSG_DATA(cmsg);
1282 void *target_data = TARGET_CMSG_DATA(target_cmsg);
/* Payload length = guest cmsg_len minus the guest header size. */
1284 int len = tswapl(target_cmsg->cmsg_len)
1285 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
/* Stop (and roll back the accounting) if the host buffer would overflow. */
1287 space += CMSG_SPACE(len);
1288 if (space > msgh->msg_controllen) {
1289 space -= CMSG_SPACE(len);
1290 gemu_log("Host cmsg overflow\n");
1294 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1295 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1296 cmsg->cmsg_len = CMSG_LEN(len);
1298 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1299 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1300 memcpy(data, target_data, len);
/* SCM_RIGHTS: payload is an array of ints (fds); swap each. */
1302 int *fd = (int *)data;
1303 int *target_fd = (int *)target_data;
1304 int i, numfds = len / sizeof(int);
1306 for (i = 0; i < numfds; i++)
1307 fd[i] = tswap32(target_fd[i]);
1310 cmsg = CMSG_NXTHDR(msgh, cmsg);
1311 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1313 unlock_user(target_cmsg, target_cmsg_addr, 0);
1315 msgh->msg_controllen = space;
1319 /* ??? Should this also swap msgh->name? */
/* Inverse of target_to_host_cmsg: after recvmsg, translate the host's
 * ancillary-data chain back into guest memory, swapping headers and
 * converting SCM_RIGHTS fd arrays element-wise.  Writes the resulting
 * control length back into the guest msghdr. */
1320 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1321 struct msghdr *msgh)
1323 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1324 abi_long msg_controllen;
1325 abi_ulong target_cmsg_addr;
1326 struct target_cmsghdr *target_cmsg;
1327 socklen_t space = 0;
1329 msg_controllen = tswapl(target_msgh->msg_controllen);
1330 if (msg_controllen < sizeof (struct target_cmsghdr))
1332 target_cmsg_addr = tswapl(target_msgh->msg_control);
1333 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1335 return -TARGET_EFAULT;
1337 while (cmsg && target_cmsg) {
1338 void *data = CMSG_DATA(cmsg);
1339 void *target_data = TARGET_CMSG_DATA(target_cmsg);
/* Payload length from the host header this time. */
1341 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
/* Bail out if the guest control buffer cannot hold this message. */
1343 space += TARGET_CMSG_SPACE(len);
1344 if (space > msg_controllen) {
1345 space -= TARGET_CMSG_SPACE(len);
1346 gemu_log("Target cmsg overflow\n");
1350 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1351 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1352 target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
1354 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1355 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1356 memcpy(target_data, data, len);
/* SCM_RIGHTS: swap each received fd into target byte order. */
1358 int *fd = (int *)data;
1359 int *target_fd = (int *)target_data;
1360 int i, numfds = len / sizeof(int);
1362 for (i = 0; i < numfds; i++)
1363 target_fd[i] = tswap32(fd[i]);
1366 cmsg = CMSG_NXTHDR(msgh, cmsg);
1367 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1369 unlock_user(target_cmsg, target_cmsg_addr, space);
1371 target_msgh->msg_controllen = tswapl(space);
1375 /* do_setsockopt() Must return target values and target errnos. */
/* Emulate setsockopt(2): fetch the option value from guest memory in the
 * shape each option expects (int, byte, ip_mreq[n], ip_mreq_source, ...),
 * translate TARGET_SO_* names to host SO_* names for SOL_SOCKET, and
 * forward to the host syscall. */
1376 static abi_long do_setsockopt(int sockfd, int level, int optname,
1377 abi_ulong optval_addr, socklen_t optlen)
1381 struct ip_mreqn *ip_mreq;
1382 struct ip_mreq_source *ip_mreq_source;
1386 /* TCP options all take an 'int' value. */
1387 if (optlen < sizeof(uint32_t))
1388 return -TARGET_EINVAL;
1390 if (get_user_u32(val, optval_addr))
1391 return -TARGET_EFAULT;
1392 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
/* IPPROTO_IP options: these accept either an int or a single byte. */
1399 case IP_ROUTER_ALERT:
1403 case IP_MTU_DISCOVER:
1409 case IP_MULTICAST_TTL:
1410 case IP_MULTICAST_LOOP:
1412 if (optlen >= sizeof(uint32_t)) {
1413 if (get_user_u32(val, optval_addr))
1414 return -TARGET_EFAULT;
1415 } else if (optlen >= 1) {
1416 if (get_user_u8(val, optval_addr))
1417 return -TARGET_EFAULT;
1419 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
/* Multicast membership: size must be between ip_mreq and ip_mreqn. */
1421 case IP_ADD_MEMBERSHIP:
1422 case IP_DROP_MEMBERSHIP:
1423 if (optlen < sizeof (struct target_ip_mreq) ||
1424 optlen > sizeof (struct target_ip_mreqn))
1425 return -TARGET_EINVAL;
1427 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1428 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1429 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
/* Source-specific multicast: struct is passed through unmodified
 * (all fields are in_addr values in network byte order). */
1432 case IP_BLOCK_SOURCE:
1433 case IP_UNBLOCK_SOURCE:
1434 case IP_ADD_SOURCE_MEMBERSHIP:
1435 case IP_DROP_SOURCE_MEMBERSHIP:
1436 if (optlen != sizeof (struct target_ip_mreq_source))
1437 return -TARGET_EINVAL;
1439 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1440 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1441 unlock_user (ip_mreq_source, optval_addr, 0);
/* SOL_SOCKET: map target option names to host names, then treat the
 * value as a plain int. */
1448 case TARGET_SOL_SOCKET:
1450 /* Options with 'int' argument. */
1451 case TARGET_SO_DEBUG:
1454 case TARGET_SO_REUSEADDR:
1455 optname = SO_REUSEADDR;
1457 case TARGET_SO_TYPE:
1460 case TARGET_SO_ERROR:
1463 case TARGET_SO_DONTROUTE:
1464 optname = SO_DONTROUTE;
1466 case TARGET_SO_BROADCAST:
1467 optname = SO_BROADCAST;
1469 case TARGET_SO_SNDBUF:
1470 optname = SO_SNDBUF;
1472 case TARGET_SO_RCVBUF:
1473 optname = SO_RCVBUF;
1475 case TARGET_SO_KEEPALIVE:
1476 optname = SO_KEEPALIVE;
1478 case TARGET_SO_OOBINLINE:
1479 optname = SO_OOBINLINE;
1481 case TARGET_SO_NO_CHECK:
1482 optname = SO_NO_CHECK;
1484 case TARGET_SO_PRIORITY:
1485 optname = SO_PRIORITY;
1488 case TARGET_SO_BSDCOMPAT:
1489 optname = SO_BSDCOMPAT;
1492 case TARGET_SO_PASSCRED:
1493 optname = SO_PASSCRED;
1495 case TARGET_SO_TIMESTAMP:
1496 optname = SO_TIMESTAMP;
1498 case TARGET_SO_RCVLOWAT:
1499 optname = SO_RCVLOWAT;
1501 case TARGET_SO_RCVTIMEO:
1502 optname = SO_RCVTIMEO;
1504 case TARGET_SO_SNDTIMEO:
1505 optname = SO_SNDTIMEO;
1511 if (optlen < sizeof(uint32_t))
1512 return -TARGET_EINVAL;
1514 if (get_user_u32(val, optval_addr))
1515 return -TARGET_EFAULT;
1516 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
/* Anything else is unimplemented. */
1520 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
1521 ret = -TARGET_ENOPROTOOPT;
1526 /* do_getsockopt() Must return target values and target errnos. */
/* Emulate getsockopt(2): read the guest's optlen, call the host syscall,
 * then write the (possibly truncated) value and the updated length back
 * to guest memory.  Only int-valued options are handled; structured
 * options (SO_LINGER, timeouts, SO_PEERCRED/NAME) are unimplemented. */
1527 static abi_long do_getsockopt(int sockfd, int level, int optname,
1528 abi_ulong optval_addr, abi_ulong optlen)
1535 case TARGET_SOL_SOCKET:
1538 /* These don't just return a single integer */
1539 case TARGET_SO_LINGER:
1540 case TARGET_SO_RCVTIMEO:
1541 case TARGET_SO_SNDTIMEO:
1542 case TARGET_SO_PEERCRED:
1543 case TARGET_SO_PEERNAME:
1545 /* Options with 'int' argument. */
1546 case TARGET_SO_DEBUG:
1549 case TARGET_SO_REUSEADDR:
1550 optname = SO_REUSEADDR;
1552 case TARGET_SO_TYPE:
1555 case TARGET_SO_ERROR:
1558 case TARGET_SO_DONTROUTE:
1559 optname = SO_DONTROUTE;
1561 case TARGET_SO_BROADCAST:
1562 optname = SO_BROADCAST;
1564 case TARGET_SO_SNDBUF:
1565 optname = SO_SNDBUF;
1567 case TARGET_SO_RCVBUF:
1568 optname = SO_RCVBUF;
1570 case TARGET_SO_KEEPALIVE:
1571 optname = SO_KEEPALIVE;
1573 case TARGET_SO_OOBINLINE:
1574 optname = SO_OOBINLINE;
1576 case TARGET_SO_NO_CHECK:
1577 optname = SO_NO_CHECK;
1579 case TARGET_SO_PRIORITY:
1580 optname = SO_PRIORITY;
1583 case TARGET_SO_BSDCOMPAT:
1584 optname = SO_BSDCOMPAT;
1587 case TARGET_SO_PASSCRED:
1588 optname = SO_PASSCRED;
1590 case TARGET_SO_TIMESTAMP:
1591 optname = SO_TIMESTAMP;
1593 case TARGET_SO_RCVLOWAT:
1594 optname = SO_RCVLOWAT;
1601 /* TCP options all take an 'int' value. */
1603 if (get_user_u32(len, optlen))
1604 return -TARGET_EFAULT;
1606 return -TARGET_EINVAL;
1608 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv))
/* Write back as u32 or a single byte depending on the guest's len. */
1614 if (put_user_u32(val, optval_addr))
1615 return -TARGET_EFAULT;
1617 if (put_user_u8(val, optval_addr))
1618 return -TARGET_EFAULT;
1620 if (put_user_u32(len, optlen))
1621 return -TARGET_EFAULT;
/* IPPROTO_IP byte/int options, mirroring do_setsockopt. */
1628 case IP_ROUTER_ALERT:
1632 case IP_MTU_DISCOVER:
1638 case IP_MULTICAST_TTL:
1639 case IP_MULTICAST_LOOP:
1640 if (get_user_u32(len, optlen))
1641 return -TARGET_EFAULT;
1643 return -TARGET_EINVAL;
1645 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
/* A short guest buffer gets a single byte if the value fits. */
1648 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1650 if (put_user_u32(len, optlen)
1651 || put_user_u8(val, optval_addr))
1652 return -TARGET_EFAULT;
1654 if (len > sizeof(int))
1656 if (put_user_u32(len, optlen)
1657 || put_user_u32(val, optval_addr))
1658 return -TARGET_EFAULT;
1662 ret = -TARGET_ENOPROTOOPT;
1668 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1670 ret = -TARGET_EOPNOTSUPP;
1677 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1678 * other lock functions have a return code of 0 for failure.
/* Build a host iovec array from a guest target_iovec array: swap each
 * base/len and lock the guest buffer into host memory.  'type' is
 * VERIFY_READ or VERIFY_WRITE; 'copy' controls whether data is copied in. */
1680 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1681 int count, int copy)
1683 struct target_iovec *target_vec;
1687 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1689 return -TARGET_EFAULT;
1690 for(i = 0;i < count; i++) {
1691 base = tswapl(target_vec[i].iov_base);
1692 vec[i].iov_len = tswapl(target_vec[i].iov_len);
1693 if (vec[i].iov_len != 0) {
1694 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1695 /* Don't check lock_user return value. We must call writev even
1696 if an element has an invalid base address. */
1698 /* zero length pointer is ignored */
1699 vec[i].iov_base = NULL;
1702 unlock_user (target_vec, target_addr, 0);
/* Release the guest buffers locked by lock_iovec.  When 'copy' is set,
 * data written by the host (e.g. readv) is flushed back to the guest. */
1706 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1707 int count, int copy)
1709 struct target_iovec *target_vec;
1713 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1715 return -TARGET_EFAULT;
1716 for(i = 0;i < count; i++) {
1717 if (target_vec[i].iov_base) {
1718 base = tswapl(target_vec[i].iov_base);
1719 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1722 unlock_user (target_vec, target_addr, 0);
1727 /* do_socket() Must return target values and target errnos. */
/* Emulate socket(2).  On MIPS the TARGET_SOCK_* constants differ from the
 * host's, so the type is translated first; other targets share the host
 * numbering.  PF_NETLINK is rejected since netlink is not emulated. */
1728 static abi_long do_socket(int domain, int type, int protocol)
1730 #if defined(TARGET_MIPS)
1732 case TARGET_SOCK_DGRAM:
1735 case TARGET_SOCK_STREAM:
1738 case TARGET_SOCK_RAW:
1741 case TARGET_SOCK_RDM:
1744 case TARGET_SOCK_SEQPACKET:
1745 type = SOCK_SEQPACKET;
1747 case TARGET_SOCK_PACKET:
1752 if (domain == PF_NETLINK)
/* Fix: must return a *target* errno, not the host -EAFNOSUPPORT
 * (see the contract comment at the top of this function). */
1753 return -TARGET_EAFNOSUPPORT; /* netlink socket connections not supported */
1754 return get_errno(socket(domain, type, protocol));
1757 /* do_bind() Must return target values and target errnos. */
/* Emulate bind(2): reject negative lengths, convert the guest sockaddr
 * to host form, and forward to the host syscall.  The +1 in the alloca
 * leaves room for the AF_UNIX NUL-termination fixup done by
 * target_to_host_sockaddr. */
1758 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1764 if ((int)addrlen < 0) {
1765 return -TARGET_EINVAL;
1768 addr = alloca(addrlen+1);
1770 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1774 return get_errno(bind(sockfd, addr, addrlen));
1777 /* do_connect() Must return target values and target errnos. */
/* Emulate connect(2): validate the length, translate the guest sockaddr
 * and forward to the host syscall. */
1778 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1784 if ((int)addrlen < 0) {
1785 return -TARGET_EINVAL;
1788 addr = alloca(addrlen);
1790 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1794 return get_errno(connect(sockfd, addr, addrlen));
1797 /* do_sendrecvmsg() Must return target values and target errnos. */
/* Shared implementation of sendmsg/recvmsg ('send' selects direction).
 * Translates the guest msghdr: name (sockaddr), iovec array, ancillary
 * data, and flags; calls the host syscall; then converts results back
 * for the recv case. */
1798 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1799 int flags, int send)
1802 struct target_msghdr *msgp;
1806 abi_ulong target_vec;
1809 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1813 return -TARGET_EFAULT;
1814 if (msgp->msg_name) {
1815 msg.msg_namelen = tswap32(msgp->msg_namelen);
1816 msg.msg_name = alloca(msg.msg_namelen);
1817 ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1820 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1824 msg.msg_name = NULL;
1825 msg.msg_namelen = 0;
/* Host control buffer is oversized (x2) since host cmsg headers may be
 * larger than the target's. */
1827 msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1828 msg.msg_control = alloca(msg.msg_controllen);
1829 msg.msg_flags = tswap32(msgp->msg_flags);
1831 count = tswapl(msgp->msg_iovlen);
1832 vec = alloca(count * sizeof(struct iovec));
1833 target_vec = tswapl(msgp->msg_iov);
1834 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1835 msg.msg_iovlen = count;
1839 ret = target_to_host_cmsg(&msg, msgp);
1841 ret = get_errno(sendmsg(fd, &msg, flags));
1843 ret = get_errno(recvmsg(fd, &msg, flags));
1844 if (!is_error(ret)) {
1846 ret = host_to_target_cmsg(msgp, &msg);
1851 unlock_iovec(vec, target_vec, count, !send);
1852 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1856 /* do_accept() Must return target values and target errnos. */
/* Emulate accept(2).  A NULL guest addr means the caller doesn't want the
 * peer address.  Otherwise read the guest's addrlen, accept into a host
 * buffer and copy the resulting sockaddr + length back out. */
1857 static abi_long do_accept(int fd, abi_ulong target_addr,
1858 abi_ulong target_addrlen_addr)
1864 if (target_addr == 0)
1865 return get_errno(accept(fd, NULL, NULL));
1867 /* linux returns EINVAL if addrlen pointer is invalid */
1868 if (get_user_u32(addrlen, target_addrlen_addr))
1869 return -TARGET_EINVAL;
1871 if ((int)addrlen < 0) {
1872 return -TARGET_EINVAL;
1875 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1876 return -TARGET_EINVAL;
1878 addr = alloca(addrlen);
1880 ret = get_errno(accept(fd, addr, &addrlen));
1881 if (!is_error(ret)) {
1882 host_to_target_sockaddr(target_addr, addr, addrlen);
1883 if (put_user_u32(addrlen, target_addrlen_addr))
1884 ret = -TARGET_EFAULT;
1889 /* do_getpeername() Must return target values and target errnos. */
/* Emulate getpeername(2): read guest addrlen, call host, copy the peer
 * sockaddr and updated length back to guest memory. */
1890 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1891 abi_ulong target_addrlen_addr)
1897 if (get_user_u32(addrlen, target_addrlen_addr))
1898 return -TARGET_EFAULT;
1900 if ((int)addrlen < 0) {
1901 return -TARGET_EINVAL;
1904 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1905 return -TARGET_EFAULT;
1907 addr = alloca(addrlen);
1909 ret = get_errno(getpeername(fd, addr, &addrlen));
1910 if (!is_error(ret)) {
1911 host_to_target_sockaddr(target_addr, addr, addrlen);
1912 if (put_user_u32(addrlen, target_addrlen_addr))
1913 ret = -TARGET_EFAULT;
1918 /* do_getsockname() Must return target values and target errnos. */
/* Emulate getsockname(2): same shape as do_getpeername, but returns the
 * local address of the socket. */
1919 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1920 abi_ulong target_addrlen_addr)
1926 if (get_user_u32(addrlen, target_addrlen_addr))
1927 return -TARGET_EFAULT;
1929 if ((int)addrlen < 0) {
1930 return -TARGET_EINVAL;
1933 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1934 return -TARGET_EFAULT;
1936 addr = alloca(addrlen);
1938 ret = get_errno(getsockname(fd, addr, &addrlen));
1939 if (!is_error(ret)) {
1940 host_to_target_sockaddr(target_addr, addr, addrlen);
1941 if (put_user_u32(addrlen, target_addrlen_addr))
1942 ret = -TARGET_EFAULT;
1947 /* do_socketpair() Must return target values and target errnos. */
/* Emulate socketpair(2): create the pair on the host, then store both
 * descriptors into the guest int[2] array. */
1948 static abi_long do_socketpair(int domain, int type, int protocol,
1949 abi_ulong target_tab_addr)
1954 ret = get_errno(socketpair(domain, type, protocol, tab));
1955 if (!is_error(ret)) {
1956 if (put_user_s32(tab[0], target_tab_addr)
1957 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1958 ret = -TARGET_EFAULT;
1963 /* do_sendto() Must return target values and target errnos. */
/* Emulate sendto(2)/send(2): lock the guest payload; if a destination
 * address was supplied, translate it and use sendto, otherwise plain
 * send. */
1964 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1965 abi_ulong target_addr, socklen_t addrlen)
1971 if ((int)addrlen < 0) {
1972 return -TARGET_EINVAL;
1975 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1977 return -TARGET_EFAULT;
1979 addr = alloca(addrlen);
1980 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1982 unlock_user(host_msg, msg, 0);
1985 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1987 ret = get_errno(send(fd, host_msg, len, flags));
1989 unlock_user(host_msg, msg, 0);
1993 /* do_recvfrom() Must return target values and target errnos. */
/* Emulate recvfrom(2)/recv(2): lock a writable guest buffer; if the
 * caller wants the source address, read its addrlen, receive into a host
 * sockaddr and copy address + length back out on success. */
1994 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1995 abi_ulong target_addr,
1996 abi_ulong target_addrlen)
2003 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2005 return -TARGET_EFAULT;
2007 if (get_user_u32(addrlen, target_addrlen)) {
2008 ret = -TARGET_EFAULT;
2011 if ((int)addrlen < 0) {
2012 ret = -TARGET_EINVAL;
2015 addr = alloca(addrlen);
2016 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2018 addr = NULL; /* To keep compiler quiet. */
2019 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2021 if (!is_error(ret)) {
2023 host_to_target_sockaddr(target_addr, addr, addrlen);
2024 if (put_user_u32(addrlen, target_addrlen)) {
2025 ret = -TARGET_EFAULT;
/* Success: flush the received bytes back to guest memory. */
2029 unlock_user(host_msg, msg, len);
/* Error path: release without copying. */
2032 unlock_user(host_msg, msg, 0);
2037 #ifdef TARGET_NR_socketcall
2038 /* do_socketcall() Must return target values and target errnos. */
/* Demultiplex the legacy socketcall(2) syscall: 'num' selects the socket
 * operation and 'vptr' points at an array of abi_ulong arguments in guest
 * memory.  Each case fetches its arguments with get_user_ual and forwards
 * to the corresponding do_* helper. */
2039 static abi_long do_socketcall(int num, abi_ulong vptr)
2042 const int n = sizeof(abi_ulong);
2047 abi_ulong domain, type, protocol;
2049 if (get_user_ual(domain, vptr)
2050 || get_user_ual(type, vptr + n)
2051 || get_user_ual(protocol, vptr + 2 * n))
2052 return -TARGET_EFAULT;
2054 ret = do_socket(domain, type, protocol);
2060 abi_ulong target_addr;
2063 if (get_user_ual(sockfd, vptr)
2064 || get_user_ual(target_addr, vptr + n)
2065 || get_user_ual(addrlen, vptr + 2 * n))
2066 return -TARGET_EFAULT;
2068 ret = do_bind(sockfd, target_addr, addrlen);
2071 case SOCKOP_connect:
2074 abi_ulong target_addr;
2077 if (get_user_ual(sockfd, vptr)
2078 || get_user_ual(target_addr, vptr + n)
2079 || get_user_ual(addrlen, vptr + 2 * n))
2080 return -TARGET_EFAULT;
2082 ret = do_connect(sockfd, target_addr, addrlen);
2087 abi_ulong sockfd, backlog;
2089 if (get_user_ual(sockfd, vptr)
2090 || get_user_ual(backlog, vptr + n))
2091 return -TARGET_EFAULT;
2093 ret = get_errno(listen(sockfd, backlog));
2099 abi_ulong target_addr, target_addrlen;
2101 if (get_user_ual(sockfd, vptr)
2102 || get_user_ual(target_addr, vptr + n)
2103 || get_user_ual(target_addrlen, vptr + 2 * n))
2104 return -TARGET_EFAULT;
2106 ret = do_accept(sockfd, target_addr, target_addrlen);
2109 case SOCKOP_getsockname:
2112 abi_ulong target_addr, target_addrlen;
2114 if (get_user_ual(sockfd, vptr)
2115 || get_user_ual(target_addr, vptr + n)
2116 || get_user_ual(target_addrlen, vptr + 2 * n))
2117 return -TARGET_EFAULT;
2119 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2122 case SOCKOP_getpeername:
2125 abi_ulong target_addr, target_addrlen;
2127 if (get_user_ual(sockfd, vptr)
2128 || get_user_ual(target_addr, vptr + n)
2129 || get_user_ual(target_addrlen, vptr + 2 * n))
2130 return -TARGET_EFAULT;
2132 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2135 case SOCKOP_socketpair:
2137 abi_ulong domain, type, protocol;
2140 if (get_user_ual(domain, vptr)
2141 || get_user_ual(type, vptr + n)
2142 || get_user_ual(protocol, vptr + 2 * n)
2143 || get_user_ual(tab, vptr + 3 * n))
2144 return -TARGET_EFAULT;
2146 ret = do_socketpair(domain, type, protocol, tab);
/* send: do_sendto with a zero address degrades to plain send(2). */
2156 if (get_user_ual(sockfd, vptr)
2157 || get_user_ual(msg, vptr + n)
2158 || get_user_ual(len, vptr + 2 * n)
2159 || get_user_ual(flags, vptr + 3 * n))
2160 return -TARGET_EFAULT;
2162 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
/* recv: likewise, do_recvfrom with zero address is plain recv(2). */
2172 if (get_user_ual(sockfd, vptr)
2173 || get_user_ual(msg, vptr + n)
2174 || get_user_ual(len, vptr + 2 * n)
2175 || get_user_ual(flags, vptr + 3 * n))
2176 return -TARGET_EFAULT;
2178 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2190 if (get_user_ual(sockfd, vptr)
2191 || get_user_ual(msg, vptr + n)
2192 || get_user_ual(len, vptr + 2 * n)
2193 || get_user_ual(flags, vptr + 3 * n)
2194 || get_user_ual(addr, vptr + 4 * n)
2195 || get_user_ual(addrlen, vptr + 5 * n))
2196 return -TARGET_EFAULT;
2198 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2201 case SOCKOP_recvfrom:
2210 if (get_user_ual(sockfd, vptr)
2211 || get_user_ual(msg, vptr + n)
2212 || get_user_ual(len, vptr + 2 * n)
2213 || get_user_ual(flags, vptr + 3 * n)
2214 || get_user_ual(addr, vptr + 4 * n)
2215 || get_user_ual(addrlen, vptr + 5 * n))
2216 return -TARGET_EFAULT;
2218 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2221 case SOCKOP_shutdown:
2223 abi_ulong sockfd, how;
2225 if (get_user_ual(sockfd, vptr)
2226 || get_user_ual(how, vptr + n))
2227 return -TARGET_EFAULT;
2229 ret = get_errno(shutdown(sockfd, how));
2232 case SOCKOP_sendmsg:
2233 case SOCKOP_recvmsg:
2236 abi_ulong target_msg;
2239 if (get_user_ual(fd, vptr)
2240 || get_user_ual(target_msg, vptr + n)
2241 || get_user_ual(flags, vptr + 2 * n))
2242 return -TARGET_EFAULT;
2244 ret = do_sendrecvmsg(fd, target_msg, flags,
2245 (num == SOCKOP_sendmsg));
2248 case SOCKOP_setsockopt:
2256 if (get_user_ual(sockfd, vptr)
2257 || get_user_ual(level, vptr + n)
2258 || get_user_ual(optname, vptr + 2 * n)
2259 || get_user_ual(optval, vptr + 3 * n)
2260 || get_user_ual(optlen, vptr + 4 * n))
2261 return -TARGET_EFAULT;
2263 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2266 case SOCKOP_getsockopt:
2274 if (get_user_ual(sockfd, vptr)
2275 || get_user_ual(level, vptr + n)
2276 || get_user_ual(optname, vptr + 2 * n)
2277 || get_user_ual(optval, vptr + 3 * n)
2278 || get_user_ual(optlen, vptr + 4 * n))
2279 return -TARGET_EFAULT;
2281 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2285 gemu_log("Unsupported socketcall: %d\n", num);
2286 ret = -TARGET_ENOSYS;
/* Table tracking guest shmat() mappings so shmdt() can find them. */
2293 #define N_SHM_REGIONS 32
2295 static struct shm_region {
2298 } shm_regions[N_SHM_REGIONS];
/* Guest-ABI layout of struct ipc_perm (SysV IPC permissions), used inside
 * the semid/msqid/shmid ds structures below. */
2300 struct target_ipc_perm
2307 unsigned short int mode;
2308 unsigned short int __pad1;
2309 unsigned short int __seq;
2310 unsigned short int __pad2;
2311 abi_ulong __unused1;
2312 abi_ulong __unused2;
/* Guest-ABI layout of struct semid_ds (SysV semaphore set descriptor). */
2315 struct target_semid_ds
2317 struct target_ipc_perm sem_perm;
2318 abi_ulong sem_otime;
2319 abi_ulong __unused1;
2320 abi_ulong sem_ctime;
2321 abi_ulong __unused2;
2322 abi_ulong sem_nsems;
2323 abi_ulong __unused3;
2324 abi_ulong __unused4;
/* Convert the ipc_perm embedded at the head of a guest semid_ds (at
 * 'target_addr') into the host 'host_ip', swapping each field. */
2327 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2328 abi_ulong target_addr)
2330 struct target_ipc_perm *target_ip;
2331 struct target_semid_ds *target_sd;
2333 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2334 return -TARGET_EFAULT;
2335 target_ip = &(target_sd->sem_perm);
2336 host_ip->__key = tswapl(target_ip->__key);
2337 host_ip->uid = tswapl(target_ip->uid);
2338 host_ip->gid = tswapl(target_ip->gid);
2339 host_ip->cuid = tswapl(target_ip->cuid);
2340 host_ip->cgid = tswapl(target_ip->cgid);
2341 host_ip->mode = tswapl(target_ip->mode);
2342 unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_ipc_perm: write the host ipc_perm fields into
 * the guest semid_ds at 'target_addr' in target byte order. */
2346 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2347 struct ipc_perm *host_ip)
2349 struct target_ipc_perm *target_ip;
2350 struct target_semid_ds *target_sd;
2352 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2353 return -TARGET_EFAULT;
2354 target_ip = &(target_sd->sem_perm);
2355 target_ip->__key = tswapl(host_ip->__key);
2356 target_ip->uid = tswapl(host_ip->uid);
2357 target_ip->gid = tswapl(host_ip->gid);
2358 target_ip->cuid = tswapl(host_ip->cuid);
2359 target_ip->cgid = tswapl(host_ip->cgid);
2360 target_ip->mode = tswapl(host_ip->mode);
2361 unlock_user_struct(target_sd, target_addr, 1);
/* Convert a guest semid_ds into host form: permissions via
 * target_to_host_ipc_perm, then the swapped counters/timestamps. */
2365 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2366 abi_ulong target_addr)
2368 struct target_semid_ds *target_sd;
2370 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2371 return -TARGET_EFAULT;
2372 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2373 return -TARGET_EFAULT;
2374 host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
2375 host_sd->sem_otime = tswapl(target_sd->sem_otime);
2376 host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
2377 unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_semid_ds: copy a host semid_ds back into the
 * guest structure at 'target_addr' in target byte order. */
2381 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2382 struct semid_ds *host_sd)
2384 struct target_semid_ds *target_sd;
2386 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2387 return -TARGET_EFAULT;
2388 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2389 return -TARGET_EFAULT;
2390 target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
2391 target_sd->sem_otime = tswapl(host_sd->sem_otime);
2392 target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
2393 unlock_user_struct(target_sd, target_addr, 1);
/* Guest-ABI layout of struct seminfo (returned by semctl IPC_INFO /
 * SEM_INFO); field list elided here. */
2397 struct target_seminfo {
/* Copy a host seminfo into guest memory field by field (__put_user swaps
 * each value to target byte order). */
2410 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2411 struct seminfo *host_seminfo)
2413 struct target_seminfo *target_seminfo;
2414 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2415 return -TARGET_EFAULT;
2416 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2417 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2418 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2419 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2420 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2421 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2422 __put_user(host_seminfo->semume, &target_seminfo->semume);
2423 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2424 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2425 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2426 unlock_user_struct(target_seminfo, target_addr, 1);
/* Host-side union semun (semctl argument) and its guest counterpart. */
2432 struct semid_ds *buf;
2433 unsigned short *array;
2434 struct seminfo *__buf;
2437 union target_semun {
/* For semctl SETALL: query the set's nsems via IPC_STAT, allocate a host
 * array and fill it from the guest's unsigned short array.
 * NOTE(review): the malloc result is not NULL-checked, and *host_array
 * appears to leak if the subsequent lock_user fails — the failure-path
 * lines are elided from this view, confirm before relying on it. */
2444 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2445 abi_ulong target_addr)
2448 unsigned short *array;
2450 struct semid_ds semid_ds;
2453 semun.buf = &semid_ds;
2455 ret = semctl(semid, 0, IPC_STAT, semun);
2457 return get_errno(ret);
2459 nsems = semid_ds.sem_nsems;
2461 *host_array = malloc(nsems*sizeof(unsigned short));
2462 array = lock_user(VERIFY_READ, target_addr,
2463 nsems*sizeof(unsigned short), 1);
2465 return -TARGET_EFAULT;
2467 for(i=0; i<nsems; i++) {
2468 __get_user((*host_array)[i], &array[i]);
2470 unlock_user(array, target_addr, 0);
/* For semctl GETALL: query nsems via IPC_STAT, then copy the host
 * semaphore-value array back into the guest's unsigned short array. */
2475 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2476 unsigned short **host_array)
2479 unsigned short *array;
2481 struct semid_ds semid_ds;
2484 semun.buf = &semid_ds;
2486 ret = semctl(semid, 0, IPC_STAT, semun);
2488 return get_errno(ret);
2490 nsems = semid_ds.sem_nsems;
2492 array = lock_user(VERIFY_WRITE, target_addr,
2493 nsems*sizeof(unsigned short), 0);
2495 return -TARGET_EFAULT;
2497 for(i=0; i<nsems; i++) {
2498 __put_user((*host_array)[i], &array[i]);
2501 unlock_user(array, target_addr, 1);
/* Emulate semctl(2): dispatch on 'cmd' and marshal the semun argument in
 * whichever shape the command needs (int value, semid_ds, unsigned short
 * array, or seminfo). */
2506 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2507 union target_semun target_su)
2510 struct semid_ds dsarg;
2511 unsigned short *array = NULL;
2512 struct seminfo seminfo;
2513 abi_long ret = -TARGET_EINVAL;
/* GETVAL/SETVAL-style: plain int value, swapped both ways. */
2520 arg.val = tswapl(target_su.val);
2521 ret = get_errno(semctl(semid, semnum, cmd, arg));
2522 target_su.val = tswapl(arg.val);
/* GETALL/SETALL: whole value array in/out. */
2526 err = target_to_host_semarray(semid, &array, target_su.array);
2530 ret = get_errno(semctl(semid, semnum, cmd, arg));
2531 err = host_to_target_semarray(semid, target_su.array, &array);
/* IPC_STAT/IPC_SET: semid_ds round-trip. */
2538 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2542 ret = get_errno(semctl(semid, semnum, cmd, arg));
2543 err = host_to_target_semid_ds(target_su.buf, &dsarg);
/* IPC_INFO/SEM_INFO: seminfo out only. */
2549 arg.__buf = &seminfo;
2550 ret = get_errno(semctl(semid, semnum, cmd, arg));
2551 err = host_to_target_seminfo(target_su.__buf, &seminfo);
/* Commands with no argument (e.g. IPC_RMID, GETPID, GETNCNT...). */
2559 ret = get_errno(semctl(semid, semnum, cmd, NULL));
/* Guest-ABI layout of struct sembuf (one semop operation). */
2566 struct target_sembuf {
2567 unsigned short sem_num;
/* Copy an array of 'nsops' guest sembuf entries into the host array,
 * swapping each field via __get_user. */
2572 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2573 abi_ulong target_addr,
2576 struct target_sembuf *target_sembuf;
2579 target_sembuf = lock_user(VERIFY_READ, target_addr,
2580 nsops*sizeof(struct target_sembuf), 1);
2582 return -TARGET_EFAULT;
2584 for(i=0; i<nsops; i++) {
2585 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2586 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2587 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2590 unlock_user(target_sembuf, target_addr, 0);
/* Emulate semop(2): convert the guest sembuf array and forward to the
 * host syscall. */
2595 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2597 struct sembuf sops[nsops];
2599 if (target_to_host_sembuf(sops, ptr, nsops))
2600 return -TARGET_EFAULT;
/* Fix: semop() returns -1 with the error in host errno; convert it to
 * the target errno convention like every other do_* wrapper here,
 * instead of leaking the raw -1 to the guest. */
2602 return get_errno(semop(semid, sops, nsops));
/* Guest-ABI layout of struct msqid_ds (SysV message queue descriptor).
 * On 32-bit ABIs each time field is followed by a padding word. */
2605 struct target_msqid_ds
2607 struct target_ipc_perm msg_perm;
2608 abi_ulong msg_stime;
2609 #if TARGET_ABI_BITS == 32
2610 abi_ulong __unused1;
2612 abi_ulong msg_rtime;
2613 #if TARGET_ABI_BITS == 32
2614 abi_ulong __unused2;
2616 abi_ulong msg_ctime;
2617 #if TARGET_ABI_BITS == 32
2618 abi_ulong __unused3;
2620 abi_ulong __msg_cbytes;
2622 abi_ulong msg_qbytes;
2623 abi_ulong msg_lspid;
2624 abi_ulong msg_lrpid;
2625 abi_ulong __unused4;
2626 abi_ulong __unused5;
/* Convert a guest msqid_ds into host form: permissions first, then the
 * swapped timestamps and counters. */
2629 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2630 abi_ulong target_addr)
2632 struct target_msqid_ds *target_md;
2634 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2635 return -TARGET_EFAULT;
2636 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2637 return -TARGET_EFAULT;
2638 host_md->msg_stime = tswapl(target_md->msg_stime);
2639 host_md->msg_rtime = tswapl(target_md->msg_rtime);
2640 host_md->msg_ctime = tswapl(target_md->msg_ctime);
2641 host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
2642 host_md->msg_qnum = tswapl(target_md->msg_qnum);
2643 host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
2644 host_md->msg_lspid = tswapl(target_md->msg_lspid);
2645 host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
2646 unlock_user_struct(target_md, target_addr, 0);
/* Inverse of target_to_host_msqid_ds: copy a host msqid_ds back into the
 * guest structure in target byte order. */
2650 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2651 struct msqid_ds *host_md)
2653 struct target_msqid_ds *target_md;
2655 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2656 return -TARGET_EFAULT;
2657 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2658 return -TARGET_EFAULT;
2659 target_md->msg_stime = tswapl(host_md->msg_stime);
2660 target_md->msg_rtime = tswapl(host_md->msg_rtime);
2661 target_md->msg_ctime = tswapl(host_md->msg_ctime);
2662 target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
2663 target_md->msg_qnum = tswapl(host_md->msg_qnum);
2664 target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
2665 target_md->msg_lspid = tswapl(host_md->msg_lspid);
2666 target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
2667 unlock_user_struct(target_md, target_addr, 1);
/* Target-ABI layout of struct msginfo (msgctl IPC_INFO/MSG_INFO). */
2671 struct target_msginfo {
2679 unsigned short int msgseg;
/* Copy a host struct msginfo out to the guest, field by field via
 * __put_user (which handles byte order).  Returns 0 on success,
 * -TARGET_EFAULT if the guest buffer cannot be locked. */
2682 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2683 struct msginfo *host_msginfo)
2685 struct target_msginfo *target_msginfo;
2686 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2687 return -TARGET_EFAULT;
2688 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2689 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2690 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2691 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2692 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2693 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2694 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2695 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2696 unlock_user_struct(target_msginfo, target_addr, 1);
/* Emulate msgctl(2): convert the guest's argument buffer to a host
 * struct, run the host msgctl, and convert results back.  Visible
 * paths: stat/set via msqid_ds conversion, a NULL-argument command
 * (presumably IPC_RMID), and an info command returning msginfo.
 * Returns a target errno on failure. */
2700 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2702 struct msqid_ds dsarg;
2703 struct msginfo msginfo;
2704 abi_long ret = -TARGET_EINVAL;
2712 if (target_to_host_msqid_ds(&dsarg,ptr))
2713 return -TARGET_EFAULT;
2714 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2715 if (host_to_target_msqid_ds(ptr,&dsarg))
2716 return -TARGET_EFAULT;
2719 ret = get_errno(msgctl(msgid, cmd, NULL));
2723 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2724 if (host_to_target_msginfo(ptr, &msginfo))
2725 return -TARGET_EFAULT;
2732 struct target_msgbuf {
2737 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2738 unsigned int msgsz, int msgflg)
2740 struct target_msgbuf *target_mb;
2741 struct msgbuf *host_mb;
2744 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2745 return -TARGET_EFAULT;
2746 host_mb = malloc(msgsz+sizeof(long));
2747 host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
2748 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2749 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2751 unlock_user_struct(target_mb, msgp, 0);
2756 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2757 unsigned int msgsz, abi_long msgtyp,
2760 struct target_msgbuf *target_mb;
2762 struct msgbuf *host_mb;
2765 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2766 return -TARGET_EFAULT;
2768 host_mb = malloc(msgsz+sizeof(long));
2769 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
2772 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2773 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2774 if (!target_mtext) {
2775 ret = -TARGET_EFAULT;
2778 memcpy(target_mb->mtext, host_mb->mtext, ret);
2779 unlock_user(target_mtext, target_mtext_addr, ret);
2782 target_mb->mtype = tswapl(host_mb->mtype);
2787 unlock_user_struct(target_mb, msgp, 1);
/* Target-ABI layout of the SysV shmid_ds (shared memory segment
 * state).  As with target_msqid_ds, 32-bit targets pad each time
 * field with an unused word. */
2791 struct target_shmid_ds
2793 struct target_ipc_perm shm_perm;
2794 abi_ulong shm_segsz;
2795 abi_ulong shm_atime;
2796 #if TARGET_ABI_BITS == 32
2797 abi_ulong __unused1;
2799 abi_ulong shm_dtime;
2800 #if TARGET_ABI_BITS == 32
2801 abi_ulong __unused2;
2803 abi_ulong shm_ctime;
2804 #if TARGET_ABI_BITS == 32
2805 abi_ulong __unused3;
2809 abi_ulong shm_nattch;
2810 unsigned long int __unused4;
2811 unsigned long int __unused5;
/* Copy a guest shmid_ds at target_addr into *host_sd via __get_user.
 * Returns 0 on success, -TARGET_EFAULT on inaccessible guest memory.
 * NOTE(review): an EFAULT from target_to_host_ipc_perm() returns
 * without unlocking target_sd — same pattern as the msqid helper. */
2814 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2815 abi_ulong target_addr)
2817 struct target_shmid_ds *target_sd;
2819 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2820 return -TARGET_EFAULT;
2821 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2822 return -TARGET_EFAULT;
2823 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2824 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2825 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2826 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2827 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2828 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2829 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2830 unlock_user_struct(target_sd, target_addr, 0);
/* Mirror of target_to_host_shmid_ds: write *host_sd out to the guest
 * shmid_ds at target_addr via __put_user.  Returns 0 on success,
 * -TARGET_EFAULT on inaccessible guest memory. */
2834 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2835 struct shmid_ds *host_sd)
2837 struct target_shmid_ds *target_sd;
2839 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2840 return -TARGET_EFAULT;
2841 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2842 return -TARGET_EFAULT;
2843 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2844 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2845 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2846 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2847 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2848 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2849 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2850 unlock_user_struct(target_sd, target_addr, 1);
2854 struct target_shminfo {
/* Copy a host struct shminfo out to the guest.  Returns 0 on success,
 * -TARGET_EFAULT if the guest buffer cannot be locked. */
2862 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2863 struct shminfo *host_shminfo)
2865 struct target_shminfo *target_shminfo;
2866 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2867 return -TARGET_EFAULT;
2868 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2869 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2870 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2871 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2872 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2873 unlock_user_struct(target_shminfo, target_addr, 1);
/* Target-ABI layout of struct shm_info (shmctl SHM_INFO). */
2877 struct target_shm_info {
2882 abi_ulong swap_attempts;
2883 abi_ulong swap_successes;
/* Copy a host struct shm_info out to the guest.  Returns 0 on success,
 * -TARGET_EFAULT if the guest buffer cannot be locked. */
2886 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2887 struct shm_info *host_shm_info)
2889 struct target_shm_info *target_shm_info;
2890 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2891 return -TARGET_EFAULT;
2892 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2893 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2894 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2895 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2896 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2897 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2898 unlock_user_struct(target_shm_info, target_addr, 1);
/* Emulate shmctl(2): dispatch on cmd, converting the guest buffer to
 * the matching host struct (shmid_ds, shminfo, or shm_info) and back.
 * The (struct shmid_ds *) casts follow the host shmctl prototype,
 * which takes shmid_ds* for every command.  Returns a target errno on
 * failure. */
2902 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2904 struct shmid_ds dsarg;
2905 struct shminfo shminfo;
2906 struct shm_info shm_info;
2907 abi_long ret = -TARGET_EINVAL;
2915 if (target_to_host_shmid_ds(&dsarg, buf))
2916 return -TARGET_EFAULT;
2917 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2918 if (host_to_target_shmid_ds(buf, &dsarg))
2919 return -TARGET_EFAULT;
2922 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2923 if (host_to_target_shminfo(buf, &shminfo))
2924 return -TARGET_EFAULT;
2927 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2928 if (host_to_target_shm_info(buf, &shm_info))
2929 return -TARGET_EFAULT;
2934 ret = get_errno(shmctl(shmid, cmd, NULL));
/* Emulate shmat(2).  Queries the segment size via IPC_STAT, attaches
 * either at the guest-requested address or at a free spot found with
 * mmap_find_vma (using SHM_REMAP to force the mapping), then updates
 * QEMU's page flags and records the segment in shm_regions[] so that
 * do_shmdt can later clear it.
 * NOTE(review): shm_regions[] is scanned/updated without any visible
 * locking — confirm callers serialize this. */
2941 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2945 struct shmid_ds shm_info;
2948 /* find out the length of the shared memory segment */
2949 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2950 if (is_error(ret)) {
2951 /* can't get length, bail out */
2958 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2960 abi_ulong mmap_start;
2962 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2964 if (mmap_start == -1) {
2966 host_raddr = (void *)-1;
2968 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2971 if (host_raddr == (void *)-1) {
2973 return get_errno((long)host_raddr);
2975 raddr=h2g((unsigned long)host_raddr);
2977 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2978 PAGE_VALID | PAGE_READ |
2979 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2981 for (i = 0; i < N_SHM_REGIONS; i++) {
2982 if (shm_regions[i].start == 0) {
2983 shm_regions[i].start = raddr;
2984 shm_regions[i].size = shm_info.shm_segsz;
/* Emulate shmdt(2): drop the shm_regions[] bookkeeping entry for this
 * guest address (clearing its page flags), then detach on the host. */
2994 static inline abi_long do_shmdt(abi_ulong shmaddr)
2998 for (i = 0; i < N_SHM_REGIONS; ++i) {
2999 if (shm_regions[i].start == shmaddr) {
3000 shm_regions[i].start = 0;
3001 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3006 return get_errno(shmdt(g2h(shmaddr)));
3009 #ifdef TARGET_NR_ipc
3010 /* ??? This only works with linear mappings. */
3011 /* do_ipc() must return target values and target errnos. */
/* Multiplexed SysV IPC entry point (the legacy ipc(2) syscall).
 * The low 16 bits of 'call' select the operation; the high bits carry
 * a version used by MSGRCV, where version 0 passes a target_ipc_kludge
 * {msgp, msgtyp} through ptr and version 1 passes msgtyp in 'fifth'. */
3012 static abi_long do_ipc(unsigned int call, int first,
3013 int second, int third,
3014 abi_long ptr, abi_long fifth)
3019 version = call >> 16;
3024 ret = do_semop(first, ptr, second);
3028 ret = get_errno(semget(first, second, third));
3032 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3036 ret = get_errno(msgget(first, second));
3040 ret = do_msgsnd(first, ptr, second, third);
3044 ret = do_msgctl(first, second, ptr);
3051 struct target_ipc_kludge {
3056 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3057 ret = -TARGET_EFAULT;
3061 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
3063 unlock_user_struct(tmp, ptr, 0);
3067 ret = do_msgrcv(first, ptr, second, fifth, third);
/* SHMAT: the attach address is returned through the pointer in
 * 'third' rather than as the syscall result. */
3076 raddr = do_shmat(first, ptr, second);
3077 if (is_error(raddr))
3078 return get_errno(raddr);
3079 if (put_user_ual(raddr, third))
3080 return -TARGET_EFAULT;
3084 ret = -TARGET_EINVAL;
3089 ret = do_shmdt(ptr);
3093 /* IPC_* flag values are the same on all linux platforms */
3094 ret = get_errno(shmget(first, second, third));
3097 /* IPC_* and SHM_* command values are the same on all linux platforms */
3099 ret = do_shmctl(first, second, third);
3102 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3103 ret = -TARGET_ENOSYS;
3110 /* kernel structure types definitions */
/* syscall_types.h is included twice with different STRUCT macro
 * expansions: first to build the STRUCT_<name> enumerators, then to
 * emit the struct_<name>_def argtype descriptor arrays used by the
 * thunk conversion machinery. */
3112 #define STRUCT(name, ...) STRUCT_ ## name,
3113 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3115 #include "syscall_types.h"
3118 #undef STRUCT_SPECIAL
3120 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3121 #define STRUCT_SPECIAL(name)
3122 #include "syscall_types.h"
3124 #undef STRUCT_SPECIAL
/* One entry in the ioctl translation table: the target and host
 * command numbers, an access mode (IOC_R/IOC_W/IOC_RW), an optional
 * custom handler, and the thunk argtype description of the argument. */
3126 typedef struct IOCTLEntry IOCTLEntry;
3128 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3129 int fd, abi_long cmd, abi_long arg);
3132 unsigned int target_cmd;
3133 unsigned int host_cmd;
3136 do_ioctl_fn *do_ioctl;
3137 const argtype arg_type[5];
3140 #define IOC_R 0x0001
3141 #define IOC_W 0x0002
3142 #define IOC_RW (IOC_R | IOC_W)
/* Size of the on-stack bounce buffer for ioctl argument structs. */
3144 #define MAX_STRUCT_SIZE 4096
3146 #ifdef CONFIG_FIEMAP
3147 /* So fiemap access checks don't overflow on 32 bit systems.
3148 * This is very slightly smaller than the limit imposed by
3149 * the underlying kernel.
3151 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3152 / sizeof(struct fiemap_extent))
/* Custom handler for FS_IOC_FIEMAP.  The argument is a struct fiemap
 * followed by fm_extent_count fiemap_extent slots filled in by the
 * kernel, so the buffer size is data-dependent: a heap buffer is used
 * when the result will not fit in the fixed bounce buffer.  Returns a
 * target errno on failure. */
3154 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3155 int fd, abi_long cmd, abi_long arg)
3157 /* The parameter for this ioctl is a struct fiemap followed
3158 * by an array of struct fiemap_extent whose size is set
3159 * in fiemap->fm_extent_count. The array is filled in by the
3162 int target_size_in, target_size_out;
3164 const argtype *arg_type = ie->arg_type;
3165 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3168 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3172 assert(arg_type[0] == TYPE_PTR);
3173 assert(ie->access == IOC_RW);
3175 target_size_in = thunk_type_size(arg_type, 0);
3176 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3178 return -TARGET_EFAULT;
3180 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3181 unlock_user(argptr, arg, 0);
3182 fm = (struct fiemap *)buf_temp;
/* Reject extent counts that would overflow the outbufsz math. */
3183 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3184 return -TARGET_EINVAL;
3187 outbufsz = sizeof (*fm) +
3188 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3190 if (outbufsz > MAX_STRUCT_SIZE) {
3191 /* We can't fit all the extents into the fixed size buffer.
3192 * Allocate one that is large enough and use it instead.
3194 fm = malloc(outbufsz);
3196 return -TARGET_ENOMEM;
3198 memcpy(fm, buf_temp, sizeof(struct fiemap));
3201 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3202 if (!is_error(ret)) {
3203 target_size_out = target_size_in;
3204 /* An extent_count of 0 means we were only counting the extents
3205 * so there are no structs to copy
3207 if (fm->fm_extent_count != 0) {
3208 target_size_out += fm->fm_mapped_extents * extent_size;
3210 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3212 ret = -TARGET_EFAULT;
3214 /* Convert the struct fiemap */
3215 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3216 if (fm->fm_extent_count != 0) {
3217 p = argptr + target_size_in;
3218 /* ...and then all the struct fiemap_extents */
3219 for (i = 0; i < fm->fm_mapped_extents; i++) {
3220 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3225 unlock_user(argptr, arg, target_size_out);
/* Custom handler for SIOCGIFCONF.  The struct ifconf carries a
 * variable-length array of struct ifreq, so ifc_len and ifc_buf are
 * rewritten to point at a host-side array before the ioctl, then the
 * results (and a target-sized ifc_len) are copied back out to the
 * guest's own buffer.  Returns a target errno on failure. */
3235 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3236 int fd, abi_long cmd, abi_long arg)
3238 const argtype *arg_type = ie->arg_type;
3242 struct ifconf *host_ifconf;
3244 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3245 int target_ifreq_size;
3250 abi_long target_ifc_buf;
3254 assert(arg_type[0] == TYPE_PTR);
3255 assert(ie->access == IOC_RW);
3258 target_size = thunk_type_size(arg_type, 0);
3260 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3262 return -TARGET_EFAULT;
3263 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3264 unlock_user(argptr, arg, 0);
3266 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
/* Remember the guest's length and buffer pointer before rewriting. */
3267 target_ifc_len = host_ifconf->ifc_len;
3268 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3270 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3271 nb_ifreq = target_ifc_len / target_ifreq_size;
3272 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3274 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3275 if (outbufsz > MAX_STRUCT_SIZE) {
3276 /* We can't fit all the extents into the fixed size buffer.
3277 * Allocate one that is large enough and use it instead.
3279 host_ifconf = malloc(outbufsz);
3281 return -TARGET_ENOMEM;
3283 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3286 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3288 host_ifconf->ifc_len = host_ifc_len;
3289 host_ifconf->ifc_buf = host_ifc_buf;
3291 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3292 if (!is_error(ret)) {
3293 /* convert host ifc_len to target ifc_len */
3295 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3296 target_ifc_len = nb_ifreq * target_ifreq_size;
3297 host_ifconf->ifc_len = target_ifc_len;
3299 /* restore target ifc_buf */
3301 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3303 /* copy struct ifconf to target user */
3305 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3307 return -TARGET_EFAULT;
3308 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3309 unlock_user(argptr, arg, target_size);
3311 /* copy ifreq[] to target user */
3313 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3314 for (i = 0; i < nb_ifreq ; i++) {
3315 thunk_convert(argptr + i * target_ifreq_size,
3316 host_ifc_buf + i * sizeof(struct ifreq),
3317 ifreq_arg_type, THUNK_TARGET);
3319 unlock_user(argptr, target_ifc_buf, target_ifc_len);
/* Translation table of supported ioctls, populated from ioctls.h via
 * the IOCTL/IOCTL_SPECIAL macros; _SPECIAL entries carry a custom
 * handler function (dofn) instead of the generic thunk path. */
3329 static IOCTLEntry ioctl_entries[] = {
3330 #define IOCTL(cmd, access, ...) \
3331 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3332 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3333 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3338 /* ??? Implement proper locking for ioctls. */
3339 /* do_ioctl() Must return target values and target errnos. */
/* Generic ioctl dispatcher: look up cmd in ioctl_entries, delegate to
 * a custom handler if one is set, otherwise convert the argument with
 * the thunk machinery according to the entry's argtype and access
 * mode (read, write, or both) and forward to the host ioctl. */
3340 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3342 const IOCTLEntry *ie;
3343 const argtype *arg_type;
3345 uint8_t buf_temp[MAX_STRUCT_SIZE];
3351 if (ie->target_cmd == 0) {
3352 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3353 return -TARGET_ENOSYS;
3355 if (ie->target_cmd == cmd)
3359 arg_type = ie->arg_type;
3361 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3364 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3367 switch(arg_type[0]) {
/* No argument: just forward. */
3370 ret = get_errno(ioctl(fd, ie->host_cmd));
/* Scalar argument: pass through unchanged. */
3375 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
/* Pointer-to-struct argument: bounce through buf_temp with
 * thunk_convert in the direction(s) given by ie->access. */
3379 target_size = thunk_type_size(arg_type, 0);
3380 switch(ie->access) {
3382 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3383 if (!is_error(ret)) {
3384 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3386 return -TARGET_EFAULT;
3387 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3388 unlock_user(argptr, arg, target_size);
3392 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3394 return -TARGET_EFAULT;
3395 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3396 unlock_user(argptr, arg, 0);
3397 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3401 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3403 return -TARGET_EFAULT;
3404 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3405 unlock_user(argptr, arg, 0);
3406 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3407 if (!is_error(ret)) {
3408 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3410 return -TARGET_EFAULT;
3411 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3412 unlock_user(argptr, arg, target_size);
3418 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3419 (long)cmd, arg_type[0]);
3420 ret = -TARGET_ENOSYS;
/* termios c_iflag translation table: {target mask, target bits,
 * host mask, host bits} per entry, consumed by the
 * target/host_bitmask helpers. */
3426 static const bitmask_transtbl iflag_tbl[] = {
3427 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3428 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3429 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3430 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3431 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3432 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3433 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3434 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3435 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3436 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3437 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3438 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3439 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3440 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
/* termios c_oflag translation table; multi-bit delay fields (NLDLY,
 * CRDLY, TABDLY, ...) get one entry per possible value. */
3444 static const bitmask_transtbl oflag_tbl[] = {
3445 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3446 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3447 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3448 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3449 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3450 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3451 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3452 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3453 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3454 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3455 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3456 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3457 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3458 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3459 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3460 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3461 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3462 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3463 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3464 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3465 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3466 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3467 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3468 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/* termios c_cflag translation table: baud rates (CBAUD field),
 * character size (CSIZE field), and the single-bit control flags. */
3472 static const bitmask_transtbl cflag_tbl[] = {
3473 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3474 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3475 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3476 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3477 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3478 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3479 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3480 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3481 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3482 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3483 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3484 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3485 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3486 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3487 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3488 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3489 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3490 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3491 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3492 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3493 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3494 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3495 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3496 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3497 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3498 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3499 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3500 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3501 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3502 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3503 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/* termios c_lflag (local mode) translation table. */
3507 static const bitmask_transtbl lflag_tbl[] = {
3508 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3509 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3510 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3511 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3512 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3513 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3514 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3515 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3516 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3517 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3518 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3519 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3520 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3521 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3522 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/* Convert a guest struct termios (src) into the host layout (dst):
 * each flag word goes through its bitmask table after a tswap32, the
 * control characters are remapped index by index, and any host c_cc
 * slots with no target equivalent are left zeroed. */
3526 static void target_to_host_termios (void *dst, const void *src)
3528 struct host_termios *host = dst;
3529 const struct target_termios *target = src;
3532 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3534 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3536 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3538 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3539 host->c_line = target->c_line;
3541 memset(host->c_cc, 0, sizeof(host->c_cc));
3542 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3543 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3544 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3545 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3546 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3547 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3548 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3549 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3550 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3551 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3552 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3553 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3554 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3555 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3556 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3557 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3558 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/* Exact inverse of target_to_host_termios: map host termios (src)
 * back to the guest layout (dst), swapping flag words to guest byte
 * order and remapping control characters index by index. */
3561 static void host_to_target_termios (void *dst, const void *src)
3563 struct target_termios *target = dst;
3564 const struct host_termios *host = src;
3567 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3569 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3571 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3573 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3574 target->c_line = host->c_line;
3576 memset(target->c_cc, 0, sizeof(target->c_cc));
3577 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3578 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3579 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3580 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3581 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3582 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3583 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3584 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3585 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3586 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3587 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3588 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3589 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3590 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3591 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3592 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3593 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk descriptor for termios: conversion callbacks plus size and
 * alignment for the {target, host} representations. */
3596 static const StructEntry struct_termios_def = {
3597 .convert = { host_to_target_termios, target_to_host_termios },
3598 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3599 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/* mmap(2) flags translation table (target flag <-> host flag). */
3602 static bitmask_transtbl mmap_flags_tbl[] = {
3603 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3604 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3605 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3606 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3607 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3608 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3609 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3610 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3614 #if defined(TARGET_I386)
3616 /* NOTE: there is really one LDT for all the threads */
3617 static uint8_t *ldt_table;
/* modify_ldt(func=0): copy up to bytecount bytes of the emulated LDT
 * out to the guest buffer at ptr.  Returns -TARGET_EFAULT if the
 * guest buffer cannot be locked. */
3619 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3626 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3627 if (size > bytecount)
3629 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3631 return -TARGET_EFAULT;
3632 /* ??? Should this by byteswapped? */
3633 memcpy(p, ldt_table, size);
3634 unlock_user(p, ptr, size);
3638 /* XXX: add locking support */
/* modify_ldt write path: decode the guest's modify_ldt_ldt_s request,
 * validate it, lazily allocate the (shared, per-process) LDT backing
 * store, and encode the descriptor into the two 32-bit entry words
 * exactly as the Linux kernel does.  'oldmode' selects the legacy
 * modify_ldt semantics.  Returns a target errno on invalid input. */
3639 static abi_long write_ldt(CPUX86State *env,
3640 abi_ulong ptr, unsigned long bytecount, int oldmode)
3642 struct target_modify_ldt_ldt_s ldt_info;
3643 struct target_modify_ldt_ldt_s *target_ldt_info;
3644 int seg_32bit, contents, read_exec_only, limit_in_pages;
3645 int seg_not_present, useable, lm;
3646 uint32_t *lp, entry_1, entry_2;
3648 if (bytecount != sizeof(ldt_info))
3649 return -TARGET_EINVAL;
3650 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3651 return -TARGET_EFAULT;
3652 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3653 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3654 ldt_info.limit = tswap32(target_ldt_info->limit);
3655 ldt_info.flags = tswap32(target_ldt_info->flags);
3656 unlock_user_struct(target_ldt_info, ptr, 0);
3658 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3659 return -TARGET_EINVAL;
/* Unpack the flags bitfield (same encoding as the kernel's
 * user_desc: bit 0 seg_32bit, bits 1-2 contents, ...). */
3660 seg_32bit = ldt_info.flags & 1;
3661 contents = (ldt_info.flags >> 1) & 3;
3662 read_exec_only = (ldt_info.flags >> 3) & 1;
3663 limit_in_pages = (ldt_info.flags >> 4) & 1;
3664 seg_not_present = (ldt_info.flags >> 5) & 1;
3665 useable = (ldt_info.flags >> 6) & 1;
3669 lm = (ldt_info.flags >> 7) & 1;
3671 if (contents == 3) {
3673 return -TARGET_EINVAL;
3674 if (seg_not_present == 0)
3675 return -TARGET_EINVAL;
3677 /* allocate the LDT */
3679 env->ldt.base = target_mmap(0,
3680 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3681 PROT_READ|PROT_WRITE,
3682 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3683 if (env->ldt.base == -1)
3684 return -TARGET_ENOMEM;
3685 memset(g2h(env->ldt.base), 0,
3686 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3687 env->ldt.limit = 0xffff;
3688 ldt_table = g2h(env->ldt.base);
3691 /* NOTE: same code as Linux kernel */
3692 /* Allow LDTs to be cleared by the user. */
3693 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3696 read_exec_only == 1 &&
3698 limit_in_pages == 0 &&
3699 seg_not_present == 1 &&
/* Pack the descriptor: entry_1 holds base[15:0] and limit[15:0],
 * entry_2 holds base[31:24]/[23:16], limit[19:16] and attributes. */
3707 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3708 (ldt_info.limit & 0x0ffff);
3709 entry_2 = (ldt_info.base_addr & 0xff000000) |
3710 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3711 (ldt_info.limit & 0xf0000) |
3712 ((read_exec_only ^ 1) << 9) |
3714 ((seg_not_present ^ 1) << 15) |
3716 (limit_in_pages << 23) |
3720 entry_2 |= (useable << 20);
3722 /* Install the new entry ... */
3724 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3725 lp[0] = tswap32(entry_1);
3726 lp[1] = tswap32(entry_2);
3730 /* specific and weird i386 syscalls */
/* modify_ldt(2) dispatcher: func 0 = read_ldt, 1 = legacy write,
 * 0x11 (presumably) = new-mode write; anything else -> ENOSYS. */
3731 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3732 unsigned long bytecount)
3738 ret = read_ldt(ptr, bytecount);
3741 ret = write_ldt(env, ptr, bytecount, 1);
3744 ret = write_ldt(env, ptr, bytecount, 0);
3747 ret = -TARGET_ENOSYS;
3753 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* set_thread_area(2) for 32-bit i386 guests: install a TLS descriptor
 * into the GDT.  An entry_number of -1 asks us to pick the first free
 * TLS slot and report it back to the guest.  Descriptor packing
 * mirrors write_ldt() above.  Returns a target errno on failure. */
3754 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3756 uint64_t *gdt_table = g2h(env->gdt.base);
3757 struct target_modify_ldt_ldt_s ldt_info;
3758 struct target_modify_ldt_ldt_s *target_ldt_info;
3759 int seg_32bit, contents, read_exec_only, limit_in_pages;
3760 int seg_not_present, useable, lm;
3761 uint32_t *lp, entry_1, entry_2;
3764 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3765 if (!target_ldt_info)
3766 return -TARGET_EFAULT;
3767 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3768 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3769 ldt_info.limit = tswap32(target_ldt_info->limit);
3770 ldt_info.flags = tswap32(target_ldt_info->flags);
/* -1 means "allocate a free TLS slot and tell the guest which". */
3771 if (ldt_info.entry_number == -1) {
3772 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3773 if (gdt_table[i] == 0) {
3774 ldt_info.entry_number = i;
3775 target_ldt_info->entry_number = tswap32(i);
3780 unlock_user_struct(target_ldt_info, ptr, 1);
3782 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3783 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3784 return -TARGET_EINVAL;
3785 seg_32bit = ldt_info.flags & 1;
3786 contents = (ldt_info.flags >> 1) & 3;
3787 read_exec_only = (ldt_info.flags >> 3) & 1;
3788 limit_in_pages = (ldt_info.flags >> 4) & 1;
3789 seg_not_present = (ldt_info.flags >> 5) & 1;
3790 useable = (ldt_info.flags >> 6) & 1;
3794 lm = (ldt_info.flags >> 7) & 1;
3797 if (contents == 3) {
3798 if (seg_not_present == 0)
3799 return -TARGET_EINVAL;
3802 /* NOTE: same code as Linux kernel */
3803 /* Allow LDTs to be cleared by the user. */
3804 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3805 if ((contents == 0 &&
3806 read_exec_only == 1 &&
3808 limit_in_pages == 0 &&
3809 seg_not_present == 1 &&
/* Pack the GDT descriptor words (see write_ldt for the layout). */
3817 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3818 (ldt_info.limit & 0x0ffff);
3819 entry_2 = (ldt_info.base_addr & 0xff000000) |
3820 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3821 (ldt_info.limit & 0xf0000) |
3822 ((read_exec_only ^ 1) << 9) |
3824 ((seg_not_present ^ 1) << 15) |
3826 (limit_in_pages << 23) |
3831 /* Install the new entry ... */
3833 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3834 lp[0] = tswap32(entry_1);
3835 lp[1] = tswap32(entry_2);
/* get_thread_area(2): read the TLS descriptor selected by the guest's
 * entry_number out of the GDT, unpack the two descriptor words back
 * into base/limit/flags, and write them into the guest's
 * modify_ldt_ldt_s.  Returns -TARGET_EINVAL for non-TLS entries. */
3839 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3841 struct target_modify_ldt_ldt_s *target_ldt_info;
3842 uint64_t *gdt_table = g2h(env->gdt.base);
3843 uint32_t base_addr, limit, flags;
3844 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3845 int seg_not_present, useable, lm;
3846 uint32_t *lp, entry_1, entry_2;
3848 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3849 if (!target_ldt_info)
3850 return -TARGET_EFAULT;
3851 idx = tswap32(target_ldt_info->entry_number);
3852 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3853 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3854 unlock_user_struct(target_ldt_info, ptr, 1);
3855 return -TARGET_EINVAL;
3857 lp = (uint32_t *)(gdt_table + idx);
3858 entry_1 = tswap32(lp[0]);
3859 entry_2 = tswap32(lp[1]);
/* Unpack attribute bits (inverse of the packing in
 * do_set_thread_area). */
3861 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3862 contents = (entry_2 >> 10) & 3;
3863 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3864 seg_32bit = (entry_2 >> 22) & 1;
3865 limit_in_pages = (entry_2 >> 23) & 1;
3866 useable = (entry_2 >> 20) & 1;
3870 lm = (entry_2 >> 21) & 1;
3872 flags = (seg_32bit << 0) | (contents << 1) |
3873 (read_exec_only << 3) | (limit_in_pages << 4) |
3874 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3875 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3876 base_addr = (entry_1 >> 16) |
3877 (entry_2 & 0xff000000) |
3878 ((entry_2 & 0xff) << 16);
3879 target_ldt_info->base_addr = tswapl(base_addr);
3880 target_ldt_info->limit = tswap32(limit);
3881 target_ldt_info->flags = tswap32(flags);
3882 unlock_user_struct(target_ldt_info, ptr, 1);
3885 #endif /* TARGET_I386 && TARGET_ABI32 */
3887 #ifndef TARGET_ABI32
/*
 * arch_prctl(2) emulation for 64-bit x86 guests: get/set the FS and GS
 * segment base addresses.
 *
 * NOTE(review): heavily elided extract — the switch statement, the `idx`
 * assignments on the SET/GET paths, `ret` init and the final return are
 * all missing (original numbering jumps 3888->3895, 3897->3901, ...).
 */
3888 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3895 case TARGET_ARCH_SET_GS:
3896 case TARGET_ARCH_SET_FS:
3897 if (code == TARGET_ARCH_SET_GS)
     /* Load a null selector, then override the base directly. */
3901 cpu_x86_load_seg(env, idx, 0);
3902 env->segs[idx].base = addr;
3904 case TARGET_ARCH_GET_GS:
3905 case TARGET_ARCH_GET_FS:
3906 if (code == TARGET_ARCH_GET_GS)
3910 val = env->segs[idx].base;
3911 if (put_user(val, addr, abi_ulong)) /* copy base out to guest memory */
3912 ret = -TARGET_EFAULT;
3915 ret = -TARGET_EINVAL; /* unknown code */
3922 #endif /* defined(TARGET_I386) */
/* Stack size for threads/processes created via do_fork(). */
3924 #define NEW_STACK_SIZE 0x40000
3926 #if defined(CONFIG_USE_NPTL)
/* Serializes thread creation so TLS setup appears atomic to the child. */
3928 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* NOTE(review): the `typedef struct { ... } new_thread_info;` wrapper lines
 * are missing from this extract; only some members are visible. */
3931 pthread_mutex_t mutex;
3932 pthread_cond_t cond;
3935 abi_ulong child_tidptr;  /* guest address for CLONE_CHILD_SETTID */
3936 abi_ulong parent_tidptr; /* guest address for CLONE_PARENT_SETTID */
/*
 * pthread start routine for an emulated clone() thread (NPTL build).
 * Publishes the child's TID, unblocks signals, signals readiness to the
 * parent, then waits on clone_lock until the parent finishes TLS setup.
 * NOTE(review): lines 3941/3943-3947 and the cpu_loop tail are elided here.
 */
3940 static void *clone_func(void *arg)
3942 new_thread_info *info = arg;
3948 ts = (TaskState *)thread_env->opaque;
3949 info->tid = gettid();
3950 env->host_tid = info->tid;
     /* Write the TID wherever CLONE_{CHILD,PARENT}_SETTID asked for it. */
3952 if (info->child_tidptr)
3953 put_user_u32(info->tid, info->child_tidptr);
3954 if (info->parent_tidptr)
3955 put_user_u32(info->tid, info->parent_tidptr);
3956 /* Enable signals. */
3957 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3958 /* Signal to the parent that we're ready. */
3959 pthread_mutex_lock(&info->mutex);
3960 pthread_cond_broadcast(&info->cond);
3961 pthread_mutex_unlock(&info->mutex);
3962 /* Wait until the parent has finished initializing the tls state. */
3963 pthread_mutex_lock(&clone_lock);
3964 pthread_mutex_unlock(&clone_lock);
/* Non-NPTL variant of clone_func: entry point handed to host clone()/__clone2().
 * NOTE(review): body (cpu_loop call etc.) is missing from this extract. */
3971 static int clone_func(void *arg)
3973 CPUState *env = arg;
3980 /* do_fork() Must return host values and target errnos (unlike most
3981 do_*() functions). */
/*
 * Emulates clone(2)/fork(2)/vfork(2). CLONE_VM requests become a new host
 * thread (NPTL) or a host clone() on a fresh stack; everything else falls
 * through to a plain fork. NOTE(review): many lines are elided in this
 * extract (declarations of ret/ts/new_env/sigmask/new_stack, fork() call,
 * child/parent split, closing braces) — numbering jumps throughout.
 */
3982 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3983 abi_ulong parent_tidptr, target_ulong newtls,
3984 abi_ulong child_tidptr)
3989 #if defined(CONFIG_USE_NPTL)
3990 unsigned int nptl_flags;
3996 /* Emulate vfork() with fork() */
3997 if (flags & CLONE_VFORK)
3998 flags &= ~(CLONE_VFORK | CLONE_VM)
4000 if (flags & CLONE_VM) {
4001 TaskState *parent_ts = (TaskState *)env->opaque;
4002 #if defined(CONFIG_USE_NPTL)
4003 new_thread_info info;
4004 pthread_attr_t attr;
4006 ts = g_malloc0(sizeof(TaskState));
4007 init_task_state(ts);
4008 /* we create a new CPU instance. */
4009 new_env = cpu_copy(env);
4010 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4013 /* Init regs that differ from the parent. */
4014 cpu_clone_regs(new_env, newsp);
4015 new_env->opaque = ts;
4016 ts->bprm = parent_ts->bprm;
4017 ts->info = parent_ts->info;
4018 #if defined(CONFIG_USE_NPTL)
     /* Strip the NPTL flags; they are handled in userspace, not by the
      * host pthread_create(). (nptl_flags = flags saved just before —
      * that line is elided here.) */
4020 flags &= ~CLONE_NPTL_FLAGS2;
4022 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4023 ts->child_tidptr = child_tidptr;
4026 if (nptl_flags & CLONE_SETTLS)
4027 cpu_set_tls (new_env, newtls);
4029 /* Grab a mutex so that thread setup appears atomic. */
4030 pthread_mutex_lock(&clone_lock);
4032 memset(&info, 0, sizeof(info));
4033 pthread_mutex_init(&info.mutex, NULL);
4034 pthread_mutex_lock(&info.mutex);
4035 pthread_cond_init(&info.cond, NULL);
4037 if (nptl_flags & CLONE_CHILD_SETTID)
4038 info.child_tidptr = child_tidptr;
4039 if (nptl_flags & CLONE_PARENT_SETTID)
4040 info.parent_tidptr = parent_tidptr;
4042 ret = pthread_attr_init(&attr);
4043 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4044 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4045 /* It is not safe to deliver signals until the child has finished
4046 initializing, so temporarily block all signals. */
4047 sigfillset(&sigmask);
4048 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4050 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4051 /* TODO: Free new CPU state if thread creation failed. */
4053 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4054 pthread_attr_destroy(&attr);
4056 /* Wait for the child to initialize. */
4057 pthread_cond_wait(&info.cond, &info.mutex);
     /* NOTE(review): `ret = info.tid;` presumably sits in the elided 4058
      * region — otherwise the put_user below would publish the
      * pthread_create() return value, not the TID. TODO confirm. */
4059 if (flags & CLONE_PARENT_SETTID)
4060 put_user_u32(ret, parent_tidptr);
4064 pthread_mutex_unlock(&info.mutex);
4065 pthread_cond_destroy(&info.cond);
4066 pthread_mutex_destroy(&info.mutex);
4067 pthread_mutex_unlock(&clone_lock);
4069 if (flags & CLONE_NPTL_FLAGS2)
4071 /* This is probably going to die very quickly, but do it anyway. */
4072 new_stack = g_malloc0 (NEW_STACK_SIZE);
4074 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4076 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4080 /* if no CLONE_VM, we consider it is a fork */
4081 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4086 /* Child Process. */
4087 cpu_clone_regs(env, newsp);
4089 #if defined(CONFIG_USE_NPTL)
4090 /* There is a race condition here. The parent process could
4091 theoretically read the TID in the child process before the child
4092 tid is set. This would require using either ptrace
4093 (not implemented) or having *_tidptr to point at a shared memory
4094 mapping. We can't repeat the spinlock hack used above because
4095 the child process gets its own copy of the lock. */
4096 if (flags & CLONE_CHILD_SETTID)
4097 put_user_u32(gettid(), child_tidptr);
4098 if (flags & CLONE_PARENT_SETTID)
4099 put_user_u32(gettid(), parent_tidptr);
4100 ts = (TaskState *)env->opaque;
4101 if (flags & CLONE_SETTLS)
4102 cpu_set_tls (env, newtls);
4103 if (flags & CLONE_CHILD_CLEARTID)
4104 ts->child_tidptr = child_tidptr;
4113 /* warning : doesn't handle linux specific flags... */
/*
 * Map a target fcntl command number to the host's. Returns -TARGET_EINVAL
 * for commands the host cannot express. NOTE(review): the `return F_*;`
 * lines for most cases are elided from this extract (numbering jumps
 * 4123->4125, 4125->4127, ...); only the case labels survive.
 */
4114 static int target_to_host_fcntl_cmd(int cmd)
4117 case TARGET_F_DUPFD:
4118 case TARGET_F_GETFD:
4119 case TARGET_F_SETFD:
4120 case TARGET_F_GETFL:
4121 case TARGET_F_SETFL:
4123 case TARGET_F_GETLK:
4125 case TARGET_F_SETLK:
4127 case TARGET_F_SETLKW:
4129 case TARGET_F_GETOWN:
4131 case TARGET_F_SETOWN:
4133 case TARGET_F_GETSIG:
4135 case TARGET_F_SETSIG:
     /* On 32-bit ABIs the 64-bit lock commands map to the host's
      * F_GETLK64/F_SETLK64/F_SETLKW64. */
4137 #if TARGET_ABI_BITS == 32
4138 case TARGET_F_GETLK64:
4140 case TARGET_F_SETLK64:
4142 case TARGET_F_SETLKW64:
4145 case TARGET_F_SETLEASE:
4147 case TARGET_F_GETLEASE:
4149 #ifdef F_DUPFD_CLOEXEC
4150 case TARGET_F_DUPFD_CLOEXEC:
4151 return F_DUPFD_CLOEXEC;
4153 case TARGET_F_NOTIFY:
4156 return -TARGET_EINVAL; /* default: unknown command */
4158 return -TARGET_EINVAL;
/*
 * fcntl(2) emulation: converts target struct flock / flock64 layouts and
 * flag bitmasks to host form, issues the host fcntl, and converts results
 * back. NOTE(review): elided extract — the `struct flock fl;` declaration,
 * several `break;`s, the switch header and closing braces are missing.
 */
4161 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4164 struct target_flock *target_fl;
4165 struct flock64 fl64;
4166 struct target_flock64 *target_fl64;
4168 int host_cmd = target_to_host_fcntl_cmd(cmd);
4170 if (host_cmd == -TARGET_EINVAL)
4174 case TARGET_F_GETLK:
4175 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4176 return -TARGET_EFAULT;
     /* Byte-swap the guest flock into host layout. */
4177 fl.l_type = tswap16(target_fl->l_type);
4178 fl.l_whence = tswap16(target_fl->l_whence);
4179 fl.l_start = tswapl(target_fl->l_start);
4180 fl.l_len = tswapl(target_fl->l_len);
4181 fl.l_pid = tswap32(target_fl->l_pid);
4182 unlock_user_struct(target_fl, arg, 0);
4183 ret = get_errno(fcntl(fd, host_cmd, &fl));
     /* GETLK writes back: copy the (possibly updated) lock to the guest. */
4185 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4186 return -TARGET_EFAULT;
4187 target_fl->l_type = tswap16(fl.l_type);
4188 target_fl->l_whence = tswap16(fl.l_whence);
4189 target_fl->l_start = tswapl(fl.l_start);
4190 target_fl->l_len = tswapl(fl.l_len);
4191 target_fl->l_pid = tswap32(fl.l_pid);
4192 unlock_user_struct(target_fl, arg, 1);
4196 case TARGET_F_SETLK:
4197 case TARGET_F_SETLKW:
4198 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4199 return -TARGET_EFAULT;
4200 fl.l_type = tswap16(target_fl->l_type);
4201 fl.l_whence = tswap16(target_fl->l_whence);
4202 fl.l_start = tswapl(target_fl->l_start);
4203 fl.l_len = tswapl(target_fl->l_len);
4204 fl.l_pid = tswap32(target_fl->l_pid);
4205 unlock_user_struct(target_fl, arg, 0);
4206 ret = get_errno(fcntl(fd, host_cmd, &fl));
4209 case TARGET_F_GETLK64:
4210 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4211 return -TARGET_EFAULT;
     /* NOTE(review): the `>> 1` on l_type (and `<< 1` missing on the way
      * back?) looks target-specific — verify against the full source and
      * the target_flock64 l_type encoding before touching it. */
4212 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4213 fl64.l_whence = tswap16(target_fl64->l_whence);
4214 fl64.l_start = tswapl(target_fl64->l_start);
4215 fl64.l_len = tswapl(target_fl64->l_len);
4216 fl64.l_pid = tswap32(target_fl64->l_pid);
4217 unlock_user_struct(target_fl64, arg, 0);
4218 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4220 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4221 return -TARGET_EFAULT;
4222 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
4223 target_fl64->l_whence = tswap16(fl64.l_whence);
4224 target_fl64->l_start = tswapl(fl64.l_start);
4225 target_fl64->l_len = tswapl(fl64.l_len);
4226 target_fl64->l_pid = tswap32(fl64.l_pid);
4227 unlock_user_struct(target_fl64, arg, 1);
4230 case TARGET_F_SETLK64:
4231 case TARGET_F_SETLKW64:
4232 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4233 return -TARGET_EFAULT;
4234 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4235 fl64.l_whence = tswap16(target_fl64->l_whence);
4236 fl64.l_start = tswapl(target_fl64->l_start);
4237 fl64.l_len = tswapl(target_fl64->l_len);
4238 fl64.l_pid = tswap32(target_fl64->l_pid);
4239 unlock_user_struct(target_fl64, arg, 0);
4240 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4243 case TARGET_F_GETFL:
4244 ret = get_errno(fcntl(fd, host_cmd, arg));
     /* Translate the returned O_* flag bits back to target values. */
4246 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4250 case TARGET_F_SETFL:
4251 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
     /* These commands take a plain integer arg — pass through unchanged. */
4254 case TARGET_F_SETOWN:
4255 case TARGET_F_GETOWN:
4256 case TARGET_F_SETSIG:
4257 case TARGET_F_GETSIG:
4258 case TARGET_F_SETLEASE:
4259 case TARGET_F_GETLEASE:
4260 ret = get_errno(fcntl(fd, host_cmd, arg));
4264 ret = get_errno(fcntl(fd, cmd, arg)); /* default: try the raw command */
/*
 * UID/GID conversion helpers for targets with 16-bit uid_t (USE_UID16):
 * clamp 32-bit host IDs to 16 bits and widen 16-bit target IDs, treating
 * (int16_t)-1 as the "no change" sentinel. The !USE_UID16 variants are
 * identity functions. NOTE(review): all function bodies except the -1
 * checks are elided from this extract.
 */
4272 static inline int high2lowuid(int uid)
4280 static inline int high2lowgid(int gid)
4288 static inline int low2highuid(int uid)
4290 if ((int16_t)uid == -1)
4296 static inline int low2highgid(int gid)
4298 if ((int16_t)gid == -1)
4303 static inline int tswapid(int id)
4307 #else /* !USE_UID16 */
4308 static inline int high2lowuid(int uid)
4312 static inline int high2lowgid(int gid)
4316 static inline int low2highuid(int uid)
4320 static inline int low2highgid(int gid)
4324 static inline int tswapid(int id)
4328 #endif /* USE_UID16 */
/*
 * One-time setup: register the thunk struct descriptors, patch variable-size
 * ioctl numbers with their real argument sizes, build the target->host errno
 * table, and (same-arch builds only) sanity-check ioctl numbers.
 * NOTE(review): loop headers, `ie` declaration/increment and closing braces
 * are elided from this extract.
 */
4330 void syscall_init(void)
4333 const argtype *arg_type;
4337 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4338 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4339 #include "syscall_types.h"
4341 #undef STRUCT_SPECIAL
4343 /* we patch the ioctl size if necessary. We rely on the fact that
4344 no ioctl has all the bits at '1' in the size field */
4346 while (ie->target_cmd != 0) {
     /* All-ones size field marks "fill in the size at runtime". */
4347 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4348 TARGET_IOC_SIZEMASK) {
4349 arg_type = ie->arg_type;
4350 if (arg_type[0] != TYPE_PTR) {
4351 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4356 size = thunk_type_size(arg_type, 0);
4357 ie->target_cmd = (ie->target_cmd &
4358 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4359 (size << TARGET_IOC_SIZESHIFT);
4362 /* Build target_to_host_errno_table[] table from
4363 * host_to_target_errno_table[]. */
4364 for (i=0; i < ERRNO_TABLE_SIZE; i++)
4365 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4367 /* automatic consistency check if same arch */
4368 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4369 (defined(__x86_64__) && defined(TARGET_X86_64))
4370 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4371 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4372 ie->name, ie->target_cmd, ie->host_cmd);
4379 #if TARGET_ABI_BITS == 32
/*
 * Reassemble a 64-bit value passed as two 32-bit syscall arguments; which
 * word is the high half depends on target endianness. The 64-bit ABI
 * variant takes the value whole in word0 (its body is elided here).
 */
4380 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4382 #ifdef TARGET_WORDS_BIGENDIAN
4383 return ((uint64_t)word0 << 32) | word1;
4385 return ((uint64_t)word1 << 32) | word0;
4388 #else /* TARGET_ABI_BITS == 32 */
4389 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4393 #endif /* TARGET_ABI_BITS != 32 */
4395 #ifdef TARGET_NR_truncate64
/*
 * truncate64(2): combine the split 64-bit length from arg2/arg3 and call
 * the host. On ABIs that align register pairs, the regpairs_aligned()
 * branch (elided: shifts args down by one) skips the padding register.
 */
4396 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4401 if (regpairs_aligned(cpu_env)) {
4405 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4409 #ifdef TARGET_NR_ftruncate64
/*
 * ftruncate64(2): same register-pair handling as target_truncate64 but
 * operating on a file descriptor instead of a path.
 */
4410 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4415 if (regpairs_aligned(cpu_env)) {
4419 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/*
 * Copy a struct timespec from guest memory (byte-swapping each field) into
 * *host_ts. Returns -TARGET_EFAULT on a bad guest pointer; the trailing
 * `return 0;` is elided from this extract.
 */
4423 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4424 abi_ulong target_addr)
4426 struct target_timespec *target_ts;
4428 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4429 return -TARGET_EFAULT;
4430 host_ts->tv_sec = tswapl(target_ts->tv_sec);
4431 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
4432 unlock_user_struct(target_ts, target_addr, 0);
/*
 * Inverse of target_to_host_timespec: write *host_ts into guest memory with
 * target byte order. Returns -TARGET_EFAULT on a bad guest pointer; the
 * trailing `return 0;` is elided from this extract.
 */
4436 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4437 struct timespec *host_ts)
4439 struct target_timespec *target_ts;
4441 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4442 return -TARGET_EFAULT;
4443 target_ts->tv_sec = tswapl(host_ts->tv_sec);
4444 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
4445 unlock_user_struct(target_ts, target_addr, 1);
4449 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/*
 * Convert a host struct stat into the target's stat64 layout in guest
 * memory. ARM EABI guests get their own padded layout; other targets use
 * target_stat (64-bit ABIs) or target_stat64. NOTE(review): the #ifdef
 * TARGET_ARM guard, #else/#endif lines and both `return 0;`s are elided
 * (numbering jumps 4452->4455, 4477->4481, 4507->end).
 */
4450 static inline abi_long host_to_target_stat64(void *cpu_env,
4451 abi_ulong target_addr,
4452 struct stat *host_st)
4455 if (((CPUARMState *)cpu_env)->eabi) { /* ARM EABI variant */
4456 struct target_eabi_stat64 *target_st;
4458 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4459 return -TARGET_EFAULT;
4460 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4461 __put_user(host_st->st_dev, &target_st->st_dev);
4462 __put_user(host_st->st_ino, &target_st->st_ino);
4463 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4464 __put_user(host_st->st_ino, &target_st->__st_ino);
4466 __put_user(host_st->st_mode, &target_st->st_mode);
4467 __put_user(host_st->st_nlink, &target_st->st_nlink);
4468 __put_user(host_st->st_uid, &target_st->st_uid);
4469 __put_user(host_st->st_gid, &target_st->st_gid);
4470 __put_user(host_st->st_rdev, &target_st->st_rdev);
4471 __put_user(host_st->st_size, &target_st->st_size);
4472 __put_user(host_st->st_blksize, &target_st->st_blksize);
4473 __put_user(host_st->st_blocks, &target_st->st_blocks);
4474 __put_user(host_st->st_atime, &target_st->target_st_atime);
4475 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4476 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4477 unlock_user_struct(target_st, target_addr, 1);
     /* Generic (non-ARM-EABI) path below. */
4481 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4482 struct target_stat *target_st;
4484 struct target_stat64 *target_st;
4487 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4488 return -TARGET_EFAULT;
4489 memset(target_st, 0, sizeof(*target_st));
4490 __put_user(host_st->st_dev, &target_st->st_dev);
4491 __put_user(host_st->st_ino, &target_st->st_ino);
4492 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4493 __put_user(host_st->st_ino, &target_st->__st_ino);
4495 __put_user(host_st->st_mode, &target_st->st_mode);
4496 __put_user(host_st->st_nlink, &target_st->st_nlink);
4497 __put_user(host_st->st_uid, &target_st->st_uid);
4498 __put_user(host_st->st_gid, &target_st->st_gid);
4499 __put_user(host_st->st_rdev, &target_st->st_rdev);
4500 /* XXX: better use of kernel struct */
4501 __put_user(host_st->st_size, &target_st->st_size);
4502 __put_user(host_st->st_blksize, &target_st->st_blksize);
4503 __put_user(host_st->st_blocks, &target_st->st_blocks);
4504 __put_user(host_st->st_atime, &target_st->target_st_atime);
4505 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4506 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4507 unlock_user_struct(target_st, target_addr, 1);
4514 #if defined(CONFIG_USE_NPTL)
4515 /* ??? Using host futex calls even when target atomic operations
4516 are not really atomic probably breaks things. However implementing
4517 futexes locally would make futexes shared between multiple processes
4518 tricky. However they're probably useless because guest atomic
4519 operations won't work either. */
/*
 * futex(2) emulation: dispatch on base_op and forward to the host futex
 * syscall on the host address of uaddr. NOTE(review): the switch header,
 * FUTEX_WAIT/WAKE case labels, the timeout NULL check and #endif lines
 * are elided (numbering jumps 4529->4537, 4541->4544, ...).
 */
4520 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4521 target_ulong uaddr2, int val3)
4523 struct timespec ts, *pts;
4526 /* ??? We assume FUTEX_* constants are the same on both host
4528 #ifdef FUTEX_CMD_MASK
4529 base_op = op & FUTEX_CMD_MASK;
     /* FUTEX_WAIT path: convert the guest timespec (if any). */
4537 target_to_host_timespec(pts, timeout);
     /* val is compared against guest memory, so it must be byte-swapped. */
4541 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4544 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4546 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4548 case FUTEX_CMP_REQUEUE:
4550 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4551 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4552 But the prototype takes a `struct timespec *'; insert casts
4553 to satisfy the compiler. We do not need to tswap TIMEOUT
4554 since it's not compared to guest memory. */
4555 pts = (struct timespec *)(uintptr_t) timeout;
4556 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4558 (base_op == FUTEX_CMP_REQUEUE
4562 return -TARGET_ENOSYS; /* unsupported futex op */
4567 /* Map host to target signal numbers for the wait family of syscalls.
4568 Assume all other status bits are the same. */
/* NOTE(review): braces and the final `return status;` are elided here. */
4569 static int host_to_target_waitstatus(int status)
4571 if (WIFSIGNALED(status)) {
     /* Low 7 bits hold the terminating signal; remap it, keep the rest. */
4572 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4574 if (WIFSTOPPED(status)) {
     /* Stop signal lives in bits 8-15. */
4575 return (host_to_target_signal(WSTOPSIG(status)) << 8)
/*
 * Parse the kernel release string ("major.minor.patch") into a packed
 * integer (major<<16 | minor<<8 | patch), preferring the user-supplied
 * qemu_uname_release override, and cache it in the static `osversion`.
 * NOTE(review): the early-return on a cached value, digit-accumulation
 * line, dot-skipping and final return are elided from this extract.
 */
4581 int get_osversion(void)
4583 static int osversion; /* cached across calls */
4584 struct new_utsname buf;
4589 if (qemu_uname_release && *qemu_uname_release) {
4590 s = qemu_uname_release;
4592 if (sys_uname(&buf))
4597 for (i = 0; i < 3; i++) { /* major, minor, patch */
4599 while (*s >= '0' && *s <= '9') {
4604 tmp = (tmp << 8) + n; /* pack each component into one byte */
4612 /* do_syscall() should always have a single exit point at the end so
4613 that actions, such as logging of syscall results, can be performed.
4614 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4615 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
4616 abi_long arg2, abi_long arg3, abi_long arg4,
4617 abi_long arg5, abi_long arg6, abi_long arg7,
4626 gemu_log("syscall %d", num);
4629 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
4632 case TARGET_NR_exit:
4633 #ifdef CONFIG_USE_NPTL
4634 /* In old applications this may be used to implement _exit(2).
4635 However in threaded applictions it is used for thread termination,
4636 and _exit_group is used for application termination.
4637 Do thread termination if we have more then one thread. */
4638 /* FIXME: This probably breaks if a signal arrives. We should probably
4639 be disabling signals. */
4640 if (first_cpu->next_cpu) {
4648 while (p && p != (CPUState *)cpu_env) {
4649 lastp = &p->next_cpu;
4652 /* If we didn't find the CPU for this thread then something is
4656 /* Remove the CPU from the list. */
4657 *lastp = p->next_cpu;
4659 ts = ((CPUState *)cpu_env)->opaque;
4660 if (ts->child_tidptr) {
4661 put_user_u32(0, ts->child_tidptr);
4662 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
4674 gdb_exit(cpu_env, arg1);
4676 ret = 0; /* avoid warning */
4678 case TARGET_NR_read:
4682 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
4684 ret = get_errno(read(arg1, p, arg3));
4685 unlock_user(p, arg2, ret);
4688 case TARGET_NR_write:
4689 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
4691 ret = get_errno(write(arg1, p, arg3));
4692 unlock_user(p, arg2, 0);
4694 case TARGET_NR_open:
4695 if (!(p = lock_user_string(arg1)))
4697 ret = get_errno(open(path(p),
4698 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4700 unlock_user(p, arg1, 0);
4702 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4703 case TARGET_NR_openat:
4704 if (!(p = lock_user_string(arg2)))
4706 ret = get_errno(sys_openat(arg1,
4708 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4710 unlock_user(p, arg2, 0);
4713 case TARGET_NR_close:
4714 ret = get_errno(close(arg1));
4719 case TARGET_NR_fork:
4720 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4722 #ifdef TARGET_NR_waitpid
4723 case TARGET_NR_waitpid:
4726 ret = get_errno(waitpid(arg1, &status, arg3));
4727 if (!is_error(ret) && arg2
4728 && put_user_s32(host_to_target_waitstatus(status), arg2))
4733 #ifdef TARGET_NR_waitid
4734 case TARGET_NR_waitid:
4738 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4739 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4740 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4742 host_to_target_siginfo(p, &info);
4743 unlock_user(p, arg3, sizeof(target_siginfo_t));
4748 #ifdef TARGET_NR_creat /* not on alpha */
4749 case TARGET_NR_creat:
4750 if (!(p = lock_user_string(arg1)))
4752 ret = get_errno(creat(p, arg2));
4753 unlock_user(p, arg1, 0);
4756 case TARGET_NR_link:
4759 p = lock_user_string(arg1);
4760 p2 = lock_user_string(arg2);
4762 ret = -TARGET_EFAULT;
4764 ret = get_errno(link(p, p2));
4765 unlock_user(p2, arg2, 0);
4766 unlock_user(p, arg1, 0);
4769 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4770 case TARGET_NR_linkat:
4775 p = lock_user_string(arg2);
4776 p2 = lock_user_string(arg4);
4778 ret = -TARGET_EFAULT;
4780 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4781 unlock_user(p, arg2, 0);
4782 unlock_user(p2, arg4, 0);
4786 case TARGET_NR_unlink:
4787 if (!(p = lock_user_string(arg1)))
4789 ret = get_errno(unlink(p));
4790 unlock_user(p, arg1, 0);
4792 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4793 case TARGET_NR_unlinkat:
4794 if (!(p = lock_user_string(arg2)))
4796 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4797 unlock_user(p, arg2, 0);
4800 case TARGET_NR_execve:
4802 char **argp, **envp;
4805 abi_ulong guest_argp;
4806 abi_ulong guest_envp;
4812 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4813 if (get_user_ual(addr, gp))
4821 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4822 if (get_user_ual(addr, gp))
4829 argp = alloca((argc + 1) * sizeof(void *));
4830 envp = alloca((envc + 1) * sizeof(void *));
4832 for (gp = guest_argp, q = argp; gp;
4833 gp += sizeof(abi_ulong), q++) {
4834 if (get_user_ual(addr, gp))
4838 if (!(*q = lock_user_string(addr)))
4843 for (gp = guest_envp, q = envp; gp;
4844 gp += sizeof(abi_ulong), q++) {
4845 if (get_user_ual(addr, gp))
4849 if (!(*q = lock_user_string(addr)))
4854 if (!(p = lock_user_string(arg1)))
4856 ret = get_errno(execve(p, argp, envp));
4857 unlock_user(p, arg1, 0);
4862 ret = -TARGET_EFAULT;
4865 for (gp = guest_argp, q = argp; *q;
4866 gp += sizeof(abi_ulong), q++) {
4867 if (get_user_ual(addr, gp)
4870 unlock_user(*q, addr, 0);
4872 for (gp = guest_envp, q = envp; *q;
4873 gp += sizeof(abi_ulong), q++) {
4874 if (get_user_ual(addr, gp)
4877 unlock_user(*q, addr, 0);
4881 case TARGET_NR_chdir:
4882 if (!(p = lock_user_string(arg1)))
4884 ret = get_errno(chdir(p));
4885 unlock_user(p, arg1, 0);
4887 #ifdef TARGET_NR_time
4888 case TARGET_NR_time:
4891 ret = get_errno(time(&host_time));
4894 && put_user_sal(host_time, arg1))
4899 case TARGET_NR_mknod:
4900 if (!(p = lock_user_string(arg1)))
4902 ret = get_errno(mknod(p, arg2, arg3));
4903 unlock_user(p, arg1, 0);
4905 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4906 case TARGET_NR_mknodat:
4907 if (!(p = lock_user_string(arg2)))
4909 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
4910 unlock_user(p, arg2, 0);
4913 case TARGET_NR_chmod:
4914 if (!(p = lock_user_string(arg1)))
4916 ret = get_errno(chmod(p, arg2));
4917 unlock_user(p, arg1, 0);
4919 #ifdef TARGET_NR_break
4920 case TARGET_NR_break:
4923 #ifdef TARGET_NR_oldstat
4924 case TARGET_NR_oldstat:
4927 case TARGET_NR_lseek:
4928 ret = get_errno(lseek(arg1, arg2, arg3));
4930 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
4931 /* Alpha specific */
4932 case TARGET_NR_getxpid:
4933 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
4934 ret = get_errno(getpid());
4937 #ifdef TARGET_NR_getpid
4938 case TARGET_NR_getpid:
4939 ret = get_errno(getpid());
4942 case TARGET_NR_mount:
4944 /* need to look at the data field */
4946 p = lock_user_string(arg1);
4947 p2 = lock_user_string(arg2);
4948 p3 = lock_user_string(arg3);
4949 if (!p || !p2 || !p3)
4950 ret = -TARGET_EFAULT;
4952 /* FIXME - arg5 should be locked, but it isn't clear how to
4953 * do that since it's not guaranteed to be a NULL-terminated
4957 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
4959 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
4961 unlock_user(p, arg1, 0);
4962 unlock_user(p2, arg2, 0);
4963 unlock_user(p3, arg3, 0);
4966 #ifdef TARGET_NR_umount
4967 case TARGET_NR_umount:
4968 if (!(p = lock_user_string(arg1)))
4970 ret = get_errno(umount(p));
4971 unlock_user(p, arg1, 0);
4974 #ifdef TARGET_NR_stime /* not on alpha */
4975 case TARGET_NR_stime:
4978 if (get_user_sal(host_time, arg1))
4980 ret = get_errno(stime(&host_time));
4984 case TARGET_NR_ptrace:
4986 #ifdef TARGET_NR_alarm /* not on alpha */
4987 case TARGET_NR_alarm:
4991 #ifdef TARGET_NR_oldfstat
4992 case TARGET_NR_oldfstat:
4995 #ifdef TARGET_NR_pause /* not on alpha */
4996 case TARGET_NR_pause:
4997 ret = get_errno(pause());
5000 #ifdef TARGET_NR_utime
5001 case TARGET_NR_utime:
5003 struct utimbuf tbuf, *host_tbuf;
5004 struct target_utimbuf *target_tbuf;
5006 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5008 tbuf.actime = tswapl(target_tbuf->actime);
5009 tbuf.modtime = tswapl(target_tbuf->modtime);
5010 unlock_user_struct(target_tbuf, arg2, 0);
5015 if (!(p = lock_user_string(arg1)))
5017 ret = get_errno(utime(p, host_tbuf));
5018 unlock_user(p, arg1, 0);
5022 case TARGET_NR_utimes:
5024 struct timeval *tvp, tv[2];
5026 if (copy_from_user_timeval(&tv[0], arg2)
5027 || copy_from_user_timeval(&tv[1],
5028 arg2 + sizeof(struct target_timeval)))
5034 if (!(p = lock_user_string(arg1)))
5036 ret = get_errno(utimes(p, tvp));
5037 unlock_user(p, arg1, 0);
5040 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5041 case TARGET_NR_futimesat:
5043 struct timeval *tvp, tv[2];
5045 if (copy_from_user_timeval(&tv[0], arg3)
5046 || copy_from_user_timeval(&tv[1],
5047 arg3 + sizeof(struct target_timeval)))
5053 if (!(p = lock_user_string(arg2)))
5055 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
5056 unlock_user(p, arg2, 0);
5060 #ifdef TARGET_NR_stty
5061 case TARGET_NR_stty:
5064 #ifdef TARGET_NR_gtty
5065 case TARGET_NR_gtty:
5068 case TARGET_NR_access:
5069 if (!(p = lock_user_string(arg1)))
5071 ret = get_errno(access(path(p), arg2));
5072 unlock_user(p, arg1, 0);
5074 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5075 case TARGET_NR_faccessat:
5076 if (!(p = lock_user_string(arg2)))
5078 ret = get_errno(sys_faccessat(arg1, p, arg3));
5079 unlock_user(p, arg2, 0);
5082 #ifdef TARGET_NR_nice /* not on alpha */
5083 case TARGET_NR_nice:
5084 ret = get_errno(nice(arg1));
5087 #ifdef TARGET_NR_ftime
5088 case TARGET_NR_ftime:
5091 case TARGET_NR_sync:
5095 case TARGET_NR_kill:
5096 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5098 case TARGET_NR_rename:
5101 p = lock_user_string(arg1);
5102 p2 = lock_user_string(arg2);
5104 ret = -TARGET_EFAULT;
5106 ret = get_errno(rename(p, p2));
5107 unlock_user(p2, arg2, 0);
5108 unlock_user(p, arg1, 0);
5111 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5112 case TARGET_NR_renameat:
5115 p = lock_user_string(arg2);
5116 p2 = lock_user_string(arg4);
5118 ret = -TARGET_EFAULT;
5120 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5121 unlock_user(p2, arg4, 0);
5122 unlock_user(p, arg2, 0);
5126 case TARGET_NR_mkdir:
5127 if (!(p = lock_user_string(arg1)))
5129 ret = get_errno(mkdir(p, arg2));
5130 unlock_user(p, arg1, 0);
5132 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5133 case TARGET_NR_mkdirat:
5134 if (!(p = lock_user_string(arg2)))
5136 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5137 unlock_user(p, arg2, 0);
5140 case TARGET_NR_rmdir:
5141 if (!(p = lock_user_string(arg1)))
5143 ret = get_errno(rmdir(p));
5144 unlock_user(p, arg1, 0);
5147 ret = get_errno(dup(arg1));
5149 case TARGET_NR_pipe:
5150 ret = do_pipe(cpu_env, arg1, 0, 0);
5152 #ifdef TARGET_NR_pipe2
5153 case TARGET_NR_pipe2:
5154 ret = do_pipe(cpu_env, arg1, arg2, 1);
5157 case TARGET_NR_times:
5159 struct target_tms *tmsp;
5161 ret = get_errno(times(&tms));
5163 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5166 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
5167 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
5168 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
5169 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
5172 ret = host_to_target_clock_t(ret);
5175 #ifdef TARGET_NR_prof
5176 case TARGET_NR_prof:
5179 #ifdef TARGET_NR_signal
5180 case TARGET_NR_signal:
5183 case TARGET_NR_acct:
5185 ret = get_errno(acct(NULL));
5187 if (!(p = lock_user_string(arg1)))
5189 ret = get_errno(acct(path(p)));
5190 unlock_user(p, arg1, 0);
5193 #ifdef TARGET_NR_umount2 /* not on alpha */
5194 case TARGET_NR_umount2:
5195 if (!(p = lock_user_string(arg1)))
5197 ret = get_errno(umount2(p, arg2));
5198 unlock_user(p, arg1, 0);
5201 #ifdef TARGET_NR_lock
5202 case TARGET_NR_lock:
5205 case TARGET_NR_ioctl:
5206 ret = do_ioctl(arg1, arg2, arg3);
5208 case TARGET_NR_fcntl:
5209 ret = do_fcntl(arg1, arg2, arg3);
5211 #ifdef TARGET_NR_mpx
5215 case TARGET_NR_setpgid:
5216 ret = get_errno(setpgid(arg1, arg2));
5218 #ifdef TARGET_NR_ulimit
5219 case TARGET_NR_ulimit:
5222 #ifdef TARGET_NR_oldolduname
5223 case TARGET_NR_oldolduname:
5226 case TARGET_NR_umask:
5227 ret = get_errno(umask(arg1));
5229 case TARGET_NR_chroot:
5230 if (!(p = lock_user_string(arg1)))
5232 ret = get_errno(chroot(p));
5233 unlock_user(p, arg1, 0);
5235 case TARGET_NR_ustat:
5237 case TARGET_NR_dup2:
5238 ret = get_errno(dup2(arg1, arg2));
5240 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5241 case TARGET_NR_dup3:
5242 ret = get_errno(dup3(arg1, arg2, arg3));
5245 #ifdef TARGET_NR_getppid /* not on alpha */
5246 case TARGET_NR_getppid:
5247 ret = get_errno(getppid());
5250 case TARGET_NR_getpgrp:
5251 ret = get_errno(getpgrp());
5253 case TARGET_NR_setsid:
5254 ret = get_errno(setsid());
5256 #ifdef TARGET_NR_sigaction
5257 case TARGET_NR_sigaction:
5259 #if defined(TARGET_ALPHA)
5260 struct target_sigaction act, oact, *pact = 0;
5261 struct target_old_sigaction *old_act;
5263 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5265 act._sa_handler = old_act->_sa_handler;
5266 target_siginitset(&act.sa_mask, old_act->sa_mask);
5267 act.sa_flags = old_act->sa_flags;
5268 act.sa_restorer = 0;
5269 unlock_user_struct(old_act, arg2, 0);
5272 ret = get_errno(do_sigaction(arg1, pact, &oact));
5273 if (!is_error(ret) && arg3) {
5274 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5276 old_act->_sa_handler = oact._sa_handler;
5277 old_act->sa_mask = oact.sa_mask.sig[0];
5278 old_act->sa_flags = oact.sa_flags;
5279 unlock_user_struct(old_act, arg3, 1);
5281 #elif defined(TARGET_MIPS)
5282 struct target_sigaction act, oact, *pact, *old_act;
5285 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5287 act._sa_handler = old_act->_sa_handler;
5288 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5289 act.sa_flags = old_act->sa_flags;
5290 unlock_user_struct(old_act, arg2, 0);
5296 ret = get_errno(do_sigaction(arg1, pact, &oact));
5298 if (!is_error(ret) && arg3) {
5299 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5301 old_act->_sa_handler = oact._sa_handler;
5302 old_act->sa_flags = oact.sa_flags;
5303 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5304 old_act->sa_mask.sig[1] = 0;
5305 old_act->sa_mask.sig[2] = 0;
5306 old_act->sa_mask.sig[3] = 0;
5307 unlock_user_struct(old_act, arg3, 1);
5310 struct target_old_sigaction *old_act;
5311 struct target_sigaction act, oact, *pact;
5313 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5315 act._sa_handler = old_act->_sa_handler;
5316 target_siginitset(&act.sa_mask, old_act->sa_mask);
5317 act.sa_flags = old_act->sa_flags;
5318 act.sa_restorer = old_act->sa_restorer;
5319 unlock_user_struct(old_act, arg2, 0);
5324 ret = get_errno(do_sigaction(arg1, pact, &oact));
5325 if (!is_error(ret) && arg3) {
5326 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5328 old_act->_sa_handler = oact._sa_handler;
5329 old_act->sa_mask = oact.sa_mask.sig[0];
5330 old_act->sa_flags = oact.sa_flags;
5331 old_act->sa_restorer = oact.sa_restorer;
5332 unlock_user_struct(old_act, arg3, 1);
5338 case TARGET_NR_rt_sigaction:
5340 #if defined(TARGET_ALPHA)
5341 struct target_sigaction act, oact, *pact = 0;
5342 struct target_rt_sigaction *rt_act;
5343 /* ??? arg4 == sizeof(sigset_t). */
5345 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5347 act._sa_handler = rt_act->_sa_handler;
5348 act.sa_mask = rt_act->sa_mask;
5349 act.sa_flags = rt_act->sa_flags;
5350 act.sa_restorer = arg5;
5351 unlock_user_struct(rt_act, arg2, 0);
5354 ret = get_errno(do_sigaction(arg1, pact, &oact));
5355 if (!is_error(ret) && arg3) {
5356 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5358 rt_act->_sa_handler = oact._sa_handler;
5359 rt_act->sa_mask = oact.sa_mask;
5360 rt_act->sa_flags = oact.sa_flags;
5361 unlock_user_struct(rt_act, arg3, 1);
5364 struct target_sigaction *act;
5365 struct target_sigaction *oact;
5368 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5373 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5374 ret = -TARGET_EFAULT;
5375 goto rt_sigaction_fail;
5379 ret = get_errno(do_sigaction(arg1, act, oact));
5382 unlock_user_struct(act, arg2, 0);
5384 unlock_user_struct(oact, arg3, 1);
5388 #ifdef TARGET_NR_sgetmask /* not on alpha */
5389 case TARGET_NR_sgetmask:
5392 abi_ulong target_set;
5393 sigprocmask(0, NULL, &cur_set);
5394 host_to_target_old_sigset(&target_set, &cur_set);
5399 #ifdef TARGET_NR_ssetmask /* not on alpha */
5400 case TARGET_NR_ssetmask:
5402 sigset_t set, oset, cur_set;
5403 abi_ulong target_set = arg1;
5404 sigprocmask(0, NULL, &cur_set);
5405 target_to_host_old_sigset(&set, &target_set);
5406 sigorset(&set, &set, &cur_set);
5407 sigprocmask(SIG_SETMASK, &set, &oset);
5408 host_to_target_old_sigset(&target_set, &oset);
5413 #ifdef TARGET_NR_sigprocmask
5414 case TARGET_NR_sigprocmask:
5416 #if defined(TARGET_ALPHA)
5417 sigset_t set, oldset;
5422 case TARGET_SIG_BLOCK:
5425 case TARGET_SIG_UNBLOCK:
5428 case TARGET_SIG_SETMASK:
5432 ret = -TARGET_EINVAL;
5436 target_to_host_old_sigset(&set, &mask);
5438 ret = get_errno(sigprocmask(how, &set, &oldset));
5440 if (!is_error(ret)) {
5441 host_to_target_old_sigset(&mask, &oldset);
5443 ((CPUAlphaState *)cpu_env)->[IR_V0] = 0; /* force no error */
5446 sigset_t set, oldset, *set_ptr;
5451 case TARGET_SIG_BLOCK:
5454 case TARGET_SIG_UNBLOCK:
5457 case TARGET_SIG_SETMASK:
5461 ret = -TARGET_EINVAL;
5464 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5466 target_to_host_old_sigset(&set, p);
5467 unlock_user(p, arg2, 0);
5473 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5474 if (!is_error(ret) && arg3) {
5475 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5477 host_to_target_old_sigset(p, &oldset);
5478 unlock_user(p, arg3, sizeof(target_sigset_t));
5484 case TARGET_NR_rt_sigprocmask:
5487 sigset_t set, oldset, *set_ptr;
5491 case TARGET_SIG_BLOCK:
5494 case TARGET_SIG_UNBLOCK:
5497 case TARGET_SIG_SETMASK:
5501 ret = -TARGET_EINVAL;
5504 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5506 target_to_host_sigset(&set, p);
5507 unlock_user(p, arg2, 0);
5513 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5514 if (!is_error(ret) && arg3) {
5515 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5517 host_to_target_sigset(p, &oldset);
5518 unlock_user(p, arg3, sizeof(target_sigset_t));
5522 #ifdef TARGET_NR_sigpending
5523 case TARGET_NR_sigpending:
5526 ret = get_errno(sigpending(&set));
5527 if (!is_error(ret)) {
5528 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5530 host_to_target_old_sigset(p, &set);
5531 unlock_user(p, arg1, sizeof(target_sigset_t));
5536 case TARGET_NR_rt_sigpending:
5539 ret = get_errno(sigpending(&set));
5540 if (!is_error(ret)) {
5541 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5543 host_to_target_sigset(p, &set);
5544 unlock_user(p, arg1, sizeof(target_sigset_t));
5548 #ifdef TARGET_NR_sigsuspend
5549 case TARGET_NR_sigsuspend:
5552 #if defined(TARGET_ALPHA)
5553 abi_ulong mask = arg1;
5554 target_to_host_old_sigset(&set, &mask);
5556 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5558 target_to_host_old_sigset(&set, p);
5559 unlock_user(p, arg1, 0);
5561 ret = get_errno(sigsuspend(&set));
5565 case TARGET_NR_rt_sigsuspend:
5568 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5570 target_to_host_sigset(&set, p);
5571 unlock_user(p, arg1, 0);
5572 ret = get_errno(sigsuspend(&set));
5575 case TARGET_NR_rt_sigtimedwait:
5578 struct timespec uts, *puts;
5581 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5583 target_to_host_sigset(&set, p);
5584 unlock_user(p, arg1, 0);
5587 target_to_host_timespec(puts, arg3);
5591 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
5592 if (!is_error(ret) && arg2) {
5593 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
5595 host_to_target_siginfo(p, &uinfo);
5596 unlock_user(p, arg2, sizeof(target_siginfo_t));
5600 case TARGET_NR_rt_sigqueueinfo:
5603 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
5605 target_to_host_siginfo(&uinfo, p);
5606 unlock_user(p, arg1, 0);
5607 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* NOTE: ret already holds the guest return-register value
           (eax on x86), so no transcoding needs to be done */
        ret = do_sigreturn(cpu_env);
    case TARGET_NR_rt_sigreturn:
        /* NOTE: as above, ret needs no transcoding */
        ret = do_rt_sigreturn(cpu_env);
5620 case TARGET_NR_sethostname:
5621 if (!(p = lock_user_string(arg1)))
5623 ret = get_errno(sethostname(p, arg2));
5624 unlock_user(p, arg1, 0);
5626 case TARGET_NR_setrlimit:
5628 int resource = target_to_host_resource(arg1);
5629 struct target_rlimit *target_rlim;
5631 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
5633 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
5634 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
5635 unlock_user_struct(target_rlim, arg2, 0);
5636 ret = get_errno(setrlimit(resource, &rlim));
5639 case TARGET_NR_getrlimit:
5641 int resource = target_to_host_resource(arg1);
5642 struct target_rlimit *target_rlim;
5645 ret = get_errno(getrlimit(resource, &rlim));
5646 if (!is_error(ret)) {
5647 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5649 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
5650 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
5651 unlock_user_struct(target_rlim, arg2, 1);
5655 case TARGET_NR_getrusage:
5657 struct rusage rusage;
5658 ret = get_errno(getrusage(arg1, &rusage));
5659 if (!is_error(ret)) {
5660 host_to_target_rusage(arg2, &rusage);
5664 case TARGET_NR_gettimeofday:
5667 ret = get_errno(gettimeofday(&tv, NULL));
5668 if (!is_error(ret)) {
5669 if (copy_to_user_timeval(arg1, &tv))
5674 case TARGET_NR_settimeofday:
5677 if (copy_from_user_timeval(&tv, arg1))
5679 ret = get_errno(settimeofday(&tv, NULL));
5682 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
5683 case TARGET_NR_select:
5685 struct target_sel_arg_struct *sel;
5686 abi_ulong inp, outp, exp, tvp;
5689 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
5691 nsel = tswapl(sel->n);
5692 inp = tswapl(sel->inp);
5693 outp = tswapl(sel->outp);
5694 exp = tswapl(sel->exp);
5695 tvp = tswapl(sel->tvp);
5696 unlock_user_struct(sel, arg1, 0);
5697 ret = do_select(nsel, inp, outp, exp, tvp);
5701 #ifdef TARGET_NR_pselect6
5702 case TARGET_NR_pselect6:
5704 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
5705 fd_set rfds, wfds, efds;
5706 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
5707 struct timespec ts, *ts_ptr;
5710 * The 6th arg is actually two args smashed together,
5711 * so we cannot use the C library.
5719 abi_ulong arg_sigset, arg_sigsize, *arg7;
5720 target_sigset_t *target_sigset;
5728 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
5732 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
5736 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
5742 * This takes a timespec, and not a timeval, so we cannot
5743 * use the do_select() helper ...
5746 if (target_to_host_timespec(&ts, ts_addr)) {
5754 /* Extract the two packed args for the sigset */
5757 sig.size = _NSIG / 8;
5759 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
5763 arg_sigset = tswapl(arg7[0]);
5764 arg_sigsize = tswapl(arg7[1]);
5765 unlock_user(arg7, arg6, 0);
5769 if (arg_sigsize != sizeof(*target_sigset)) {
5770 /* Like the kernel, we enforce correct size sigsets */
5771 ret = -TARGET_EINVAL;
5774 target_sigset = lock_user(VERIFY_READ, arg_sigset,
5775 sizeof(*target_sigset), 1);
5776 if (!target_sigset) {
5779 target_to_host_sigset(&set, target_sigset);
5780 unlock_user(target_sigset, arg_sigset, 0);
5788 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
5791 if (!is_error(ret)) {
5792 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
5794 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
5796 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
5799 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
5805 case TARGET_NR_symlink:
5808 p = lock_user_string(arg1);
5809 p2 = lock_user_string(arg2);
5811 ret = -TARGET_EFAULT;
5813 ret = get_errno(symlink(p, p2));
5814 unlock_user(p2, arg2, 0);
5815 unlock_user(p, arg1, 0);
5818 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5819 case TARGET_NR_symlinkat:
5822 p = lock_user_string(arg1);
5823 p2 = lock_user_string(arg3);
5825 ret = -TARGET_EFAULT;
5827 ret = get_errno(sys_symlinkat(p, arg2, p2));
5828 unlock_user(p2, arg3, 0);
5829 unlock_user(p, arg1, 0);
5833 #ifdef TARGET_NR_oldlstat
5834 case TARGET_NR_oldlstat:
    case TARGET_NR_readlink:
        p = lock_user_string(arg1);
        p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
        ret = -TARGET_EFAULT;
        /* Special-case /proc/self/exe: report the emulated binary's path
           rather than the qemu host binary's. */
        if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
            char real[PATH_MAX];
            temp = realpath(exec_path,real);
            /* NOTE(review): ret is the full strlen(real) even though snprintf
               truncates the copy to arg3 bytes -- callers may see a length
               larger than what was actually written; verify intended. */
            ret = (temp==NULL) ? get_errno(-1) : strlen(real) ;
            /* NOTE(review): when realpath fails, real is uninitialized here
               but is still formatted into p2 -- confirm this path is safe. */
            snprintf((char *)p2, arg3, "%s", real);
        ret = get_errno(readlink(path(p), p2, arg3));
        unlock_user(p2, arg2, ret);
        unlock_user(p, arg1, 0);
5858 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5859 case TARGET_NR_readlinkat:
5862 p = lock_user_string(arg2);
5863 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
5865 ret = -TARGET_EFAULT;
5867 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
5868 unlock_user(p2, arg3, ret);
5869 unlock_user(p, arg2, 0);
5873 #ifdef TARGET_NR_uselib
5874 case TARGET_NR_uselib:
5877 #ifdef TARGET_NR_swapon
5878 case TARGET_NR_swapon:
5879 if (!(p = lock_user_string(arg1)))
5881 ret = get_errno(swapon(p, arg2));
5882 unlock_user(p, arg1, 0);
5885 case TARGET_NR_reboot:
5887 #ifdef TARGET_NR_readdir
5888 case TARGET_NR_readdir:
5891 #ifdef TARGET_NR_mmap
5892 case TARGET_NR_mmap:
5893 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
5894 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
5895 || defined(TARGET_S390X)
5898 abi_ulong v1, v2, v3, v4, v5, v6;
5899 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
5907 unlock_user(v, arg1, 0);
5908 ret = get_errno(target_mmap(v1, v2, v3,
5909 target_to_host_bitmask(v4, mmap_flags_tbl),
5913 ret = get_errno(target_mmap(arg1, arg2, arg3,
5914 target_to_host_bitmask(arg4, mmap_flags_tbl),
5920 #ifdef TARGET_NR_mmap2
5921 case TARGET_NR_mmap2:
5923 #define MMAP_SHIFT 12
5925 ret = get_errno(target_mmap(arg1, arg2, arg3,
5926 target_to_host_bitmask(arg4, mmap_flags_tbl),
5928 arg6 << MMAP_SHIFT));
5931 case TARGET_NR_munmap:
5932 ret = get_errno(target_munmap(arg1, arg2));
5934 case TARGET_NR_mprotect:
5936 TaskState *ts = ((CPUState *)cpu_env)->opaque;
5937 /* Special hack to detect libc making the stack executable. */
5938 if ((arg3 & PROT_GROWSDOWN)
5939 && arg1 >= ts->info->stack_limit
5940 && arg1 <= ts->info->start_stack) {
5941 arg3 &= ~PROT_GROWSDOWN;
5942 arg2 = arg2 + arg1 - ts->info->stack_limit;
5943 arg1 = ts->info->stack_limit;
5946 ret = get_errno(target_mprotect(arg1, arg2, arg3));
5948 #ifdef TARGET_NR_mremap
5949 case TARGET_NR_mremap:
5950 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
5953 /* ??? msync/mlock/munlock are broken for softmmu. */
5954 #ifdef TARGET_NR_msync
5955 case TARGET_NR_msync:
5956 ret = get_errno(msync(g2h(arg1), arg2, arg3));
5959 #ifdef TARGET_NR_mlock
5960 case TARGET_NR_mlock:
5961 ret = get_errno(mlock(g2h(arg1), arg2));
5964 #ifdef TARGET_NR_munlock
5965 case TARGET_NR_munlock:
5966 ret = get_errno(munlock(g2h(arg1), arg2));
5969 #ifdef TARGET_NR_mlockall
5970 case TARGET_NR_mlockall:
5971 ret = get_errno(mlockall(arg1));
5974 #ifdef TARGET_NR_munlockall
5975 case TARGET_NR_munlockall:
5976 ret = get_errno(munlockall());
5979 case TARGET_NR_truncate:
5980 if (!(p = lock_user_string(arg1)))
5982 ret = get_errno(truncate(p, arg2));
5983 unlock_user(p, arg1, 0);
5985 case TARGET_NR_ftruncate:
5986 ret = get_errno(ftruncate(arg1, arg2));
5988 case TARGET_NR_fchmod:
5989 ret = get_errno(fchmod(arg1, arg2));
5991 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5992 case TARGET_NR_fchmodat:
5993 if (!(p = lock_user_string(arg2)))
5995 ret = get_errno(sys_fchmodat(arg1, p, arg3));
5996 unlock_user(p, arg2, 0);
5999 case TARGET_NR_getpriority:
6000 /* libc does special remapping of the return value of
6001 * sys_getpriority() so it's just easiest to call
6002 * sys_getpriority() directly rather than through libc. */
6003 ret = get_errno(sys_getpriority(arg1, arg2));
6005 case TARGET_NR_setpriority:
6006 ret = get_errno(setpriority(arg1, arg2, arg3));
6008 #ifdef TARGET_NR_profil
6009 case TARGET_NR_profil:
6012 case TARGET_NR_statfs:
6013 if (!(p = lock_user_string(arg1)))
6015 ret = get_errno(statfs(path(p), &stfs));
6016 unlock_user(p, arg1, 0);
6018 if (!is_error(ret)) {
6019 struct target_statfs *target_stfs;
6021 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6023 __put_user(stfs.f_type, &target_stfs->f_type);
6024 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6025 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6026 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6027 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6028 __put_user(stfs.f_files, &target_stfs->f_files);
6029 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6030 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6031 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6032 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6033 unlock_user_struct(target_stfs, arg2, 1);
6036 case TARGET_NR_fstatfs:
6037 ret = get_errno(fstatfs(arg1, &stfs));
6038 goto convert_statfs;
6039 #ifdef TARGET_NR_statfs64
6040 case TARGET_NR_statfs64:
6041 if (!(p = lock_user_string(arg1)))
6043 ret = get_errno(statfs(path(p), &stfs));
6044 unlock_user(p, arg1, 0);
6046 if (!is_error(ret)) {
6047 struct target_statfs64 *target_stfs;
6049 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6051 __put_user(stfs.f_type, &target_stfs->f_type);
6052 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6053 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6054 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6055 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6056 __put_user(stfs.f_files, &target_stfs->f_files);
6057 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6058 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6059 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6060 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6061 unlock_user_struct(target_stfs, arg3, 1);
6064 case TARGET_NR_fstatfs64:
6065 ret = get_errno(fstatfs(arg1, &stfs));
6066 goto convert_statfs64;
6068 #ifdef TARGET_NR_ioperm
6069 case TARGET_NR_ioperm:
6072 #ifdef TARGET_NR_socketcall
6073 case TARGET_NR_socketcall:
6074 ret = do_socketcall(arg1, arg2);
6077 #ifdef TARGET_NR_accept
6078 case TARGET_NR_accept:
6079 ret = do_accept(arg1, arg2, arg3);
6082 #ifdef TARGET_NR_bind
6083 case TARGET_NR_bind:
6084 ret = do_bind(arg1, arg2, arg3);
6087 #ifdef TARGET_NR_connect
6088 case TARGET_NR_connect:
6089 ret = do_connect(arg1, arg2, arg3);
6092 #ifdef TARGET_NR_getpeername
6093 case TARGET_NR_getpeername:
6094 ret = do_getpeername(arg1, arg2, arg3);
6097 #ifdef TARGET_NR_getsockname
6098 case TARGET_NR_getsockname:
6099 ret = do_getsockname(arg1, arg2, arg3);
6102 #ifdef TARGET_NR_getsockopt
6103 case TARGET_NR_getsockopt:
6104 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6107 #ifdef TARGET_NR_listen
6108 case TARGET_NR_listen:
6109 ret = get_errno(listen(arg1, arg2));
6112 #ifdef TARGET_NR_recv
6113 case TARGET_NR_recv:
6114 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6117 #ifdef TARGET_NR_recvfrom
6118 case TARGET_NR_recvfrom:
6119 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6122 #ifdef TARGET_NR_recvmsg
6123 case TARGET_NR_recvmsg:
6124 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6127 #ifdef TARGET_NR_send
6128 case TARGET_NR_send:
6129 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6132 #ifdef TARGET_NR_sendmsg
6133 case TARGET_NR_sendmsg:
6134 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6137 #ifdef TARGET_NR_sendto
6138 case TARGET_NR_sendto:
6139 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6142 #ifdef TARGET_NR_shutdown
6143 case TARGET_NR_shutdown:
6144 ret = get_errno(shutdown(arg1, arg2));
6147 #ifdef TARGET_NR_socket
6148 case TARGET_NR_socket:
6149 ret = do_socket(arg1, arg2, arg3);
6152 #ifdef TARGET_NR_socketpair
6153 case TARGET_NR_socketpair:
6154 ret = do_socketpair(arg1, arg2, arg3, arg4);
6157 #ifdef TARGET_NR_setsockopt
6158 case TARGET_NR_setsockopt:
6159 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6163 case TARGET_NR_syslog:
6164 if (!(p = lock_user_string(arg2)))
6166 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6167 unlock_user(p, arg2, 0);
6170 case TARGET_NR_setitimer:
6172 struct itimerval value, ovalue, *pvalue;
6176 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6177 || copy_from_user_timeval(&pvalue->it_value,
6178 arg2 + sizeof(struct target_timeval)))
6183 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6184 if (!is_error(ret) && arg3) {
6185 if (copy_to_user_timeval(arg3,
6186 &ovalue.it_interval)
6187 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6193 case TARGET_NR_getitimer:
6195 struct itimerval value;
6197 ret = get_errno(getitimer(arg1, &value));
6198 if (!is_error(ret) && arg2) {
6199 if (copy_to_user_timeval(arg2,
6201 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6207 case TARGET_NR_stat:
6208 if (!(p = lock_user_string(arg1)))
6210 ret = get_errno(stat(path(p), &st));
6211 unlock_user(p, arg1, 0);
6213 case TARGET_NR_lstat:
6214 if (!(p = lock_user_string(arg1)))
6216 ret = get_errno(lstat(path(p), &st));
6217 unlock_user(p, arg1, 0);
6219 case TARGET_NR_fstat:
6221 ret = get_errno(fstat(arg1, &st));
6223 if (!is_error(ret)) {
6224 struct target_stat *target_st;
6226 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6228 memset(target_st, 0, sizeof(*target_st));
6229 __put_user(st.st_dev, &target_st->st_dev);
6230 __put_user(st.st_ino, &target_st->st_ino);
6231 __put_user(st.st_mode, &target_st->st_mode);
6232 __put_user(st.st_uid, &target_st->st_uid);
6233 __put_user(st.st_gid, &target_st->st_gid);
6234 __put_user(st.st_nlink, &target_st->st_nlink);
6235 __put_user(st.st_rdev, &target_st->st_rdev);
6236 __put_user(st.st_size, &target_st->st_size);
6237 __put_user(st.st_blksize, &target_st->st_blksize);
6238 __put_user(st.st_blocks, &target_st->st_blocks);
6239 __put_user(st.st_atime, &target_st->target_st_atime);
6240 __put_user(st.st_mtime, &target_st->target_st_mtime);
6241 __put_user(st.st_ctime, &target_st->target_st_ctime);
6242 unlock_user_struct(target_st, arg2, 1);
6246 #ifdef TARGET_NR_olduname
6247 case TARGET_NR_olduname:
6250 #ifdef TARGET_NR_iopl
6251 case TARGET_NR_iopl:
6254 case TARGET_NR_vhangup:
6255 ret = get_errno(vhangup());
6257 #ifdef TARGET_NR_idle
6258 case TARGET_NR_idle:
6261 #ifdef TARGET_NR_syscall
6262 case TARGET_NR_syscall:
6263 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6264 arg6, arg7, arg8, 0);
6267 case TARGET_NR_wait4:
6270 abi_long status_ptr = arg2;
6271 struct rusage rusage, *rusage_ptr;
6272 abi_ulong target_rusage = arg4;
6274 rusage_ptr = &rusage;
6277 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6278 if (!is_error(ret)) {
6280 status = host_to_target_waitstatus(status);
6281 if (put_user_s32(status, status_ptr))
6285 host_to_target_rusage(target_rusage, &rusage);
6289 #ifdef TARGET_NR_swapoff
6290 case TARGET_NR_swapoff:
6291 if (!(p = lock_user_string(arg1)))
6293 ret = get_errno(swapoff(p));
6294 unlock_user(p, arg1, 0);
6297 case TARGET_NR_sysinfo:
6299 struct target_sysinfo *target_value;
6300 struct sysinfo value;
6301 ret = get_errno(sysinfo(&value));
6302 if (!is_error(ret) && arg1)
6304 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6306 __put_user(value.uptime, &target_value->uptime);
6307 __put_user(value.loads[0], &target_value->loads[0]);
6308 __put_user(value.loads[1], &target_value->loads[1]);
6309 __put_user(value.loads[2], &target_value->loads[2]);
6310 __put_user(value.totalram, &target_value->totalram);
6311 __put_user(value.freeram, &target_value->freeram);
6312 __put_user(value.sharedram, &target_value->sharedram);
6313 __put_user(value.bufferram, &target_value->bufferram);
6314 __put_user(value.totalswap, &target_value->totalswap);
6315 __put_user(value.freeswap, &target_value->freeswap);
6316 __put_user(value.procs, &target_value->procs);
6317 __put_user(value.totalhigh, &target_value->totalhigh);
6318 __put_user(value.freehigh, &target_value->freehigh);
6319 __put_user(value.mem_unit, &target_value->mem_unit);
6320 unlock_user_struct(target_value, arg1, 1);
6324 #ifdef TARGET_NR_ipc
6326 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6329 #ifdef TARGET_NR_semget
6330 case TARGET_NR_semget:
6331 ret = get_errno(semget(arg1, arg2, arg3));
6334 #ifdef TARGET_NR_semop
6335 case TARGET_NR_semop:
6336 ret = get_errno(do_semop(arg1, arg2, arg3));
6339 #ifdef TARGET_NR_semctl
6340 case TARGET_NR_semctl:
6341 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6344 #ifdef TARGET_NR_msgctl
6345 case TARGET_NR_msgctl:
6346 ret = do_msgctl(arg1, arg2, arg3);
6349 #ifdef TARGET_NR_msgget
6350 case TARGET_NR_msgget:
6351 ret = get_errno(msgget(arg1, arg2));
6354 #ifdef TARGET_NR_msgrcv
6355 case TARGET_NR_msgrcv:
6356 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6359 #ifdef TARGET_NR_msgsnd
6360 case TARGET_NR_msgsnd:
6361 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6364 #ifdef TARGET_NR_shmget
6365 case TARGET_NR_shmget:
6366 ret = get_errno(shmget(arg1, arg2, arg3));
6369 #ifdef TARGET_NR_shmctl
6370 case TARGET_NR_shmctl:
6371 ret = do_shmctl(arg1, arg2, arg3);
6374 #ifdef TARGET_NR_shmat
6375 case TARGET_NR_shmat:
6376 ret = do_shmat(arg1, arg2, arg3);
6379 #ifdef TARGET_NR_shmdt
6380 case TARGET_NR_shmdt:
6381 ret = do_shmdt(arg1);
6384 case TARGET_NR_fsync:
6385 ret = get_errno(fsync(arg1));
6387 case TARGET_NR_clone:
6388 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6389 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6390 #elif defined(TARGET_CRIS)
6391 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6392 #elif defined(TARGET_S390X)
6393 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6395 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6398 #ifdef __NR_exit_group
6399 /* new thread calls */
6400 case TARGET_NR_exit_group:
6404 gdb_exit(cpu_env, arg1);
6405 ret = get_errno(exit_group(arg1));
6408 case TARGET_NR_setdomainname:
6409 if (!(p = lock_user_string(arg1)))
6411 ret = get_errno(setdomainname(p, arg2));
6412 unlock_user(p, arg1, 0);
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        struct new_utsname * buf;
        if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
        ret = get_errno(sys_uname(buf));
        if (!is_error(ret)) {
            /* Overwrite the native machine name with the name of the
               emulated CPU. */
            strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
            /* Allow the user to override the reported release. */
            if (qemu_uname_release && *qemu_uname_release)
                strcpy (buf->release, qemu_uname_release);
        unlock_user_struct(buf, arg1, 1);
6434 case TARGET_NR_modify_ldt:
6435 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6437 #if !defined(TARGET_X86_64)
6438 case TARGET_NR_vm86old:
6440 case TARGET_NR_vm86:
6441 ret = do_vm86(cpu_env, arg1, arg2);
6445 case TARGET_NR_adjtimex:
6447 #ifdef TARGET_NR_create_module
6448 case TARGET_NR_create_module:
6450 case TARGET_NR_init_module:
6451 case TARGET_NR_delete_module:
6452 #ifdef TARGET_NR_get_kernel_syms
6453 case TARGET_NR_get_kernel_syms:
6456 case TARGET_NR_quotactl:
6458 case TARGET_NR_getpgid:
6459 ret = get_errno(getpgid(arg1));
6461 case TARGET_NR_fchdir:
6462 ret = get_errno(fchdir(arg1));
6464 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6465 case TARGET_NR_bdflush:
6468 #ifdef TARGET_NR_sysfs
6469 case TARGET_NR_sysfs:
6472 case TARGET_NR_personality:
6473 ret = get_errno(personality(arg1));
6475 #ifdef TARGET_NR_afs_syscall
6476 case TARGET_NR_afs_syscall:
6479 #ifdef TARGET_NR__llseek /* Not on alpha */
6480 case TARGET_NR__llseek:
6483 #if !defined(__NR_llseek)
6484 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6486 ret = get_errno(res);
6491 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6493 if ((ret == 0) && put_user_s64(res, arg4)) {
6499 case TARGET_NR_getdents:
6500 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6502 struct target_dirent *target_dirp;
6503 struct linux_dirent *dirp;
6504 abi_long count = arg3;
6506 dirp = malloc(count);
6508 ret = -TARGET_ENOMEM;
6512 ret = get_errno(sys_getdents(arg1, dirp, count));
6513 if (!is_error(ret)) {
6514 struct linux_dirent *de;
6515 struct target_dirent *tde;
6517 int reclen, treclen;
6518 int count1, tnamelen;
6522 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6526 reclen = de->d_reclen;
6527 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
6528 tde->d_reclen = tswap16(treclen);
6529 tde->d_ino = tswapl(de->d_ino);
6530 tde->d_off = tswapl(de->d_off);
6531 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
6534 /* XXX: may not be correct */
6535 pstrcpy(tde->d_name, tnamelen, de->d_name);
6536 de = (struct linux_dirent *)((char *)de + reclen);
6538 tde = (struct target_dirent *)((char *)tde + treclen);
6542 unlock_user(target_dirp, arg2, ret);
6548 struct linux_dirent *dirp;
6549 abi_long count = arg3;
6551 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6553 ret = get_errno(sys_getdents(arg1, dirp, count));
6554 if (!is_error(ret)) {
6555 struct linux_dirent *de;
6560 reclen = de->d_reclen;
6563 de->d_reclen = tswap16(reclen);
6564 tswapls(&de->d_ino);
6565 tswapls(&de->d_off);
6566 de = (struct linux_dirent *)((char *)de + reclen);
6570 unlock_user(dirp, arg2, ret);
6574 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6575 case TARGET_NR_getdents64:
6577 struct linux_dirent64 *dirp;
6578 abi_long count = arg3;
6579 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6581 ret = get_errno(sys_getdents64(arg1, dirp, count));
6582 if (!is_error(ret)) {
6583 struct linux_dirent64 *de;
6588 reclen = de->d_reclen;
6591 de->d_reclen = tswap16(reclen);
6592 tswap64s((uint64_t *)&de->d_ino);
6593 tswap64s((uint64_t *)&de->d_off);
6594 de = (struct linux_dirent64 *)((char *)de + reclen);
6598 unlock_user(dirp, arg2, ret);
6601 #endif /* TARGET_NR_getdents64 */
6602 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6604 case TARGET_NR_select:
6606 case TARGET_NR__newselect:
6608 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6611 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6612 # ifdef TARGET_NR_poll
6613 case TARGET_NR_poll:
6615 # ifdef TARGET_NR_ppoll
6616 case TARGET_NR_ppoll:
6619 struct target_pollfd *target_pfd;
6620 unsigned int nfds = arg2;
6625 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
6629 pfd = alloca(sizeof(struct pollfd) * nfds);
6630 for(i = 0; i < nfds; i++) {
6631 pfd[i].fd = tswap32(target_pfd[i].fd);
6632 pfd[i].events = tswap16(target_pfd[i].events);
6635 # ifdef TARGET_NR_ppoll
6636 if (num == TARGET_NR_ppoll) {
6637 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
6638 target_sigset_t *target_set;
6639 sigset_t _set, *set = &_set;
6642 if (target_to_host_timespec(timeout_ts, arg3)) {
6643 unlock_user(target_pfd, arg1, 0);
6651 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
6653 unlock_user(target_pfd, arg1, 0);
6656 target_to_host_sigset(set, target_set);
6661 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
6663 if (!is_error(ret) && arg3) {
6664 host_to_target_timespec(arg3, timeout_ts);
6667 unlock_user(target_set, arg4, 0);
6671 ret = get_errno(poll(pfd, nfds, timeout));
6673 if (!is_error(ret)) {
6674 for(i = 0; i < nfds; i++) {
6675 target_pfd[i].revents = tswap16(pfd[i].revents);
6678 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
6682 case TARGET_NR_flock:
6683 /* NOTE: the flock constant seems to be the same for every
6685 ret = get_errno(flock(arg1, arg2));
6687 case TARGET_NR_readv:
6692 vec = alloca(count * sizeof(struct iovec));
6693 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
6695 ret = get_errno(readv(arg1, vec, count));
6696 unlock_iovec(vec, arg2, count, 1);
6699 case TARGET_NR_writev:
6704 vec = alloca(count * sizeof(struct iovec));
6705 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
6707 ret = get_errno(writev(arg1, vec, count));
6708 unlock_iovec(vec, arg2, count, 0);
6711 case TARGET_NR_getsid:
6712 ret = get_errno(getsid(arg1));
6714 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6715 case TARGET_NR_fdatasync:
6716 ret = get_errno(fdatasync(arg1));
6719 case TARGET_NR__sysctl:
6720 /* We don't implement this, but ENOTDIR is always a safe
6722 ret = -TARGET_ENOTDIR;
6724 case TARGET_NR_sched_getaffinity:
6726 unsigned int mask_size;
6727 unsigned long *mask;
6730 * sched_getaffinity needs multiples of ulong, so need to take
6731 * care of mismatches between target ulong and host ulong sizes.
6733 if (arg2 & (sizeof(abi_ulong) - 1)) {
6734 ret = -TARGET_EINVAL;
6737 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6739 mask = alloca(mask_size);
6740 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
6742 if (!is_error(ret)) {
6743 if (copy_to_user(arg3, mask, ret)) {
6749 case TARGET_NR_sched_setaffinity:
6751 unsigned int mask_size;
6752 unsigned long *mask;
6755 * sched_setaffinity needs multiples of ulong, so need to take
6756 * care of mismatches between target ulong and host ulong sizes.
6758 if (arg2 & (sizeof(abi_ulong) - 1)) {
6759 ret = -TARGET_EINVAL;
6762 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6764 mask = alloca(mask_size);
6765 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
6768 memcpy(mask, p, arg2);
6769 unlock_user_struct(p, arg2, 0);
6771 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
6774 case TARGET_NR_sched_setparam:
6776 struct sched_param *target_schp;
6777 struct sched_param schp;
6779 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
6781 schp.sched_priority = tswap32(target_schp->sched_priority);
6782 unlock_user_struct(target_schp, arg2, 0);
6783 ret = get_errno(sched_setparam(arg1, &schp));
6786 case TARGET_NR_sched_getparam:
6788 struct sched_param *target_schp;
6789 struct sched_param schp;
6790 ret = get_errno(sched_getparam(arg1, &schp));
6791 if (!is_error(ret)) {
6792 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
6794 target_schp->sched_priority = tswap32(schp.sched_priority);
6795 unlock_user_struct(target_schp, arg2, 1);
6799 case TARGET_NR_sched_setscheduler:
6801 struct sched_param *target_schp;
6802 struct sched_param schp;
6803 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
6805 schp.sched_priority = tswap32(target_schp->sched_priority);
6806 unlock_user_struct(target_schp, arg3, 0);
6807 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
6810 case TARGET_NR_sched_getscheduler:
6811 ret = get_errno(sched_getscheduler(arg1));
6813 case TARGET_NR_sched_yield:
6814 ret = get_errno(sched_yield());
6816 case TARGET_NR_sched_get_priority_max:
6817 ret = get_errno(sched_get_priority_max(arg1));
6819 case TARGET_NR_sched_get_priority_min:
6820 ret = get_errno(sched_get_priority_min(arg1));
6822 case TARGET_NR_sched_rr_get_interval:
6825 ret = get_errno(sched_rr_get_interval(arg1, &ts));
6826 if (!is_error(ret)) {
6827 host_to_target_timespec(arg2, &ts);
6831 case TARGET_NR_nanosleep:
6833 struct timespec req, rem;
6834 target_to_host_timespec(&req, arg1);
6835 ret = get_errno(nanosleep(&req, &rem));
6836 if (is_error(ret) && arg2) {
6837 host_to_target_timespec(arg2, &rem);
6841 #ifdef TARGET_NR_query_module
6842 case TARGET_NR_query_module:
6845 #ifdef TARGET_NR_nfsservctl
6846 case TARGET_NR_nfsservctl:
6849 case TARGET_NR_prctl:
6852 case PR_GET_PDEATHSIG:
6855 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
6856 if (!is_error(ret) && arg2
6857 && put_user_ual(deathsig, arg2))
6862 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
6866 #ifdef TARGET_NR_arch_prctl
6867 case TARGET_NR_arch_prctl:
6868 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6869 ret = do_arch_prctl(cpu_env, arg1, arg2);
6875 #ifdef TARGET_NR_pread
6876 case TARGET_NR_pread:
6877 if (regpairs_aligned(cpu_env))
6879 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6881 ret = get_errno(pread(arg1, p, arg3, arg4));
6882 unlock_user(p, arg2, ret);
6884 case TARGET_NR_pwrite:
6885 if (regpairs_aligned(cpu_env))
6887 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6889 ret = get_errno(pwrite(arg1, p, arg3, arg4));
6890 unlock_user(p, arg2, 0);
6893 #ifdef TARGET_NR_pread64
6894 case TARGET_NR_pread64:
6895 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6897 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
6898 unlock_user(p, arg2, ret);
6900 case TARGET_NR_pwrite64:
6901 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6903 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
6904 unlock_user(p, arg2, 0);
6907 case TARGET_NR_getcwd:
6908 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
6910 ret = get_errno(sys_getcwd1(p, arg2));
6911 unlock_user(p, arg1, ret);
6913 case TARGET_NR_capget:
6915 case TARGET_NR_capset:
6917 case TARGET_NR_sigaltstack:
6918 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6919 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6920 defined(TARGET_M68K) || defined(TARGET_S390X)
6921 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
6926 case TARGET_NR_sendfile:
6928 #ifdef TARGET_NR_getpmsg
6929 case TARGET_NR_getpmsg:
6932 #ifdef TARGET_NR_putpmsg
6933 case TARGET_NR_putpmsg:
6936 #ifdef TARGET_NR_vfork
6937 case TARGET_NR_vfork:
6938 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
6942 #ifdef TARGET_NR_ugetrlimit
6943 case TARGET_NR_ugetrlimit:
6946 int resource = target_to_host_resource(arg1);
6947 ret = get_errno(getrlimit(resource, &rlim));
6948 if (!is_error(ret)) {
6949 struct target_rlimit *target_rlim;
6950 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6952 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6953 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6954 unlock_user_struct(target_rlim, arg2, 1);
6959 #ifdef TARGET_NR_truncate64
6960 case TARGET_NR_truncate64:
6961 if (!(p = lock_user_string(arg1)))
6963 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
6964 unlock_user(p, arg1, 0);
6967 #ifdef TARGET_NR_ftruncate64
6968 case TARGET_NR_ftruncate64:
6969 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
6972 #ifdef TARGET_NR_stat64
6973 case TARGET_NR_stat64:
6974 if (!(p = lock_user_string(arg1)))
6976 ret = get_errno(stat(path(p), &st));
6977 unlock_user(p, arg1, 0);
6979 ret = host_to_target_stat64(cpu_env, arg2, &st);
6982 #ifdef TARGET_NR_lstat64
6983 case TARGET_NR_lstat64:
6984 if (!(p = lock_user_string(arg1)))
6986 ret = get_errno(lstat(path(p), &st));
6987 unlock_user(p, arg1, 0);
6989 ret = host_to_target_stat64(cpu_env, arg2, &st);
6992 #ifdef TARGET_NR_fstat64
6993 case TARGET_NR_fstat64:
6994 ret = get_errno(fstat(arg1, &st));
6996 ret = host_to_target_stat64(cpu_env, arg2, &st);
6999 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7000 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7001 #ifdef TARGET_NR_fstatat64
7002 case TARGET_NR_fstatat64:
7004 #ifdef TARGET_NR_newfstatat
7005 case TARGET_NR_newfstatat:
7007 if (!(p = lock_user_string(arg2)))
7009 #ifdef __NR_fstatat64
7010 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
7012 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
7015 ret = host_to_target_stat64(cpu_env, arg3, &st);
7018 case TARGET_NR_lchown:
7019 if (!(p = lock_user_string(arg1)))
7021 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7022 unlock_user(p, arg1, 0);
7024 #ifdef TARGET_NR_getuid
7025 case TARGET_NR_getuid:
7026 ret = get_errno(high2lowuid(getuid()));
7029 #ifdef TARGET_NR_getgid
7030 case TARGET_NR_getgid:
7031 ret = get_errno(high2lowgid(getgid()));
7034 #ifdef TARGET_NR_geteuid
7035 case TARGET_NR_geteuid:
7036 ret = get_errno(high2lowuid(geteuid()));
7039 #ifdef TARGET_NR_getegid
7040 case TARGET_NR_getegid:
7041 ret = get_errno(high2lowgid(getegid()));
7044 case TARGET_NR_setreuid:
7045 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7047 case TARGET_NR_setregid:
7048 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7050 case TARGET_NR_getgroups:
7052 int gidsetsize = arg1;
7053 target_id *target_grouplist;
7057 grouplist = alloca(gidsetsize * sizeof(gid_t));
7058 ret = get_errno(getgroups(gidsetsize, grouplist));
7059 if (gidsetsize == 0)
7061 if (!is_error(ret)) {
7062 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
7063 if (!target_grouplist)
7065 for(i = 0;i < ret; i++)
7066 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7067 unlock_user(target_grouplist, arg2, gidsetsize * 2);
7071 case TARGET_NR_setgroups:
7073 int gidsetsize = arg1;
7074 target_id *target_grouplist;
7078 grouplist = alloca(gidsetsize * sizeof(gid_t));
7079 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7080 if (!target_grouplist) {
7081 ret = -TARGET_EFAULT;
7084 for(i = 0;i < gidsetsize; i++)
7085 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7086 unlock_user(target_grouplist, arg2, 0);
7087 ret = get_errno(setgroups(gidsetsize, grouplist));
7090 case TARGET_NR_fchown:
7091 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7093 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7094 case TARGET_NR_fchownat:
7095 if (!(p = lock_user_string(arg2)))
7097 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7098 unlock_user(p, arg2, 0);
7101 #ifdef TARGET_NR_setresuid
7102 case TARGET_NR_setresuid:
7103 ret = get_errno(setresuid(low2highuid(arg1),
7105 low2highuid(arg3)));
7108 #ifdef TARGET_NR_getresuid
7109 case TARGET_NR_getresuid:
7111 uid_t ruid, euid, suid;
7112 ret = get_errno(getresuid(&ruid, &euid, &suid));
7113 if (!is_error(ret)) {
7114 if (put_user_u16(high2lowuid(ruid), arg1)
7115 || put_user_u16(high2lowuid(euid), arg2)
7116 || put_user_u16(high2lowuid(suid), arg3))
7122 #ifdef TARGET_NR_getresgid
7123 case TARGET_NR_setresgid:
7124 ret = get_errno(setresgid(low2highgid(arg1),
7126 low2highgid(arg3)));
7129 #ifdef TARGET_NR_getresgid
7130 case TARGET_NR_getresgid:
7132 gid_t rgid, egid, sgid;
7133 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7134 if (!is_error(ret)) {
7135 if (put_user_u16(high2lowgid(rgid), arg1)
7136 || put_user_u16(high2lowgid(egid), arg2)
7137 || put_user_u16(high2lowgid(sgid), arg3))
7143 case TARGET_NR_chown:
7144 if (!(p = lock_user_string(arg1)))
7146 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7147 unlock_user(p, arg1, 0);
7149 case TARGET_NR_setuid:
7150 ret = get_errno(setuid(low2highuid(arg1)));
7152 case TARGET_NR_setgid:
7153 ret = get_errno(setgid(low2highgid(arg1)));
7155 case TARGET_NR_setfsuid:
7156 ret = get_errno(setfsuid(arg1));
7158 case TARGET_NR_setfsgid:
7159 ret = get_errno(setfsgid(arg1));
7162 #ifdef TARGET_NR_lchown32
7163 case TARGET_NR_lchown32:
7164 if (!(p = lock_user_string(arg1)))
7166 ret = get_errno(lchown(p, arg2, arg3));
7167 unlock_user(p, arg1, 0);
7170 #ifdef TARGET_NR_getuid32
7171 case TARGET_NR_getuid32:
7172 ret = get_errno(getuid());
7176 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7177 /* Alpha specific */
7178 case TARGET_NR_getxuid:
7182 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
7184 ret = get_errno(getuid());
7187 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7188 /* Alpha specific */
7189 case TARGET_NR_getxgid:
7193 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7195 ret = get_errno(getgid());
7198 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7199 /* Alpha specific */
7200 case TARGET_NR_osf_getsysinfo:
7201 ret = -TARGET_EOPNOTSUPP;
7203 case TARGET_GSI_IEEE_FP_CONTROL:
7205 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7207 /* Copied from linux ieee_fpcr_to_swcr. */
7208 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7209 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7210 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7211 | SWCR_TRAP_ENABLE_DZE
7212 | SWCR_TRAP_ENABLE_OVF);
7213 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7214 | SWCR_TRAP_ENABLE_INE);
7215 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7216 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7218 if (put_user_u64 (swcr, arg2))
7224 /* case GSI_IEEE_STATE_AT_SIGNAL:
7225 -- Not implemented in linux kernel.
7227 -- Retrieves current unaligned access state; not much used.
7229 -- Retrieves implver information; surely not used.
7231 -- Grabs a copy of the HWRPB; surely not used.
7236 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7237 /* Alpha specific */
7238 case TARGET_NR_osf_setsysinfo:
7239 ret = -TARGET_EOPNOTSUPP;
7241 case TARGET_SSI_IEEE_FP_CONTROL:
7242 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7244 uint64_t swcr, fpcr, orig_fpcr;
7246 if (get_user_u64 (swcr, arg2))
7248 orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
7249 fpcr = orig_fpcr & FPCR_DYN_MASK;
7251 /* Copied from linux ieee_swcr_to_fpcr. */
7252 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7253 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7254 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7255 | SWCR_TRAP_ENABLE_DZE
7256 | SWCR_TRAP_ENABLE_OVF)) << 48;
7257 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7258 | SWCR_TRAP_ENABLE_INE)) << 57;
7259 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7260 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7262 cpu_alpha_store_fpcr (cpu_env, fpcr);
7265 if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
7266 /* Old exceptions are not signaled. */
7267 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7269 /* If any exceptions set by this call, and are unmasked,
7276 /* case SSI_NVPAIRS:
7277 -- Used with SSIN_UACPROC to enable unaligned accesses.
7278 case SSI_IEEE_STATE_AT_SIGNAL:
7279 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7280 -- Not implemented in linux kernel
7285 #ifdef TARGET_NR_osf_sigprocmask
7286 /* Alpha specific. */
7287 case TARGET_NR_osf_sigprocmask:
7291 sigset_t set, oldset;
7294 case TARGET_SIG_BLOCK:
7297 case TARGET_SIG_UNBLOCK:
7300 case TARGET_SIG_SETMASK:
7304 ret = -TARGET_EINVAL;
7308 target_to_host_old_sigset(&set, &mask);
7309 sigprocmask(how, &set, &oldset);
7310 host_to_target_old_sigset(&mask, &oldset);
7316 #ifdef TARGET_NR_getgid32
7317 case TARGET_NR_getgid32:
7318 ret = get_errno(getgid());
7321 #ifdef TARGET_NR_geteuid32
7322 case TARGET_NR_geteuid32:
7323 ret = get_errno(geteuid());
7326 #ifdef TARGET_NR_getegid32
7327 case TARGET_NR_getegid32:
7328 ret = get_errno(getegid());
7331 #ifdef TARGET_NR_setreuid32
7332 case TARGET_NR_setreuid32:
7333 ret = get_errno(setreuid(arg1, arg2));
7336 #ifdef TARGET_NR_setregid32
7337 case TARGET_NR_setregid32:
7338 ret = get_errno(setregid(arg1, arg2));
7341 #ifdef TARGET_NR_getgroups32
7342 case TARGET_NR_getgroups32:
7344 int gidsetsize = arg1;
7345 uint32_t *target_grouplist;
7349 grouplist = alloca(gidsetsize * sizeof(gid_t));
7350 ret = get_errno(getgroups(gidsetsize, grouplist));
7351 if (gidsetsize == 0)
7353 if (!is_error(ret)) {
7354 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7355 if (!target_grouplist) {
7356 ret = -TARGET_EFAULT;
7359 for(i = 0;i < ret; i++)
7360 target_grouplist[i] = tswap32(grouplist[i]);
7361 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7366 #ifdef TARGET_NR_setgroups32
7367 case TARGET_NR_setgroups32:
7369 int gidsetsize = arg1;
7370 uint32_t *target_grouplist;
7374 grouplist = alloca(gidsetsize * sizeof(gid_t));
7375 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7376 if (!target_grouplist) {
7377 ret = -TARGET_EFAULT;
7380 for(i = 0;i < gidsetsize; i++)
7381 grouplist[i] = tswap32(target_grouplist[i]);
7382 unlock_user(target_grouplist, arg2, 0);
7383 ret = get_errno(setgroups(gidsetsize, grouplist));
7387 #ifdef TARGET_NR_fchown32
7388 case TARGET_NR_fchown32:
7389 ret = get_errno(fchown(arg1, arg2, arg3));
7392 #ifdef TARGET_NR_setresuid32
7393 case TARGET_NR_setresuid32:
7394 ret = get_errno(setresuid(arg1, arg2, arg3));
7397 #ifdef TARGET_NR_getresuid32
7398 case TARGET_NR_getresuid32:
7400 uid_t ruid, euid, suid;
7401 ret = get_errno(getresuid(&ruid, &euid, &suid));
7402 if (!is_error(ret)) {
7403 if (put_user_u32(ruid, arg1)
7404 || put_user_u32(euid, arg2)
7405 || put_user_u32(suid, arg3))
7411 #ifdef TARGET_NR_setresgid32
7412 case TARGET_NR_setresgid32:
7413 ret = get_errno(setresgid(arg1, arg2, arg3));
7416 #ifdef TARGET_NR_getresgid32
7417 case TARGET_NR_getresgid32:
7419 gid_t rgid, egid, sgid;
7420 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7421 if (!is_error(ret)) {
7422 if (put_user_u32(rgid, arg1)
7423 || put_user_u32(egid, arg2)
7424 || put_user_u32(sgid, arg3))
7430 #ifdef TARGET_NR_chown32
7431 case TARGET_NR_chown32:
7432 if (!(p = lock_user_string(arg1)))
7434 ret = get_errno(chown(p, arg2, arg3));
7435 unlock_user(p, arg1, 0);
7438 #ifdef TARGET_NR_setuid32
7439 case TARGET_NR_setuid32:
7440 ret = get_errno(setuid(arg1));
7443 #ifdef TARGET_NR_setgid32
7444 case TARGET_NR_setgid32:
7445 ret = get_errno(setgid(arg1));
7448 #ifdef TARGET_NR_setfsuid32
7449 case TARGET_NR_setfsuid32:
7450 ret = get_errno(setfsuid(arg1));
7453 #ifdef TARGET_NR_setfsgid32
7454 case TARGET_NR_setfsgid32:
7455 ret = get_errno(setfsgid(arg1));
7459 case TARGET_NR_pivot_root:
7461 #ifdef TARGET_NR_mincore
7462 case TARGET_NR_mincore:
7465 ret = -TARGET_EFAULT;
7466 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
7468 if (!(p = lock_user_string(arg3)))
7470 ret = get_errno(mincore(a, arg2, p));
7471 unlock_user(p, arg3, ret);
7473 unlock_user(a, arg1, 0);
7477 #ifdef TARGET_NR_arm_fadvise64_64
7478 case TARGET_NR_arm_fadvise64_64:
7481 * arm_fadvise64_64 looks like fadvise64_64 but
7482 * with different argument order
7490 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7491 #ifdef TARGET_NR_fadvise64_64
7492 case TARGET_NR_fadvise64_64:
7494 #ifdef TARGET_NR_fadvise64
7495 case TARGET_NR_fadvise64:
7499 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
7500 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
7501 case 6: arg4 = POSIX_FADV_DONTNEED; break;
7502 case 7: arg4 = POSIX_FADV_NOREUSE; break;
7506 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
7509 #ifdef TARGET_NR_madvise
7510 case TARGET_NR_madvise:
7511 /* A straight passthrough may not be safe because qemu sometimes
7512 turns private file-backed mappings into anonymous mappings.
7513 This will break MADV_DONTNEED.
7514 This is a hint, so ignoring and returning success is ok. */
7518 #if TARGET_ABI_BITS == 32
7519 case TARGET_NR_fcntl64:
7523 struct target_flock64 *target_fl;
7525 struct target_eabi_flock64 *target_efl;
7528 cmd = target_to_host_fcntl_cmd(arg2);
7529 if (cmd == -TARGET_EINVAL)
7533 case TARGET_F_GETLK64:
7535 if (((CPUARMState *)cpu_env)->eabi) {
7536 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7538 fl.l_type = tswap16(target_efl->l_type);
7539 fl.l_whence = tswap16(target_efl->l_whence);
7540 fl.l_start = tswap64(target_efl->l_start);
7541 fl.l_len = tswap64(target_efl->l_len);
7542 fl.l_pid = tswap32(target_efl->l_pid);
7543 unlock_user_struct(target_efl, arg3, 0);
7547 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7549 fl.l_type = tswap16(target_fl->l_type);
7550 fl.l_whence = tswap16(target_fl->l_whence);
7551 fl.l_start = tswap64(target_fl->l_start);
7552 fl.l_len = tswap64(target_fl->l_len);
7553 fl.l_pid = tswap32(target_fl->l_pid);
7554 unlock_user_struct(target_fl, arg3, 0);
7556 ret = get_errno(fcntl(arg1, cmd, &fl));
7559 if (((CPUARMState *)cpu_env)->eabi) {
7560 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
7562 target_efl->l_type = tswap16(fl.l_type);
7563 target_efl->l_whence = tswap16(fl.l_whence);
7564 target_efl->l_start = tswap64(fl.l_start);
7565 target_efl->l_len = tswap64(fl.l_len);
7566 target_efl->l_pid = tswap32(fl.l_pid);
7567 unlock_user_struct(target_efl, arg3, 1);
7571 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
7573 target_fl->l_type = tswap16(fl.l_type);
7574 target_fl->l_whence = tswap16(fl.l_whence);
7575 target_fl->l_start = tswap64(fl.l_start);
7576 target_fl->l_len = tswap64(fl.l_len);
7577 target_fl->l_pid = tswap32(fl.l_pid);
7578 unlock_user_struct(target_fl, arg3, 1);
7583 case TARGET_F_SETLK64:
7584 case TARGET_F_SETLKW64:
7586 if (((CPUARMState *)cpu_env)->eabi) {
7587 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7589 fl.l_type = tswap16(target_efl->l_type);
7590 fl.l_whence = tswap16(target_efl->l_whence);
7591 fl.l_start = tswap64(target_efl->l_start);
7592 fl.l_len = tswap64(target_efl->l_len);
7593 fl.l_pid = tswap32(target_efl->l_pid);
7594 unlock_user_struct(target_efl, arg3, 0);
7598 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7600 fl.l_type = tswap16(target_fl->l_type);
7601 fl.l_whence = tswap16(target_fl->l_whence);
7602 fl.l_start = tswap64(target_fl->l_start);
7603 fl.l_len = tswap64(target_fl->l_len);
7604 fl.l_pid = tswap32(target_fl->l_pid);
7605 unlock_user_struct(target_fl, arg3, 0);
7607 ret = get_errno(fcntl(arg1, cmd, &fl));
7610 ret = do_fcntl(arg1, arg2, arg3);
7616 #ifdef TARGET_NR_cacheflush
7617 case TARGET_NR_cacheflush:
7618 /* self-modifying code is handled automatically, so nothing needed */
7622 #ifdef TARGET_NR_security
7623 case TARGET_NR_security:
7626 #ifdef TARGET_NR_getpagesize
7627 case TARGET_NR_getpagesize:
7628 ret = TARGET_PAGE_SIZE;
7631 case TARGET_NR_gettid:
7632 ret = get_errno(gettid());
7634 #ifdef TARGET_NR_readahead
7635 case TARGET_NR_readahead:
7636 #if TARGET_ABI_BITS == 32
7637 if (regpairs_aligned(cpu_env)) {
7642 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
7644 ret = get_errno(readahead(arg1, arg2, arg3));
7649 #ifdef TARGET_NR_setxattr
7650 case TARGET_NR_lsetxattr:
7651 case TARGET_NR_fsetxattr:
7652 case TARGET_NR_lgetxattr:
7653 case TARGET_NR_fgetxattr:
7654 case TARGET_NR_listxattr:
7655 case TARGET_NR_llistxattr:
7656 case TARGET_NR_flistxattr:
7657 case TARGET_NR_lremovexattr:
7658 case TARGET_NR_fremovexattr:
7659 ret = -TARGET_EOPNOTSUPP;
7661 case TARGET_NR_setxattr:
7664 p = lock_user_string(arg1);
7665 n = lock_user_string(arg2);
7666 v = lock_user(VERIFY_READ, arg3, arg4, 1);
7668 ret = get_errno(setxattr(p, n, v, arg4, arg5));
7670 ret = -TARGET_EFAULT;
7672 unlock_user(p, arg1, 0);
7673 unlock_user(n, arg2, 0);
7674 unlock_user(v, arg3, 0);
7677 case TARGET_NR_getxattr:
7680 p = lock_user_string(arg1);
7681 n = lock_user_string(arg2);
7682 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
7684 ret = get_errno(getxattr(p, n, v, arg4));
7686 ret = -TARGET_EFAULT;
7688 unlock_user(p, arg1, 0);
7689 unlock_user(n, arg2, 0);
7690 unlock_user(v, arg3, arg4);
7693 case TARGET_NR_removexattr:
7696 p = lock_user_string(arg1);
7697 n = lock_user_string(arg2);
7699 ret = get_errno(removexattr(p, n));
7701 ret = -TARGET_EFAULT;
7703 unlock_user(p, arg1, 0);
7704 unlock_user(n, arg2, 0);
7708 #endif /* CONFIG_ATTR */
7709 #ifdef TARGET_NR_set_thread_area
7710 case TARGET_NR_set_thread_area:
7711 #if defined(TARGET_MIPS)
7712 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
7715 #elif defined(TARGET_CRIS)
7717 ret = -TARGET_EINVAL;
7719 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
7723 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
7724 ret = do_set_thread_area(cpu_env, arg1);
7727 goto unimplemented_nowarn;
7730 #ifdef TARGET_NR_get_thread_area
7731 case TARGET_NR_get_thread_area:
7732 #if defined(TARGET_I386) && defined(TARGET_ABI32)
7733 ret = do_get_thread_area(cpu_env, arg1);
7735 goto unimplemented_nowarn;
7738 #ifdef TARGET_NR_getdomainname
7739 case TARGET_NR_getdomainname:
7740 goto unimplemented_nowarn;
7743 #ifdef TARGET_NR_clock_gettime
7744 case TARGET_NR_clock_gettime:
7747 ret = get_errno(clock_gettime(arg1, &ts));
7748 if (!is_error(ret)) {
7749 host_to_target_timespec(arg2, &ts);
7754 #ifdef TARGET_NR_clock_getres
7755 case TARGET_NR_clock_getres:
7758 ret = get_errno(clock_getres(arg1, &ts));
7759 if (!is_error(ret)) {
7760 host_to_target_timespec(arg2, &ts);
7765 #ifdef TARGET_NR_clock_nanosleep
7766 case TARGET_NR_clock_nanosleep:
7769 target_to_host_timespec(&ts, arg3);
7770 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
7772 host_to_target_timespec(arg4, &ts);
7777 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
7778 case TARGET_NR_set_tid_address:
7779 ret = get_errno(set_tid_address((int *)g2h(arg1)));
7783 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
7784 case TARGET_NR_tkill:
7785 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
7789 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
7790 case TARGET_NR_tgkill:
7791 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
7792 target_to_host_signal(arg3)));
7796 #ifdef TARGET_NR_set_robust_list
7797 case TARGET_NR_set_robust_list:
7798 goto unimplemented_nowarn;
7801 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
7802 case TARGET_NR_utimensat:
7804 struct timespec *tsp, ts[2];
7808 target_to_host_timespec(ts, arg3);
7809 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
7813 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
7815 if (!(p = lock_user_string(arg2))) {
7816 ret = -TARGET_EFAULT;
7819 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
7820 unlock_user(p, arg2, 0);
7825 #if defined(CONFIG_USE_NPTL)
7826 case TARGET_NR_futex:
7827 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
7830 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
7831 case TARGET_NR_inotify_init:
7832 ret = get_errno(sys_inotify_init());
7835 #ifdef CONFIG_INOTIFY1
7836 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
7837 case TARGET_NR_inotify_init1:
7838 ret = get_errno(sys_inotify_init1(arg1));
7842 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
7843 case TARGET_NR_inotify_add_watch:
7844 p = lock_user_string(arg2);
7845 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
7846 unlock_user(p, arg2, 0);
7849 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
7850 case TARGET_NR_inotify_rm_watch:
7851 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
7855 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
7856 case TARGET_NR_mq_open:
7858 struct mq_attr posix_mq_attr;
7860 p = lock_user_string(arg1 - 1);
7862 copy_from_user_mq_attr (&posix_mq_attr, arg4);
7863 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
7864 unlock_user (p, arg1, 0);
7868 case TARGET_NR_mq_unlink:
7869 p = lock_user_string(arg1 - 1);
7870 ret = get_errno(mq_unlink(p));
7871 unlock_user (p, arg1, 0);
7874 case TARGET_NR_mq_timedsend:
7878 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7880 target_to_host_timespec(&ts, arg5);
7881 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
7882 host_to_target_timespec(arg5, &ts);
7885 ret = get_errno(mq_send(arg1, p, arg3, arg4));
7886 unlock_user (p, arg2, arg3);
7890 case TARGET_NR_mq_timedreceive:
7895 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7897 target_to_host_timespec(&ts, arg5);
7898 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
7899 host_to_target_timespec(arg5, &ts);
7902 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
7903 unlock_user (p, arg2, arg3);
7905 put_user_u32(prio, arg4);
7909 /* Not implemented for now... */
7910 /* case TARGET_NR_mq_notify: */
7913 case TARGET_NR_mq_getsetattr:
7915 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
7918 ret = mq_getattr(arg1, &posix_mq_attr_out);
7919 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
7922 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
7923 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
7930 #ifdef CONFIG_SPLICE
7931 #ifdef TARGET_NR_tee
7934 ret = get_errno(tee(arg1,arg2,arg3,arg4));
7938 #ifdef TARGET_NR_splice
7939 case TARGET_NR_splice:
7941 loff_t loff_in, loff_out;
7942 loff_t *ploff_in = NULL, *ploff_out = NULL;
7944 get_user_u64(loff_in, arg2);
7945 ploff_in = &loff_in;
7948 get_user_u64(loff_out, arg2);
7949 ploff_out = &loff_out;
7951 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
7955 #ifdef TARGET_NR_vmsplice
7956 case TARGET_NR_vmsplice:
7961 vec = alloca(count * sizeof(struct iovec));
7962 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
7964 ret = get_errno(vmsplice(arg1, vec, count, arg4));
7965 unlock_iovec(vec, arg2, count, 0);
7969 #endif /* CONFIG_SPLICE */
7970 #ifdef CONFIG_EVENTFD
7971 #if defined(TARGET_NR_eventfd)
7972 case TARGET_NR_eventfd:
7973 ret = get_errno(eventfd(arg1, 0));
7976 #if defined(TARGET_NR_eventfd2)
7977 case TARGET_NR_eventfd2:
7978 ret = get_errno(eventfd(arg1, arg2));
7981 #endif /* CONFIG_EVENTFD */
7982 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
7983 case TARGET_NR_fallocate:
7984 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
7987 #if defined(CONFIG_SYNC_FILE_RANGE)
7988 #if defined(TARGET_NR_sync_file_range)
7989 case TARGET_NR_sync_file_range:
7990 #if TARGET_ABI_BITS == 32
7991 #if defined(TARGET_MIPS)
7992 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
7993 target_offset64(arg5, arg6), arg7));
7995 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
7996 target_offset64(arg4, arg5), arg6));
7997 #endif /* !TARGET_MIPS */
7999 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8003 #if defined(TARGET_NR_sync_file_range2)
8004 case TARGET_NR_sync_file_range2:
8005 /* This is like sync_file_range but the arguments are reordered */
8006 #if TARGET_ABI_BITS == 32
8007 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8008 target_offset64(arg5, arg6), arg2));
8010 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8015 #if defined(CONFIG_EPOLL)
8016 #if defined(TARGET_NR_epoll_create)
8017 case TARGET_NR_epoll_create:
8018 ret = get_errno(epoll_create(arg1));
8021 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8022 case TARGET_NR_epoll_create1:
8023 ret = get_errno(epoll_create1(arg1));
8026 #if defined(TARGET_NR_epoll_ctl)
8027 case TARGET_NR_epoll_ctl:
8029 struct epoll_event ep;
8030 struct epoll_event *epp = 0;
8032 struct target_epoll_event *target_ep;
8033 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8036 ep.events = tswap32(target_ep->events);
8037 /* The epoll_data_t union is just opaque data to the kernel,
8038 * so we transfer all 64 bits across and need not worry what
8039 * actual data type it is.
8041 ep.data.u64 = tswap64(target_ep->data.u64);
8042 unlock_user_struct(target_ep, arg4, 0);
8045 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8050 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8051 #define IMPLEMENT_EPOLL_PWAIT
8053 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8054 #if defined(TARGET_NR_epoll_wait)
8055 case TARGET_NR_epoll_wait:
8057 #if defined(IMPLEMENT_EPOLL_PWAIT)
8058 case TARGET_NR_epoll_pwait:
8061 struct target_epoll_event *target_ep;
8062 struct epoll_event *ep;
8064 int maxevents = arg3;
8067 target_ep = lock_user(VERIFY_WRITE, arg2,
8068 maxevents * sizeof(struct target_epoll_event), 1);
8073 ep = alloca(maxevents * sizeof(struct epoll_event));
8076 #if defined(IMPLEMENT_EPOLL_PWAIT)
8077 case TARGET_NR_epoll_pwait:
8079 target_sigset_t *target_set;
8080 sigset_t _set, *set = &_set;
8083 target_set = lock_user(VERIFY_READ, arg5,
8084 sizeof(target_sigset_t), 1);
8086 unlock_user(target_ep, arg2, 0);
8089 target_to_host_sigset(set, target_set);
8090 unlock_user(target_set, arg5, 0);
8095 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8099 #if defined(TARGET_NR_epoll_wait)
8100 case TARGET_NR_epoll_wait:
8101 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8105 ret = -TARGET_ENOSYS;
8107 if (!is_error(ret)) {
8109 for (i = 0; i < ret; i++) {
8110 target_ep[i].events = tswap32(ep[i].events);
8111 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8114 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8119 #ifdef TARGET_NR_prlimit64
8120 case TARGET_NR_prlimit64:
8122 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8123 struct target_rlimit64 *target_rnew, *target_rold;
8124 struct host_rlimit64 rnew, rold, *rnewp = 0;
8126 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8129 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8130 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8131 unlock_user_struct(target_rnew, arg3, 0);
8135 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8136 if (!is_error(ret) && arg4) {
8137 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8140 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8141 target_rold->rlim_max = tswap64(rold.rlim_max);
8142 unlock_user_struct(target_rold, arg4, 1);
8149 gemu_log("qemu: Unsupported syscall: %d\n", num);
8150 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8151 unimplemented_nowarn:
8153 ret = -TARGET_ENOSYS;
8158 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8161 print_syscall_ret(num, ret);
8164 ret = -TARGET_EFAULT;