4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
32 #include <sys/types.h>
38 #include <sys/mount.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
/* Prototype for the ia64-only __clone2(): glibc provides the symbol but
 * no public declaration for it — NOTE(review): confirm against host headers. */
49 int __clone2(int (*fn)(void *), void *child_stack_base,
50 size_t stack_size, int flags, void *arg, ...);
52 #include <sys/socket.h>
56 #include <sys/times.h>
59 #include <sys/statfs.h>
61 #include <sys/sysinfo.h>
62 #include <sys/utsname.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
73 #include <sys/eventfd.h>
76 #include <sys/epoll.h>
79 #include "qemu/xattr.h"
/* Rename the structures the glibc headers already declared before pulling in
 * <linux/termios.h> below, so the kernel definitions do not clash with them. */
82 #define termios host_termios
83 #define winsize host_winsize
84 #define termio host_termio
85 #define sgttyb host_sgttyb /* same as target */
86 #define tchars host_tchars /* same as target */
87 #define ltchars host_ltchars /* same as target */
89 #include <linux/termios.h>
90 #include <linux/unistd.h>
91 #include <linux/utsname.h>
92 #include <linux/cdrom.h>
93 #include <linux/hdreg.h>
94 #include <linux/soundcard.h>
96 #include <linux/mtio.h>
98 #if defined(CONFIG_FIEMAP)
99 #include <linux/fiemap.h>
101 #include <linux/fb.h>
102 #include <linux/vt.h>
103 #include <linux/dm-ioctl.h>
104 #include "linux_loop.h"
105 #include "cpu-uname.h"
/* Thread-related clone(2) flags forwarded to the host when NPTL emulation is
 * enabled; without CONFIG_USE_NPTL no thread flags are passed through. */
109 #if defined(CONFIG_USE_NPTL)
110 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
111 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
113 /* XXX: Hardcode the above values. */
114 #define CLONE_NPTL_FLAGS2 0
/* VFAT readdir ioctls declared locally instead of including the whole of
 * <linux/msdos_fs.h> (see the commented-out include just above). */
119 //#include <linux/msdos_fs.h>
120 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
121 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/*
 * _syscallN(type, name, ...): stamp out a small static function "name" that
 * invokes the raw host syscall __NR_<name> via syscall(2) with N arguments.
 * Used below for host syscalls that glibc does not (or may not) expose as
 * library functions.  (No comments inside the macros: the backslash
 * continuations must stay contiguous.)
 */
132 #define _syscall0(type,name) \
133 static type name (void) \
135 return syscall(__NR_##name); \
138 #define _syscall1(type,name,type1,arg1) \
139 static type name (type1 arg1) \
141 return syscall(__NR_##name, arg1); \
144 #define _syscall2(type,name,type1,arg1,type2,arg2) \
145 static type name (type1 arg1,type2 arg2) \
147 return syscall(__NR_##name, arg1, arg2); \
150 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
151 static type name (type1 arg1,type2 arg2,type3 arg3) \
153 return syscall(__NR_##name, arg1, arg2, arg3); \
156 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
157 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
159 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
162 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
164 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
166 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
170 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
171 type5,arg5,type6,arg6) \
172 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
175 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* Map the local sys_* wrapper names used throughout this file onto the real
 * host syscall numbers, so the _syscallN() macros above can instantiate them. */
179 #define __NR_sys_uname __NR_uname
180 #define __NR_sys_faccessat __NR_faccessat
181 #define __NR_sys_fchmodat __NR_fchmodat
182 #define __NR_sys_fchownat __NR_fchownat
183 #define __NR_sys_fstatat64 __NR_fstatat64
184 #define __NR_sys_futimesat __NR_futimesat
185 #define __NR_sys_getcwd1 __NR_getcwd
186 #define __NR_sys_getdents __NR_getdents
187 #define __NR_sys_getdents64 __NR_getdents64
188 #define __NR_sys_getpriority __NR_getpriority
189 #define __NR_sys_linkat __NR_linkat
190 #define __NR_sys_mkdirat __NR_mkdirat
191 #define __NR_sys_mknodat __NR_mknodat
192 #define __NR_sys_newfstatat __NR_newfstatat
193 #define __NR_sys_openat __NR_openat
194 #define __NR_sys_readlinkat __NR_readlinkat
195 #define __NR_sys_renameat __NR_renameat
196 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
197 #define __NR_sys_symlinkat __NR_symlinkat
198 #define __NR_sys_syslog __NR_syslog
199 #define __NR_sys_tgkill __NR_tgkill
200 #define __NR_sys_tkill __NR_tkill
201 #define __NR_sys_unlinkat __NR_unlinkat
202 #define __NR_sys_utimensat __NR_utimensat
203 #define __NR_sys_futex __NR_futex
204 #define __NR_sys_inotify_init __NR_inotify_init
205 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
206 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
/* Some 64-bit hosts have no separate _llseek; fall back to plain lseek. */
208 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
210 #define __NR__llseek __NR_lseek
214 _syscall0(int, gettid)
216 /* This is a replacement for the host gettid() and must return a host
218 static int gettid(void) {
/* Direct host syscall stubs for calls glibc may not wrap, each instantiated
 * by the _syscallN() macros above and guarded on both the target needing the
 * call (TARGET_NR_*) and the host providing it (__NR_*). */
222 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
223 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
224 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
226 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
227 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
228 loff_t *, res, uint, wh);
230 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
231 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
232 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
233 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
235 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
236 _syscall2(int,sys_tkill,int,tid,int,sig)
238 #ifdef __NR_exit_group
239 _syscall1(int,exit_group,int,error_code)
241 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
242 _syscall1(int,set_tid_address,int *,tidptr)
244 #if defined(CONFIG_USE_NPTL)
245 #if defined(TARGET_NR_futex) && defined(__NR_futex)
246 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
247 const struct timespec *,timeout,int *,uaddr2,int,val3)
250 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
251 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
252 unsigned long *, user_mask_ptr);
253 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
254 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
255 unsigned long *, user_mask_ptr);
256 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
/*
 * Guest <-> host translation table for open(2)/fcntl(2) O_* flag bits.
 * Each row is (target mask, target bits, host mask, host bits); rows for
 * flags that are optional on the host are compiled in conditionally.
 */
259 static bitmask_transtbl fcntl_flags_tbl[] = {
260 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
261 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
262 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
263 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
264 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
265 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
266 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
267 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
/* O_DSYNC row must precede the full O_SYNC row so the weaker sync flag is
 * matched first when only the DSYNC bits of the target's O_SYNC are set. */
268 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
269 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
270 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
271 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
272 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
273 #if defined(O_DIRECT)
274 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
276 #if defined(O_NOATIME)
277 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
279 #if defined(O_CLOEXEC)
280 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
283 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
285 /* Don't terminate the list prematurely on 64-bit host+guest. */
286 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
287 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
/* Copy one utsname field, truncating to __NEW_UTS_LEN bytes and always
 * forcing a trailing NUL (strncpy alone does not guarantee termination). */
292 #define COPY_UTSNAME_FIELD(dest, src) \
294 /* __NEW_UTS_LEN doesn't include terminating null */ \
295 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
296 (dest)[__NEW_UTS_LEN] = '\0'; \
/*
 * uname(2) replacement: fill the guest-visible "struct new_utsname" (the
 * layout the Linux kernel uses) from the host utsname(3) result, copying
 * each field individually in case the two structure layouts differ.
 * On uname() failure the error path (not visible here) presumably
 * propagates the host error — TODO confirm.
 */
299 static int sys_uname(struct new_utsname *buf)
301 struct utsname uts_buf;
303 if (uname(&uts_buf) < 0)
307 * Just in case these have some differences, we
308 * translate utsname to new_utsname (which is the
309 * struct linux kernel uses).
312 memset(buf, 0, sizeof(*buf));
313 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
314 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
315 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
316 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
317 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
319 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
323 #undef COPY_UTSNAME_FIELD
/* getcwd(2)-style wrapper: on success the syscall convention here is to
 * return the number of bytes written including the trailing NUL; on failure
 * getcwd(3) has already set errno. */
326 static int sys_getcwd1(char *buf, size_t size)
328 if (getcwd(buf, size) == NULL) {
329 /* getcwd() sets errno */
332 return strlen(buf)+1;
337 * Host system seems to have atfile syscall stubs available. We
338 * now enable them one by one as specified by target syscall_nr.h.
/*
 * CONFIG_ATFILE branch: the host libc exposes the *at() family as functions,
 * so each sys_*at wrapper below simply forwards to the libc call.  Each one
 * is only compiled when the target actually uses the corresponding syscall.
 */
341 #ifdef TARGET_NR_faccessat
342 static int sys_faccessat(int dirfd, const char *pathname, int mode)
344 return (faccessat(dirfd, pathname, mode, 0));
347 #ifdef TARGET_NR_fchmodat
348 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
350 return (fchmodat(dirfd, pathname, mode, 0));
353 #if defined(TARGET_NR_fchownat)
354 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
355 gid_t group, int flags)
357 return (fchownat(dirfd, pathname, owner, group, flags));
/* Both fstatat64 and newfstatat map onto the libc fstatat(). */
360 #ifdef __NR_fstatat64
361 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
364 return (fstatat(dirfd, pathname, buf, flags));
367 #ifdef __NR_newfstatat
368 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
371 return (fstatat(dirfd, pathname, buf, flags));
374 #ifdef TARGET_NR_futimesat
375 static int sys_futimesat(int dirfd, const char *pathname,
376 const struct timeval times[2])
378 return (futimesat(dirfd, pathname, times))
381 #ifdef TARGET_NR_linkat
382 static int sys_linkat(int olddirfd, const char *oldpath,
383 int newdirfd, const char *newpath, int flags)
385 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
388 #ifdef TARGET_NR_mkdirat
389 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
391 return (mkdirat(dirfd, pathname, mode));
394 #ifdef TARGET_NR_mknodat
395 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
398 return (mknodat(dirfd, pathname, mode, dev));
401 #ifdef TARGET_NR_openat
402 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
405 * open(2) has extra parameter 'mode' when called with
408 if ((flags & O_CREAT) != 0) {
/* mode is only meaningful (and only safe to pass) when O_CREAT is set */
409 return (openat(dirfd, pathname, flags, mode));
411 return (openat(dirfd, pathname, flags));
414 #ifdef TARGET_NR_readlinkat
415 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
417 return (readlinkat(dirfd, pathname, buf, bufsiz));
420 #ifdef TARGET_NR_renameat
421 static int sys_renameat(int olddirfd, const char *oldpath,
422 int newdirfd, const char *newpath)
424 return (renameat(olddirfd, oldpath, newdirfd, newpath));
427 #ifdef TARGET_NR_symlinkat
428 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
430 return (symlinkat(oldpath, newdirfd, newpath));
433 #ifdef TARGET_NR_unlinkat
434 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
436 return (unlinkat(dirfd, pathname, flags));
439 #else /* !CONFIG_ATFILE */
/*
 * No libc *at() functions available: instantiate each sys_*at wrapper as a
 * raw host syscall stub instead, guarded on both target use (TARGET_NR_*)
 * and host availability (__NR_*).
 */
442 * Try direct syscalls instead
444 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
445 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
447 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
448 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
450 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
451 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
452 uid_t,owner,gid_t,group,int,flags)
454 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
455 defined(__NR_fstatat64)
456 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
457 struct stat *,buf,int,flags)
459 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
460 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
461 const struct timeval *,times)
463 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
464 defined(__NR_newfstatat)
465 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
466 struct stat *,buf,int,flags)
468 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
469 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
470 int,newdirfd,const char *,newpath,int,flags)
472 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
473 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
475 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
476 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
477 mode_t,mode,dev_t,dev)
479 #if defined(TARGET_NR_openat) && defined(__NR_openat)
480 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
482 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
483 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
484 char *,buf,size_t,bufsize)
486 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
487 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
488 int,newdirfd,const char *,newpath)
490 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
491 _syscall3(int,sys_symlinkat,const char *,oldpath,
492 int,newdirfd,const char *,newpath)
494 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
495 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
498 #endif /* CONFIG_ATFILE */
500 #ifdef CONFIG_UTIMENSAT
/* utimensat wrapper.  A NULL pathname selects futimens() on the dirfd itself,
 * mirroring the kernel's utimensat(NULL) convention; otherwise forward to
 * utimensat(2).  Without CONFIG_UTIMENSAT, fall back to a raw syscall stub. */
501 static int sys_utimensat(int dirfd, const char *pathname,
502 const struct timespec times[2], int flags)
504 if (pathname == NULL)
505 return futimens(dirfd, times);
507 return utimensat(dirfd, pathname, times, flags);
510 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
511 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
512 const struct timespec *,tsp,int,flags)
514 #endif /* CONFIG_UTIMENSAT */
516 #ifdef CONFIG_INOTIFY
517 #include <sys/inotify.h>
/* Thin forwarding wrappers around the host inotify API, each compiled only
 * when both the target and host support the call. */
519 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
520 static int sys_inotify_init(void)
522 return (inotify_init());
525 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
526 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
528 return (inotify_add_watch(fd, pathname, mask));
531 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
532 static int sys_inotify_rm_watch(int fd, int32_t wd)
534 return (inotify_rm_watch(fd, wd));
537 #ifdef CONFIG_INOTIFY1
538 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
539 static int sys_inotify_init1(int flags)
541 return (inotify_init1(flags));
/* Host has no inotify: silently drop these syscall numbers; guests can
 * usually survive without inotify support. */
546 /* Userspace can usually survive runtime without inotify */
547 #undef TARGET_NR_inotify_init
548 #undef TARGET_NR_inotify_init1
549 #undef TARGET_NR_inotify_add_watch
550 #undef TARGET_NR_inotify_rm_watch
551 #endif /* CONFIG_INOTIFY */
/* ppoll stub: when the host lacks __NR_ppoll it is defined to -1 so the
 * stamped-out syscall fails with ENOSYS rather than breaking the build. */
553 #if defined(TARGET_NR_ppoll)
555 # define __NR_ppoll -1
557 #define __NR_sys_ppoll __NR_ppoll
558 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
559 struct timespec *, timeout, const __sigset_t *, sigmask,
/* pselect6 stub, same -1 fallback trick as ppoll above. */
563 #if defined(TARGET_NR_pselect6)
564 #ifndef __NR_pselect6
565 # define __NR_pselect6 -1
567 #define __NR_sys_pselect6 __NR_pselect6
568 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
569 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
/* prlimit64 stub: uses a local host_rlimit64 struct because the glibc
 * rlimit layout may differ from what the raw syscall expects. */
572 #if defined(TARGET_NR_prlimit64)
573 #ifndef __NR_prlimit64
574 # define __NR_prlimit64 -1
576 #define __NR_sys_prlimit64 __NR_prlimit64
577 /* The glibc rlimit structure may not be that used by the underlying syscall */
578 struct host_rlimit64 {
582 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
583 const struct host_rlimit64 *, new_limit,
584 struct host_rlimit64 *, old_limit)
587 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
/* Per-target predicate: does this target's calling convention require 64-bit
 * syscall arguments to start on an even register (i.e. be passed in an
 * aligned register pair)?  For ARM this depends on the EABI flag at runtime. */
589 static inline int regpairs_aligned(void *cpu_env) {
590 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
592 #elif defined(TARGET_MIPS)
593 static inline int regpairs_aligned(void *cpu_env) { return 1; }
594 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
595 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
596 * of registers which translates to the same as ARM/MIPS, because we start with
598 static inline int regpairs_aligned(void *cpu_env) { return 1; }
600 static inline int regpairs_aligned(void *cpu_env) { return 0; }
/* Size of both errno translation tables below; must exceed every host and
 * target errno value used as an index. */
603 #define ERRNO_TABLE_SIZE 1200
605 /* target_to_host_errno_table[] is initialized from
606 * host_to_target_errno_table[] in syscall_init(). */
607 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
/* host errno -> target errno, indexed by host value; entries left zero are
 * treated as identity-mapped by host_to_target_errno() below. */
611 * This list is the union of errno values overridden in asm-<arch>/errno.h
612 * minus the errnos that are not actually generic to all archs.
614 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
615 [EIDRM] = TARGET_EIDRM,
616 [ECHRNG] = TARGET_ECHRNG,
617 [EL2NSYNC] = TARGET_EL2NSYNC,
618 [EL3HLT] = TARGET_EL3HLT,
619 [EL3RST] = TARGET_EL3RST,
620 [ELNRNG] = TARGET_ELNRNG,
621 [EUNATCH] = TARGET_EUNATCH,
622 [ENOCSI] = TARGET_ENOCSI,
623 [EL2HLT] = TARGET_EL2HLT,
624 [EDEADLK] = TARGET_EDEADLK,
625 [ENOLCK] = TARGET_ENOLCK,
626 [EBADE] = TARGET_EBADE,
627 [EBADR] = TARGET_EBADR,
628 [EXFULL] = TARGET_EXFULL,
629 [ENOANO] = TARGET_ENOANO,
630 [EBADRQC] = TARGET_EBADRQC,
631 [EBADSLT] = TARGET_EBADSLT,
632 [EBFONT] = TARGET_EBFONT,
633 [ENOSTR] = TARGET_ENOSTR,
634 [ENODATA] = TARGET_ENODATA,
635 [ETIME] = TARGET_ETIME,
636 [ENOSR] = TARGET_ENOSR,
637 [ENONET] = TARGET_ENONET,
638 [ENOPKG] = TARGET_ENOPKG,
639 [EREMOTE] = TARGET_EREMOTE,
640 [ENOLINK] = TARGET_ENOLINK,
641 [EADV] = TARGET_EADV,
642 [ESRMNT] = TARGET_ESRMNT,
643 [ECOMM] = TARGET_ECOMM,
644 [EPROTO] = TARGET_EPROTO,
645 [EDOTDOT] = TARGET_EDOTDOT,
646 [EMULTIHOP] = TARGET_EMULTIHOP,
647 [EBADMSG] = TARGET_EBADMSG,
648 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
649 [EOVERFLOW] = TARGET_EOVERFLOW,
650 [ENOTUNIQ] = TARGET_ENOTUNIQ,
651 [EBADFD] = TARGET_EBADFD,
652 [EREMCHG] = TARGET_EREMCHG,
653 [ELIBACC] = TARGET_ELIBACC,
654 [ELIBBAD] = TARGET_ELIBBAD,
655 [ELIBSCN] = TARGET_ELIBSCN,
656 [ELIBMAX] = TARGET_ELIBMAX,
657 [ELIBEXEC] = TARGET_ELIBEXEC,
658 [EILSEQ] = TARGET_EILSEQ,
659 [ENOSYS] = TARGET_ENOSYS,
660 [ELOOP] = TARGET_ELOOP,
661 [ERESTART] = TARGET_ERESTART,
662 [ESTRPIPE] = TARGET_ESTRPIPE,
663 [ENOTEMPTY] = TARGET_ENOTEMPTY,
664 [EUSERS] = TARGET_EUSERS,
665 [ENOTSOCK] = TARGET_ENOTSOCK,
666 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
667 [EMSGSIZE] = TARGET_EMSGSIZE,
668 [EPROTOTYPE] = TARGET_EPROTOTYPE,
669 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
670 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
671 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
672 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
673 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
674 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
675 [EADDRINUSE] = TARGET_EADDRINUSE,
676 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
677 [ENETDOWN] = TARGET_ENETDOWN,
678 [ENETUNREACH] = TARGET_ENETUNREACH,
679 [ENETRESET] = TARGET_ENETRESET,
680 [ECONNABORTED] = TARGET_ECONNABORTED,
681 [ECONNRESET] = TARGET_ECONNRESET,
682 [ENOBUFS] = TARGET_ENOBUFS,
683 [EISCONN] = TARGET_EISCONN,
684 [ENOTCONN] = TARGET_ENOTCONN,
685 [EUCLEAN] = TARGET_EUCLEAN,
686 [ENOTNAM] = TARGET_ENOTNAM,
687 [ENAVAIL] = TARGET_ENAVAIL,
688 [EISNAM] = TARGET_EISNAM,
689 [EREMOTEIO] = TARGET_EREMOTEIO,
690 [ESHUTDOWN] = TARGET_ESHUTDOWN,
691 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
692 [ETIMEDOUT] = TARGET_ETIMEDOUT,
693 [ECONNREFUSED] = TARGET_ECONNREFUSED,
694 [EHOSTDOWN] = TARGET_EHOSTDOWN,
695 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
696 [EALREADY] = TARGET_EALREADY,
697 [EINPROGRESS] = TARGET_EINPROGRESS,
698 [ESTALE] = TARGET_ESTALE,
699 [ECANCELED] = TARGET_ECANCELED,
700 [ENOMEDIUM] = TARGET_ENOMEDIUM,
701 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
703 [ENOKEY] = TARGET_ENOKEY,
706 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
709 [EKEYREVOKED] = TARGET_EKEYREVOKED,
712 [EKEYREJECTED] = TARGET_EKEYREJECTED,
715 [EOWNERDEAD] = TARGET_EOWNERDEAD,
717 #ifdef ENOTRECOVERABLE
718 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
/* Translate a host errno to the target's encoding; zero table entries mean
 * the value is identical on both sides and is returned unchanged (the
 * fall-through return is not visible in this chunk). */
722 static inline int host_to_target_errno(int err)
724 if(host_to_target_errno_table[err])
725 return host_to_target_errno_table[err];
/* Inverse mapping, via the table filled in by syscall_init(). */
729 static inline int target_to_host_errno(int err)
731 if (target_to_host_errno_table[err])
732 return target_to_host_errno_table[err];
/* Convert a host syscall result into the target convention: failures become
 * the negated target errno (success path not visible in this chunk). */
736 static inline abi_long get_errno(abi_long ret)
739 return -host_to_target_errno(errno);
/* Linux-style error test: values in [-4095, -1] encode errnos. */
744 static inline int is_error(abi_long ret)
746 return (abi_ulong)ret >= (abi_ulong)(-4096);
/* strerror() for a *target* errno value, with bounds checking against the
 * translation table size. */
749 char *target_strerror(int err)
751 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
754 return strerror(target_to_host_errno(err));
/* Guest heap state: current break, the initial break (lower bound), and the
 * highest host page already reserved for the heap. */
757 static abi_ulong target_brk;
758 static abi_ulong target_original_brk;
759 static abi_ulong brk_page;
/* Record the guest's initial program break (page-aligned); called at load. */
761 void target_set_brk(abi_ulong new_brk)
763 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
764 brk_page = HOST_PAGE_ALIGN(target_brk);
767 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
/* Debug tracing for do_brk(); compiled out by default. */
768 #define DEBUGF_BRK(message, args...)
770 /* do_brk() must return target values and target errnos. */
/*
 * Emulate brk(2) for the guest: grow (or shrink within bounds) the guest
 * heap.  Newly exposed bytes are zeroed to match anonymous-mapping
 * semantics.  Growth beyond the reserved pages is satisfied with
 * target_mmap(); a mapping that lands at the wrong address counts as
 * failure and is unmapped again (MAP_FIXED is deliberately avoided so we
 * never clobber existing host mappings).
 */
771 abi_long do_brk(abi_ulong new_brk)
773 abi_long mapped_addr;
776 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
779 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
/* Never shrink below the original break. */
782 if (new_brk < target_original_brk) {
783 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
788 /* If the new brk is less than the highest page reserved to the
789 * target heap allocation, set it and we're almost done... */
790 if (new_brk <= brk_page) {
791 /* Heap contents are initialized to zero, as for anonymous
793 if (new_brk > target_brk) {
794 memset(g2h(target_brk), 0, new_brk - target_brk);
796 target_brk = new_brk;
797 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
801 /* We need to allocate more memory after the brk... Note that
802 * we don't use MAP_FIXED because that will map over the top of
803 * any existing mapping (like the one with the host libc or qemu
804 * itself); instead we treat "mapped but at wrong address" as
805 * a failure and unmap again.
807 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
808 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
809 PROT_READ|PROT_WRITE,
810 MAP_ANON|MAP_PRIVATE, 0, 0));
812 if (mapped_addr == brk_page) {
813 /* Heap contents are initialized to zero, as for anonymous
814 * mapped pages. Technically the new pages are already
815 * initialized to zero since they *are* anonymous mapped
816 * pages, however we have to take care with the contents that
817 * come from the remaining part of the previous page: it may
818 * contains garbage data due to a previous heap usage (grown
820 memset(g2h(target_brk), 0, brk_page - target_brk);
822 target_brk = new_brk;
823 brk_page = HOST_PAGE_ALIGN(target_brk);
824 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
827 } else if (mapped_addr != -1) {
828 /* Mapped but at wrong address, meaning there wasn't actually
829 * enough space for this brk.
831 target_munmap(mapped_addr, new_alloc_size);
833 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
836 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
839 #if defined(TARGET_ALPHA)
840 /* We (partially) emulate OSF/1 on Alpha, which requires we
841 return a proper errno, not an unchanged brk value. */
842 return -TARGET_ENOMEM;
844 /* For everything else, return the previous break. */
/*
 * Unpack a guest fd_set (an array of abi_ulong words, one bit per fd) into
 * a host fd_set, byte-swapping each word via __get_user.  Returns
 * -TARGET_EFAULT when the guest memory cannot be locked.
 */
848 static inline abi_long copy_from_user_fdset(fd_set *fds,
849 abi_ulong target_fds_addr,
853 abi_ulong b, *target_fds;
855 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
856 if (!(target_fds = lock_user(VERIFY_READ,
858 sizeof(abi_ulong) * nw,
860 return -TARGET_EFAULT;
864 for (i = 0; i < nw; i++) {
865 /* grab the abi_ulong */
866 __get_user(b, &target_fds[i]);
867 for (j = 0; j < TARGET_ABI_BITS; j++) {
868 /* check the bit inside the abi_ulong */
875 unlock_user(target_fds, target_fds_addr, 0);
/* Convenience wrapper: a zero (NULL) guest address yields a NULL fd_set
 * pointer for select(); otherwise delegates to copy_from_user_fdset(). */
880 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
881 abi_ulong target_fds_addr,
884 if (target_fds_addr) {
885 if (copy_from_user_fdset(fds, target_fds_addr, n))
886 return -TARGET_EFAULT;
/* Inverse of copy_from_user_fdset(): pack a host fd_set back into the
 * guest's word array, re-swapping via __put_user. */
894 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
900 abi_ulong *target_fds;
902 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
903 if (!(target_fds = lock_user(VERIFY_WRITE,
905 sizeof(abi_ulong) * nw,
907 return -TARGET_EFAULT;
910 for (i = 0; i < nw; i++) {
912 for (j = 0; j < TARGET_ABI_BITS; j++) {
913 v |= ((FD_ISSET(k, fds) != 0) << j);
916 __put_user(v, &target_fds[i]);
919 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
924 #if defined(__alpha__)
/* Rescale a clock-tick count from the host's HZ to the target's HZ; when
 * both rates match the value passes through unchanged (that branch is
 * outside this chunk). */
930 static inline abi_long host_to_target_clock_t(long ticks)
932 #if HOST_HZ == TARGET_HZ
935 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
/*
 * Copy a host struct rusage into guest memory at target_addr, byte-swapping
 * every field with tswapal().  Returns -TARGET_EFAULT if the guest struct
 * cannot be locked for writing.
 */
939 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
940 const struct rusage *rusage)
942 struct target_rusage *target_rusage;
944 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
945 return -TARGET_EFAULT;
946 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
947 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
948 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
949 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
950 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
951 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
952 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
953 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
954 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
955 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
956 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
957 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
958 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
959 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
960 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
961 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
962 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
963 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
964 unlock_user_struct(target_rusage, target_addr, 1);
/* Convert a guest rlimit value to the host rlim_t.  The guest's
 * RLIM_INFINITY maps to the host's, and any value that does not survive the
 * round-trip through rlim_t (i.e. would overflow) is clamped to infinity. */
969 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
971 abi_ulong target_rlim_swap;
974 target_rlim_swap = tswapal(target_rlim);
975 if (target_rlim_swap == TARGET_RLIM_INFINITY)
976 return RLIM_INFINITY;
978 result = target_rlim_swap;
979 if (target_rlim_swap != (rlim_t)result)
980 return RLIM_INFINITY;
/* Inverse conversion: host values too large for the guest's abi_long are
 * reported as the guest's RLIM_INFINITY. */
985 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
987 abi_ulong target_rlim_swap;
990 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
991 target_rlim_swap = TARGET_RLIM_INFINITY;
993 target_rlim_swap = rlim;
994 result = tswapal(target_rlim_swap);
/* Map a guest RLIMIT_* resource code to the host's numbering (the two need
 * not agree across architectures). */
999 static inline int target_to_host_resource(int code)
1002 case TARGET_RLIMIT_AS:
1004 case TARGET_RLIMIT_CORE:
1006 case TARGET_RLIMIT_CPU:
1008 case TARGET_RLIMIT_DATA:
1010 case TARGET_RLIMIT_FSIZE:
1011 return RLIMIT_FSIZE;
1012 case TARGET_RLIMIT_LOCKS:
1013 return RLIMIT_LOCKS;
1014 case TARGET_RLIMIT_MEMLOCK:
1015 return RLIMIT_MEMLOCK;
1016 case TARGET_RLIMIT_MSGQUEUE:
1017 return RLIMIT_MSGQUEUE;
1018 case TARGET_RLIMIT_NICE:
1020 case TARGET_RLIMIT_NOFILE:
1021 return RLIMIT_NOFILE;
1022 case TARGET_RLIMIT_NPROC:
1023 return RLIMIT_NPROC;
1024 case TARGET_RLIMIT_RSS:
1026 case TARGET_RLIMIT_RTPRIO:
1027 return RLIMIT_RTPRIO;
1028 case TARGET_RLIMIT_SIGPENDING:
1029 return RLIMIT_SIGPENDING;
1030 case TARGET_RLIMIT_STACK:
1031 return RLIMIT_STACK;
/* Read a guest struct timeval into a host one (byte-swapping via
 * __get_user); -TARGET_EFAULT if the guest struct cannot be locked. */
1037 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1038 abi_ulong target_tv_addr)
1040 struct target_timeval *target_tv;
1042 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1043 return -TARGET_EFAULT;
1045 __get_user(tv->tv_sec, &target_tv->tv_sec);
1046 __get_user(tv->tv_usec, &target_tv->tv_usec);
1048 unlock_user_struct(target_tv, target_tv_addr, 0);
/* Inverse: write a host struct timeval out to guest memory. */
1053 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1054 const struct timeval *tv)
1056 struct target_timeval *target_tv;
1058 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1059 return -TARGET_EFAULT;
1061 __put_user(tv->tv_sec, &target_tv->tv_sec);
1062 __put_user(tv->tv_usec, &target_tv->tv_usec);
1064 unlock_user_struct(target_tv, target_tv_addr, 1);
1069 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/* Read a guest struct mq_attr into the host representation, swapping each
 * field; -TARGET_EFAULT if the guest struct cannot be locked. */
1072 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1073 abi_ulong target_mq_attr_addr)
1075 struct target_mq_attr *target_mq_attr;
1077 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1078 target_mq_attr_addr, 1))
1079 return -TARGET_EFAULT;
1081 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1082 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1083 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1084 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1086 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
/* Inverse: write a host struct mq_attr back out to guest memory. */
1091 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1092 const struct mq_attr *attr)
1094 struct target_mq_attr *target_mq_attr;
1096 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1097 target_mq_attr_addr, 0))
1098 return -TARGET_EFAULT;
1100 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1101 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1102 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1103 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1105 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1111 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1112 /* do_select() must return target values and target errnos. */
/*
 * Emulate select(2): unpack the three guest fd_sets and the timeout, run
 * the host select, then on success copy the (modified) fd_sets and the
 * remaining timeout back to guest memory.  NULL guest addresses propagate
 * as NULL pointers to the host call.
 */
1113 static abi_long do_select(int n,
1114 abi_ulong rfd_addr, abi_ulong wfd_addr,
1115 abi_ulong efd_addr, abi_ulong target_tv_addr)
1117 fd_set rfds, wfds, efds;
1118 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1119 struct timeval tv, *tv_ptr;
1122 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1126 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1130 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1135 if (target_tv_addr) {
1136 if (copy_from_user_timeval(&tv, target_tv_addr))
1137 return -TARGET_EFAULT;
1143 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1145 if (!is_error(ret)) {
1146 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1147 return -TARGET_EFAULT;
1148 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1149 return -TARGET_EFAULT;
1150 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1151 return -TARGET_EFAULT;
1153 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1154 return -TARGET_EFAULT;
/* pipe2(2) passthrough (only compiled when the host provides pipe2). */
1161 static abi_long do_pipe2(int host_pipe[], int flags)
1164 return pipe2(host_pipe, flags);
/*
 * Emulate pipe/pipe2 for the guest.  Plain pipe is used when flags == 0.
 * Alpha, MIPS and SH4 return the two descriptors in registers for the
 * original pipe syscall (second fd via a CPU register, first as the return
 * value); everyone else gets both fds stored through the guest pointer.
 */
1170 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1171 int flags, int is_pipe2)
1175 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1178 return get_errno(ret);
1180 /* Several targets have special calling conventions for the original
1181 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1183 #if defined(TARGET_ALPHA)
1184 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1185 return host_pipe[0];
1186 #elif defined(TARGET_MIPS)
1187 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1188 return host_pipe[0];
1189 #elif defined(TARGET_SH4)
1190 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1191 return host_pipe[0];
1195 if (put_user_s32(host_pipe[0], pipedes)
1196 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1197 return -TARGET_EFAULT;
1198 return get_errno(ret);
/* Convert a guest multicast membership request (ip_mreq or the larger
 * ip_mreqn) into the host's struct ip_mreqn.  "len" distinguishes the
 * two variants; imr_ifindex exists only in the ip_mreqn form. */
1201 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1202 abi_ulong target_addr,
1205     struct target_ip_mreqn *target_smreqn;
1207     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1209         return -TARGET_EFAULT;
/* IPv4 addresses are copied without byte-swapping (kept as stored). */
1210     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1211     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1212     if (len == sizeof(struct target_ip_mreqn))
1213         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1214     unlock_user(target_smreqn, target_addr, 0);
/* Copy a sockaddr from guest memory to a host buffer, byte-swapping
 * sa_family.  NOTE(review): for AF_UNIX this routine may extend "len"
 * by one byte to NUL-terminate sun_path, so callers must allocate
 * len+1 bytes for "addr" (see do_bind's alloca(addrlen+1)). */
1219 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1220 abi_ulong target_addr,
1223     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1224     sa_family_t sa_family;
1225     struct target_sockaddr *target_saddr;
1227     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1229         return -TARGET_EFAULT;
1231     sa_family = tswap16(target_saddr->sa_family);
1233     /* Oops. The caller might send a incomplete sun_path; sun_path
1234      * must be terminated by \0 (see the manual page), but
1235      * unfortunately it is quite common to specify sockaddr_un
1236      * length as "strlen(x->sun_path)" while it should be
1237      * "strlen(...) + 1". We'll fix that here if needed.
1238      * Linux kernel has a similar feature.
1241     if (sa_family == AF_UNIX) {
1242         if (len < unix_maxlen && len > 0) {
1243             char *cp = (char*)target_saddr;
/* Last byte non-NUL but followed by a NUL: include the terminator
 * (the len adjustment itself is in an elided line here). */
1245             if ( cp[len-1] && !cp[len] )
/* Clamp to the maximum size of a host sockaddr_un. */
1248         if (len > unix_maxlen)
1252     memcpy(addr, target_saddr, len);
1253     addr->sa_family = sa_family;
1254     unlock_user(target_saddr, target_addr, 0);
/* Copy a host sockaddr back into guest memory, byte-swapping only the
 * sa_family field (the raw address bytes are copied verbatim first,
 * then sa_family is overwritten with the swapped value). */
1259 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1260 struct sockaddr *addr,
1263     struct target_sockaddr *target_saddr;
1265     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1267         return -TARGET_EFAULT;
1268     memcpy(target_saddr, addr, len);
1269     target_saddr->sa_family = tswap16(addr->sa_family);
1270     unlock_user(target_saddr, target_addr, len);
/* Convert the ancillary-data (cmsg) chain of a guest msghdr into the
 * host msghdr's control buffer.  Only SCM_RIGHTS (fd passing) gets a
 * real conversion (fds are 32-bit swapped); anything else is copied
 * raw with a warning.  Sets msgh->msg_controllen to the space used. */
1275 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1276 struct target_msghdr *target_msgh)
1278     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1279     abi_long msg_controllen;
1280     abi_ulong target_cmsg_addr;
1281     struct target_cmsghdr *target_cmsg;
1282     socklen_t space = 0;
/* Nothing to do if the guest control area cannot hold even one header. */
1284     msg_controllen = tswapal(target_msgh->msg_controllen);
1285     if (msg_controllen < sizeof (struct target_cmsghdr))
1287     target_cmsg_addr = tswapal(target_msgh->msg_control);
1288     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1290         return -TARGET_EFAULT;
1292     while (cmsg && target_cmsg) {
1293         void *data = CMSG_DATA(cmsg);
1294         void *target_data = TARGET_CMSG_DATA(target_cmsg);
/* Payload length = guest cmsg_len minus the (aligned) header size. */
1296         int len = tswapal(target_cmsg->cmsg_len)
1297             - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
/* Stop (dropping the rest) if the host buffer would overflow. */
1299         space += CMSG_SPACE(len);
1300         if (space > msgh->msg_controllen) {
1301             space -= CMSG_SPACE(len);
1302             gemu_log("Host cmsg overflow\n");
1306         cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1307         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1308         cmsg->cmsg_len = CMSG_LEN(len);
1310         if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1311             gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1312             memcpy(data, target_data, len);
/* SCM_RIGHTS: swap each 32-bit file descriptor individually. */
1314             int *fd = (int *)data;
1315             int *target_fd = (int *)target_data;
1316             int i, numfds = len / sizeof(int);
1318             for (i = 0; i < numfds; i++)
1319                 fd[i] = tswap32(target_fd[i]);
1322         cmsg = CMSG_NXTHDR(msgh, cmsg);
1323         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1325     unlock_user(target_cmsg, target_cmsg_addr, 0);
1327     msgh->msg_controllen = space;
/* Mirror of target_to_host_cmsg: convert the host msghdr's ancillary
 * data back into the guest's control buffer.  SCM_RIGHTS fds and
 * SO_TIMESTAMP timevals get real conversions; anything else is copied
 * raw with a warning.  Stores the space used in msg_controllen. */
1331 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1332 struct msghdr *msgh)
1334     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1335     abi_long msg_controllen;
1336     abi_ulong target_cmsg_addr;
1337     struct target_cmsghdr *target_cmsg;
1338     socklen_t space = 0;
1340     msg_controllen = tswapal(target_msgh->msg_controllen);
1341     if (msg_controllen < sizeof (struct target_cmsghdr))
1343     target_cmsg_addr = tswapal(target_msgh->msg_control);
1344     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1346         return -TARGET_EFAULT;
1348     while (cmsg && target_cmsg) {
1349         void *data = CMSG_DATA(cmsg);
1350         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1352         int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
/* Truncate (dropping the rest) if the guest buffer would overflow. */
1354         space += TARGET_CMSG_SPACE(len);
1355         if (space > msg_controllen) {
1356             space -= TARGET_CMSG_SPACE(len);
1357             gemu_log("Target cmsg overflow\n");
1361         target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1362         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1363         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1365         if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1366                                 (cmsg->cmsg_type == SCM_RIGHTS)) {
1367             int *fd = (int *)data;
1368             int *target_fd = (int *)target_data;
1369             int i, numfds = len / sizeof(int);
1371             for (i = 0; i < numfds; i++)
1372                 target_fd[i] = tswap32(fd[i]);
1373         } else if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1374                    (cmsg->cmsg_type == SO_TIMESTAMP) &&
1375                    (len == sizeof(struct timeval))) {
1376             /* copy struct timeval to target */
1377             struct timeval *tv = (struct timeval *)data;
1378             struct target_timeval *target_tv =
1379                 (struct target_timeval *)target_data;
1381             target_tv->tv_sec = tswapal(tv->tv_sec);
1382             target_tv->tv_usec = tswapal(tv->tv_usec);
1384             gemu_log("Unsupported ancillary data: %d/%d\n",
1385                      cmsg->cmsg_level, cmsg->cmsg_type);
1386             memcpy(target_data, data, len);
1389         cmsg = CMSG_NXTHDR(msgh, cmsg);
1390         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1392     unlock_user(target_cmsg, target_cmsg_addr, space);
1394     target_msgh->msg_controllen = tswapal(space);
1398 /* do_setsockopt() Must return target values and target errnos. */
/* Emulate setsockopt(2): fetch the option value from guest memory in
 * the representation each level/option expects, translate option
 * names where target and host numbering differ (SOL_SOCKET), and call
 * the host setsockopt(). */
1399 static abi_long do_setsockopt(int sockfd, int level, int optname,
1400 abi_ulong optval_addr, socklen_t optlen)
1404     struct ip_mreqn *ip_mreq;
1405     struct ip_mreq_source *ip_mreq_source;
1409         /* TCP options all take an 'int' value.  */
1410         if (optlen < sizeof(uint32_t))
1411             return -TARGET_EINVAL;
1413         if (get_user_u32(val, optval_addr))
1414             return -TARGET_EFAULT;
1415         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1422     case IP_ROUTER_ALERT:
1426     case IP_MTU_DISCOVER:
1432     case IP_MULTICAST_TTL:
1433     case IP_MULTICAST_LOOP:
/* These IP options accept either an int or a single byte. */
1435         if (optlen >= sizeof(uint32_t)) {
1436             if (get_user_u32(val, optval_addr))
1437                 return -TARGET_EFAULT;
1438         } else if (optlen >= 1) {
1439             if (get_user_u8(val, optval_addr))
1440                 return -TARGET_EFAULT;
1442         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1444     case IP_ADD_MEMBERSHIP:
1445     case IP_DROP_MEMBERSHIP:
/* Accept either the short ip_mreq or the full ip_mreqn layout. */
1446         if (optlen < sizeof (struct target_ip_mreq) ||
1447             optlen > sizeof (struct target_ip_mreqn))
1448             return -TARGET_EINVAL;
1450         ip_mreq = (struct ip_mreqn *) alloca(optlen);
1451         target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1452         ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1455     case IP_BLOCK_SOURCE:
1456     case IP_UNBLOCK_SOURCE:
1457     case IP_ADD_SOURCE_MEMBERSHIP:
1458     case IP_DROP_SOURCE_MEMBERSHIP:
1459         if (optlen != sizeof (struct target_ip_mreq_source))
1460             return -TARGET_EINVAL;
/* ip_mreq_source is all network-order data: pass through unswapped. */
1462         ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1463         ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1464         unlock_user (ip_mreq_source, optval_addr, 0);
1474         /* struct icmp_filter takes an u32 value */
1475         if (optlen < sizeof(uint32_t)) {
1476             return -TARGET_EINVAL;
1479         if (get_user_u32(val, optval_addr)) {
1480             return -TARGET_EFAULT;
1482         ret = get_errno(setsockopt(sockfd, level, optname,
1483                                    &val, sizeof(val)));
1490     case TARGET_SOL_SOCKET:
1492         /* Options with 'int' argument. */
/* Map target SO_* constants onto host ones; numbering differs per arch. */
1493         case TARGET_SO_DEBUG:
1496         case TARGET_SO_REUSEADDR:
1497 optname = SO_REUSEADDR;
1499         case TARGET_SO_TYPE:
1502         case TARGET_SO_ERROR:
1505         case TARGET_SO_DONTROUTE:
1506 optname = SO_DONTROUTE;
1508         case TARGET_SO_BROADCAST:
1509 optname = SO_BROADCAST;
1511         case TARGET_SO_SNDBUF:
1512 optname = SO_SNDBUF;
1514         case TARGET_SO_RCVBUF:
1515 optname = SO_RCVBUF;
1517         case TARGET_SO_KEEPALIVE:
1518 optname = SO_KEEPALIVE;
1520         case TARGET_SO_OOBINLINE:
1521 optname = SO_OOBINLINE;
1523         case TARGET_SO_NO_CHECK:
1524 optname = SO_NO_CHECK;
1526         case TARGET_SO_PRIORITY:
1527 optname = SO_PRIORITY;
1530         case TARGET_SO_BSDCOMPAT:
1531 optname = SO_BSDCOMPAT;
1534         case TARGET_SO_PASSCRED:
1535 optname = SO_PASSCRED;
1537         case TARGET_SO_TIMESTAMP:
1538 optname = SO_TIMESTAMP;
1540         case TARGET_SO_RCVLOWAT:
1541 optname = SO_RCVLOWAT;
1543         case TARGET_SO_RCVTIMEO:
1544 optname = SO_RCVTIMEO;
1546         case TARGET_SO_SNDTIMEO:
1547 optname = SO_SNDTIMEO;
1553         if (optlen < sizeof(uint32_t))
1554             return -TARGET_EINVAL;
1556         if (get_user_u32(val, optval_addr))
1557             return -TARGET_EFAULT;
1558         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1562         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1563         ret = -TARGET_ENOPROTOOPT;
1568 /* do_getsockopt() Must return target values and target errnos. */
/* Emulate getsockopt(2): translate SOL_SOCKET option names, fetch the
 * value from the host, and write it back to the guest honoring the
 * guest's declared buffer length (1-byte or 4-byte result). */
1569 static abi_long do_getsockopt(int sockfd, int level, int optname,
1570 abi_ulong optval_addr, abi_ulong optlen)
1577     case TARGET_SOL_SOCKET:
1580         /* These don't just return a single integer */
1581         case TARGET_SO_LINGER:
1582         case TARGET_SO_RCVTIMEO:
1583         case TARGET_SO_SNDTIMEO:
1584         case TARGET_SO_PEERNAME:
1586         case TARGET_SO_PEERCRED: {
1589             struct target_ucred *tcr;
1591             if (get_user_u32(len, optlen)) {
1592                 return -TARGET_EFAULT;
1595                 return -TARGET_EINVAL;
/* Fetch host ucred, then copy pid/uid/gid field-by-field to the
 * target layout with the proper byte order. */
1599             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1607             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1608                 return -TARGET_EFAULT;
1610             __put_user(cr.pid, &tcr->pid);
1611             __put_user(cr.uid, &tcr->uid);
1612             __put_user(cr.gid, &tcr->gid);
1613             unlock_user_struct(tcr, optval_addr, 1);
1614             if (put_user_u32(len, optlen)) {
1615                 return -TARGET_EFAULT;
1619         /* Options with 'int' argument. */
1620         case TARGET_SO_DEBUG:
1623         case TARGET_SO_REUSEADDR:
1624             optname = SO_REUSEADDR;
1626         case TARGET_SO_TYPE:
1629         case TARGET_SO_ERROR:
1632         case TARGET_SO_DONTROUTE:
1633             optname = SO_DONTROUTE;
1635         case TARGET_SO_BROADCAST:
1636             optname = SO_BROADCAST;
1638         case TARGET_SO_SNDBUF:
1639             optname = SO_SNDBUF;
1641         case TARGET_SO_RCVBUF:
1642             optname = SO_RCVBUF;
1644         case TARGET_SO_KEEPALIVE:
1645             optname = SO_KEEPALIVE;
1647         case TARGET_SO_OOBINLINE:
1648             optname = SO_OOBINLINE;
1650         case TARGET_SO_NO_CHECK:
1651             optname = SO_NO_CHECK;
1653         case TARGET_SO_PRIORITY:
1654             optname = SO_PRIORITY;
1657         case TARGET_SO_BSDCOMPAT:
1658             optname = SO_BSDCOMPAT;
1661         case TARGET_SO_PASSCRED:
1662             optname = SO_PASSCRED;
1664         case TARGET_SO_TIMESTAMP:
1665             optname = SO_TIMESTAMP;
1667         case TARGET_SO_RCVLOWAT:
1668             optname = SO_RCVLOWAT;
1675         /* TCP options all take an 'int' value.  */
1677         if (get_user_u32(len, optlen))
1678             return -TARGET_EFAULT;
1680             return -TARGET_EINVAL;
1682         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
/* Write back as u32 or u8 depending on the guest's buffer size
 * (the size dispatch lives in elided lines here). */
1688             if (put_user_u32(val, optval_addr))
1689                 return -TARGET_EFAULT;
1691             if (put_user_u8(val, optval_addr))
1692                 return -TARGET_EFAULT;
1694         if (put_user_u32(len, optlen))
1695             return -TARGET_EFAULT;
1702         case IP_ROUTER_ALERT:
1706         case IP_MTU_DISCOVER:
1712         case IP_MULTICAST_TTL:
1713         case IP_MULTICAST_LOOP:
1714             if (get_user_u32(len, optlen))
1715                 return -TARGET_EFAULT;
1717                 return -TARGET_EINVAL;
1719             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
/* A short guest buffer gets a single byte when the value fits. */
1722             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1724                 if (put_user_u32(len, optlen)
1725                     || put_user_u8(val, optval_addr))
1726                     return -TARGET_EFAULT;
1728                 if (len > sizeof(int))
1730                 if (put_user_u32(len, optlen)
1731                     || put_user_u32(val, optval_addr))
1732                     return -TARGET_EFAULT;
1736             ret = -TARGET_ENOPROTOOPT;
1742         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1744         ret = -TARGET_EOPNOTSUPP;
/* Build a host struct iovec array from a guest iovec array: validates
 * count against IOV_MAX, calloc()s the host array, and locks each
 * guest buffer with "type"/"copy" access.  The total length is clamped
 * to max_len as the kernel would.  On failure returns NULL (with errno
 * set via the elided error paths).  NOTE(review): the returned array is
 * presumably released by unlock_iovec() -- confirm ownership there. */
1750 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1751 int count, int copy)
1753     struct target_iovec *target_vec;
1755     abi_ulong total_len, max_len;
1762     if (count > IOV_MAX) {
1767     vec = calloc(count, sizeof(struct iovec));
1773     target_vec = lock_user(VERIFY_READ, target_addr,
1774                            count * sizeof(struct target_iovec), 1);
1775     if (target_vec == NULL) {
1780     /* ??? If host page size > target page size, this will result in a
1781        value larger than what we can actually support.  */
1782     max_len = 0x7fffffff & TARGET_PAGE_MASK;
1785     for (i = 0; i < count; i++) {
1786         abi_ulong base = tswapal(target_vec[i].iov_base);
1787         abi_long len = tswapal(target_vec[i].iov_len);
1792         } else if (len == 0) {
1793             /* Zero length pointer is ignored.  */
1794             vec[i].iov_base = 0;
1796             vec[i].iov_base = lock_user(type, base, len, copy);
1797             if (!vec[i].iov_base) {
/* Clamp the element so the running total never exceeds max_len. */
1801             if (len > max_len - total_len) {
1802                 len = max_len - total_len;
1805             vec[i].iov_len = len;
1809     unlock_user(target_vec, target_addr, 0);
1815     unlock_user(target_vec, target_addr, 0);
1819 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1820 int count, int copy)
1822 struct target_iovec *target_vec;
1825 target_vec = lock_user(VERIFY_READ, target_addr,
1826 count * sizeof(struct target_iovec), 1);
1828 for (i = 0; i < count; i++) {
1829 abi_ulong base = tswapal(target_vec[i].iov_base);
1830 abi_long len = tswapal(target_vec[i].iov_base);
1834 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1836 unlock_user(target_vec, target_addr, 0);
1842 /* do_socket() Must return target values and target errnos. */
/* Emulate socket(2).  MIPS uses its own SOCK_* numbering, so the type
 * is remapped first; netlink sockets are refused outright. */
1843 static abi_long do_socket(int domain, int type, int protocol)
1845 #if defined(TARGET_MIPS)
1847     case TARGET_SOCK_DGRAM:
1850     case TARGET_SOCK_STREAM:
1853     case TARGET_SOCK_RAW:
1856     case TARGET_SOCK_RDM:
1859     case TARGET_SOCK_SEQPACKET:
1860         type = SOCK_SEQPACKET;
1862     case TARGET_SOCK_PACKET:
1867     if (domain == PF_NETLINK)
1868         return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */
1869     return get_errno(socket(domain, type, protocol));
1872 /* do_bind() Must return target values and target errnos. */
1873 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1879     if ((int)addrlen < 0) {
1880         return -TARGET_EINVAL;
/* +1 byte: target_to_host_sockaddr() may NUL-terminate an AF_UNIX
 * sun_path, growing the copy by one byte beyond addrlen. */
1883     addr = alloca(addrlen+1);
1885     ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1889     return get_errno(bind(sockfd, addr, addrlen));
1892 /* do_connect() Must return target values and target errnos. */
1893 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1899 if ((int)addrlen < 0) {
1900 return -TARGET_EINVAL;
1903 addr = alloca(addrlen);
1905 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1909 return get_errno(connect(sockfd, addr, addrlen));
1912 /* do_sendrecvmsg() Must return target values and target errnos. */
/* Emulate sendmsg(2)/recvmsg(2) ("send" selects direction): convert the
 * guest msghdr (name, iovec array, control data) to host form, perform
 * the call, and for receives convert name/control data back. */
1913 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1914 int flags, int send)
1917     struct target_msghdr *msgp;
1921     abi_ulong target_vec;
1924     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1928         return -TARGET_EFAULT;
1929     if (msgp->msg_name) {
1930         msg.msg_namelen = tswap32(msgp->msg_namelen);
1931         msg.msg_name = alloca(msg.msg_namelen);
1932         ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1938         msg.msg_name = NULL;
1939         msg.msg_namelen = 0;
/* NOTE(review): control buffer is sized at twice the guest's
 * msg_controllen -- presumably headroom for host cmsg headers being
 * larger than target ones; confirm against TARGET_CMSG_* macros. */
1941     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1942     msg.msg_control = alloca(msg.msg_controllen);
1943     msg.msg_flags = tswap32(msgp->msg_flags);
1945     count = tswapal(msgp->msg_iovlen);
1946     target_vec = tswapal(msgp->msg_iov);
1947     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
1948                      target_vec, count, send);
1950         ret = -host_to_target_errno(errno);
1953     msg.msg_iovlen = count;
1957         ret = target_to_host_cmsg(&msg, msgp);
1959             ret = get_errno(sendmsg(fd, &msg, flags));
1961         ret = get_errno(recvmsg(fd, &msg, flags));
1962         if (!is_error(ret)) {
1964             ret = host_to_target_cmsg(msgp, &msg);
1965             if (!is_error(ret)) {
1966                 msgp->msg_namelen = tswap32(msg.msg_namelen);
1967                 if (msg.msg_name != NULL) {
1968                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
1969                                     msg.msg_name, msg.msg_namelen);
/* Buffers are written back to the guest only on receive (!send). */
1981     unlock_iovec(vec, target_vec, count, !send);
1983     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1987 /* do_accept() Must return target values and target errnos. */
/* Emulate accept(2).  A NULL guest addr means the caller does not want
 * the peer address; otherwise the addrlen is read, validated, and the
 * accepted peer's address is copied back out. */
1988 static abi_long do_accept(int fd, abi_ulong target_addr,
1989 abi_ulong target_addrlen_addr)
1995     if (target_addr == 0)
1996        return get_errno(accept(fd, NULL, NULL));
1998     /* linux returns EINVAL if addrlen pointer is invalid */
1999     if (get_user_u32(addrlen, target_addrlen_addr))
2000         return -TARGET_EINVAL;
2002     if ((int)addrlen < 0) {
2003         return -TARGET_EINVAL;
2006     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2007         return -TARGET_EINVAL;
2009     addr = alloca(addrlen);
2011     ret = get_errno(accept(fd, addr, &addrlen));
2012     if (!is_error(ret)) {
2013         host_to_target_sockaddr(target_addr, addr, addrlen);
2014         if (put_user_u32(addrlen, target_addrlen_addr))
2015             ret = -TARGET_EFAULT;
2020 /* do_getpeername() Must return target values and target errnos. */
/* Emulate getpeername(2): read/validate the guest addrlen, fetch the
 * peer address from the host, and copy address + updated length back. */
2021 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2022 abi_ulong target_addrlen_addr)
2028     if (get_user_u32(addrlen, target_addrlen_addr))
2029         return -TARGET_EFAULT;
2031     if ((int)addrlen < 0) {
2032         return -TARGET_EINVAL;
2035     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2036         return -TARGET_EFAULT;
2038     addr = alloca(addrlen);
2040     ret = get_errno(getpeername(fd, addr, &addrlen));
2041     if (!is_error(ret)) {
2042         host_to_target_sockaddr(target_addr, addr, addrlen);
2043         if (put_user_u32(addrlen, target_addrlen_addr))
2044             ret = -TARGET_EFAULT;
2049 /* do_getsockname() Must return target values and target errnos. */
/* Emulate getsockname(2); identical structure to do_getpeername(). */
2050 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2051 abi_ulong target_addrlen_addr)
2057     if (get_user_u32(addrlen, target_addrlen_addr))
2058         return -TARGET_EFAULT;
2060     if ((int)addrlen < 0) {
2061         return -TARGET_EINVAL;
2064     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2065         return -TARGET_EFAULT;
2067     addr = alloca(addrlen);
2069     ret = get_errno(getsockname(fd, addr, &addrlen));
2070     if (!is_error(ret)) {
2071         host_to_target_sockaddr(target_addr, addr, addrlen);
2072         if (put_user_u32(addrlen, target_addrlen_addr))
2073             ret = -TARGET_EFAULT;
2078 /* do_socketpair() Must return target values and target errnos. */
/* Emulate socketpair(2): create the pair on the host, then store the
 * two descriptors into the guest's int[2] at target_tab_addr. */
2079 static abi_long do_socketpair(int domain, int type, int protocol,
2080 abi_ulong target_tab_addr)
2085     ret = get_errno(socketpair(domain, type, protocol, tab));
2086     if (!is_error(ret)) {
2087         if (put_user_s32(tab[0], target_tab_addr)
2088             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2089             ret = -TARGET_EFAULT;
2094 /* do_sendto() Must return target values and target errnos. */
2095 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2096 abi_ulong target_addr, socklen_t addrlen)
2102 if ((int)addrlen < 0) {
2103 return -TARGET_EINVAL;
2106 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2108 return -TARGET_EFAULT;
2110 addr = alloca(addrlen);
2111 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2113 unlock_user(host_msg, msg, 0);
2116 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2118 ret = get_errno(send(fd, host_msg, len, flags));
2120 unlock_user(host_msg, msg, 0);
2124 /* do_recvfrom() Must return target values and target errnos. */
/* Emulate recvfrom(2)/recv(2): lock the guest buffer for writing,
 * receive, then copy the sender's address back out when requested.
 * A zero target_addr degrades to plain recv(). */
2125 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2126 abi_ulong target_addr,
2127 abi_ulong target_addrlen)
2134     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2136         return -TARGET_EFAULT;
2138         if (get_user_u32(addrlen, target_addrlen)) {
2139             ret = -TARGET_EFAULT;
2142         if ((int)addrlen < 0) {
2143             ret = -TARGET_EINVAL;
2146         addr = alloca(addrlen);
2147         ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2149         addr = NULL; /* To keep compiler quiet.  */
2150         ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2152     if (!is_error(ret)) {
2154             host_to_target_sockaddr(target_addr, addr, addrlen);
2155             if (put_user_u32(addrlen, target_addrlen)) {
2156                 ret = -TARGET_EFAULT;
/* Success: unlock writing "len" bytes back; failure: write nothing. */
2160         unlock_user(host_msg, msg, len);
2163         unlock_user(host_msg, msg, 0);
2168 #ifdef TARGET_NR_socketcall
2169 /* do_socketcall() Must return target values and target errnos. */
/* Demultiplex the legacy socketcall(2) syscall: "num" selects the
 * operation and "vptr" points at an abi_ulong argument vector in guest
 * memory.  Each case loads its arguments and dispatches to the
 * corresponding do_*() helper above. */
2170 static abi_long do_socketcall(int num, abi_ulong vptr)
2173     const int n = sizeof(abi_ulong);
2178             abi_ulong domain, type, protocol;
2180             if (get_user_ual(domain, vptr)
2181                 || get_user_ual(type, vptr + n)
2182                 || get_user_ual(protocol, vptr + 2 * n))
2183                 return -TARGET_EFAULT;
2185             ret = do_socket(domain, type, protocol);
2191             abi_ulong target_addr;
2194             if (get_user_ual(sockfd, vptr)
2195                 || get_user_ual(target_addr, vptr + n)
2196                 || get_user_ual(addrlen, vptr + 2 * n))
2197                 return -TARGET_EFAULT;
2199             ret = do_bind(sockfd, target_addr, addrlen);
2202     case SOCKOP_connect:
2205             abi_ulong target_addr;
2208             if (get_user_ual(sockfd, vptr)
2209                 || get_user_ual(target_addr, vptr + n)
2210                 || get_user_ual(addrlen, vptr + 2 * n))
2211                 return -TARGET_EFAULT;
2213             ret = do_connect(sockfd, target_addr, addrlen);
2218             abi_ulong sockfd, backlog;
2220             if (get_user_ual(sockfd, vptr)
2221                 || get_user_ual(backlog, vptr + n))
2222                 return -TARGET_EFAULT;
2224             ret = get_errno(listen(sockfd, backlog));
2230             abi_ulong target_addr, target_addrlen;
2232             if (get_user_ual(sockfd, vptr)
2233                 || get_user_ual(target_addr, vptr + n)
2234                 || get_user_ual(target_addrlen, vptr + 2 * n))
2235                 return -TARGET_EFAULT;
2237             ret = do_accept(sockfd, target_addr, target_addrlen);
2240     case SOCKOP_getsockname:
2243             abi_ulong target_addr, target_addrlen;
2245             if (get_user_ual(sockfd, vptr)
2246                 || get_user_ual(target_addr, vptr + n)
2247                 || get_user_ual(target_addrlen, vptr + 2 * n))
2248                 return -TARGET_EFAULT;
2250             ret = do_getsockname(sockfd, target_addr, target_addrlen);
2253     case SOCKOP_getpeername:
2256             abi_ulong target_addr, target_addrlen;
2258             if (get_user_ual(sockfd, vptr)
2259                 || get_user_ual(target_addr, vptr + n)
2260                 || get_user_ual(target_addrlen, vptr + 2 * n))
2261                 return -TARGET_EFAULT;
2263             ret = do_getpeername(sockfd, target_addr, target_addrlen);
2266     case SOCKOP_socketpair:
2268             abi_ulong domain, type, protocol;
2271             if (get_user_ual(domain, vptr)
2272                 || get_user_ual(type, vptr + n)
2273                 || get_user_ual(protocol, vptr + 2 * n)
2274                 || get_user_ual(tab, vptr + 3 * n))
2275                 return -TARGET_EFAULT;
2277             ret = do_socketpair(domain, type, protocol, tab);
/* send/recv without an address: reuse do_sendto/do_recvfrom with 0. */
2287             if (get_user_ual(sockfd, vptr)
2288                 || get_user_ual(msg, vptr + n)
2289                 || get_user_ual(len, vptr + 2 * n)
2290                 || get_user_ual(flags, vptr + 3 * n))
2291                 return -TARGET_EFAULT;
2293             ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2303             if (get_user_ual(sockfd, vptr)
2304                 || get_user_ual(msg, vptr + n)
2305                 || get_user_ual(len, vptr + 2 * n)
2306                 || get_user_ual(flags, vptr + 3 * n))
2307                 return -TARGET_EFAULT;
2309             ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2321             if (get_user_ual(sockfd, vptr)
2322                 || get_user_ual(msg, vptr + n)
2323                 || get_user_ual(len, vptr + 2 * n)
2324                 || get_user_ual(flags, vptr + 3 * n)
2325                 || get_user_ual(addr, vptr + 4 * n)
2326                 || get_user_ual(addrlen, vptr + 5 * n))
2327                 return -TARGET_EFAULT;
2329             ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2332     case SOCKOP_recvfrom:
2341             if (get_user_ual(sockfd, vptr)
2342                 || get_user_ual(msg, vptr + n)
2343                 || get_user_ual(len, vptr + 2 * n)
2344                 || get_user_ual(flags, vptr + 3 * n)
2345                 || get_user_ual(addr, vptr + 4 * n)
2346                 || get_user_ual(addrlen, vptr + 5 * n))
2347                 return -TARGET_EFAULT;
2349             ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2352     case SOCKOP_shutdown:
2354             abi_ulong sockfd, how;
2356             if (get_user_ual(sockfd, vptr)
2357                 || get_user_ual(how, vptr + n))
2358                 return -TARGET_EFAULT;
2360             ret = get_errno(shutdown(sockfd, how));
2363     case SOCKOP_sendmsg:
2364     case SOCKOP_recvmsg:
2367             abi_ulong target_msg;
2370             if (get_user_ual(fd, vptr)
2371                 || get_user_ual(target_msg, vptr + n)
2372                 || get_user_ual(flags, vptr + 2 * n))
2373                 return -TARGET_EFAULT;
2375             ret = do_sendrecvmsg(fd, target_msg, flags,
2376                                  (num == SOCKOP_sendmsg));
2379     case SOCKOP_setsockopt:
2387             if (get_user_ual(sockfd, vptr)
2388                 || get_user_ual(level, vptr + n)
2389                 || get_user_ual(optname, vptr + 2 * n)
2390                 || get_user_ual(optval, vptr + 3 * n)
2391                 || get_user_ual(optlen, vptr + 4 * n))
2392                 return -TARGET_EFAULT;
2394             ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2397     case SOCKOP_getsockopt:
2405             if (get_user_ual(sockfd, vptr)
2406                 || get_user_ual(level, vptr + n)
2407                 || get_user_ual(optname, vptr + 2 * n)
2408                 || get_user_ual(optval, vptr + 3 * n)
2409                 || get_user_ual(optlen, vptr + 4 * n))
2410                 return -TARGET_EFAULT;
2412             ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2416         gemu_log("Unsupported socketcall: %d\n", num);
2417         ret = -TARGET_ENOSYS;
2424 #define N_SHM_REGIONS	32
/* Fixed-size table of guest SysV shared-memory attachments (fields
 * elided here).  NOTE(review): presumably indexed by shmat/shmdt
 * emulation later in the file -- confirm usage there. */
2426 static struct shm_region {
2429 } shm_regions[N_SHM_REGIONS];
/* Guest-ABI layout of struct ipc_perm (leading key/owner fields are in
 * elided lines above "mode"). */
2431 struct target_ipc_perm
2438     unsigned short int mode;
2439     unsigned short int __pad1;
2440     unsigned short int __seq;
2441     unsigned short int __pad2;
2442     abi_ulong __unused1;
2443     abi_ulong __unused2;
/* Guest-ABI layout of struct semid_ds; __unusedN words pad the times
 * to the kernel's 64-bit-ready layout on 32-bit ABIs. */
2446 struct target_semid_ds
2448   struct target_ipc_perm sem_perm;
2449   abi_ulong sem_otime;
2450   abi_ulong __unused1;
2451   abi_ulong sem_ctime;
2452   abi_ulong __unused2;
2453   abi_ulong sem_nsems;
2454   abi_ulong __unused3;
2455   abi_ulong __unused4;
/* Copy the ipc_perm embedded in a guest semid_ds (at target_addr) into
 * a host struct ipc_perm, byte-swapping each field.  NOTE(review): the
 * uid/gid/cuid/cgid swaps use tswapal, i.e. the target fields are
 * assumed abi_ulong-sized; the field declarations are elided above. */
2458 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2459 abi_ulong target_addr)
2461     struct target_ipc_perm *target_ip;
2462     struct target_semid_ds *target_sd;
2464     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2465         return -TARGET_EFAULT;
2466     target_ip = &(target_sd->sem_perm);
2467     host_ip->__key = tswapal(target_ip->__key);
2468     host_ip->uid = tswapal(target_ip->uid);
2469     host_ip->gid = tswapal(target_ip->gid);
2470     host_ip->cuid = tswapal(target_ip->cuid);
2471     host_ip->cgid = tswapal(target_ip->cgid);
2472     host_ip->mode = tswap16(target_ip->mode);
2473     unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_ipc_perm: write a host ipc_perm into the
 * sem_perm member of the guest semid_ds at target_addr. */
2477 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2478 struct ipc_perm *host_ip)
2480     struct target_ipc_perm *target_ip;
2481     struct target_semid_ds *target_sd;
2483     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2484         return -TARGET_EFAULT;
2485     target_ip = &(target_sd->sem_perm);
2486     target_ip->__key = tswapal(host_ip->__key);
2487     target_ip->uid = tswapal(host_ip->uid);
2488     target_ip->gid = tswapal(host_ip->gid);
2489     target_ip->cuid = tswapal(host_ip->cuid);
2490     target_ip->cgid = tswapal(host_ip->cgid);
2491     target_ip->mode = tswap16(host_ip->mode);
2492     unlock_user_struct(target_sd, target_addr, 1);
/* Convert a guest semid_ds to host form: the embedded ipc_perm via the
 * helper above (which re-locks the same guest struct), then the
 * counters/timestamps with byte-swapping. */
2496 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2497 abi_ulong target_addr)
2499     struct target_semid_ds *target_sd;
2501     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2502         return -TARGET_EFAULT;
2503     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2504         return -TARGET_EFAULT;
2505     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2506     host_sd->sem_otime = tswapal(target_sd->sem_otime);
2507     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2508     unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_semid_ds: write a host semid_ds back into
 * guest memory at target_addr. */
2512 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2513 struct semid_ds *host_sd)
2515     struct target_semid_ds *target_sd;
2517     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2518         return -TARGET_EFAULT;
2519     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2520         return -TARGET_EFAULT;
2521     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2522     target_sd->sem_otime = tswapal(host_sd->sem_otime);
2523     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2524     unlock_user_struct(target_sd, target_addr, 1);
2528 struct target_seminfo {
/* Copy a host struct seminfo into the guest's target_seminfo at
 * target_addr, field by field via __put_user (handles swapping). */
2541 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2542 struct seminfo *host_seminfo)
2544     struct target_seminfo *target_seminfo;
2545     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2546         return -TARGET_EFAULT;
2547     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2548     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2549     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2550     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2551     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2552     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2553     __put_user(host_seminfo->semume, &target_seminfo->semume);
2554     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2555     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2556     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2557     unlock_user_struct(target_seminfo, target_addr, 1);
/* Interior of the host-side union semun used with semctl() (the
 * opening "union semun {" and int val member are in elided lines). */
2563 	struct semid_ds *buf;
2564 	unsigned short *array;
2565 	struct seminfo *__buf;
/* Guest-ABI counterpart; its abi_ulong members are in elided lines. */
2568 union target_semun {
2575 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2576 abi_ulong target_addr)
2579 unsigned short *array;
2581 struct semid_ds semid_ds;
2584 semun.buf = &semid_ds;
2586 ret = semctl(semid, 0, IPC_STAT, semun);
2588 return get_errno(ret);
2590 nsems = semid_ds.sem_nsems;
2592 *host_array = malloc(nsems*sizeof(unsigned short));
2593 array = lock_user(VERIFY_READ, target_addr,
2594 nsems*sizeof(unsigned short), 1);
2596 return -TARGET_EFAULT;
2598 for(i=0; i<nsems; i++) {
2599 __get_user((*host_array)[i], &array[i]);
2601 unlock_user(array, target_addr, 0);
/* Write a host semaphore-value array (*host_array, as filled for
 * semctl GETALL) back to guest memory.  The element count again comes
 * from the host's IPC_STAT; no allocation happens here -- the caller
 * still owns *host_array. */
2606 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2607 unsigned short **host_array)
2610     unsigned short *array;
2612     struct semid_ds semid_ds;
2615     semun.buf = &semid_ds;
2617     ret = semctl(semid, 0, IPC_STAT, semun);
2619         return get_errno(ret);
2621     nsems = semid_ds.sem_nsems;
2623     array = lock_user(VERIFY_WRITE, target_addr,
2624                       nsems*sizeof(unsigned short), 0);
2626         return -TARGET_EFAULT;
2628     for(i=0; i<nsems; i++) {
2629         __put_user((*host_array)[i], &array[i]);
2632     unlock_user(array, target_addr, 1);
/* Emulate semctl(2): convert the target_semun argument according to
 * "cmd" (value, array, semid_ds, or seminfo), run the host semctl,
 * and convert results back.  Commands with no argument fall through
 * to the NULL call at the bottom. */
2637 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2638 union target_semun target_su)
2641     struct semid_ds dsarg;
2642     unsigned short *array = NULL;
2643     struct seminfo seminfo;
2644     abi_long ret = -TARGET_EINVAL;
/* GETVAL/SETVAL: plain int in the union. */
2651         arg.val = tswap32(target_su.val);
2652         ret = get_errno(semctl(semid, semnum, cmd, arg));
2653         target_su.val = tswap32(arg.val);
/* GETALL/SETALL: whole value array round-trip. */
2657         err = target_to_host_semarray(semid, &array, target_su.array);
2661         ret = get_errno(semctl(semid, semnum, cmd, arg));
2662         err = host_to_target_semarray(semid, target_su.array, &array);
/* IPC_STAT/IPC_SET: semid_ds round-trip. */
2669         err = target_to_host_semid_ds(&dsarg, target_su.buf);
2673         ret = get_errno(semctl(semid, semnum, cmd, arg));
2674         err = host_to_target_semid_ds(target_su.buf, &dsarg);
/* IPC_INFO/SEM_INFO: seminfo is output-only. */
2680         arg.__buf = &seminfo;
2681         ret = get_errno(semctl(semid, semnum, cmd, arg));
2682         err = host_to_target_seminfo(target_su.__buf, &seminfo);
2690         ret = get_errno(semctl(semid, semnum, cmd, NULL));
/* Guest-ABI layout of struct sembuf (sem_op/sem_flg members are in
 * elided lines below). */
2697 struct target_sembuf {
2698     unsigned short sem_num;
/* Copy an array of nsops guest sembufs into host_sembuf, converting
 * each field with __get_user.  Returns 0 or -TARGET_EFAULT. */
2703 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2704 abi_ulong target_addr,
2707     struct target_sembuf *target_sembuf;
2710     target_sembuf = lock_user(VERIFY_READ, target_addr,
2711                               nsops*sizeof(struct target_sembuf), 1);
2713         return -TARGET_EFAULT;
2715     for(i=0; i<nsops; i++) {
2716         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2717         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2718         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2721     unlock_user(target_sembuf, target_addr, 0);
2726 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2728 struct sembuf sops[nsops];
2730 if (target_to_host_sembuf(sops, ptr, nsops))
2731 return -TARGET_EFAULT;
2733 return semop(semid, sops, nsops);
/* Guest-ABI layout of struct msqid_ds (message-queue status).  On 32-bit
 * targets each time_t field is followed by an explicit padding word so the
 * layout matches the kernel's 32-bit msqid64_ds ABI. */
2736 struct target_msqid_ds
2738 struct target_ipc_perm msg_perm;
2739 abi_ulong msg_stime;
2740 #if TARGET_ABI_BITS == 32
2741 abi_ulong __unused1;
2743 abi_ulong msg_rtime;
2744 #if TARGET_ABI_BITS == 32
2745 abi_ulong __unused2;
2747 abi_ulong msg_ctime;
2748 #if TARGET_ABI_BITS == 32
2749 abi_ulong __unused3;
2751 abi_ulong __msg_cbytes;
2753 abi_ulong msg_qbytes;
2754 abi_ulong msg_lspid;
2755 abi_ulong msg_lrpid;
2756 abi_ulong __unused4;
2757 abi_ulong __unused5;
/* Convert a guest msqid_ds at target_addr into host format (*host_md),
 * byte-swapping each field with tswapal.  Returns 0 or -TARGET_EFAULT.
 * NOTE(review): if target_to_host_ipc_perm() fails, the function returns
 * without unlocking target_md — looks like a lock/refcount leak on that
 * error path; confirm against lock_user_struct semantics. */
2760 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2761 abi_ulong target_addr)
2763 struct target_msqid_ds *target_md;
2765 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2766 return -TARGET_EFAULT;
2767 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2768 return -TARGET_EFAULT;
2769 host_md->msg_stime = tswapal(target_md->msg_stime);
2770 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2771 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2772 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2773 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2774 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2775 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2776 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2777 unlock_user_struct(target_md, target_addr, 0);
/* Inverse of target_to_host_msqid_ds: write *host_md back into the guest
 * msqid_ds at target_addr, byte-swapped.  Returns 0 or -TARGET_EFAULT.
 * NOTE(review): same as the forward direction — an ipc_perm conversion
 * failure returns without unlocking target_md. */
2781 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2782 struct msqid_ds *host_md)
2784 struct target_msqid_ds *target_md;
2786 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2787 return -TARGET_EFAULT;
2788 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2789 return -TARGET_EFAULT;
2790 target_md->msg_stime = tswapal(host_md->msg_stime);
2791 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2792 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2793 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2794 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2795 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2796 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2797 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
/* Write-back to guest memory (len==1). */
2798 unlock_user_struct(target_md, target_addr, 1);
/* Guest-ABI layout of struct msginfo (IPC_INFO/MSG_INFO result).  Most
 * int members are elided in this view; msgseg is the final field. */
2802 struct target_msginfo {
2810 unsigned short int msgseg;
/* Copy a host struct msginfo into the guest msginfo at target_addr,
 * field by field via __put_user (which handles byte order and width).
 * Returns 0 or -TARGET_EFAULT. */
2813 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2814 struct msginfo *host_msginfo)
2816 struct target_msginfo *target_msginfo;
2817 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2818 return -TARGET_EFAULT;
2819 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2820 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2821 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2822 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2823 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2824 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2825 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2826 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2827 unlock_user_struct(target_msginfo, target_addr, 1);
/* Emulate msgctl(2).  Converts per command: IPC_STAT/IPC_SET round-trip a
 * msqid_ds; IPC_RMID takes no buffer; IPC_INFO/MSG_INFO return a msginfo
 * (passed to the host via a msqid_ds* cast, as the kernel ABI expects).
 * Returns target errnos.  (Switch/case lines elided in this view.) */
2831 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2833 struct msqid_ds dsarg;
2834 struct msginfo msginfo;
2835 abi_long ret = -TARGET_EINVAL;
/* IPC_STAT / IPC_SET */
2843 if (target_to_host_msqid_ds(&dsarg,ptr))
2844 return -TARGET_EFAULT;
2845 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2846 if (host_to_target_msqid_ds(ptr,&dsarg))
2847 return -TARGET_EFAULT;
/* IPC_RMID: no argument */
2850 ret = get_errno(msgctl(msgid, cmd, NULL));
/* IPC_INFO / MSG_INFO */
2854 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2855 if (host_to_target_msginfo(ptr, &msginfo))
2856 return -TARGET_EFAULT;
/* Guest-ABI layout of struct msgbuf (mtype + flexible mtext); members are
 * elided in this view. */
2863 struct target_msgbuf {
2868 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2869 unsigned int msgsz, int msgflg)
2871 struct target_msgbuf *target_mb;
2872 struct msgbuf *host_mb;
2875 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2876 return -TARGET_EFAULT;
2877 host_mb = malloc(msgsz+sizeof(long));
2878 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2879 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2880 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2882 unlock_user_struct(target_mb, msgp, 0);
/* Emulate msgrcv(2): receive into a g_malloc'd host buffer, then copy the
 * received mtext and byte-swapped mtype back to the guest msgbuf.
 * Returns the byte count (ret) or a target errno.
 * NOTE(review): the cleanup path (g_free of host_mb, end label, final
 * return) is elided in this view — confirm the buffer is freed on all
 * paths in the full source. */
2887 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2888 unsigned int msgsz, abi_long msgtyp,
2891 struct target_msgbuf *target_mb;
2893 struct msgbuf *host_mb;
2896 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2897 return -TARGET_EFAULT;
2899 host_mb = g_malloc(msgsz+sizeof(long));
2900 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg));
/* On success, ret is the number of mtext bytes received; copy them to
 * the guest buffer that sits just after the guest's mtype word. */
2903 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2904 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2905 if (!target_mtext) {
2906 ret = -TARGET_EFAULT;
2909 memcpy(target_mb->mtext, host_mb->mtext, ret);
2910 unlock_user(target_mtext, target_mtext_addr, ret);
/* mtype is always written back, byte-swapped for the guest. */
2913 target_mb->mtype = tswapal(host_mb->mtype);
2917 unlock_user_struct(target_mb, msgp, 1);
/* Guest-ABI layout of struct shmid_ds (shared-memory segment status).
 * As with target_msqid_ds, 32-bit targets carry a padding word after each
 * time_t field to match the kernel's shmid64_ds layout. */
2922 struct target_shmid_ds
2924 struct target_ipc_perm shm_perm;
2925 abi_ulong shm_segsz;
2926 abi_ulong shm_atime;
2927 #if TARGET_ABI_BITS == 32
2928 abi_ulong __unused1;
2930 abi_ulong shm_dtime;
2931 #if TARGET_ABI_BITS == 32
2932 abi_ulong __unused2;
2934 abi_ulong shm_ctime;
2935 #if TARGET_ABI_BITS == 32
2936 abi_ulong __unused3;
2940 abi_ulong shm_nattch;
2941 unsigned long int __unused4;
2942 unsigned long int __unused5;
/* Convert a guest shmid_ds at target_addr into host format (*host_sd)
 * using __get_user per field.  Returns 0 or -TARGET_EFAULT.
 * NOTE(review): as with the msqid_ds converters, an ipc_perm conversion
 * failure returns without unlocking target_sd. */
2945 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2946 abi_ulong target_addr)
2948 struct target_shmid_ds *target_sd;
2950 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2951 return -TARGET_EFAULT;
2952 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2953 return -TARGET_EFAULT;
2954 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2955 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2956 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2957 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2958 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2959 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2960 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2961 unlock_user_struct(target_sd, target_addr, 0);
/* Inverse of target_to_host_shmid_ds: write *host_sd into the guest
 * shmid_ds at target_addr via __put_user.  Returns 0 or -TARGET_EFAULT. */
2965 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2966 struct shmid_ds *host_sd)
2968 struct target_shmid_ds *target_sd;
2970 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2971 return -TARGET_EFAULT;
2972 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2973 return -TARGET_EFAULT;
2974 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2975 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2976 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2977 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2978 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2979 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2980 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2981 unlock_user_struct(target_sd, target_addr, 1);
/* Guest-ABI layout of struct shminfo (IPC_INFO result); members elided
 * in this view. */
2985 struct target_shminfo {
/* Copy a host struct shminfo into the guest shminfo at target_addr via
 * __put_user.  Returns 0 or -TARGET_EFAULT. */
2993 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2994 struct shminfo *host_shminfo)
2996 struct target_shminfo *target_shminfo;
2997 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2998 return -TARGET_EFAULT;
2999 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3000 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3001 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3002 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3003 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3004 unlock_user_struct(target_shminfo, target_addr, 1);
/* Guest-ABI layout of struct shm_info (SHM_INFO result); leading members
 * are elided in this view. */
3008 struct target_shm_info {
3013 abi_ulong swap_attempts;
3014 abi_ulong swap_successes;
/* Copy a host struct shm_info into the guest shm_info at target_addr via
 * __put_user.  Returns 0 or -TARGET_EFAULT. */
3017 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3018 struct shm_info *host_shm_info)
3020 struct target_shm_info *target_shm_info;
3021 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3022 return -TARGET_EFAULT;
3023 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3024 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3025 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3026 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3027 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3028 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3029 unlock_user_struct(target_shm_info, target_addr, 1);
/* Emulate shmctl(2).  IPC_STAT/IPC_SET round-trip a shmid_ds; IPC_INFO
 * returns a shminfo and SHM_INFO a shm_info (both passed to the host via
 * a shmid_ds* cast, per the kernel ABI); other commands take no buffer.
 * Returns target errnos.  (Switch/case lines elided in this view.) */
3033 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3035 struct shmid_ds dsarg;
3036 struct shminfo shminfo;
3037 struct shm_info shm_info;
3038 abi_long ret = -TARGET_EINVAL;
/* IPC_STAT / IPC_SET */
3046 if (target_to_host_shmid_ds(&dsarg, buf))
3047 return -TARGET_EFAULT;
3048 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3049 if (host_to_target_shmid_ds(buf, &dsarg))
3050 return -TARGET_EFAULT;
/* IPC_INFO */
3053 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3054 if (host_to_target_shminfo(buf, &shminfo))
3055 return -TARGET_EFAULT;
/* SHM_INFO */
3058 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3059 if (host_to_target_shm_info(buf, &shm_info))
3060 return -TARGET_EFAULT;
/* Commands taking no argument (e.g. IPC_RMID) */
3065 ret = get_errno(shmctl(shmid, cmd, NULL));
3072 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3076 struct shmid_ds shm_info;
3079 /* find out the length of the shared memory segment */
3080 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3081 if (is_error(ret)) {
3082 /* can't get length, bail out */
3089 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3091 abi_ulong mmap_start;
3093 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3095 if (mmap_start == -1) {
3097 host_raddr = (void *)-1;
3099 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3102 if (host_raddr == (void *)-1) {
3104 return get_errno((long)host_raddr);
3106 raddr=h2g((unsigned long)host_raddr);
3108 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3109 PAGE_VALID | PAGE_READ |
3110 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3112 for (i = 0; i < N_SHM_REGIONS; i++) {
3113 if (shm_regions[i].start == 0) {
3114 shm_regions[i].start = raddr;
3115 shm_regions[i].size = shm_info.shm_segsz;
/* Emulate shmdt(2): find the guest attach address in shm_regions[], clear
 * its slot and the guest page flags, then detach the host mapping.
 * Returns a target errno from the host shmdt(). */
3125 static inline abi_long do_shmdt(abi_ulong shmaddr)
3129 for (i = 0; i < N_SHM_REGIONS; ++i) {
3130 if (shm_regions[i].start == shmaddr) {
3131 shm_regions[i].start = 0;
3132 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3137 return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
/* Multiplexed sys_ipc(2) entry point used by targets that funnel all SysV
 * IPC calls through one syscall.  Dispatches on the low 16 bits of 'call'
 * to the do_sem*/do_msg*/do_shm* helpers above; bits 16+ carry the ABI
 * version.  (Switch/case scaffolding elided in this view.) */
static abi_long do_ipc(unsigned int call, int first,
int second, int third,
abi_long ptr, abi_long fifth)
version = call >> 16;
/* IPCOP_semop */
ret = do_semop(first, ptr, second);
/* IPCOP_semget */
ret = get_errno(semget(first, second, third));
/* IPCOP_semctl: ptr carries the target_semun by value */
ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
/* IPCOP_msgget */
ret = get_errno(msgget(first, second));
/* IPCOP_msgsnd */
ret = do_msgsnd(first, ptr, second, third);
/* IPCOP_msgctl */
ret = do_msgctl(first, second, ptr);
/* IPCOP_msgrcv, old ABI (version 0): ptr points at a kludge struct
 * bundling the msgbuf pointer and msgtyp. */
struct target_ipc_kludge {
if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
ret = -TARGET_EFAULT;
ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
unlock_user_struct(tmp, ptr, 0);
/* IPCOP_msgrcv, new ABI: msgtyp passed directly in 'fifth'. */
ret = do_msgrcv(first, ptr, second, fifth, third);
/* IPCOP_shmat: attach, then store the result address through 'third'. */
raddr = do_shmat(first, ptr, second);
if (is_error(raddr))
return get_errno(raddr);
if (put_user_ual(raddr, third))
return -TARGET_EFAULT;
/* Unsupported shmat variant */
ret = -TARGET_EINVAL;
/* IPCOP_shmdt */
ret = do_shmdt(ptr);
/* IPCOP_shmget */
/* IPC_* flag values are the same on all linux platforms */
ret = get_errno(shmget(first, second, third));
/* IPCOP_shmctl */
/* IPC_* and SHM_* command values are the same on all linux platforms */
ret = do_shmctl(first, second, third);
gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
ret = -TARGET_ENOSYS;
/* kernel structure types definitions */
/* First pass over syscall_types.h: build an enum of STRUCT_<name> ids. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
#include "syscall_types.h"
#undef STRUCT_SPECIAL
/* Second pass: emit a thunk argtype descriptor array per structure;
 * SPECIAL structures get hand-written converters elsewhere. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Custom handler signature for ioctls that need more than the generic
 * thunk-based argument conversion in do_ioctl(). */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
int fd, abi_long cmd, abi_long arg);

/* One row of the ioctl translation table (see ioctl_entries[]). */
unsigned int target_cmd;
unsigned int host_cmd;
do_ioctl_fn *do_ioctl;
const argtype arg_type[5];

/* Access-direction flags for the generic converter. */
#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the stack scratch buffer used for converted ioctl arguments. */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
/ sizeof(struct fiemap_extent))

/* Custom handler for FS_IOC_FIEMAP: the argument is a variable-length
 * struct (fiemap header + fm_extent_count extents filled in by the
 * kernel), so the generic fixed-size thunk path cannot handle it. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
int fd, abi_long cmd, abi_long arg)
/* The parameter for this ioctl is a struct fiemap followed
 * by an array of struct fiemap_extent whose size is set
 * in fiemap->fm_extent_count. The array is filled in by the
 */
int target_size_in, target_size_out;
const argtype *arg_type = ie->arg_type;
const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
int i, extent_size = thunk_type_size(extent_arg_type, 0);
assert(arg_type[0] == TYPE_PTR);
assert(ie->access == IOC_RW);
/* Copy the guest fiemap header into buf_temp in host format. */
target_size_in = thunk_type_size(arg_type, 0);
argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
return -TARGET_EFAULT;
thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
unlock_user(argptr, arg, 0);
fm = (struct fiemap *)buf_temp;
/* Bound the guest-controlled extent count before sizing the buffer. */
if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
return -TARGET_EINVAL;
outbufsz = sizeof (*fm) +
(sizeof(struct fiemap_extent) * fm->fm_extent_count);
if (outbufsz > MAX_STRUCT_SIZE) {
/* We can't fit all the extents into the fixed size buffer.
 * Allocate one that is large enough and use it instead.
 */
fm = malloc(outbufsz);
return -TARGET_ENOMEM;
memcpy(fm, buf_temp, sizeof(struct fiemap));
/* NOTE(review): freeing of the oversized 'fm' buffer is not visible in
 * this view — confirm the full source releases it on every exit path. */
ret = get_errno(ioctl(fd, ie->host_cmd, fm));
if (!is_error(ret)) {
target_size_out = target_size_in;
/* An extent_count of 0 means we were only counting the extents
 * so there are no structs to copy
 */
if (fm->fm_extent_count != 0) {
target_size_out += fm->fm_mapped_extents * extent_size;
argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
ret = -TARGET_EFAULT;
/* Convert the struct fiemap */
thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
if (fm->fm_extent_count != 0) {
p = argptr + target_size_in;
/* ...and then all the struct fiemap_extents */
for (i = 0; i < fm->fm_mapped_extents; i++) {
thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
unlock_user(argptr, arg, target_size_out);
/* Custom handler for SIOCGIFCONF: struct ifconf embeds a pointer to a
 * caller-supplied ifreq array, so both the container and the array must
 * be converted separately between guest and host layouts. */
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
int fd, abi_long cmd, abi_long arg)
const argtype *arg_type = ie->arg_type;
struct ifconf *host_ifconf;
const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
int target_ifreq_size;
abi_long target_ifc_buf;
assert(arg_type[0] == TYPE_PTR);
assert(ie->access == IOC_RW);
/* Pull the guest struct ifconf into buf_temp (host layout). */
target_size = thunk_type_size(arg_type, 0);
argptr = lock_user(VERIFY_READ, arg, target_size, 1);
return -TARGET_EFAULT;
thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
unlock_user(argptr, arg, 0);
/* Stash the guest's buffer pointer/length before we overwrite them. */
host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
target_ifc_len = host_ifconf->ifc_len;
target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
/* Host ifreq may be larger than the guest's; rescale the length. */
target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
nb_ifreq = target_ifc_len / target_ifreq_size;
host_ifc_len = nb_ifreq * sizeof(struct ifreq);
outbufsz = sizeof(*host_ifconf) + host_ifc_len;
if (outbufsz > MAX_STRUCT_SIZE) {
/* We can't fit all the extents into the fixed size buffer.
 * Allocate one that is large enough and use it instead.
 */
host_ifconf = malloc(outbufsz);
return -TARGET_ENOMEM;
memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
/* NOTE(review): freeing of an oversized host_ifconf is not visible in
 * this view — confirm the full source releases it before returning. */
/* The host ifreq array lives immediately after the ifconf header. */
host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
host_ifconf->ifc_len = host_ifc_len;
host_ifconf->ifc_buf = host_ifc_buf;
ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
if (!is_error(ret)) {
/* convert host ifc_len to target ifc_len */
nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
target_ifc_len = nb_ifreq * target_ifreq_size;
host_ifconf->ifc_len = target_ifc_len;
/* restore target ifc_buf */
host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
/* copy struct ifconf to target user */
argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
return -TARGET_EFAULT;
thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
unlock_user(argptr, arg, target_size);
/* copy ifreq[] to target user */
argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
for (i = 0; i < nb_ifreq ; i++) {
thunk_convert(argptr + i * target_ifreq_size,
host_ifc_buf + i * sizeof(struct ifreq),
ifreq_arg_type, THUNK_TARGET);
unlock_user(argptr, target_ifc_buf, target_ifc_len);
/* Custom handler for device-mapper ioctls (DM_*).  A struct dm_ioctl is
 * followed by a command-specific variable-length payload at data_start,
 * so both the header and the payload need per-command conversion in each
 * direction.  (Much of the switch scaffolding is elided in this view.) */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
abi_long cmd, abi_long arg)
struct dm_ioctl *host_dm;
abi_long guest_data;
uint32_t guest_data_size;
const argtype *arg_type = ie->arg_type;
void *big_buf = NULL;
/* Convert the fixed dm_ioctl header from guest to host. */
target_size = thunk_type_size(arg_type, 0);
argptr = lock_user(VERIFY_READ, arg, target_size, 1);
ret = -TARGET_EFAULT;
thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
unlock_user(argptr, arg, 0);
/* buf_temp is too small, so fetch things into a bigger buffer */
big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
memcpy(big_buf, buf_temp, target_size);
/* Locate the guest payload region relative to data_start. */
guest_data = arg + host_dm->data_start;
if ((guest_data - arg) < 0) {
guest_data_size = host_dm->data_size - host_dm->data_start;
host_data = (char*)host_dm + host_dm->data_start;
/* Guest -> host payload conversion, per command. */
argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
switch (ie->host_cmd) {
case DM_LIST_DEVICES:
case DM_DEV_SUSPEND:
case DM_TABLE_STATUS:
case DM_TABLE_CLEAR:
case DM_LIST_VERSIONS:
case DM_DEV_SET_GEOMETRY:
/* data contains only strings */
memcpy(host_data, argptr, guest_data_size);
/* DM_DEV_WAIT-style payload: leading uint64 is byte-swapped. */
memcpy(host_data, argptr, guest_data_size);
*(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
/* DM_TABLE_LOAD: convert each dm_target_spec plus its trailing
 * parameter string. */
void *gspec = argptr;
void *cur_data = host_data;
const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
int spec_size = thunk_type_size(arg_type, 0);
for (i = 0; i < host_dm->target_count; i++) {
struct dm_target_spec *spec = cur_data;
thunk_convert(spec, gspec, arg_type, THUNK_HOST);
slen = strlen((char*)gspec + spec_size) + 1;
spec->next = sizeof(*spec) + slen;
strcpy((char*)&spec[1], gspec + spec_size);
cur_data += spec->next;
ret = -TARGET_EINVAL;
unlock_user(argptr, guest_data, 0);
ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* Host -> guest payload conversion, per command. */
if (!is_error(ret)) {
guest_data = arg + host_dm->data_start;
guest_data_size = host_dm->data_size - host_dm->data_start;
argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
switch (ie->host_cmd) {
case DM_DEV_SUSPEND:
case DM_TABLE_CLEAR:
case DM_DEV_SET_GEOMETRY:
/* no return data */
case DM_LIST_DEVICES:
/* Walk the variable-length dm_name_list chain, converting each
 * node and recomputing 'next' offsets for the guest layout. */
struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
uint32_t remaining_data = guest_data_size;
void *cur_data = argptr;
const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
int nl_size = 12; /* can't use thunk_size due to alignment */
uint32_t next = nl->next;
nl->next = nl_size + (strlen(nl->name) + 1);
if (remaining_data < nl->next) {
host_dm->flags |= DM_BUFFER_FULL_FLAG;
thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
strcpy(cur_data + nl_size, nl->name);
cur_data += nl->next;
remaining_data -= nl->next;
nl = (void*)nl + next;
case DM_TABLE_STATUS:
/* Convert each returned dm_target_spec plus its status string. */
struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
void *cur_data = argptr;
const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
int spec_size = thunk_type_size(arg_type, 0);
for (i = 0; i < host_dm->target_count; i++) {
uint32_t next = spec->next;
int slen = strlen((char*)&spec[1]) + 1;
spec->next = (cur_data - argptr) + spec_size + slen;
if (guest_data_size < spec->next) {
host_dm->flags |= DM_BUFFER_FULL_FLAG;
thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
strcpy(cur_data + spec_size, (char*)&spec[1]);
cur_data = argptr + spec->next;
spec = (void*)host_dm + host_dm->data_start + next;
/* DM_DEV_WAIT/DM_TABLE_DEPS-style payload: count + uint64 array. */
void *hdata = (void*)host_dm + host_dm->data_start;
int count = *(uint32_t*)hdata;
uint64_t *hdev = hdata + 8;
uint64_t *gdev = argptr + 8;
*(uint32_t*)argptr = tswap32(count);
for (i = 0; i < count; i++) {
*gdev = tswap64(*hdev);
case DM_LIST_VERSIONS:
/* Walk the dm_target_versions chain, as for DM_LIST_DEVICES. */
struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
uint32_t remaining_data = guest_data_size;
void *cur_data = argptr;
const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
int vers_size = thunk_type_size(arg_type, 0);
uint32_t next = vers->next;
vers->next = vers_size + (strlen(vers->name) + 1);
if (remaining_data < vers->next) {
host_dm->flags |= DM_BUFFER_FULL_FLAG;
thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
strcpy(cur_data + vers_size, vers->name);
cur_data += vers->next;
remaining_data -= vers->next;
vers = (void*)vers + next;
ret = -TARGET_EINVAL;
unlock_user(argptr, guest_data, guest_data_size);
/* Finally convert the (possibly updated) dm_ioctl header back. */
argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
ret = -TARGET_EFAULT;
thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
unlock_user(argptr, arg, target_size);
/* Translation table mapping target ioctl numbers to host numbers, with
 * either thunk argtype descriptors (IOCTL) or a custom handler
 * (IOCTL_SPECIAL).  Rows come from an included list (elided here). */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
{ TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
{ TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos. */
/* Generic ioctl dispatcher: look the command up in ioctl_entries[],
 * delegate to a custom handler if one is registered, otherwise convert
 * the argument through the thunk machinery according to the entry's
 * argtype and IOC_R/IOC_W access flags. */
static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
const IOCTLEntry *ie;
const argtype *arg_type;
uint8_t buf_temp[MAX_STRUCT_SIZE];
/* Table is terminated by a zero target_cmd sentinel. */
if (ie->target_cmd == 0) {
gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
return -TARGET_ENOSYS;
if (ie->target_cmd == cmd)
arg_type = ie->arg_type;
gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
/* Custom handler takes over entirely. */
return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
switch(arg_type[0]) {
/* TYPE_NULL: no argument */
ret = get_errno(ioctl(fd, ie->host_cmd));
/* TYPE_INT-style: pass the value straight through */
ret = get_errno(ioctl(fd, ie->host_cmd, arg));
/* TYPE_PTR: convert the pointed-to struct per access direction */
target_size = thunk_type_size(arg_type, 0);
switch(ie->access) {
/* IOC_R: kernel writes; convert host->target afterwards */
ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
if (!is_error(ret)) {
argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
return -TARGET_EFAULT;
thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
unlock_user(argptr, arg, target_size);
/* IOC_W: kernel reads; convert target->host beforehand */
argptr = lock_user(VERIFY_READ, arg, target_size, 1);
return -TARGET_EFAULT;
thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
unlock_user(argptr, arg, 0);
ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* IOC_RW: convert both ways */
argptr = lock_user(VERIFY_READ, arg, target_size, 1);
return -TARGET_EFAULT;
thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
unlock_user(argptr, arg, 0);
ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
if (!is_error(ret)) {
argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
return -TARGET_EFAULT;
thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
unlock_user(argptr, arg, target_size);
gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
(long)cmd, arg_type[0]);
ret = -TARGET_ENOSYS;
/* termios c_iflag translation table: {target_mask, target_bits,
 * host_mask, host_bits} per row, consumed by target_to_host_bitmask()
 * and host_to_target_bitmask(). */
static const bitmask_transtbl iflag_tbl[] = {
{ TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
{ TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
{ TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
{ TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
{ TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
{ TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
{ TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
{ TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
{ TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
{ TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
{ TARGET_IXON, TARGET_IXON, IXON, IXON },
{ TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
{ TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
{ TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
/* termios c_oflag translation table.  Multi-bit delay fields (NLDLY,
 * CRDLY, TABDLY, ...) get one row per possible value under the same
 * mask. */
static const bitmask_transtbl oflag_tbl[] = {
{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/* termios c_cflag translation table: baud rates (CBAUD field), character
 * size (CSIZE field) and the single-bit control flags. */
static const bitmask_transtbl cflag_tbl[] = {
{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/* termios c_lflag translation table (local-mode single-bit flags). */
static const bitmask_transtbl lflag_tbl[] = {
{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/* StructEntry converter: translate a guest struct termios (byte-swapped,
 * guest flag encoding) into the host's struct termios.  Flag words go
 * through the bitmask tables above; control characters are remapped
 * index by index. */
static void target_to_host_termios (void *dst, const void *src)
struct host_termios *host = dst;
const struct target_termios *target = src;
target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
host->c_line = target->c_line;
/* Clear first so host-only c_cc slots stay zero. */
memset(host->c_cc, 0, sizeof(host->c_cc));
host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/* StructEntry converter: inverse of target_to_host_termios — translate a
 * host struct termios into the guest's layout and byte order. */
static void host_to_target_termios (void *dst, const void *src)
struct target_termios *target = dst;
const struct host_termios *host = src;
tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
target->c_line = host->c_line;
/* Clear first so guest-only c_cc slots stay zero. */
memset(target->c_cc, 0, sizeof(target->c_cc));
target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk descriptor for struct termios: registers the two conversion
 * callbacks plus the size/alignment of the target and host layouts so
 * the generic ioctl thunk machinery can marshal termios arguments. */
3950 static const StructEntry struct_termios_def = {
3951 .convert = { host_to_target_termios, target_to_host_termios },
3952 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3953 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/* Bit-for-bit translation table between TARGET_MAP_* and host MAP_*
 * mmap(2) flags; consumed by target_to_host_bitmask()/
 * host_to_target_bitmask(). */
3956 static bitmask_transtbl mmap_flags_tbl[] = {
3957 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3958 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3959 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3960 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3961 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3962 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3963 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3964 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3968 #if defined(TARGET_I386)
3970 /* NOTE: there is really one LDT for all the threads */
/* Host-side pointer into the guest-visible LDT backing store; set up
 * lazily by write_ldt() (see env->ldt.base allocation below). */
3971 static uint8_t *ldt_table;
/* modify_ldt(READ): copy the emulated LDT out to guest memory at PTR,
 * clamped to at most BYTECOUNT bytes.  Returns -TARGET_EFAULT if the
 * guest buffer cannot be locked. */
3973 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3980 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3981 if (size > bytecount)
3983 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3985 return -TARGET_EFAULT;
3986 /* ??? Should this be byteswapped? */
3987 memcpy(p, ldt_table, size);
3988 unlock_user(p, ptr, size);
3992 /* XXX: add locking support */
/* modify_ldt(WRITE): validate one struct user_desc read from guest
 * memory at PTR and install the packed descriptor into the emulated
 * LDT.  OLDMODE selects the legacy modify_ldt semantics.  The flag
 * unpacking and the entry_1/entry_2 packing mirror the Linux kernel's
 * own write_ldt() (see NOTE below). */
3993 static abi_long write_ldt(CPUX86State *env,
3994 abi_ulong ptr, unsigned long bytecount, int oldmode)
3996 struct target_modify_ldt_ldt_s ldt_info;
3997 struct target_modify_ldt_ldt_s *target_ldt_info;
3998 int seg_32bit, contents, read_exec_only, limit_in_pages;
3999 int seg_not_present, useable, lm;
4000 uint32_t *lp, entry_1, entry_2;
4002 if (bytecount != sizeof(ldt_info))
4003 return -TARGET_EINVAL;
4004 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4005 return -TARGET_EFAULT;
/* Copy the guest descriptor with byte-swapping into host byte order. */
4006 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4007 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4008 ldt_info.limit = tswap32(target_ldt_info->limit);
4009 ldt_info.flags = tswap32(target_ldt_info->flags);
4010 unlock_user_struct(target_ldt_info, ptr, 0);
4012 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4013 return -TARGET_EINVAL;
/* Unpack the packed flags word (same bit layout as the kernel ABI). */
4014 seg_32bit = ldt_info.flags & 1;
4015 contents = (ldt_info.flags >> 1) & 3;
4016 read_exec_only = (ldt_info.flags >> 3) & 1;
4017 limit_in_pages = (ldt_info.flags >> 4) & 1;
4018 seg_not_present = (ldt_info.flags >> 5) & 1;
4019 useable = (ldt_info.flags >> 6) & 1;
4023 lm = (ldt_info.flags >> 7) & 1;
/* contents == 3 would be a conforming code segment; rejected here. */
4025 if (contents == 3) {
4027 return -TARGET_EINVAL;
4028 if (seg_not_present == 0)
4029 return -TARGET_EINVAL;
4031 /* allocate the LDT */
/* First use: back the guest-visible LDT with anonymous guest memory
 * and point the global ldt_table at it. */
4033 env->ldt.base = target_mmap(0,
4034 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4035 PROT_READ|PROT_WRITE,
4036 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4037 if (env->ldt.base == -1)
4038 return -TARGET_ENOMEM;
4039 memset(g2h(env->ldt.base), 0,
4040 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4041 env->ldt.limit = 0xffff;
4042 ldt_table = g2h(env->ldt.base);
4045 /* NOTE: same code as Linux kernel */
4046 /* Allow LDTs to be cleared by the user. */
4047 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4050 read_exec_only == 1 &&
4052 limit_in_pages == 0 &&
4053 seg_not_present == 1 &&
/* Pack base/limit/attributes into the two 32-bit descriptor words. */
4061 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4062 (ldt_info.limit & 0x0ffff);
4063 entry_2 = (ldt_info.base_addr & 0xff000000) |
4064 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4065 (ldt_info.limit & 0xf0000) |
4066 ((read_exec_only ^ 1) << 9) |
4068 ((seg_not_present ^ 1) << 15) |
4070 (limit_in_pages << 23) |
4074 entry_2 |= (useable << 20);
4076 /* Install the new entry ... */
/* Each LDT entry is 8 bytes, hence entry_number << 3. */
4078 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4079 lp[0] = tswap32(entry_1);
4080 lp[1] = tswap32(entry_2);
4084 /* specific and weird i386 syscalls */
/* Dispatch modify_ldt(2): FUNC selects read (read_ldt) or one of the
 * two write variants (write_ldt with oldmode 1 or 0); anything else is
 * -TARGET_ENOSYS. */
4085 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4086 unsigned long bytecount)
4092 ret = read_ldt(ptr, bytecount);
4095 ret = write_ldt(env, ptr, bytecount, 1);
4098 ret = write_ldt(env, ptr, bytecount, 0);
4101 ret = -TARGET_ENOSYS;
4107 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* set_thread_area(2) for 32-bit i386 guests: install a TLS descriptor
 * into the emulated GDT.  An entry_number of -1 asks us to pick a free
 * TLS slot and write the chosen index back to the guest struct.
 * Descriptor validation/packing mirrors write_ldt() above. */
4108 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4110 uint64_t *gdt_table = g2h(env->gdt.base);
4111 struct target_modify_ldt_ldt_s ldt_info;
4112 struct target_modify_ldt_ldt_s *target_ldt_info;
4113 int seg_32bit, contents, read_exec_only, limit_in_pages;
4114 int seg_not_present, useable, lm;
4115 uint32_t *lp, entry_1, entry_2;
4118 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4119 if (!target_ldt_info)
4120 return -TARGET_EFAULT;
4121 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4122 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4123 ldt_info.limit = tswap32(target_ldt_info->limit);
4124 ldt_info.flags = tswap32(target_ldt_info->flags);
/* -1 means "allocate a free TLS entry"; scan the TLS range of the GDT
 * and report the slot we picked back to the caller. */
4125 if (ldt_info.entry_number == -1) {
4126 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4127 if (gdt_table[i] == 0) {
4128 ldt_info.entry_number = i;
4129 target_ldt_info->entry_number = tswap32(i);
4134 unlock_user_struct(target_ldt_info, ptr, 1);
4136 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4137 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4138 return -TARGET_EINVAL;
/* Unpack the flags word (same layout as modify_ldt). */
4139 seg_32bit = ldt_info.flags & 1;
4140 contents = (ldt_info.flags >> 1) & 3;
4141 read_exec_only = (ldt_info.flags >> 3) & 1;
4142 limit_in_pages = (ldt_info.flags >> 4) & 1;
4143 seg_not_present = (ldt_info.flags >> 5) & 1;
4144 useable = (ldt_info.flags >> 6) & 1;
4148 lm = (ldt_info.flags >> 7) & 1;
4151 if (contents == 3) {
4152 if (seg_not_present == 0)
4153 return -TARGET_EINVAL;
4156 /* NOTE: same code as Linux kernel */
4157 /* Allow LDTs to be cleared by the user. */
4158 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4159 if ((contents == 0 &&
4160 read_exec_only == 1 &&
4162 limit_in_pages == 0 &&
4163 seg_not_present == 1 &&
/* Pack the descriptor words, then install into the GDT slot. */
4171 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4172 (ldt_info.limit & 0x0ffff);
4173 entry_2 = (ldt_info.base_addr & 0xff000000) |
4174 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4175 (ldt_info.limit & 0xf0000) |
4176 ((read_exec_only ^ 1) << 9) |
4178 ((seg_not_present ^ 1) << 15) |
4180 (limit_in_pages << 23) |
4185 /* Install the new entry ... */
4187 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4188 lp[0] = tswap32(entry_1);
4189 lp[1] = tswap32(entry_2);
/* get_thread_area(2): read the GDT TLS descriptor selected by the
 * guest's entry_number, unpack base/limit/flags from the two raw
 * descriptor words, and write them back into the guest struct. */
4193 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4195 struct target_modify_ldt_ldt_s *target_ldt_info;
4196 uint64_t *gdt_table = g2h(env->gdt.base);
4197 uint32_t base_addr, limit, flags;
4198 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4199 int seg_not_present, useable, lm;
4200 uint32_t *lp, entry_1, entry_2;
4202 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4203 if (!target_ldt_info)
4204 return -TARGET_EFAULT;
4205 idx = tswap32(target_ldt_info->entry_number);
4206 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4207 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4208 unlock_user_struct(target_ldt_info, ptr, 1);
4209 return -TARGET_EINVAL;
4211 lp = (uint32_t *)(gdt_table + idx);
4212 entry_1 = tswap32(lp[0]);
4213 entry_2 = tswap32(lp[1]);
/* Reverse the packing done in do_set_thread_area(): extract each
 * attribute bit from entry_2 (note read_exec_only and seg_not_present
 * are stored inverted, hence the ^ 1). */
4215 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4216 contents = (entry_2 >> 10) & 3;
4217 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4218 seg_32bit = (entry_2 >> 22) & 1;
4219 limit_in_pages = (entry_2 >> 23) & 1;
4220 useable = (entry_2 >> 20) & 1;
4224 lm = (entry_2 >> 21) & 1;
/* Re-pack the attributes into the user_desc flags word layout. */
4226 flags = (seg_32bit << 0) | (contents << 1) |
4227 (read_exec_only << 3) | (limit_in_pages << 4) |
4228 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4229 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4230 base_addr = (entry_1 >> 16) |
4231 (entry_2 & 0xff000000) |
4232 ((entry_2 & 0xff) << 16);
4233 target_ldt_info->base_addr = tswapal(base_addr);
4234 target_ldt_info->limit = tswap32(limit);
4235 target_ldt_info->flags = tswap32(flags);
4236 unlock_user_struct(target_ldt_info, ptr, 1);
4239 #endif /* TARGET_I386 && TARGET_ABI32 */
4241 #ifndef TARGET_ABI32
/* arch_prctl(2) for 64-bit x86 guests: set or get the FS/GS segment
 * base.  SET loads a null selector then writes the base directly into
 * the emulated segment register; GET copies the base to guest memory
 * at ADDR. */
4242 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4249 case TARGET_ARCH_SET_GS:
4250 case TARGET_ARCH_SET_FS:
4251 if (code == TARGET_ARCH_SET_GS)
4255 cpu_x86_load_seg(env, idx, 0);
4256 env->segs[idx].base = addr;
4258 case TARGET_ARCH_GET_GS:
4259 case TARGET_ARCH_GET_FS:
4260 if (code == TARGET_ARCH_GET_GS)
4264 val = env->segs[idx].base;
4265 if (put_user(val, addr, abi_ulong))
4266 ret = -TARGET_EFAULT;
4269 ret = -TARGET_EINVAL;
4276 #endif /* defined(TARGET_I386) */
4278 #define NEW_STACK_SIZE 0x40000
4280 #if defined(CONFIG_USE_NPTL)
/* Serializes thread creation so parent-side TLS setup appears atomic
 * to the child (see clone_func/do_fork below). */
4282 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake state passed from do_fork() to the new thread:
 * mutex/cond implement the "child is ready" rendezvous, and the
 * *_tidptr fields are guest addresses to store the child's TID into
 * (CLONE_CHILD_SETTID / CLONE_PARENT_SETTID). */
4285 pthread_mutex_t mutex;
4286 pthread_cond_t cond;
4289 abi_ulong child_tidptr;
4290 abi_ulong parent_tidptr;
/* Entry point of a new guest thread created by do_fork() (NPTL path).
 * Publishes the host TID to the guest addresses requested via
 * CLONE_*_SETTID, re-enables signals, signals readiness to the parent,
 * then waits on clone_lock until the parent finishes TLS setup. */
4294 static void *clone_func(void *arg)
4296 new_thread_info *info = arg;
4302 ts = (TaskState *)thread_env->opaque;
4303 info->tid = gettid();
4304 env->host_tid = info->tid;
4306 if (info->child_tidptr)
4307 put_user_u32(info->tid, info->child_tidptr);
4308 if (info->parent_tidptr)
4309 put_user_u32(info->tid, info->parent_tidptr);
4310 /* Enable signals. */
4311 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4312 /* Signal to the parent that we're ready. */
4313 pthread_mutex_lock(&info->mutex);
4314 pthread_cond_broadcast(&info->cond);
4315 pthread_mutex_unlock(&info->mutex);
4316 /* Wait until the parent has finished initializing the tls state. */
4317 pthread_mutex_lock(&clone_lock);
4318 pthread_mutex_unlock(&clone_lock);
/* Non-NPTL variant: entry point handed to host clone(); ARG is the
 * child's CPUArchState. */
4325 static int clone_func(void *arg)
4327 CPUArchState *env = arg;
4334 /* do_fork() Must return host values and target errnos (unlike most
4335 do_*() functions). */
/* Emulate clone(2)/fork(2)/vfork(2).  CLONE_VM requests a thread:
 * with NPTL we create a detached pthread running clone_func() and
 * hand-shake with it; otherwise we fall back to host clone() on a
 * freshly malloc'd stack.  Without CLONE_VM we emulate with fork(),
 * and the child re-applies the SETTID/SETTLS/CLEARTID requests itself
 * since it cannot share the parent's spinlock. */
4336 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4337 abi_ulong parent_tidptr, target_ulong newtls,
4338 abi_ulong child_tidptr)
4342 CPUArchState *new_env;
4343 #if defined(CONFIG_USE_NPTL)
4344 unsigned int nptl_flags;
4350 /* Emulate vfork() with fork() */
4351 if (flags & CLONE_VFORK)
4352 flags &= ~(CLONE_VFORK | CLONE_VM);
4354 if (flags & CLONE_VM) {
4355 TaskState *parent_ts = (TaskState *)env->opaque;
4356 #if defined(CONFIG_USE_NPTL)
4357 new_thread_info info;
4358 pthread_attr_t attr;
/* New thread gets its own TaskState and a copy of the CPU state. */
4360 ts = g_malloc0(sizeof(TaskState));
4361 init_task_state(ts);
4362 /* we create a new CPU instance. */
4363 new_env = cpu_copy(env);
4364 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4365 cpu_reset(ENV_GET_CPU(new_env));
4367 /* Init regs that differ from the parent. */
4368 cpu_clone_regs(new_env, newsp);
4369 new_env->opaque = ts;
4370 ts->bprm = parent_ts->bprm;
4371 ts->info = parent_ts->info;
4372 #if defined(CONFIG_USE_NPTL)
/* NPTL-specific flags are handled here, not passed to the host. */
4374 flags &= ~CLONE_NPTL_FLAGS2;
4376 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4377 ts->child_tidptr = child_tidptr;
4380 if (nptl_flags & CLONE_SETTLS)
4381 cpu_set_tls (new_env, newtls);
4383 /* Grab a mutex so that thread setup appears atomic. */
4384 pthread_mutex_lock(&clone_lock);
4386 memset(&info, 0, sizeof(info));
4387 pthread_mutex_init(&info.mutex, NULL);
4388 pthread_mutex_lock(&info.mutex);
4389 pthread_cond_init(&info.cond, NULL);
4391 if (nptl_flags & CLONE_CHILD_SETTID)
4392 info.child_tidptr = child_tidptr;
4393 if (nptl_flags & CLONE_PARENT_SETTID)
4394 info.parent_tidptr = parent_tidptr;
4396 ret = pthread_attr_init(&attr);
4397 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4398 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4399 /* It is not safe to deliver signals until the child has finished
4400 initializing, so temporarily block all signals. */
4401 sigfillset(&sigmask);
4402 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4404 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4405 /* TODO: Free new CPU state if thread creation failed. */
4407 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4408 pthread_attr_destroy(&attr);
4410 /* Wait for the child to initialize. */
4411 pthread_cond_wait(&info.cond, &info.mutex);
4413 if (flags & CLONE_PARENT_SETTID)
4414 put_user_u32(ret, parent_tidptr);
4418 pthread_mutex_unlock(&info.mutex);
4419 pthread_cond_destroy(&info.cond);
4420 pthread_mutex_destroy(&info.mutex);
4421 pthread_mutex_unlock(&clone_lock);
4423 if (flags & CLONE_NPTL_FLAGS2)
4425 /* This is probably going to die very quickly, but do it anyway. */
4426 new_stack = g_malloc0 (NEW_STACK_SIZE);
4428 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4430 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4434 /* if no CLONE_VM, we consider it is a fork */
4435 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4440 /* Child Process. */
4441 cpu_clone_regs(env, newsp);
4443 #if defined(CONFIG_USE_NPTL)
4444 /* There is a race condition here. The parent process could
4445 theoretically read the TID in the child process before the child
4446 tid is set. This would require using either ptrace
4447 (not implemented) or having *_tidptr to point at a shared memory
4448 mapping. We can't repeat the spinlock hack used above because
4449 the child process gets its own copy of the lock. */
4450 if (flags & CLONE_CHILD_SETTID)
4451 put_user_u32(gettid(), child_tidptr)
4452 if (flags & CLONE_PARENT_SETTID)
4453 put_user_u32(gettid(), parent_tidptr);
4454 ts = (TaskState *)env->opaque;
4455 if (flags & CLONE_SETTLS)
4456 cpu_set_tls (env, newtls);
4457 if (flags & CLONE_CHILD_CLEARTID)
4458 ts->child_tidptr = child_tidptr;
4467 /* warning : doesn't handle linux specific flags... */
/* Map a TARGET_F_* fcntl command number to the host F_* value;
 * unsupported commands yield -TARGET_EINVAL.  The 32-bit-ABI-only
 * *LK64 cases let 32-bit guests use the large-file locking commands. */
4468 static int target_to_host_fcntl_cmd(int cmd)
4471 case TARGET_F_DUPFD:
4472 case TARGET_F_GETFD:
4473 case TARGET_F_SETFD:
4474 case TARGET_F_GETFL:
4475 case TARGET_F_SETFL:
4477 case TARGET_F_GETLK:
4479 case TARGET_F_SETLK:
4481 case TARGET_F_SETLKW:
4483 case TARGET_F_GETOWN:
4485 case TARGET_F_SETOWN:
4487 case TARGET_F_GETSIG:
4489 case TARGET_F_SETSIG:
4491 #if TARGET_ABI_BITS == 32
4492 case TARGET_F_GETLK64:
4494 case TARGET_F_SETLK64:
4496 case TARGET_F_SETLKW64:
4499 case TARGET_F_SETLEASE:
4501 case TARGET_F_GETLEASE:
4503 #ifdef F_DUPFD_CLOEXEC
4504 case TARGET_F_DUPFD_CLOEXEC:
4505 return F_DUPFD_CLOEXEC;
4507 case TARGET_F_NOTIFY:
4510 return -TARGET_EINVAL;
4512 return -TARGET_EINVAL;
/* Translation table for flock l_type values (F_RDLCK etc.) between
 * guest and host; the -1 masks mean "match the exact value" in the
 * bitmask translation helpers. */
4515 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4516 static const bitmask_transtbl flock_tbl[] = {
4517 TRANSTBL_CONVERT(F_RDLCK),
4518 TRANSTBL_CONVERT(F_WRLCK),
4519 TRANSTBL_CONVERT(F_UNLCK),
4520 TRANSTBL_CONVERT(F_EXLCK),
4521 TRANSTBL_CONVERT(F_SHLCK),
/* Emulate fcntl(2).  Lock commands marshal struct flock (or flock64)
 * between guest and host layouts with byte-swapping; GETFL/SETFL
 * translate the open-flag bitmask via fcntl_flags_tbl; the remaining
 * supported commands pass ARG through unchanged. */
4525 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4528 struct target_flock *target_fl;
4529 struct flock64 fl64;
4530 struct target_flock64 *target_fl64;
4532 int host_cmd = target_to_host_fcntl_cmd(cmd);
4534 if (host_cmd == -TARGET_EINVAL)
4538 case TARGET_F_GETLK:
/* Read guest flock, query the host, then write the result back. */
4539 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4540 return -TARGET_EFAULT;
4542 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4543 fl.l_whence = tswap16(target_fl->l_whence);
4544 fl.l_start = tswapal(target_fl->l_start);
4545 fl.l_len = tswapal(target_fl->l_len);
4546 fl.l_pid = tswap32(target_fl->l_pid);
4547 unlock_user_struct(target_fl, arg, 0);
4548 ret = get_errno(fcntl(fd, host_cmd, &fl));
4550 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4551 return -TARGET_EFAULT;
4553 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4554 target_fl->l_whence = tswap16(fl.l_whence);
4555 target_fl->l_start = tswapal(fl.l_start);
4556 target_fl->l_len = tswapal(fl.l_len);
4557 target_fl->l_pid = tswap32(fl.l_pid);
4558 unlock_user_struct(target_fl, arg, 1);
4562 case TARGET_F_SETLK:
4563 case TARGET_F_SETLKW:
4564 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4565 return -TARGET_EFAULT;
4567 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4568 fl.l_whence = tswap16(target_fl->l_whence);
4569 fl.l_start = tswapal(target_fl->l_start);
4570 fl.l_len = tswapal(target_fl->l_len);
4571 fl.l_pid = tswap32(target_fl->l_pid);
4572 unlock_user_struct(target_fl, arg, 0);
4573 ret = get_errno(fcntl(fd, host_cmd, &fl));
/* 64-bit offsets: same shape as above but with struct flock64 and
 * 64-bit start/len swaps.  Note the l_type >> 1 adjustment between
 * the flock64 and flock encodings. */
4576 case TARGET_F_GETLK64:
4577 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4578 return -TARGET_EFAULT;
4580 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4581 fl64.l_whence = tswap16(target_fl64->l_whence);
4582 fl64.l_start = tswap64(target_fl64->l_start);
4583 fl64.l_len = tswap64(target_fl64->l_len);
4584 fl64.l_pid = tswap32(target_fl64->l_pid);
4585 unlock_user_struct(target_fl64, arg, 0);
4586 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4588 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4589 return -TARGET_EFAULT;
4590 target_fl64->l_type =
4591 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4592 target_fl64->l_whence = tswap16(fl64.l_whence);
4593 target_fl64->l_start = tswap64(fl64.l_start);
4594 target_fl64->l_len = tswap64(fl64.l_len);
4595 target_fl64->l_pid = tswap32(fl64.l_pid);
4596 unlock_user_struct(target_fl64, arg, 1);
4599 case TARGET_F_SETLK64:
4600 case TARGET_F_SETLKW64:
4601 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4602 return -TARGET_EFAULT;
4604 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4605 fl64.l_whence = tswap16(target_fl64->l_whence);
4606 fl64.l_start = tswap64(target_fl64->l_start);
4607 fl64.l_len = tswap64(target_fl64->l_len);
4608 fl64.l_pid = tswap32(target_fl64->l_pid);
4609 unlock_user_struct(target_fl64, arg, 0);
4610 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4613 case TARGET_F_GETFL:
/* Returned open flags must be translated back to target encoding. */
4614 ret = get_errno(fcntl(fd, host_cmd, arg));
4616 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4620 case TARGET_F_SETFL:
4621 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4624 case TARGET_F_SETOWN:
4625 case TARGET_F_GETOWN:
4626 case TARGET_F_SETSIG:
4627 case TARGET_F_GETSIG:
4628 case TARGET_F_SETLEASE:
4629 case TARGET_F_GETLEASE:
4630 ret = get_errno(fcntl(fd, host_cmd, arg));
4634 ret = get_errno(fcntl(fd, cmd, arg));
/* uid/gid width conversion helpers.  With USE_UID16 the guest ABI has
 * 16-bit ids: high2low* narrow a host id for the guest, low2high*
 * widen a guest id (with (u)int16_t -1 treated as the "no change"
 * sentinel).  Without USE_UID16 all of these are identity functions. */
4642 static inline int high2lowuid(int uid)
4650 static inline int high2lowgid(int gid)
4658 static inline int low2highuid(int uid)
4660 if ((int16_t)uid == -1)
4666 static inline int low2highgid(int gid)
4668 if ((int16_t)gid == -1)
4673 static inline int tswapid(int id)
4677 #else /* !USE_UID16 */
4678 static inline int high2lowuid(int uid)
4682 static inline int high2lowgid(int gid)
4686 static inline int low2highuid(int uid)
4690 static inline int low2highgid(int gid)
4694 static inline int tswapid(int id)
4698 #endif /* USE_UID16 */
/* One-time syscall-layer initialization: register all thunk struct
 * descriptors from syscall_types.h, build the reverse errno table,
 * and patch ioctl command numbers whose size field is a placeholder
 * with the real thunk-computed struct size. */
4700 void syscall_init(void)
4703 const argtype *arg_type;
4707 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4708 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4709 #include "syscall_types.h"
4711 #undef STRUCT_SPECIAL
4713 /* Build target_to_host_errno_table[] table from
4714 * host_to_target_errno_table[]. */
4715 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4716 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4719 /* we patch the ioctl size if necessary. We rely on the fact that
4720 no ioctl has all the bits at '1' in the size field */
4722 while (ie->target_cmd != 0) {
/* All-ones size field marks "size to be filled in at runtime". */
4723 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4724 TARGET_IOC_SIZEMASK) {
4725 arg_type = ie->arg_type;
4726 if (arg_type[0] != TYPE_PTR) {
4727 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4732 size = thunk_type_size(arg_type, 0);
4733 ie->target_cmd = (ie->target_cmd &
4734 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4735 (size << TARGET_IOC_SIZESHIFT);
4738 /* automatic consistency check if same arch */
4739 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4740 (defined(__x86_64__) && defined(TARGET_X86_64))
4741 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4742 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4743 ie->name, ie->target_cmd, ie->host_cmd);
4750 #if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit value passed by a 32-bit guest as a register
 * pair; which word is the high half depends on guest endianness.  On
 * 64-bit ABIs the value arrives whole, so only word0 is meaningful. */
4751 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4753 #ifdef TARGET_WORDS_BIGENDIAN
4754 return ((uint64_t)word0 << 32) | word1;
4756 return ((uint64_t)word1 << 32) | word0;
4759 #else /* TARGET_ABI_BITS == 32 */
4760 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4764 #endif /* TARGET_ABI_BITS != 32 */
4766 #ifdef TARGET_NR_truncate64
/* truncate64/ftruncate64 wrappers: rebuild the 64-bit length from the
 * guest's register pair (shifting the pair up by one register first
 * when the ABI requires aligned register pairs). */
4767 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4772 if (regpairs_aligned(cpu_env)) {
4776 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4780 #ifdef TARGET_NR_ftruncate64
4781 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4786 if (regpairs_aligned(cpu_env)) {
4790 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/* Copy a struct timespec from guest memory into HOST_TS, swapping
 * each field to host byte order.  Returns -TARGET_EFAULT on a bad
 * guest pointer. */
4794 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4795 abi_ulong target_addr)
4797 struct target_timespec *target_ts;
4799 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4800 return -TARGET_EFAULT;
4801 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4802 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4803 unlock_user_struct(target_ts, target_addr, 0);
/* Inverse direction: write HOST_TS out to guest memory. */
4807 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4808 struct timespec *host_ts)
4810 struct target_timespec *target_ts;
4812 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4813 return -TARGET_EFAULT;
4814 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4815 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4816 unlock_user_struct(target_ts, target_addr, 1);
4820 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Copy a host struct stat into the guest's stat64 layout at
 * TARGET_ADDR.  ARM EABI guests use a distinct target_eabi_stat64
 * layout; otherwise the target_stat/target_stat64 variant is chosen
 * by ABI width.  Both paths zero the guest struct first and store
 * each field with __put_user byte-swapping. */
4821 static inline abi_long host_to_target_stat64(void *cpu_env,
4822 abi_ulong target_addr,
4823 struct stat *host_st)
4826 if (((CPUARMState *)cpu_env)->eabi) {
4827 struct target_eabi_stat64 *target_st;
4829 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4830 return -TARGET_EFAULT;
4831 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4832 __put_user(host_st->st_dev, &target_st->st_dev);
4833 __put_user(host_st->st_ino, &target_st->st_ino);
4834 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
/* Some layouts carry the inode in a second __st_ino field too. */
4835 __put_user(host_st->st_ino, &target_st->__st_ino);
4837 __put_user(host_st->st_mode, &target_st->st_mode);
4838 __put_user(host_st->st_nlink, &target_st->st_nlink);
4839 __put_user(host_st->st_uid, &target_st->st_uid);
4840 __put_user(host_st->st_gid, &target_st->st_gid);
4841 __put_user(host_st->st_rdev, &target_st->st_rdev);
4842 __put_user(host_st->st_size, &target_st->st_size);
4843 __put_user(host_st->st_blksize, &target_st->st_blksize);
4844 __put_user(host_st->st_blocks, &target_st->st_blocks);
4845 __put_user(host_st->st_atime, &target_st->target_st_atime);
4846 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4847 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4848 unlock_user_struct(target_st, target_addr, 1);
4852 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4853 struct target_stat *target_st;
4855 struct target_stat64 *target_st;
4858 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4859 return -TARGET_EFAULT;
4860 memset(target_st, 0, sizeof(*target_st));
4861 __put_user(host_st->st_dev, &target_st->st_dev);
4862 __put_user(host_st->st_ino, &target_st->st_ino);
4863 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4864 __put_user(host_st->st_ino, &target_st->__st_ino);
4866 __put_user(host_st->st_mode, &target_st->st_mode);
4867 __put_user(host_st->st_nlink, &target_st->st_nlink);
4868 __put_user(host_st->st_uid, &target_st->st_uid);
4869 __put_user(host_st->st_gid, &target_st->st_gid);
4870 __put_user(host_st->st_rdev, &target_st->st_rdev);
4871 /* XXX: better use of kernel struct */
4872 __put_user(host_st->st_size, &target_st->st_size);
4873 __put_user(host_st->st_blksize, &target_st->st_blksize);
4874 __put_user(host_st->st_blocks, &target_st->st_blocks);
4875 __put_user(host_st->st_atime, &target_st->target_st_atime);
4876 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4877 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4878 unlock_user_struct(target_st, target_addr, 1);
4885 #if defined(CONFIG_USE_NPTL)
4886 /* ??? Using host futex calls even when target atomic operations
4887 are not really atomic probably breaks things. However implementing
4888 futexes locally would make futexes shared between multiple processes
4889 tricky. However they're probably useless because guest atomic
4890 operations won't work either. */
/* Emulate futex(2) by forwarding to the host futex syscall on the
 * guest address translated with g2h().  The timeout is converted with
 * target_to_host_timespec() where it really is a timespec; for the
 * requeue/wake-op family it is a plain count (see comment below). */
4891 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4892 target_ulong uaddr2, int val3)
4894 struct timespec ts, *pts;
4897 /* ??? We assume FUTEX_* constants are the same on both host
4899 #ifdef FUTEX_CMD_MASK
4900 base_op = op & FUTEX_CMD_MASK;
4908 target_to_host_timespec(pts, timeout);
/* FUTEX_WAIT compares *uaddr against val, so val must be swapped to
 * guest byte order to match guest memory. */
4912 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4915 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4917 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4919 case FUTEX_CMP_REQUEUE:
4921 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4922 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4923 But the prototype takes a `struct timespec *'; insert casts
4924 to satisfy the compiler. We do not need to tswap TIMEOUT
4925 since it's not compared to guest memory. */
4926 pts = (struct timespec *)(uintptr_t) timeout;
4927 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4929 (base_op == FUTEX_CMP_REQUEUE
4933 return -TARGET_ENOSYS;
4938 /* Map host to target signal numbers for the wait family of syscalls.
4939 Assume all other status bits are the same. */
/* Translate only the signal-number portion of a wait status: the low
 * 7 bits for a terminated process, or the byte at bits 8-15 for a
 * stopped one; all other bits pass through untouched. */
4940 int host_to_target_waitstatus(int status)
4942 if (WIFSIGNALED(status)) {
4943 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4945 if (WIFSTOPPED(status)) {
4946 return (host_to_target_signal(WSTOPSIG(status)) << 8)
/* Return the kernel version as a packed integer (one byte per
 * major/minor/patch component), cached in a static after the first
 * call.  The -r override (qemu_uname_release) takes precedence over
 * the real uname release string. */
4952 int get_osversion(void)
4954 static int osversion;
4955 struct new_utsname buf;
4960 if (qemu_uname_release && *qemu_uname_release) {
4961 s = qemu_uname_release;
4963 if (sys_uname(&buf))
/* Parse up to three dot-separated decimal components. */
4968 for (i = 0; i < 3; i++) {
4970 while (*s >= '0' && *s <= '9') {
4975 tmp = (tmp << 8) + n;
/* Fake /proc/self/maps: read the host's maps, keep only ranges that
 * are valid guest addresses, and rewrite them as guest (g2h-inverted)
 * addresses into FD.  On targets with a synthesized stack a [stack]
 * line is appended from the image_info bounds. */
4984 static int open_self_maps(void *cpu_env, int fd)
4986 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4987 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
4994 fp = fopen("/proc/self/maps", "r");
4999 while ((read = getline(&line, &len, fp)) != -1) {
5000 int fields, dev_maj, dev_min, inode;
5001 uint64_t min, max, offset;
5002 char flag_r, flag_w, flag_x, flag_p;
5003 char path[512] = "";
5004 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5005 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5006 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
/* 10 fields = anonymous mapping, 11 = with pathname; skip others. */
5008 if ((fields < 10) || (fields > 11)) {
5011 if (!strncmp(path, "[stack]", 7)) {
5014 if (h2g_valid(min) && h2g_valid(max)) {
5015 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5016 " %c%c%c%c %08" PRIx64 " %02x:%02x %d%s%s\n",
5017 h2g(min), h2g(max), flag_r, flag_w,
5018 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5019 path[0] ? " " : "", path);
5026 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5027 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
5028 (unsigned long long)ts->info->stack_limit,
5029 (unsigned long long)(ts->info->start_stack +
5030 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
5031 (unsigned long long)0);
/* Fake /proc/self/stat: emit the 44 space-separated fields, filling
 * in only pid (field 0), comm (field 1), and the guest stack start
 * (field 27); every other field is written as 0. */
5037 static int open_self_stat(void *cpu_env, int fd)
5039 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5040 abi_ulong start_stack = ts->info->start_stack;
5043 for (i = 0; i < 44; i++) {
5051 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5052 } else if (i == 1) {
5054 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5055 } else if (i == 27) {
5058 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5060 /* for the rest, there is MasterCard */
5061 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5065 if (write(fd, buf, len) != len) {
/* Fake /proc/self/auxv: copy the guest's saved auxiliary vector (it
 * lives on the target stack) into FD, then rewind FD so the caller
 * reads from the beginning. */
5073 static int open_self_auxv(void *cpu_env, int fd)
5075 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5076 abi_ulong auxv = ts->info->saved_auxv;
5077 abi_ulong len = ts->info->auxv_len;
5081 * Auxiliary vector is stored in target process stack.
5082 * read in whole auxv vector and copy it to file
5084 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5088 r = write(fd, ptr, len);
5095 lseek(fd, 0, SEEK_SET);
5096 unlock_user(ptr, auxv, len);
/* Emulate open(2).  A few /proc/self files must reflect the guest,
 * not QEMU itself, so matches against the fakes[] table are served
 * from a temp file populated by the corresponding fill callback;
 * everything else goes to the host open() via path() translation. */
5102 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
5105 const char *filename;
5106 int (*fill)(void *cpu_env, int fd);
5108 const struct fake_open *fake_open;
5109 static const struct fake_open fakes[] = {
5110 { "/proc/self/maps", open_self_maps },
5111 { "/proc/self/stat", open_self_stat },
5112 { "/proc/self/auxv", open_self_auxv },
5116 for (fake_open = fakes; fake_open->filename; fake_open++) {
5117 if (!strncmp(pathname, fake_open->filename,
5118 strlen(fake_open->filename))) {
5123 if (fake_open->filename) {
5125 char filename[PATH_MAX];
5128 /* create temporary file to map stat to */
5129 tmpdir = getenv("TMPDIR");
5132 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5133 fd = mkstemp(filename);
5139 if ((r = fake_open->fill(cpu_env, fd))) {
5143 lseek(fd, 0, SEEK_SET);
5148 return get_errno(open(path(pathname), flags, mode));
5151 /* do_syscall() should always have a single exit point at the end so
5152 that actions, such as logging of syscall results, can be performed.
5153 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5154 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5155 abi_long arg2, abi_long arg3, abi_long arg4,
5156 abi_long arg5, abi_long arg6, abi_long arg7,
5165 gemu_log("syscall %d", num);
5168 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5171 case TARGET_NR_exit:
5172 #ifdef CONFIG_USE_NPTL
5173 /* In old applications this may be used to implement _exit(2).
5174 However in threaded applications it is used for thread termination,
5175 and _exit_group is used for application termination.
5176 Do thread termination if we have more than one thread. */
5177 /* FIXME: This probably breaks if a signal arrives. We should probably
5178 be disabling signals. */
5179 if (first_cpu->next_cpu) {
5181 CPUArchState **lastp;
5187 while (p && p != (CPUArchState *)cpu_env) {
5188 lastp = &p->next_cpu;
5191 /* If we didn't find the CPU for this thread then something is
5195 /* Remove the CPU from the list. */
5196 *lastp = p->next_cpu;
5198 ts = ((CPUArchState *)cpu_env)->opaque;
5199 if (ts->child_tidptr) {
5200 put_user_u32(0, ts->child_tidptr);
5201 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5205 object_delete(OBJECT(ENV_GET_CPU(cpu_env)));
5213 gdb_exit(cpu_env, arg1);
5215 ret = 0; /* avoid warning */
5217 case TARGET_NR_read:
5221 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5223 ret = get_errno(read(arg1, p, arg3));
5224 unlock_user(p, arg2, ret);
5227 case TARGET_NR_write:
5228 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5230 ret = get_errno(write(arg1, p, arg3));
5231 unlock_user(p, arg2, 0);
5233 case TARGET_NR_open:
5234 if (!(p = lock_user_string(arg1)))
5236 ret = get_errno(do_open(cpu_env, p,
5237 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5239 unlock_user(p, arg1, 0);
5241 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5242 case TARGET_NR_openat:
5243 if (!(p = lock_user_string(arg2)))
5245 ret = get_errno(sys_openat(arg1,
5247 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5249 unlock_user(p, arg2, 0);
5252 case TARGET_NR_close:
5253 ret = get_errno(close(arg1));
5258 case TARGET_NR_fork:
5259 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5261 #ifdef TARGET_NR_waitpid
5262 case TARGET_NR_waitpid:
5265 ret = get_errno(waitpid(arg1, &status, arg3));
5266 if (!is_error(ret) && arg2 && ret
5267 && put_user_s32(host_to_target_waitstatus(status), arg2))
5272 #ifdef TARGET_NR_waitid
5273 case TARGET_NR_waitid:
5277 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5278 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5279 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5281 host_to_target_siginfo(p, &info);
5282 unlock_user(p, arg3, sizeof(target_siginfo_t));
5287 #ifdef TARGET_NR_creat /* not on alpha */
5288 case TARGET_NR_creat:
5289 if (!(p = lock_user_string(arg1)))
5291 ret = get_errno(creat(p, arg2));
5292 unlock_user(p, arg1, 0);
5295 case TARGET_NR_link:
5298 p = lock_user_string(arg1);
5299 p2 = lock_user_string(arg2);
5301 ret = -TARGET_EFAULT;
5303 ret = get_errno(link(p, p2));
5304 unlock_user(p2, arg2, 0);
5305 unlock_user(p, arg1, 0);
5308 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
5309 case TARGET_NR_linkat:
5314 p = lock_user_string(arg2);
5315 p2 = lock_user_string(arg4);
5317 ret = -TARGET_EFAULT;
5319 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
5320 unlock_user(p, arg2, 0);
5321 unlock_user(p2, arg4, 0);
5325 case TARGET_NR_unlink:
5326 if (!(p = lock_user_string(arg1)))
5328 ret = get_errno(unlink(p));
5329 unlock_user(p, arg1, 0);
5331 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
5332 case TARGET_NR_unlinkat:
5333 if (!(p = lock_user_string(arg2)))
5335 ret = get_errno(sys_unlinkat(arg1, p, arg3));
5336 unlock_user(p, arg2, 0);
5339 case TARGET_NR_execve:
5341 char **argp, **envp;
5344 abi_ulong guest_argp;
5345 abi_ulong guest_envp;
5352 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5353 if (get_user_ual(addr, gp))
5361 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5362 if (get_user_ual(addr, gp))
5369 argp = alloca((argc + 1) * sizeof(void *));
5370 envp = alloca((envc + 1) * sizeof(void *));
5372 for (gp = guest_argp, q = argp; gp;
5373 gp += sizeof(abi_ulong), q++) {
5374 if (get_user_ual(addr, gp))
5378 if (!(*q = lock_user_string(addr)))
5380 total_size += strlen(*q) + 1;
5384 for (gp = guest_envp, q = envp; gp;
5385 gp += sizeof(abi_ulong), q++) {
5386 if (get_user_ual(addr, gp))
5390 if (!(*q = lock_user_string(addr)))
5392 total_size += strlen(*q) + 1;
5396 /* This case will not be caught by the host's execve() if its
5397 page size is bigger than the target's. */
5398 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5399 ret = -TARGET_E2BIG;
5402 if (!(p = lock_user_string(arg1)))
5404 ret = get_errno(execve(p, argp, envp));
5405 unlock_user(p, arg1, 0);
5410 ret = -TARGET_EFAULT;
5413 for (gp = guest_argp, q = argp; *q;
5414 gp += sizeof(abi_ulong), q++) {
5415 if (get_user_ual(addr, gp)
5418 unlock_user(*q, addr, 0);
5420 for (gp = guest_envp, q = envp; *q;
5421 gp += sizeof(abi_ulong), q++) {
5422 if (get_user_ual(addr, gp)
5425 unlock_user(*q, addr, 0);
5429 case TARGET_NR_chdir:
5430 if (!(p = lock_user_string(arg1)))
5432 ret = get_errno(chdir(p));
5433 unlock_user(p, arg1, 0);
5435 #ifdef TARGET_NR_time
5436 case TARGET_NR_time:
5439 ret = get_errno(time(&host_time));
5442 && put_user_sal(host_time, arg1))
5447 case TARGET_NR_mknod:
5448 if (!(p = lock_user_string(arg1)))
5450 ret = get_errno(mknod(p, arg2, arg3));
5451 unlock_user(p, arg1, 0);
5453 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
5454 case TARGET_NR_mknodat:
5455 if (!(p = lock_user_string(arg2)))
5457 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
5458 unlock_user(p, arg2, 0);
5461 case TARGET_NR_chmod:
5462 if (!(p = lock_user_string(arg1)))
5464 ret = get_errno(chmod(p, arg2));
5465 unlock_user(p, arg1, 0);
5467 #ifdef TARGET_NR_break
5468 case TARGET_NR_break:
5471 #ifdef TARGET_NR_oldstat
5472 case TARGET_NR_oldstat:
5475 case TARGET_NR_lseek:
5476 ret = get_errno(lseek(arg1, arg2, arg3));
5478 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5479 /* Alpha specific */
5480 case TARGET_NR_getxpid:
5481 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5482 ret = get_errno(getpid());
5485 #ifdef TARGET_NR_getpid
5486 case TARGET_NR_getpid:
5487 ret = get_errno(getpid());
5490 case TARGET_NR_mount:
5492 /* need to look at the data field */
5494 p = lock_user_string(arg1);
5495 p2 = lock_user_string(arg2);
5496 p3 = lock_user_string(arg3);
5497 if (!p || !p2 || !p3)
5498 ret = -TARGET_EFAULT;
5500 /* FIXME - arg5 should be locked, but it isn't clear how to
5501 * do that since it's not guaranteed to be a NULL-terminated
5505 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5507 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5509 unlock_user(p, arg1, 0);
5510 unlock_user(p2, arg2, 0);
5511 unlock_user(p3, arg3, 0);
5514 #ifdef TARGET_NR_umount
5515 case TARGET_NR_umount:
5516 if (!(p = lock_user_string(arg1)))
5518 ret = get_errno(umount(p));
5519 unlock_user(p, arg1, 0);
5522 #ifdef TARGET_NR_stime /* not on alpha */
5523 case TARGET_NR_stime:
5526 if (get_user_sal(host_time, arg1))
5528 ret = get_errno(stime(&host_time));
5532 case TARGET_NR_ptrace:
5534 #ifdef TARGET_NR_alarm /* not on alpha */
5535 case TARGET_NR_alarm:
5539 #ifdef TARGET_NR_oldfstat
5540 case TARGET_NR_oldfstat:
5543 #ifdef TARGET_NR_pause /* not on alpha */
5544 case TARGET_NR_pause:
5545 ret = get_errno(pause());
5548 #ifdef TARGET_NR_utime
5549 case TARGET_NR_utime:
5551 struct utimbuf tbuf, *host_tbuf;
5552 struct target_utimbuf *target_tbuf;
5554 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5556 tbuf.actime = tswapal(target_tbuf->actime);
5557 tbuf.modtime = tswapal(target_tbuf->modtime);
5558 unlock_user_struct(target_tbuf, arg2, 0);
5563 if (!(p = lock_user_string(arg1)))
5565 ret = get_errno(utime(p, host_tbuf));
5566 unlock_user(p, arg1, 0);
5570 case TARGET_NR_utimes:
5572 struct timeval *tvp, tv[2];
5574 if (copy_from_user_timeval(&tv[0], arg2)
5575 || copy_from_user_timeval(&tv[1],
5576 arg2 + sizeof(struct target_timeval)))
5582 if (!(p = lock_user_string(arg1)))
5584 ret = get_errno(utimes(p, tvp));
5585 unlock_user(p, arg1, 0);
5588 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5589 case TARGET_NR_futimesat:
5591 struct timeval *tvp, tv[2];
5593 if (copy_from_user_timeval(&tv[0], arg3)
5594 || copy_from_user_timeval(&tv[1],
5595 arg3 + sizeof(struct target_timeval)))
5601 if (!(p = lock_user_string(arg2)))
5603 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
5604 unlock_user(p, arg2, 0);
5608 #ifdef TARGET_NR_stty
5609 case TARGET_NR_stty:
5612 #ifdef TARGET_NR_gtty
5613 case TARGET_NR_gtty:
5616 case TARGET_NR_access:
5617 if (!(p = lock_user_string(arg1)))
5619 ret = get_errno(access(path(p), arg2));
5620 unlock_user(p, arg1, 0);
5622 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5623 case TARGET_NR_faccessat:
5624 if (!(p = lock_user_string(arg2)))
5626 ret = get_errno(sys_faccessat(arg1, p, arg3));
5627 unlock_user(p, arg2, 0);
5630 #ifdef TARGET_NR_nice /* not on alpha */
5631 case TARGET_NR_nice:
5632 ret = get_errno(nice(arg1));
5635 #ifdef TARGET_NR_ftime
5636 case TARGET_NR_ftime:
5639 case TARGET_NR_sync:
5643 case TARGET_NR_kill:
5644 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5646 case TARGET_NR_rename:
5649 p = lock_user_string(arg1);
5650 p2 = lock_user_string(arg2);
5652 ret = -TARGET_EFAULT;
5654 ret = get_errno(rename(p, p2));
5655 unlock_user(p2, arg2, 0);
5656 unlock_user(p, arg1, 0);
5659 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5660 case TARGET_NR_renameat:
5663 p = lock_user_string(arg2);
5664 p2 = lock_user_string(arg4);
5666 ret = -TARGET_EFAULT;
5668 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5669 unlock_user(p2, arg4, 0);
5670 unlock_user(p, arg2, 0);
5674 case TARGET_NR_mkdir:
5675 if (!(p = lock_user_string(arg1)))
5677 ret = get_errno(mkdir(p, arg2));
5678 unlock_user(p, arg1, 0);
5680 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5681 case TARGET_NR_mkdirat:
5682 if (!(p = lock_user_string(arg2)))
5684 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5685 unlock_user(p, arg2, 0);
5688 case TARGET_NR_rmdir:
5689 if (!(p = lock_user_string(arg1)))
5691 ret = get_errno(rmdir(p));
5692 unlock_user(p, arg1, 0);
5695 ret = get_errno(dup(arg1));
5697 case TARGET_NR_pipe:
5698 ret = do_pipe(cpu_env, arg1, 0, 0);
5700 #ifdef TARGET_NR_pipe2
5701 case TARGET_NR_pipe2:
5702 ret = do_pipe(cpu_env, arg1,
5703 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5706 case TARGET_NR_times:
5708 struct target_tms *tmsp;
5710 ret = get_errno(times(&tms));
5712 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5715 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5716 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5717 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5718 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5721 ret = host_to_target_clock_t(ret);
5724 #ifdef TARGET_NR_prof
5725 case TARGET_NR_prof:
5728 #ifdef TARGET_NR_signal
5729 case TARGET_NR_signal:
5732 case TARGET_NR_acct:
5734 ret = get_errno(acct(NULL));
5736 if (!(p = lock_user_string(arg1)))
5738 ret = get_errno(acct(path(p)));
5739 unlock_user(p, arg1, 0);
5742 #ifdef TARGET_NR_umount2 /* not on alpha */
5743 case TARGET_NR_umount2:
5744 if (!(p = lock_user_string(arg1)))
5746 ret = get_errno(umount2(p, arg2));
5747 unlock_user(p, arg1, 0);
5750 #ifdef TARGET_NR_lock
5751 case TARGET_NR_lock:
5754 case TARGET_NR_ioctl:
5755 ret = do_ioctl(arg1, arg2, arg3);
5757 case TARGET_NR_fcntl:
5758 ret = do_fcntl(arg1, arg2, arg3);
5760 #ifdef TARGET_NR_mpx
5764 case TARGET_NR_setpgid:
5765 ret = get_errno(setpgid(arg1, arg2));
5767 #ifdef TARGET_NR_ulimit
5768 case TARGET_NR_ulimit:
5771 #ifdef TARGET_NR_oldolduname
5772 case TARGET_NR_oldolduname:
5775 case TARGET_NR_umask:
5776 ret = get_errno(umask(arg1));
5778 case TARGET_NR_chroot:
5779 if (!(p = lock_user_string(arg1)))
5781 ret = get_errno(chroot(p));
5782 unlock_user(p, arg1, 0);
5784 case TARGET_NR_ustat:
5786 case TARGET_NR_dup2:
5787 ret = get_errno(dup2(arg1, arg2));
5789 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5790 case TARGET_NR_dup3:
5791 ret = get_errno(dup3(arg1, arg2, arg3));
5794 #ifdef TARGET_NR_getppid /* not on alpha */
5795 case TARGET_NR_getppid:
5796 ret = get_errno(getppid());
5799 case TARGET_NR_getpgrp:
5800 ret = get_errno(getpgrp());
5802 case TARGET_NR_setsid:
5803 ret = get_errno(setsid());
5805 #ifdef TARGET_NR_sigaction
5806 case TARGET_NR_sigaction:
5808 #if defined(TARGET_ALPHA)
5809 struct target_sigaction act, oact, *pact = 0;
5810 struct target_old_sigaction *old_act;
5812 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5814 act._sa_handler = old_act->_sa_handler;
5815 target_siginitset(&act.sa_mask, old_act->sa_mask);
5816 act.sa_flags = old_act->sa_flags;
5817 act.sa_restorer = 0;
5818 unlock_user_struct(old_act, arg2, 0);
5821 ret = get_errno(do_sigaction(arg1, pact, &oact));
5822 if (!is_error(ret) && arg3) {
5823 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5825 old_act->_sa_handler = oact._sa_handler;
5826 old_act->sa_mask = oact.sa_mask.sig[0];
5827 old_act->sa_flags = oact.sa_flags;
5828 unlock_user_struct(old_act, arg3, 1);
5830 #elif defined(TARGET_MIPS)
5831 struct target_sigaction act, oact, *pact, *old_act;
5834 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5836 act._sa_handler = old_act->_sa_handler;
5837 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5838 act.sa_flags = old_act->sa_flags;
5839 unlock_user_struct(old_act, arg2, 0);
5845 ret = get_errno(do_sigaction(arg1, pact, &oact));
5847 if (!is_error(ret) && arg3) {
5848 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5850 old_act->_sa_handler = oact._sa_handler;
5851 old_act->sa_flags = oact.sa_flags;
5852 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5853 old_act->sa_mask.sig[1] = 0;
5854 old_act->sa_mask.sig[2] = 0;
5855 old_act->sa_mask.sig[3] = 0;
5856 unlock_user_struct(old_act, arg3, 1);
5859 struct target_old_sigaction *old_act;
5860 struct target_sigaction act, oact, *pact;
5862 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5864 act._sa_handler = old_act->_sa_handler;
5865 target_siginitset(&act.sa_mask, old_act->sa_mask);
5866 act.sa_flags = old_act->sa_flags;
5867 act.sa_restorer = old_act->sa_restorer;
5868 unlock_user_struct(old_act, arg2, 0);
5873 ret = get_errno(do_sigaction(arg1, pact, &oact));
5874 if (!is_error(ret) && arg3) {
5875 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5877 old_act->_sa_handler = oact._sa_handler;
5878 old_act->sa_mask = oact.sa_mask.sig[0];
5879 old_act->sa_flags = oact.sa_flags;
5880 old_act->sa_restorer = oact.sa_restorer;
5881 unlock_user_struct(old_act, arg3, 1);
5887 case TARGET_NR_rt_sigaction:
5889 #if defined(TARGET_ALPHA)
5890 struct target_sigaction act, oact, *pact = 0;
5891 struct target_rt_sigaction *rt_act;
5892 /* ??? arg4 == sizeof(sigset_t). */
5894 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5896 act._sa_handler = rt_act->_sa_handler;
5897 act.sa_mask = rt_act->sa_mask;
5898 act.sa_flags = rt_act->sa_flags;
5899 act.sa_restorer = arg5;
5900 unlock_user_struct(rt_act, arg2, 0);
5903 ret = get_errno(do_sigaction(arg1, pact, &oact));
5904 if (!is_error(ret) && arg3) {
5905 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5907 rt_act->_sa_handler = oact._sa_handler;
5908 rt_act->sa_mask = oact.sa_mask;
5909 rt_act->sa_flags = oact.sa_flags;
5910 unlock_user_struct(rt_act, arg3, 1);
5913 struct target_sigaction *act;
5914 struct target_sigaction *oact;
5917 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5922 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5923 ret = -TARGET_EFAULT;
5924 goto rt_sigaction_fail;
5928 ret = get_errno(do_sigaction(arg1, act, oact));
5931 unlock_user_struct(act, arg2, 0);
5933 unlock_user_struct(oact, arg3, 1);
5937 #ifdef TARGET_NR_sgetmask /* not on alpha */
5938 case TARGET_NR_sgetmask:
5941 abi_ulong target_set;
5942 sigprocmask(0, NULL, &cur_set);
5943 host_to_target_old_sigset(&target_set, &cur_set);
5948 #ifdef TARGET_NR_ssetmask /* not on alpha */
5949 case TARGET_NR_ssetmask:
5951 sigset_t set, oset, cur_set;
5952 abi_ulong target_set = arg1;
5953 sigprocmask(0, NULL, &cur_set);
5954 target_to_host_old_sigset(&set, &target_set);
5955 sigorset(&set, &set, &cur_set);
5956 sigprocmask(SIG_SETMASK, &set, &oset);
5957 host_to_target_old_sigset(&target_set, &oset);
5962 #ifdef TARGET_NR_sigprocmask
5963 case TARGET_NR_sigprocmask:
5965 #if defined(TARGET_ALPHA)
5966 sigset_t set, oldset;
5971 case TARGET_SIG_BLOCK:
5974 case TARGET_SIG_UNBLOCK:
5977 case TARGET_SIG_SETMASK:
5981 ret = -TARGET_EINVAL;
5985 target_to_host_old_sigset(&set, &mask);
5987 ret = get_errno(sigprocmask(how, &set, &oldset));
5988 if (!is_error(ret)) {
5989 host_to_target_old_sigset(&mask, &oldset);
5991 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5994 sigset_t set, oldset, *set_ptr;
5999 case TARGET_SIG_BLOCK:
6002 case TARGET_SIG_UNBLOCK:
6005 case TARGET_SIG_SETMASK:
6009 ret = -TARGET_EINVAL;
6012 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6014 target_to_host_old_sigset(&set, p);
6015 unlock_user(p, arg2, 0);
6021 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6022 if (!is_error(ret) && arg3) {
6023 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6025 host_to_target_old_sigset(p, &oldset);
6026 unlock_user(p, arg3, sizeof(target_sigset_t));
6032 case TARGET_NR_rt_sigprocmask:
6035 sigset_t set, oldset, *set_ptr;
6039 case TARGET_SIG_BLOCK:
6042 case TARGET_SIG_UNBLOCK:
6045 case TARGET_SIG_SETMASK:
6049 ret = -TARGET_EINVAL;
6052 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6054 target_to_host_sigset(&set, p);
6055 unlock_user(p, arg2, 0);
6061 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6062 if (!is_error(ret) && arg3) {
6063 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6065 host_to_target_sigset(p, &oldset);
6066 unlock_user(p, arg3, sizeof(target_sigset_t));
6070 #ifdef TARGET_NR_sigpending
6071 case TARGET_NR_sigpending:
6074 ret = get_errno(sigpending(&set));
6075 if (!is_error(ret)) {
6076 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6078 host_to_target_old_sigset(p, &set);
6079 unlock_user(p, arg1, sizeof(target_sigset_t));
6084 case TARGET_NR_rt_sigpending:
6087 ret = get_errno(sigpending(&set));
6088 if (!is_error(ret)) {
6089 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6091 host_to_target_sigset(p, &set);
6092 unlock_user(p, arg1, sizeof(target_sigset_t));
6096 #ifdef TARGET_NR_sigsuspend
6097 case TARGET_NR_sigsuspend:
6100 #if defined(TARGET_ALPHA)
6101 abi_ulong mask = arg1;
6102 target_to_host_old_sigset(&set, &mask);
6104 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6106 target_to_host_old_sigset(&set, p);
6107 unlock_user(p, arg1, 0);
6109 ret = get_errno(sigsuspend(&set));
6113 case TARGET_NR_rt_sigsuspend:
6116 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6118 target_to_host_sigset(&set, p);
6119 unlock_user(p, arg1, 0);
6120 ret = get_errno(sigsuspend(&set));
6123 case TARGET_NR_rt_sigtimedwait:
6126 struct timespec uts, *puts;
6129 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6131 target_to_host_sigset(&set, p);
6132 unlock_user(p, arg1, 0);
6135 target_to_host_timespec(puts, arg3);
6139 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6140 if (!is_error(ret) && arg2) {
6141 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6143 host_to_target_siginfo(p, &uinfo);
6144 unlock_user(p, arg2, sizeof(target_siginfo_t));
6148 case TARGET_NR_rt_sigqueueinfo:
6151 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
6153 target_to_host_siginfo(&uinfo, p);
6154 unlock_user(p, arg1, 0);
6155 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6158 #ifdef TARGET_NR_sigreturn
6159 case TARGET_NR_sigreturn:
5160 /* NOTE: ret is eax, so no transcoding needs to be done */
6161 ret = do_sigreturn(cpu_env);
6164 case TARGET_NR_rt_sigreturn:
6165 /* NOTE: ret is eax, so no transcoding needs to be done */
6166 ret = do_rt_sigreturn(cpu_env);
6168 case TARGET_NR_sethostname:
6169 if (!(p = lock_user_string(arg1)))
6171 ret = get_errno(sethostname(p, arg2));
6172 unlock_user(p, arg1, 0);
6174 case TARGET_NR_setrlimit:
6176 int resource = target_to_host_resource(arg1);
6177 struct target_rlimit *target_rlim;
6179 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6181 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6182 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6183 unlock_user_struct(target_rlim, arg2, 0);
6184 ret = get_errno(setrlimit(resource, &rlim));
6187 case TARGET_NR_getrlimit:
6189 int resource = target_to_host_resource(arg1);
6190 struct target_rlimit *target_rlim;
6193 ret = get_errno(getrlimit(resource, &rlim));
6194 if (!is_error(ret)) {
6195 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6197 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6198 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6199 unlock_user_struct(target_rlim, arg2, 1);
6203 case TARGET_NR_getrusage:
6205 struct rusage rusage;
6206 ret = get_errno(getrusage(arg1, &rusage));
6207 if (!is_error(ret)) {
6208 host_to_target_rusage(arg2, &rusage);
6212 case TARGET_NR_gettimeofday:
6215 ret = get_errno(gettimeofday(&tv, NULL));
6216 if (!is_error(ret)) {
6217 if (copy_to_user_timeval(arg1, &tv))
6222 case TARGET_NR_settimeofday:
6225 if (copy_from_user_timeval(&tv, arg1))
6227 ret = get_errno(settimeofday(&tv, NULL));
6230 #if defined(TARGET_NR_select)
6231 case TARGET_NR_select:
6232 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6233 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6236 struct target_sel_arg_struct *sel;
6237 abi_ulong inp, outp, exp, tvp;
6240 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6242 nsel = tswapal(sel->n);
6243 inp = tswapal(sel->inp);
6244 outp = tswapal(sel->outp);
6245 exp = tswapal(sel->exp);
6246 tvp = tswapal(sel->tvp);
6247 unlock_user_struct(sel, arg1, 0);
6248 ret = do_select(nsel, inp, outp, exp, tvp);
6253 #ifdef TARGET_NR_pselect6
6254 case TARGET_NR_pselect6:
6256 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6257 fd_set rfds, wfds, efds;
6258 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6259 struct timespec ts, *ts_ptr;
6262 * The 6th arg is actually two args smashed together,
6263 * so we cannot use the C library.
6271 abi_ulong arg_sigset, arg_sigsize, *arg7;
6272 target_sigset_t *target_sigset;
6280 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6284 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6288 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6294 * This takes a timespec, and not a timeval, so we cannot
6295 * use the do_select() helper ...
6298 if (target_to_host_timespec(&ts, ts_addr)) {
6306 /* Extract the two packed args for the sigset */
6309 sig.size = _NSIG / 8;
6311 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6315 arg_sigset = tswapal(arg7[0]);
6316 arg_sigsize = tswapal(arg7[1]);
6317 unlock_user(arg7, arg6, 0);
6321 if (arg_sigsize != sizeof(*target_sigset)) {
6322 /* Like the kernel, we enforce correct size sigsets */
6323 ret = -TARGET_EINVAL;
6326 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6327 sizeof(*target_sigset), 1);
6328 if (!target_sigset) {
6331 target_to_host_sigset(&set, target_sigset);
6332 unlock_user(target_sigset, arg_sigset, 0);
6340 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6343 if (!is_error(ret)) {
6344 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6346 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6348 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6351 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6357 case TARGET_NR_symlink:
6360 p = lock_user_string(arg1);
6361 p2 = lock_user_string(arg2);
6363 ret = -TARGET_EFAULT;
6365 ret = get_errno(symlink(p, p2));
6366 unlock_user(p2, arg2, 0);
6367 unlock_user(p, arg1, 0);
6370 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
6371 case TARGET_NR_symlinkat:
6374 p = lock_user_string(arg1);
6375 p2 = lock_user_string(arg3);
6377 ret = -TARGET_EFAULT;
6379 ret = get_errno(sys_symlinkat(p, arg2, p2));
6380 unlock_user(p2, arg3, 0);
6381 unlock_user(p, arg1, 0);
6385 #ifdef TARGET_NR_oldlstat
6386 case TARGET_NR_oldlstat:
6389 case TARGET_NR_readlink:
6392 p = lock_user_string(arg1);
6393 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6395 ret = -TARGET_EFAULT;
6397 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
6398 char real[PATH_MAX];
6399 temp = realpath(exec_path,real);
6400 ret = (temp==NULL) ? get_errno(-1) : strlen(real) ;
6401 snprintf((char *)p2, arg3, "%s", real);
6404 ret = get_errno(readlink(path(p), p2, arg3));
6406 unlock_user(p2, arg2, ret);
6407 unlock_user(p, arg1, 0);
6410 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6411 case TARGET_NR_readlinkat:
6414 p = lock_user_string(arg2);
6415 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6417 ret = -TARGET_EFAULT;
6419 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
6420 unlock_user(p2, arg3, ret);
6421 unlock_user(p, arg2, 0);
6425 #ifdef TARGET_NR_uselib
6426 case TARGET_NR_uselib:
6429 #ifdef TARGET_NR_swapon
6430 case TARGET_NR_swapon:
6431 if (!(p = lock_user_string(arg1)))
6433 ret = get_errno(swapon(p, arg2));
6434 unlock_user(p, arg1, 0);
6437 case TARGET_NR_reboot:
6438 if (!(p = lock_user_string(arg4)))
6440 ret = reboot(arg1, arg2, arg3, p);
6441 unlock_user(p, arg4, 0);
6443 #ifdef TARGET_NR_readdir
6444 case TARGET_NR_readdir:
6447 #ifdef TARGET_NR_mmap
6448 case TARGET_NR_mmap:
6449 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6450 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6451 || defined(TARGET_S390X)
6454 abi_ulong v1, v2, v3, v4, v5, v6;
6455 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6463 unlock_user(v, arg1, 0);
6464 ret = get_errno(target_mmap(v1, v2, v3,
6465 target_to_host_bitmask(v4, mmap_flags_tbl),
6469 ret = get_errno(target_mmap(arg1, arg2, arg3,
6470 target_to_host_bitmask(arg4, mmap_flags_tbl),
6476 #ifdef TARGET_NR_mmap2
6477 case TARGET_NR_mmap2:
6479 #define MMAP_SHIFT 12
6481 ret = get_errno(target_mmap(arg1, arg2, arg3,
6482 target_to_host_bitmask(arg4, mmap_flags_tbl),
6484 arg6 << MMAP_SHIFT));
6487 case TARGET_NR_munmap:
6488 ret = get_errno(target_munmap(arg1, arg2));
6490 case TARGET_NR_mprotect:
6492 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6493 /* Special hack to detect libc making the stack executable. */
6494 if ((arg3 & PROT_GROWSDOWN)
6495 && arg1 >= ts->info->stack_limit
6496 && arg1 <= ts->info->start_stack) {
6497 arg3 &= ~PROT_GROWSDOWN;
6498 arg2 = arg2 + arg1 - ts->info->stack_limit;
6499 arg1 = ts->info->stack_limit;
6502 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6504 #ifdef TARGET_NR_mremap
6505 case TARGET_NR_mremap:
6506 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6509 /* ??? msync/mlock/munlock are broken for softmmu. */
6510 #ifdef TARGET_NR_msync
6511 case TARGET_NR_msync:
6512 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6515 #ifdef TARGET_NR_mlock
6516 case TARGET_NR_mlock:
6517 ret = get_errno(mlock(g2h(arg1), arg2));
6520 #ifdef TARGET_NR_munlock
6521 case TARGET_NR_munlock:
6522 ret = get_errno(munlock(g2h(arg1), arg2));
6525 #ifdef TARGET_NR_mlockall
6526 case TARGET_NR_mlockall:
6527 ret = get_errno(mlockall(arg1));
6530 #ifdef TARGET_NR_munlockall
6531 case TARGET_NR_munlockall:
6532 ret = get_errno(munlockall());
6535 case TARGET_NR_truncate:
6536 if (!(p = lock_user_string(arg1)))
6538 ret = get_errno(truncate(p, arg2));
6539 unlock_user(p, arg1, 0);
6541 case TARGET_NR_ftruncate:
6542 ret = get_errno(ftruncate(arg1, arg2));
6544 case TARGET_NR_fchmod:
6545 ret = get_errno(fchmod(arg1, arg2));
6547 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6548 case TARGET_NR_fchmodat:
6549 if (!(p = lock_user_string(arg2)))
6551 ret = get_errno(sys_fchmodat(arg1, p, arg3));
6552 unlock_user(p, arg2, 0);
6555 case TARGET_NR_getpriority:
6556 /* Note that negative values are valid for getpriority, so we must
6557 differentiate based on errno settings. */
6559 ret = getpriority(arg1, arg2);
6560 if (ret == -1 && errno != 0) {
6561 ret = -host_to_target_errno(errno);
6565 /* Return value is the unbiased priority. Signal no error. */
6566 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6568 /* Return value is a biased priority to avoid negative numbers. */
6572 case TARGET_NR_setpriority:
6573 ret = get_errno(setpriority(arg1, arg2, arg3));
6575 #ifdef TARGET_NR_profil
6576 case TARGET_NR_profil:
6579 case TARGET_NR_statfs:
6580 if (!(p = lock_user_string(arg1)))
6582 ret = get_errno(statfs(path(p), &stfs));
6583 unlock_user(p, arg1, 0);
6585 if (!is_error(ret)) {
6586 struct target_statfs *target_stfs;
6588 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6590 __put_user(stfs.f_type, &target_stfs->f_type);
6591 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6592 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6593 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6594 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6595 __put_user(stfs.f_files, &target_stfs->f_files);
6596 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6597 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6598 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6599 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6600 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6601 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6602 unlock_user_struct(target_stfs, arg2, 1);
6605 case TARGET_NR_fstatfs:
6606 ret = get_errno(fstatfs(arg1, &stfs));
6607 goto convert_statfs;
6608 #ifdef TARGET_NR_statfs64
6609 case TARGET_NR_statfs64:
6610 if (!(p = lock_user_string(arg1)))
6612 ret = get_errno(statfs(path(p), &stfs));
6613 unlock_user(p, arg1, 0);
6615 if (!is_error(ret)) {
6616 struct target_statfs64 *target_stfs;
6618 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6620 __put_user(stfs.f_type, &target_stfs->f_type);
6621 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6622 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6623 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6624 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6625 __put_user(stfs.f_files, &target_stfs->f_files);
6626 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6627 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6628 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6629 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6630 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6631 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6632 unlock_user_struct(target_stfs, arg3, 1);
6635 case TARGET_NR_fstatfs64:
6636 ret = get_errno(fstatfs(arg1, &stfs));
6637 goto convert_statfs64;
6639 #ifdef TARGET_NR_ioperm
6640 case TARGET_NR_ioperm:
6643 #ifdef TARGET_NR_socketcall
6644 case TARGET_NR_socketcall:
6645 ret = do_socketcall(arg1, arg2);
6648 #ifdef TARGET_NR_accept
6649 case TARGET_NR_accept:
6650 ret = do_accept(arg1, arg2, arg3);
6653 #ifdef TARGET_NR_bind
6654 case TARGET_NR_bind:
6655 ret = do_bind(arg1, arg2, arg3);
6658 #ifdef TARGET_NR_connect
6659 case TARGET_NR_connect:
6660 ret = do_connect(arg1, arg2, arg3);
6663 #ifdef TARGET_NR_getpeername
6664 case TARGET_NR_getpeername:
6665 ret = do_getpeername(arg1, arg2, arg3);
6668 #ifdef TARGET_NR_getsockname
6669 case TARGET_NR_getsockname:
6670 ret = do_getsockname(arg1, arg2, arg3);
6673 #ifdef TARGET_NR_getsockopt
6674 case TARGET_NR_getsockopt:
6675 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6678 #ifdef TARGET_NR_listen
6679 case TARGET_NR_listen:
6680 ret = get_errno(listen(arg1, arg2));
6683 #ifdef TARGET_NR_recv
6684 case TARGET_NR_recv:
6685 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6688 #ifdef TARGET_NR_recvfrom
6689 case TARGET_NR_recvfrom:
6690 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6693 #ifdef TARGET_NR_recvmsg
6694 case TARGET_NR_recvmsg:
6695 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6698 #ifdef TARGET_NR_send
6699 case TARGET_NR_send:
6700 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6703 #ifdef TARGET_NR_sendmsg
6704 case TARGET_NR_sendmsg:
6705 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6708 #ifdef TARGET_NR_sendto
6709 case TARGET_NR_sendto:
6710 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6713 #ifdef TARGET_NR_shutdown
6714 case TARGET_NR_shutdown:
6715 ret = get_errno(shutdown(arg1, arg2));
6718 #ifdef TARGET_NR_socket
6719 case TARGET_NR_socket:
6720 ret = do_socket(arg1, arg2, arg3);
6723 #ifdef TARGET_NR_socketpair
6724 case TARGET_NR_socketpair:
6725 ret = do_socketpair(arg1, arg2, arg3, arg4);
6728 #ifdef TARGET_NR_setsockopt
6729 case TARGET_NR_setsockopt:
6730 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6734 case TARGET_NR_syslog:
6735 if (!(p = lock_user_string(arg2)))
6737 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6738 unlock_user(p, arg2, 0);
6741 case TARGET_NR_setitimer:
6743 struct itimerval value, ovalue, *pvalue;
6747 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6748 || copy_from_user_timeval(&pvalue->it_value,
6749 arg2 + sizeof(struct target_timeval)))
6754 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6755 if (!is_error(ret) && arg3) {
6756 if (copy_to_user_timeval(arg3,
6757 &ovalue.it_interval)
6758 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6764 case TARGET_NR_getitimer:
6766 struct itimerval value;
6768 ret = get_errno(getitimer(arg1, &value));
6769 if (!is_error(ret) && arg2) {
6770 if (copy_to_user_timeval(arg2,
6772 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6778 case TARGET_NR_stat:
6779 if (!(p = lock_user_string(arg1)))
6781 ret = get_errno(stat(path(p), &st));
6782 unlock_user(p, arg1, 0);
6784 case TARGET_NR_lstat:
6785 if (!(p = lock_user_string(arg1)))
6787 ret = get_errno(lstat(path(p), &st));
6788 unlock_user(p, arg1, 0);
6790 case TARGET_NR_fstat:
6792 ret = get_errno(fstat(arg1, &st));
6794 if (!is_error(ret)) {
6795 struct target_stat *target_st;
6797 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6799 memset(target_st, 0, sizeof(*target_st));
6800 __put_user(st.st_dev, &target_st->st_dev);
6801 __put_user(st.st_ino, &target_st->st_ino);
6802 __put_user(st.st_mode, &target_st->st_mode);
6803 __put_user(st.st_uid, &target_st->st_uid);
6804 __put_user(st.st_gid, &target_st->st_gid);
6805 __put_user(st.st_nlink, &target_st->st_nlink);
6806 __put_user(st.st_rdev, &target_st->st_rdev);
6807 __put_user(st.st_size, &target_st->st_size);
6808 __put_user(st.st_blksize, &target_st->st_blksize);
6809 __put_user(st.st_blocks, &target_st->st_blocks);
6810 __put_user(st.st_atime, &target_st->target_st_atime);
6811 __put_user(st.st_mtime, &target_st->target_st_mtime);
6812 __put_user(st.st_ctime, &target_st->target_st_ctime);
6813 unlock_user_struct(target_st, arg2, 1);
6817 #ifdef TARGET_NR_olduname
6818 case TARGET_NR_olduname:
6821 #ifdef TARGET_NR_iopl
6822 case TARGET_NR_iopl:
6825 case TARGET_NR_vhangup:
6826 ret = get_errno(vhangup());
6828 #ifdef TARGET_NR_idle
6829 case TARGET_NR_idle:
6832 #ifdef TARGET_NR_syscall
6833 case TARGET_NR_syscall:
6834 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6835 arg6, arg7, arg8, 0);
6838 case TARGET_NR_wait4:
6841 abi_long status_ptr = arg2;
6842 struct rusage rusage, *rusage_ptr;
6843 abi_ulong target_rusage = arg4;
6845 rusage_ptr = &rusage;
6848 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6849 if (!is_error(ret)) {
6850 if (status_ptr && ret) {
6851 status = host_to_target_waitstatus(status);
6852 if (put_user_s32(status, status_ptr))
6856 host_to_target_rusage(target_rusage, &rusage);
6860 #ifdef TARGET_NR_swapoff
6861 case TARGET_NR_swapoff:
6862 if (!(p = lock_user_string(arg1)))
6864 ret = get_errno(swapoff(p));
6865 unlock_user(p, arg1, 0);
6868 case TARGET_NR_sysinfo:
6870 struct target_sysinfo *target_value;
6871 struct sysinfo value;
6872 ret = get_errno(sysinfo(&value));
6873 if (!is_error(ret) && arg1)
6875 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6877 __put_user(value.uptime, &target_value->uptime);
6878 __put_user(value.loads[0], &target_value->loads[0]);
6879 __put_user(value.loads[1], &target_value->loads[1]);
6880 __put_user(value.loads[2], &target_value->loads[2]);
6881 __put_user(value.totalram, &target_value->totalram);
6882 __put_user(value.freeram, &target_value->freeram);
6883 __put_user(value.sharedram, &target_value->sharedram);
6884 __put_user(value.bufferram, &target_value->bufferram);
6885 __put_user(value.totalswap, &target_value->totalswap);
6886 __put_user(value.freeswap, &target_value->freeswap);
6887 __put_user(value.procs, &target_value->procs);
6888 __put_user(value.totalhigh, &target_value->totalhigh);
6889 __put_user(value.freehigh, &target_value->freehigh);
6890 __put_user(value.mem_unit, &target_value->mem_unit);
6891 unlock_user_struct(target_value, arg1, 1);
6895 #ifdef TARGET_NR_ipc
6897 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6900 #ifdef TARGET_NR_semget
6901 case TARGET_NR_semget:
6902 ret = get_errno(semget(arg1, arg2, arg3));
6905 #ifdef TARGET_NR_semop
6906 case TARGET_NR_semop:
6907 ret = get_errno(do_semop(arg1, arg2, arg3));
6910 #ifdef TARGET_NR_semctl
6911 case TARGET_NR_semctl:
6912 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6915 #ifdef TARGET_NR_msgctl
6916 case TARGET_NR_msgctl:
6917 ret = do_msgctl(arg1, arg2, arg3);
6920 #ifdef TARGET_NR_msgget
6921 case TARGET_NR_msgget:
6922 ret = get_errno(msgget(arg1, arg2));
6925 #ifdef TARGET_NR_msgrcv
6926 case TARGET_NR_msgrcv:
6927 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6930 #ifdef TARGET_NR_msgsnd
6931 case TARGET_NR_msgsnd:
6932 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6935 #ifdef TARGET_NR_shmget
6936 case TARGET_NR_shmget:
6937 ret = get_errno(shmget(arg1, arg2, arg3));
6940 #ifdef TARGET_NR_shmctl
6941 case TARGET_NR_shmctl:
6942 ret = do_shmctl(arg1, arg2, arg3);
6945 #ifdef TARGET_NR_shmat
6946 case TARGET_NR_shmat:
6947 ret = do_shmat(arg1, arg2, arg3);
6950 #ifdef TARGET_NR_shmdt
6951 case TARGET_NR_shmdt:
6952 ret = do_shmdt(arg1);
6955 case TARGET_NR_fsync:
6956 ret = get_errno(fsync(arg1));
6958 case TARGET_NR_clone:
6959 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6960 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6961 #elif defined(TARGET_CRIS)
6962 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6963 #elif defined(TARGET_MICROBLAZE)
6964 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
6965 #elif defined(TARGET_S390X)
6966 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6968 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6971 #ifdef __NR_exit_group
6972 /* new thread calls */
6973 case TARGET_NR_exit_group:
6977 gdb_exit(cpu_env, arg1);
6978 ret = get_errno(exit_group(arg1));
6981 case TARGET_NR_setdomainname:
6982 if (!(p = lock_user_string(arg1)))
6984 ret = get_errno(setdomainname(p, arg2));
6985 unlock_user(p, arg1, 0);
6987 case TARGET_NR_uname:
6988 /* no need to transcode because we use the linux syscall */
6990 struct new_utsname * buf;
6992 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6994 ret = get_errno(sys_uname(buf));
6995 if (!is_error(ret)) {
6996 /* Overwrite the native machine name with whatever is being
6998 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6999 /* Allow the user to override the reported release. */
7000 if (qemu_uname_release && *qemu_uname_release)
7001 strcpy (buf->release, qemu_uname_release);
7003 unlock_user_struct(buf, arg1, 1);
7007 case TARGET_NR_modify_ldt:
7008 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7010 #if !defined(TARGET_X86_64)
7011 case TARGET_NR_vm86old:
7013 case TARGET_NR_vm86:
7014 ret = do_vm86(cpu_env, arg1, arg2);
7018 case TARGET_NR_adjtimex:
7020 #ifdef TARGET_NR_create_module
7021 case TARGET_NR_create_module:
7023 case TARGET_NR_init_module:
7024 case TARGET_NR_delete_module:
7025 #ifdef TARGET_NR_get_kernel_syms
7026 case TARGET_NR_get_kernel_syms:
7029 case TARGET_NR_quotactl:
7031 case TARGET_NR_getpgid:
7032 ret = get_errno(getpgid(arg1));
7034 case TARGET_NR_fchdir:
7035 ret = get_errno(fchdir(arg1));
7037 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7038 case TARGET_NR_bdflush:
7041 #ifdef TARGET_NR_sysfs
7042 case TARGET_NR_sysfs:
7045 case TARGET_NR_personality:
7046 ret = get_errno(personality(arg1));
7048 #ifdef TARGET_NR_afs_syscall
7049 case TARGET_NR_afs_syscall:
7052 #ifdef TARGET_NR__llseek /* Not on alpha */
7053 case TARGET_NR__llseek:
7056 #if !defined(__NR_llseek)
7057 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7059 ret = get_errno(res);
7064 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7066 if ((ret == 0) && put_user_s64(res, arg4)) {
7072 case TARGET_NR_getdents:
7073 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7075 struct target_dirent *target_dirp;
7076 struct linux_dirent *dirp;
7077 abi_long count = arg3;
7079 dirp = malloc(count);
7081 ret = -TARGET_ENOMEM;
7085 ret = get_errno(sys_getdents(arg1, dirp, count));
7086 if (!is_error(ret)) {
7087 struct linux_dirent *de;
7088 struct target_dirent *tde;
7090 int reclen, treclen;
7091 int count1, tnamelen;
7095 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7099 reclen = de->d_reclen;
7100 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7101 assert(tnamelen >= 0);
7102 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7103 assert(count1 + treclen <= count);
7104 tde->d_reclen = tswap16(treclen);
7105 tde->d_ino = tswapal(de->d_ino);
7106 tde->d_off = tswapal(de->d_off);
7107 memcpy(tde->d_name, de->d_name, tnamelen);
7108 de = (struct linux_dirent *)((char *)de + reclen);
7110 tde = (struct target_dirent *)((char *)tde + treclen);
7114 unlock_user(target_dirp, arg2, ret);
7120 struct linux_dirent *dirp;
7121 abi_long count = arg3;
7123 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7125 ret = get_errno(sys_getdents(arg1, dirp, count));
7126 if (!is_error(ret)) {
7127 struct linux_dirent *de;
7132 reclen = de->d_reclen;
7135 de->d_reclen = tswap16(reclen);
7136 tswapls(&de->d_ino);
7137 tswapls(&de->d_off);
7138 de = (struct linux_dirent *)((char *)de + reclen);
7142 unlock_user(dirp, arg2, ret);
7146 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7147 case TARGET_NR_getdents64:
7149 struct linux_dirent64 *dirp;
7150 abi_long count = arg3;
7151 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7153 ret = get_errno(sys_getdents64(arg1, dirp, count));
7154 if (!is_error(ret)) {
7155 struct linux_dirent64 *de;
7160 reclen = de->d_reclen;
7163 de->d_reclen = tswap16(reclen);
7164 tswap64s((uint64_t *)&de->d_ino);
7165 tswap64s((uint64_t *)&de->d_off);
7166 de = (struct linux_dirent64 *)((char *)de + reclen);
7170 unlock_user(dirp, arg2, ret);
7173 #endif /* TARGET_NR_getdents64 */
7174 #if defined(TARGET_NR__newselect)
7175 case TARGET_NR__newselect:
7176 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7179 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7180 # ifdef TARGET_NR_poll
7181 case TARGET_NR_poll:
7183 # ifdef TARGET_NR_ppoll
7184 case TARGET_NR_ppoll:
7187 struct target_pollfd *target_pfd;
7188 unsigned int nfds = arg2;
7193 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7197 pfd = alloca(sizeof(struct pollfd) * nfds);
7198 for(i = 0; i < nfds; i++) {
7199 pfd[i].fd = tswap32(target_pfd[i].fd);
7200 pfd[i].events = tswap16(target_pfd[i].events);
7203 # ifdef TARGET_NR_ppoll
7204 if (num == TARGET_NR_ppoll) {
7205 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7206 target_sigset_t *target_set;
7207 sigset_t _set, *set = &_set;
7210 if (target_to_host_timespec(timeout_ts, arg3)) {
7211 unlock_user(target_pfd, arg1, 0);
7219 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7221 unlock_user(target_pfd, arg1, 0);
7224 target_to_host_sigset(set, target_set);
7229 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7231 if (!is_error(ret) && arg3) {
7232 host_to_target_timespec(arg3, timeout_ts);
7235 unlock_user(target_set, arg4, 0);
7239 ret = get_errno(poll(pfd, nfds, timeout));
7241 if (!is_error(ret)) {
7242 for(i = 0; i < nfds; i++) {
7243 target_pfd[i].revents = tswap16(pfd[i].revents);
7246 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7250 case TARGET_NR_flock:
7251 /* NOTE: the flock constant seems to be the same for every
7253 ret = get_errno(flock(arg1, arg2));
7255 case TARGET_NR_readv:
7257 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7259 ret = get_errno(readv(arg1, vec, arg3));
7260 unlock_iovec(vec, arg2, arg3, 1);
7262 ret = -host_to_target_errno(errno);
7266 case TARGET_NR_writev:
7268 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7270 ret = get_errno(writev(arg1, vec, arg3));
7271 unlock_iovec(vec, arg2, arg3, 0);
7273 ret = -host_to_target_errno(errno);
7277 case TARGET_NR_getsid:
7278 ret = get_errno(getsid(arg1));
7280 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7281 case TARGET_NR_fdatasync:
7282 ret = get_errno(fdatasync(arg1));
7285 case TARGET_NR__sysctl:
7286 /* We don't implement this, but ENOTDIR is always a safe
7288 ret = -TARGET_ENOTDIR;
7290 case TARGET_NR_sched_getaffinity:
7292 unsigned int mask_size;
7293 unsigned long *mask;
7296 * sched_getaffinity needs multiples of ulong, so need to take
7297 * care of mismatches between target ulong and host ulong sizes.
7299 if (arg2 & (sizeof(abi_ulong) - 1)) {
7300 ret = -TARGET_EINVAL;
7303 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7305 mask = alloca(mask_size);
7306 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7308 if (!is_error(ret)) {
7309 if (copy_to_user(arg3, mask, ret)) {
7315 case TARGET_NR_sched_setaffinity:
7317 unsigned int mask_size;
7318 unsigned long *mask;
7321 * sched_setaffinity needs multiples of ulong, so need to take
7322 * care of mismatches between target ulong and host ulong sizes.
7324 if (arg2 & (sizeof(abi_ulong) - 1)) {
7325 ret = -TARGET_EINVAL;
7328 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7330 mask = alloca(mask_size);
7331 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7334 memcpy(mask, p, arg2);
7335 unlock_user_struct(p, arg2, 0);
7337 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7340 case TARGET_NR_sched_setparam:
7342 struct sched_param *target_schp;
7343 struct sched_param schp;
7345 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7347 schp.sched_priority = tswap32(target_schp->sched_priority);
7348 unlock_user_struct(target_schp, arg2, 0);
7349 ret = get_errno(sched_setparam(arg1, &schp));
7352 case TARGET_NR_sched_getparam:
7354 struct sched_param *target_schp;
7355 struct sched_param schp;
7356 ret = get_errno(sched_getparam(arg1, &schp));
7357 if (!is_error(ret)) {
7358 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7360 target_schp->sched_priority = tswap32(schp.sched_priority);
7361 unlock_user_struct(target_schp, arg2, 1);
7365 case TARGET_NR_sched_setscheduler:
7367 struct sched_param *target_schp;
7368 struct sched_param schp;
7369 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7371 schp.sched_priority = tswap32(target_schp->sched_priority);
7372 unlock_user_struct(target_schp, arg3, 0);
7373 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7376 case TARGET_NR_sched_getscheduler:
7377 ret = get_errno(sched_getscheduler(arg1));
7379 case TARGET_NR_sched_yield:
7380 ret = get_errno(sched_yield());
7382 case TARGET_NR_sched_get_priority_max:
7383 ret = get_errno(sched_get_priority_max(arg1));
7385 case TARGET_NR_sched_get_priority_min:
7386 ret = get_errno(sched_get_priority_min(arg1));
7388 case TARGET_NR_sched_rr_get_interval:
7391 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7392 if (!is_error(ret)) {
7393 host_to_target_timespec(arg2, &ts);
7397 case TARGET_NR_nanosleep:
7399 struct timespec req, rem;
7400 target_to_host_timespec(&req, arg1);
7401 ret = get_errno(nanosleep(&req, &rem));
7402 if (is_error(ret) && arg2) {
7403 host_to_target_timespec(arg2, &rem);
7407 #ifdef TARGET_NR_query_module
7408 case TARGET_NR_query_module:
7411 #ifdef TARGET_NR_nfsservctl
7412 case TARGET_NR_nfsservctl:
7415 case TARGET_NR_prctl:
7417 case PR_GET_PDEATHSIG:
7420 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7421 if (!is_error(ret) && arg2
7422 && put_user_ual(deathsig, arg2)) {
7430 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7434 ret = get_errno(prctl(arg1, (unsigned long)name,
7436 unlock_user(name, arg2, 16);
7441 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7445 ret = get_errno(prctl(arg1, (unsigned long)name,
7447 unlock_user(name, arg2, 0);
7452 /* Most prctl options have no pointer arguments */
7453 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7457 #ifdef TARGET_NR_arch_prctl
7458 case TARGET_NR_arch_prctl:
7459 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7460 ret = do_arch_prctl(cpu_env, arg1, arg2);
7466 #ifdef TARGET_NR_pread64
7467 case TARGET_NR_pread64:
7468 if (regpairs_aligned(cpu_env)) {
7472 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7474 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7475 unlock_user(p, arg2, ret);
7477 case TARGET_NR_pwrite64:
7478 if (regpairs_aligned(cpu_env)) {
7482 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7484 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7485 unlock_user(p, arg2, 0);
7488 case TARGET_NR_getcwd:
7489 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7491 ret = get_errno(sys_getcwd1(p, arg2));
7492 unlock_user(p, arg1, ret);
7494 case TARGET_NR_capget:
7496 case TARGET_NR_capset:
7498 case TARGET_NR_sigaltstack:
7499 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7500 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7501 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7502 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7507 case TARGET_NR_sendfile:
7509 #ifdef TARGET_NR_getpmsg
7510 case TARGET_NR_getpmsg:
7513 #ifdef TARGET_NR_putpmsg
7514 case TARGET_NR_putpmsg:
7517 #ifdef TARGET_NR_vfork
7518 case TARGET_NR_vfork:
7519 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7523 #ifdef TARGET_NR_ugetrlimit
7524 case TARGET_NR_ugetrlimit:
7527 int resource = target_to_host_resource(arg1);
7528 ret = get_errno(getrlimit(resource, &rlim));
7529 if (!is_error(ret)) {
7530 struct target_rlimit *target_rlim;
7531 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7533 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7534 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7535 unlock_user_struct(target_rlim, arg2, 1);
7540 #ifdef TARGET_NR_truncate64
7541 case TARGET_NR_truncate64:
7542 if (!(p = lock_user_string(arg1)))
7544 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7545 unlock_user(p, arg1, 0);
7548 #ifdef TARGET_NR_ftruncate64
7549 case TARGET_NR_ftruncate64:
7550 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7553 #ifdef TARGET_NR_stat64
7554 case TARGET_NR_stat64:
7555 if (!(p = lock_user_string(arg1)))
7557 ret = get_errno(stat(path(p), &st));
7558 unlock_user(p, arg1, 0);
7560 ret = host_to_target_stat64(cpu_env, arg2, &st);
7563 #ifdef TARGET_NR_lstat64
7564 case TARGET_NR_lstat64:
7565 if (!(p = lock_user_string(arg1)))
7567 ret = get_errno(lstat(path(p), &st));
7568 unlock_user(p, arg1, 0);
7570 ret = host_to_target_stat64(cpu_env, arg2, &st);
7573 #ifdef TARGET_NR_fstat64
7574 case TARGET_NR_fstat64:
7575 ret = get_errno(fstat(arg1, &st));
7577 ret = host_to_target_stat64(cpu_env, arg2, &st);
7580 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7581 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7582 #ifdef TARGET_NR_fstatat64
7583 case TARGET_NR_fstatat64:
7585 #ifdef TARGET_NR_newfstatat
7586 case TARGET_NR_newfstatat:
7588 if (!(p = lock_user_string(arg2)))
7590 #ifdef __NR_fstatat64
7591 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
7593 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
7596 ret = host_to_target_stat64(cpu_env, arg3, &st);
7599 case TARGET_NR_lchown:
7600 if (!(p = lock_user_string(arg1)))
7602 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7603 unlock_user(p, arg1, 0);
7605 #ifdef TARGET_NR_getuid
7606 case TARGET_NR_getuid:
7607 ret = get_errno(high2lowuid(getuid()));
7610 #ifdef TARGET_NR_getgid
7611 case TARGET_NR_getgid:
7612 ret = get_errno(high2lowgid(getgid()));
7615 #ifdef TARGET_NR_geteuid
7616 case TARGET_NR_geteuid:
7617 ret = get_errno(high2lowuid(geteuid()));
7620 #ifdef TARGET_NR_getegid
7621 case TARGET_NR_getegid:
7622 ret = get_errno(high2lowgid(getegid()));
7625 case TARGET_NR_setreuid:
7626 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7628 case TARGET_NR_setregid:
7629 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7631 case TARGET_NR_getgroups:
7633 int gidsetsize = arg1;
7634 target_id *target_grouplist;
7638 grouplist = alloca(gidsetsize * sizeof(gid_t));
7639 ret = get_errno(getgroups(gidsetsize, grouplist));
7640 if (gidsetsize == 0)
7642 if (!is_error(ret)) {
7643 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
7644 if (!target_grouplist)
7646 for(i = 0;i < ret; i++)
7647 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7648 unlock_user(target_grouplist, arg2, gidsetsize * 2);
7652 case TARGET_NR_setgroups:
7654 int gidsetsize = arg1;
7655 target_id *target_grouplist;
7659 grouplist = alloca(gidsetsize * sizeof(gid_t));
7660 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7661 if (!target_grouplist) {
7662 ret = -TARGET_EFAULT;
7665 for(i = 0;i < gidsetsize; i++)
7666 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7667 unlock_user(target_grouplist, arg2, 0);
7668 ret = get_errno(setgroups(gidsetsize, grouplist));
7671 case TARGET_NR_fchown:
7672 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7674 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7675 case TARGET_NR_fchownat:
7676 if (!(p = lock_user_string(arg2)))
7678 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7679 unlock_user(p, arg2, 0);
7682 #ifdef TARGET_NR_setresuid
7683 case TARGET_NR_setresuid:
7684 ret = get_errno(setresuid(low2highuid(arg1),
7686 low2highuid(arg3)));
7689 #ifdef TARGET_NR_getresuid
7690 case TARGET_NR_getresuid:
7692 uid_t ruid, euid, suid;
7693 ret = get_errno(getresuid(&ruid, &euid, &suid));
7694 if (!is_error(ret)) {
7695 if (put_user_u16(high2lowuid(ruid), arg1)
7696 || put_user_u16(high2lowuid(euid), arg2)
7697 || put_user_u16(high2lowuid(suid), arg3))
7703 #ifdef TARGET_NR_getresgid
7704 case TARGET_NR_setresgid:
7705 ret = get_errno(setresgid(low2highgid(arg1),
7707 low2highgid(arg3)));
7710 #ifdef TARGET_NR_getresgid
7711 case TARGET_NR_getresgid:
7713 gid_t rgid, egid, sgid;
7714 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7715 if (!is_error(ret)) {
7716 if (put_user_u16(high2lowgid(rgid), arg1)
7717 || put_user_u16(high2lowgid(egid), arg2)
7718 || put_user_u16(high2lowgid(sgid), arg3))
7724 case TARGET_NR_chown:
7725 if (!(p = lock_user_string(arg1)))
7727 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7728 unlock_user(p, arg1, 0);
7730 case TARGET_NR_setuid:
7731 ret = get_errno(setuid(low2highuid(arg1)));
7733 case TARGET_NR_setgid:
7734 ret = get_errno(setgid(low2highgid(arg1)));
7736 case TARGET_NR_setfsuid:
7737 ret = get_errno(setfsuid(arg1));
7739 case TARGET_NR_setfsgid:
7740 ret = get_errno(setfsgid(arg1));
7743 #ifdef TARGET_NR_lchown32
7744 case TARGET_NR_lchown32:
7745 if (!(p = lock_user_string(arg1)))
7747 ret = get_errno(lchown(p, arg2, arg3));
7748 unlock_user(p, arg1, 0);
7751 #ifdef TARGET_NR_getuid32
7752 case TARGET_NR_getuid32:
7753 ret = get_errno(getuid());
7757 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7758 /* Alpha specific */
7759 case TARGET_NR_getxuid:
7763 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
7765 ret = get_errno(getuid());
7768 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7769 /* Alpha specific */
7770 case TARGET_NR_getxgid:
7774 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
7776 ret = get_errno(getgid());
7779 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7780 /* Alpha specific */
7781 case TARGET_NR_osf_getsysinfo:
7782 ret = -TARGET_EOPNOTSUPP;
7784 case TARGET_GSI_IEEE_FP_CONTROL:
7786 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7788 /* Copied from linux ieee_fpcr_to_swcr. */
7789 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7790 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7791 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7792 | SWCR_TRAP_ENABLE_DZE
7793 | SWCR_TRAP_ENABLE_OVF);
7794 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7795 | SWCR_TRAP_ENABLE_INE);
7796 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7797 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7799 if (put_user_u64 (swcr, arg2))
7805 /* case GSI_IEEE_STATE_AT_SIGNAL:
7806 -- Not implemented in linux kernel.
7808 -- Retrieves current unaligned access state; not much used.
7810 -- Retrieves implver information; surely not used.
7812 -- Grabs a copy of the HWRPB; surely not used.
7817 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7818 /* Alpha specific */
7819 case TARGET_NR_osf_setsysinfo:
7820 ret = -TARGET_EOPNOTSUPP;
7822 case TARGET_SSI_IEEE_FP_CONTROL:
7824 uint64_t swcr, fpcr, orig_fpcr;
7826 if (get_user_u64 (swcr, arg2)) {
7829 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7830 fpcr = orig_fpcr & FPCR_DYN_MASK;
7832 /* Copied from linux ieee_swcr_to_fpcr. */
7833 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7834 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7835 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7836 | SWCR_TRAP_ENABLE_DZE
7837 | SWCR_TRAP_ENABLE_OVF)) << 48;
7838 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7839 | SWCR_TRAP_ENABLE_INE)) << 57;
7840 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7841 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7843 cpu_alpha_store_fpcr(cpu_env, fpcr);
7848 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7850 uint64_t exc, fpcr, orig_fpcr;
7853 if (get_user_u64(exc, arg2)) {
7857 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7859 /* We only add to the exception status here. */
7860 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
7862 cpu_alpha_store_fpcr(cpu_env, fpcr);
7865 /* Old exceptions are not signaled. */
7866 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7868 /* If any exceptions are set by this call,
7869 and are unmasked, send a signal. */
7871 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
7872 si_code = TARGET_FPE_FLTRES;
7874 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
7875 si_code = TARGET_FPE_FLTUND;
7877 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
7878 si_code = TARGET_FPE_FLTOVF;
7880 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
7881 si_code = TARGET_FPE_FLTDIV;
7883 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
7884 si_code = TARGET_FPE_FLTINV;
7887 target_siginfo_t info;
7888 info.si_signo = SIGFPE;
7890 info.si_code = si_code;
7891 info._sifields._sigfault._addr
7892 = ((CPUArchState *)cpu_env)->pc;
7893 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
7898 /* case SSI_NVPAIRS:
7899 -- Used with SSIN_UACPROC to enable unaligned accesses.
7900 case SSI_IEEE_STATE_AT_SIGNAL:
7901 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7902 -- Not implemented in linux kernel
7907 #ifdef TARGET_NR_osf_sigprocmask
7908 /* Alpha specific. */
7909 case TARGET_NR_osf_sigprocmask:
7913 sigset_t set, oldset;
7916 case TARGET_SIG_BLOCK:
7919 case TARGET_SIG_UNBLOCK:
7922 case TARGET_SIG_SETMASK:
7926 ret = -TARGET_EINVAL;
7930 target_to_host_old_sigset(&set, &mask);
7931 sigprocmask(how, &set, &oldset);
7932 host_to_target_old_sigset(&mask, &oldset);
7938 #ifdef TARGET_NR_getgid32
7939 case TARGET_NR_getgid32:
7940 ret = get_errno(getgid());
7943 #ifdef TARGET_NR_geteuid32
7944 case TARGET_NR_geteuid32:
7945 ret = get_errno(geteuid());
7948 #ifdef TARGET_NR_getegid32
7949 case TARGET_NR_getegid32:
7950 ret = get_errno(getegid());
7953 #ifdef TARGET_NR_setreuid32
7954 case TARGET_NR_setreuid32:
7955 ret = get_errno(setreuid(arg1, arg2));
7958 #ifdef TARGET_NR_setregid32
7959 case TARGET_NR_setregid32:
7960 ret = get_errno(setregid(arg1, arg2));
7963 #ifdef TARGET_NR_getgroups32
7964 case TARGET_NR_getgroups32:
7966 int gidsetsize = arg1;
7967 uint32_t *target_grouplist;
7971 grouplist = alloca(gidsetsize * sizeof(gid_t));
7972 ret = get_errno(getgroups(gidsetsize, grouplist));
7973 if (gidsetsize == 0)
7975 if (!is_error(ret)) {
7976 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7977 if (!target_grouplist) {
7978 ret = -TARGET_EFAULT;
7981 for(i = 0;i < ret; i++)
7982 target_grouplist[i] = tswap32(grouplist[i]);
7983 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7988 #ifdef TARGET_NR_setgroups32
7989 case TARGET_NR_setgroups32:
7991 int gidsetsize = arg1;
7992 uint32_t *target_grouplist;
7996 grouplist = alloca(gidsetsize * sizeof(gid_t));
7997 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7998 if (!target_grouplist) {
7999 ret = -TARGET_EFAULT;
8002 for(i = 0;i < gidsetsize; i++)
8003 grouplist[i] = tswap32(target_grouplist[i]);
8004 unlock_user(target_grouplist, arg2, 0);
8005 ret = get_errno(setgroups(gidsetsize, grouplist));
8009 #ifdef TARGET_NR_fchown32
8010 case TARGET_NR_fchown32:
8011 ret = get_errno(fchown(arg1, arg2, arg3));
8014 #ifdef TARGET_NR_setresuid32
8015 case TARGET_NR_setresuid32:
8016 ret = get_errno(setresuid(arg1, arg2, arg3));
8019 #ifdef TARGET_NR_getresuid32
8020 case TARGET_NR_getresuid32:
8022 uid_t ruid, euid, suid;
8023 ret = get_errno(getresuid(&ruid, &euid, &suid));
8024 if (!is_error(ret)) {
8025 if (put_user_u32(ruid, arg1)
8026 || put_user_u32(euid, arg2)
8027 || put_user_u32(suid, arg3))
8033 #ifdef TARGET_NR_setresgid32
8034 case TARGET_NR_setresgid32:
8035 ret = get_errno(setresgid(arg1, arg2, arg3));
8038 #ifdef TARGET_NR_getresgid32
8039 case TARGET_NR_getresgid32:
8041 gid_t rgid, egid, sgid;
8042 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8043 if (!is_error(ret)) {
8044 if (put_user_u32(rgid, arg1)
8045 || put_user_u32(egid, arg2)
8046 || put_user_u32(sgid, arg3))
8052 #ifdef TARGET_NR_chown32
8053 case TARGET_NR_chown32:
8054 if (!(p = lock_user_string(arg1)))
8056 ret = get_errno(chown(p, arg2, arg3));
8057 unlock_user(p, arg1, 0);
8060 #ifdef TARGET_NR_setuid32
8061 case TARGET_NR_setuid32:
8062 ret = get_errno(setuid(arg1));
8065 #ifdef TARGET_NR_setgid32
8066 case TARGET_NR_setgid32:
8067 ret = get_errno(setgid(arg1));
8070 #ifdef TARGET_NR_setfsuid32
8071 case TARGET_NR_setfsuid32:
8072 ret = get_errno(setfsuid(arg1));
8075 #ifdef TARGET_NR_setfsgid32
8076 case TARGET_NR_setfsgid32:
8077 ret = get_errno(setfsgid(arg1));
8081 case TARGET_NR_pivot_root:
8083 #ifdef TARGET_NR_mincore
8084 case TARGET_NR_mincore:
8087 ret = -TARGET_EFAULT;
8088 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8090 if (!(p = lock_user_string(arg3)))
8092 ret = get_errno(mincore(a, arg2, p));
8093 unlock_user(p, arg3, ret);
8095 unlock_user(a, arg1, 0);
8099 #ifdef TARGET_NR_arm_fadvise64_64
8100 case TARGET_NR_arm_fadvise64_64:
8103 * arm_fadvise64_64 looks like fadvise64_64 but
8104 * with different argument order
8112 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8113 #ifdef TARGET_NR_fadvise64_64
8114 case TARGET_NR_fadvise64_64:
8116 #ifdef TARGET_NR_fadvise64
8117 case TARGET_NR_fadvise64:
8121 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8122 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8123 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8124 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8128 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8131 #ifdef TARGET_NR_madvise
8132 case TARGET_NR_madvise:
8133 /* A straight passthrough may not be safe because qemu sometimes
8134 turns private file-backed mappings into anonymous mappings.
8135 This will break MADV_DONTNEED.
8136 This is a hint, so ignoring and returning success is ok. */
/* fcntl64 (32-bit ABIs only): convert the guest's 64-bit flock layout
 * to the host struct, perform the host fcntl(), and convert results
 * back.  On ARM the EABI and old-ABI flock64 layouts differ, so the
 * eabi flag in the CPU state selects which target struct to use. */
8140 #if TARGET_ABI_BITS == 32
8141 case TARGET_NR_fcntl64:
8145 struct target_flock64 *target_fl;
8147 struct target_eabi_flock64 *target_efl;
/* Translate the guest F_* command to the host's value first. */
8150 cmd = target_to_host_fcntl_cmd(arg2);
8151 if (cmd == -TARGET_EINVAL) {
8157 case TARGET_F_GETLK64:
/* Read the guest flock64, byte-swapping each field into the host
 * struct fl before querying the lock. */
8159 if (((CPUARMState *)cpu_env)->eabi) {
8160 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8162 fl.l_type = tswap16(target_efl->l_type);
8163 fl.l_whence = tswap16(target_efl->l_whence);
8164 fl.l_start = tswap64(target_efl->l_start);
8165 fl.l_len = tswap64(target_efl->l_len);
8166 fl.l_pid = tswap32(target_efl->l_pid);
8167 unlock_user_struct(target_efl, arg3, 0);
8171 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8173 fl.l_type = tswap16(target_fl->l_type);
8174 fl.l_whence = tswap16(target_fl->l_whence);
8175 fl.l_start = tswap64(target_fl->l_start);
8176 fl.l_len = tswap64(target_fl->l_len);
8177 fl.l_pid = tswap32(target_fl->l_pid);
8178 unlock_user_struct(target_fl, arg3, 0);
8180 ret = get_errno(fcntl(arg1, cmd, &fl));
/* F_GETLK64 writes the result back: copy the host flock returned by
 * fcntl() into the guest's struct, swapping back to target order. */
8183 if (((CPUARMState *)cpu_env)->eabi) {
8184 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8186 target_efl->l_type = tswap16(fl.l_type);
8187 target_efl->l_whence = tswap16(fl.l_whence);
8188 target_efl->l_start = tswap64(fl.l_start);
8189 target_efl->l_len = tswap64(fl.l_len);
8190 target_efl->l_pid = tswap32(fl.l_pid);
8191 unlock_user_struct(target_efl, arg3, 1);
8195 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8197 target_fl->l_type = tswap16(fl.l_type);
8198 target_fl->l_whence = tswap16(fl.l_whence);
8199 target_fl->l_start = tswap64(fl.l_start);
8200 target_fl->l_len = tswap64(fl.l_len);
8201 target_fl->l_pid = tswap32(fl.l_pid);
8202 unlock_user_struct(target_fl, arg3, 1);
8207 case TARGET_F_SETLK64:
8208 case TARGET_F_SETLKW64:
/* SETLK64/SETLKW64 are input-only: convert the guest flock64 and call
 * the host fcntl(); nothing is written back to guest memory. */
8210 if (((CPUARMState *)cpu_env)->eabi) {
8211 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8213 fl.l_type = tswap16(target_efl->l_type);
8214 fl.l_whence = tswap16(target_efl->l_whence);
8215 fl.l_start = tswap64(target_efl->l_start);
8216 fl.l_len = tswap64(target_efl->l_len);
8217 fl.l_pid = tswap32(target_efl->l_pid);
8218 unlock_user_struct(target_efl, arg3, 0);
8222 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8224 fl.l_type = tswap16(target_fl->l_type);
8225 fl.l_whence = tswap16(target_fl->l_whence);
8226 fl.l_start = tswap64(target_fl->l_start);
8227 fl.l_len = tswap64(target_fl->l_len);
8228 fl.l_pid = tswap32(target_fl->l_pid);
8229 unlock_user_struct(target_fl, arg3, 0);
8231 ret = get_errno(fcntl(arg1, cmd, &fl));
/* All other fcntl64 commands share the plain fcntl path. */
8234 ret = do_fcntl(arg1, arg2, arg3);
8240 #ifdef TARGET_NR_cacheflush
8241 case TARGET_NR_cacheflush:
8242 /* self-modifying code is handled automatically, so nothing needed */
8246 #ifdef TARGET_NR_security
8247 case TARGET_NR_security:
8250 #ifdef TARGET_NR_getpagesize
8251 case TARGET_NR_getpagesize:
/* Report the emulated target page size, not the host's. */
8252 ret = TARGET_PAGE_SIZE;
8255 case TARGET_NR_gettid:
8256 ret = get_errno(gettid());
8258 #ifdef TARGET_NR_readahead
8259 case TARGET_NR_readahead:
8260 #if TARGET_ABI_BITS == 32
/* On 32-bit ABIs the 64-bit offset is split across two argument
 * registers; regpairs_aligned() tells us whether the ABI inserted an
 * alignment pad before the pair. */
8261 if (regpairs_aligned(cpu_env)) {
8266 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8268 ret = get_errno(readahead(arg1, arg2, arg3));
/* Extended-attribute syscalls: each case locks the guest's path/name/
 * value buffers into host memory, calls the corresponding host *xattr
 * function, and unlocks.  A failed lock yields -TARGET_EFAULT. */
8273 #ifdef TARGET_NR_setxattr
8274 case TARGET_NR_listxattr:
8275 case TARGET_NR_llistxattr:
/* b receives the attribute-name list written by the host call. */
8279 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8281 ret = -TARGET_EFAULT;
8285 p = lock_user_string(arg1);
8287 if (num == TARGET_NR_listxattr) {
8288 ret = get_errno(listxattr(p, b, arg3));
8290 ret = get_errno(llistxattr(p, b, arg3));
8293 ret = -TARGET_EFAULT;
8295 unlock_user(p, arg1, 0);
8296 unlock_user(b, arg2, arg3);
8299 case TARGET_NR_flistxattr:
/* fd-based variant: only the output buffer needs locking. */
8303 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8305 ret = -TARGET_EFAULT;
8309 ret = get_errno(flistxattr(arg1, b, arg3));
8310 unlock_user(b, arg2, arg3);
8313 case TARGET_NR_setxattr:
8314 case TARGET_NR_lsetxattr:
8316 void *p, *n, *v = 0;
/* The value buffer is input-only for setxattr. */
8318 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8320 ret = -TARGET_EFAULT;
8324 p = lock_user_string(arg1);
8325 n = lock_user_string(arg2);
8327 if (num == TARGET_NR_setxattr) {
8328 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8330 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8333 ret = -TARGET_EFAULT;
8335 unlock_user(p, arg1, 0);
8336 unlock_user(n, arg2, 0);
8337 unlock_user(v, arg3, 0);
8340 case TARGET_NR_fsetxattr:
8344 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8346 ret = -TARGET_EFAULT;
8350 n = lock_user_string(arg2);
8352 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8354 ret = -TARGET_EFAULT;
8356 unlock_user(n, arg2, 0);
8357 unlock_user(v, arg3, 0);
8360 case TARGET_NR_getxattr:
8361 case TARGET_NR_lgetxattr:
8363 void *p, *n, *v = 0;
/* The value buffer is written by the host call for getxattr. */
8365 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8367 ret = -TARGET_EFAULT;
8371 p = lock_user_string(arg1);
8372 n = lock_user_string(arg2);
8374 if (num == TARGET_NR_getxattr) {
8375 ret = get_errno(getxattr(p, n, v, arg4));
8377 ret = get_errno(lgetxattr(p, n, v, arg4));
8380 ret = -TARGET_EFAULT;
8382 unlock_user(p, arg1, 0);
8383 unlock_user(n, arg2, 0);
8384 unlock_user(v, arg3, arg4);
8387 case TARGET_NR_fgetxattr:
8391 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8393 ret = -TARGET_EFAULT;
8397 n = lock_user_string(arg2);
8399 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8401 ret = -TARGET_EFAULT;
8403 unlock_user(n, arg2, 0);
8404 unlock_user(v, arg3, arg4);
8407 case TARGET_NR_removexattr:
8408 case TARGET_NR_lremovexattr:
8411 p = lock_user_string(arg1);
8412 n = lock_user_string(arg2);
8414 if (num == TARGET_NR_removexattr) {
8415 ret = get_errno(removexattr(p, n));
8417 ret = get_errno(lremovexattr(p, n));
8420 ret = -TARGET_EFAULT;
8422 unlock_user(p, arg1, 0);
8423 unlock_user(n, arg2, 0);
8426 case TARGET_NR_fremovexattr:
8429 n = lock_user_string(arg2);
8431 ret = get_errno(fremovexattr(arg1, n));
8433 ret = -TARGET_EFAULT;
8435 unlock_user(n, arg2, 0);
8439 #endif /* CONFIG_ATTR */
/* set_thread_area / get_thread_area: per-architecture TLS handling.
 * MIPS and CRIS store the TLS pointer directly in emulated CPU state;
 * 32-bit x86 goes through the GDT-emulating helpers; everything else
 * is reported as unimplemented (without the usual warning). */
8440 #ifdef TARGET_NR_set_thread_area
8441 case TARGET_NR_set_thread_area:
8442 #if defined(TARGET_MIPS)
8443 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8446 #elif defined(TARGET_CRIS)
8448 ret = -TARGET_EINVAL;
/* CRIS keeps the thread pointer in the PR_PID special register. */
8450 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8454 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8455 ret = do_set_thread_area(cpu_env, arg1);
8458 goto unimplemented_nowarn;
8461 #ifdef TARGET_NR_get_thread_area
8462 case TARGET_NR_get_thread_area:
8463 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8464 ret = do_get_thread_area(cpu_env, arg1);
8466 goto unimplemented_nowarn;
8469 #ifdef TARGET_NR_getdomainname
8470 case TARGET_NR_getdomainname:
8471 goto unimplemented_nowarn;
/* POSIX clock syscalls: call the host, then copy the struct timespec
 * result back into guest memory on success. */
8474 #ifdef TARGET_NR_clock_gettime
8475 case TARGET_NR_clock_gettime:
8478 ret = get_errno(clock_gettime(arg1, &ts));
8479 if (!is_error(ret)) {
8480 host_to_target_timespec(arg2, &ts);
8485 #ifdef TARGET_NR_clock_getres
8486 case TARGET_NR_clock_getres:
8489 ret = get_errno(clock_getres(arg1, &ts));
8490 if (!is_error(ret)) {
8491 host_to_target_timespec(arg2, &ts);
8496 #ifdef TARGET_NR_clock_nanosleep
8497 case TARGET_NR_clock_nanosleep:
8500 target_to_host_timespec(&ts, arg3);
/* arg4 is the optional remaining-time out-pointer; reuse ts for it. */
8501 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8503 host_to_target_timespec(arg4, &ts);
8508 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8509 case TARGET_NR_set_tid_address:
/* The kernel only stores this pointer, so passing the g2h-translated
 * host address through is sufficient. */
8510 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8514 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8515 case TARGET_NR_tkill:
/* Signal numbers differ between target and host; translate first. */
8516 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8520 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8521 case TARGET_NR_tgkill:
8522 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8523 target_to_host_signal(arg3)));
8527 #ifdef TARGET_NR_set_robust_list
8528 case TARGET_NR_set_robust_list:
8529 goto unimplemented_nowarn;
/* utimensat: convert the guest's pair of timespecs (atime, mtime) to
 * host format; a NULL pathname (arg2 == 0) is forwarded as NULL so the
 * kernel applies the times to the fd itself. */
8532 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
8533 case TARGET_NR_utimensat:
8535 struct timespec *tsp, ts[2];
8539 target_to_host_timespec(ts, arg3);
8540 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8544 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8546 if (!(p = lock_user_string(arg2))) {
8547 ret = -TARGET_EFAULT;
8550 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8551 unlock_user(p, arg2, 0);
8556 #if defined(CONFIG_USE_NPTL)
8557 case TARGET_NR_futex:
/* All futex argument translation lives in the do_futex() helper. */
8558 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
/* inotify syscalls: straight passthrough via the sys_* wrappers; only
 * the watched pathname needs locking/translation. */
8561 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8562 case TARGET_NR_inotify_init:
8563 ret = get_errno(sys_inotify_init());
8566 #ifdef CONFIG_INOTIFY1
8567 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8568 case TARGET_NR_inotify_init1:
8569 ret = get_errno(sys_inotify_init1(arg1));
8573 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8574 case TARGET_NR_inotify_add_watch:
8575 p = lock_user_string(arg2);
8576 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8577 unlock_user(p, arg2, 0);
8580 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8581 case TARGET_NR_inotify_rm_watch:
8582 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
/* POSIX message queue syscalls, forwarded to the host mq_* library
 * calls with mq_attr/timespec conversion. */
8586 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8587 case TARGET_NR_mq_open:
8589 struct mq_attr posix_mq_attr;
/* NOTE(review): the "arg1 - 1" offset looks suspicious — the matching
 * unlock below uses plain arg1, and every other case locks the string
 * at the address the guest passed.  TODO confirm against upstream. */
8591 p = lock_user_string(arg1 - 1);
8593 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8594 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
8595 unlock_user (p, arg1, 0);
8599 case TARGET_NR_mq_unlink:
/* NOTE(review): same "arg1 - 1" asymmetry as mq_open above. */
8600 p = lock_user_string(arg1 - 1);
8601 ret = get_errno(mq_unlink(p));
8602 unlock_user (p, arg1, 0);
8605 case TARGET_NR_mq_timedsend:
8609 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8611 target_to_host_timespec(&ts, arg5);
8612 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8613 host_to_target_timespec(arg5, &ts);
8616 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8617 unlock_user (p, arg2, arg3);
8621 case TARGET_NR_mq_timedreceive:
/* NOTE(review): mq_timedreceive writes the received message into p,
 * so VERIFY_READ looks wrong here — VERIFY_WRITE would be expected;
 * confirm before relying on this path. */
8626 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8628 target_to_host_timespec(&ts, arg5);
8629 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8630 host_to_target_timespec(arg5, &ts);
8633 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8634 unlock_user (p, arg2, arg3);
/* Report the received message's priority back to the guest. */
8636 put_user_u32(prio, arg4);
8640 /* Not implemented for now... */
8641 /* case TARGET_NR_mq_notify: */
8644 case TARGET_NR_mq_getsetattr:
8646 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8649 ret = mq_getattr(arg1, &posix_mq_attr_out);
8650 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8653 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8654 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
/* splice-family syscalls (tee/splice/vmsplice), plus eventfd and
 * fallocate passthroughs. */
8661 #ifdef CONFIG_SPLICE
8662 #ifdef TARGET_NR_tee
8665 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8669 #ifdef TARGET_NR_splice
8670 case TARGET_NR_splice:
8672 loff_t loff_in, loff_out;
8673 loff_t *ploff_in = NULL, *ploff_out = NULL;
8675 get_user_u64(loff_in, arg2);
8676 ploff_in = &loff_in;
/* NOTE(review): loff_out is read from arg2, but the splice off_out
 * pointer is the syscall's 4th argument (arg4); this looks like a
 * copy/paste bug — confirm against upstream. */
8679 get_user_u64(loff_out, arg2);
8680 ploff_out = &loff_out;
8682 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8686 #ifdef TARGET_NR_vmsplice
8687 case TARGET_NR_vmsplice:
/* Translate the guest iovec array into host iovecs before splicing. */
8689 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8691 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
8692 unlock_iovec(vec, arg2, arg3, 0);
8694 ret = -host_to_target_errno(errno);
8699 #endif /* CONFIG_SPLICE */
8700 #ifdef CONFIG_EVENTFD
8701 #if defined(TARGET_NR_eventfd)
8702 case TARGET_NR_eventfd:
8703 ret = get_errno(eventfd(arg1, 0));
8706 #if defined(TARGET_NR_eventfd2)
8707 case TARGET_NR_eventfd2:
/* NOTE(review): arg2 flags are passed to the host untranslated —
 * assumes target and host EFD_* flag values match; verify. */
8708 ret = get_errno(eventfd(arg1, arg2));
8711 #endif /* CONFIG_EVENTFD */
8712 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8713 case TARGET_NR_fallocate:
8714 #if TARGET_ABI_BITS == 32
/* 32-bit ABIs split the 64-bit offset and length across register
 * pairs; reassemble them with target_offset64(). */
8715 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8716 target_offset64(arg5, arg6)));
8718 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
/* sync_file_range: on 32-bit ABIs the two 64-bit values (offset and
 * nbytes) arrive split across register pairs; MIPS additionally pads
 * before the first pair, shifting every argument by one slot. */
8722 #if defined(CONFIG_SYNC_FILE_RANGE)
8723 #if defined(TARGET_NR_sync_file_range)
8724 case TARGET_NR_sync_file_range:
8725 #if TARGET_ABI_BITS == 32
8726 #if defined(TARGET_MIPS)
8727 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8728 target_offset64(arg5, arg6), arg7));
8730 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8731 target_offset64(arg4, arg5), arg6));
8732 #endif /* !TARGET_MIPS */
8734 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8738 #if defined(TARGET_NR_sync_file_range2)
8739 case TARGET_NR_sync_file_range2:
8740 /* This is like sync_file_range but the arguments are reordered */
8741 #if TARGET_ABI_BITS == 32
/* sync_file_range2 moves the flags up to arg2 so the 64-bit pairs
 * stay register-aligned. */
8742 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8743 target_offset64(arg5, arg6), arg2));
8745 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
/* epoll family: create/ctl are thin wrappers with struct epoll_event
 * byte-swapping; wait/pwait share one implementation that converts the
 * returned event array back into guest layout. */
8750 #if defined(CONFIG_EPOLL)
8751 #if defined(TARGET_NR_epoll_create)
8752 case TARGET_NR_epoll_create:
8753 ret = get_errno(epoll_create(arg1));
8756 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8757 case TARGET_NR_epoll_create1:
8758 ret = get_errno(epoll_create1(arg1));
8761 #if defined(TARGET_NR_epoll_ctl)
8762 case TARGET_NR_epoll_ctl:
8764 struct epoll_event ep;
8765 struct epoll_event *epp = 0;
8767 struct target_epoll_event *target_ep;
8768 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8771 ep.events = tswap32(target_ep->events);
8772 /* The epoll_data_t union is just opaque data to the kernel,
8773 * so we transfer all 64 bits across and need not worry what
8774 * actual data type it is.
8776 ep.data.u64 = tswap64(target_ep->data.u64);
8777 unlock_user_struct(target_ep, arg4, 0);
8780 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8785 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8786 #define IMPLEMENT_EPOLL_PWAIT
8788 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8789 #if defined(TARGET_NR_epoll_wait)
8790 case TARGET_NR_epoll_wait:
8792 #if defined(IMPLEMENT_EPOLL_PWAIT)
8793 case TARGET_NR_epoll_pwait:
8796 struct target_epoll_event *target_ep;
8797 struct epoll_event *ep;
8799 int maxevents = arg3;
/* Lock the guest's whole output array up front so results can be
 * copied back after the host call. */
8802 target_ep = lock_user(VERIFY_WRITE, arg2,
8803 maxevents * sizeof(struct target_epoll_event), 1);
/* NOTE(review): maxevents comes straight from the guest; an alloca
 * sized by it can blow the host stack for large values — should be
 * bounded or heap-allocated.  Confirm against upstream fixes. */
8808 ep = alloca(maxevents * sizeof(struct epoll_event));
8811 #if defined(IMPLEMENT_EPOLL_PWAIT)
8812 case TARGET_NR_epoll_pwait:
8814 target_sigset_t *target_set;
8815 sigset_t _set, *set = &_set;
/* epoll_pwait additionally takes a guest sigset to apply atomically
 * during the wait; convert it to the host representation. */
8818 target_set = lock_user(VERIFY_READ, arg5,
8819 sizeof(target_sigset_t), 1);
8821 unlock_user(target_ep, arg2, 0);
8824 target_to_host_sigset(set, target_set);
8825 unlock_user(target_set, arg5, 0);
8830 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8834 #if defined(TARGET_NR_epoll_wait)
8835 case TARGET_NR_epoll_wait:
8836 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8840 ret = -TARGET_ENOSYS;
/* On success, ret is the number of ready events; swap each one back
 * into target byte order before releasing the guest buffer. */
8842 if (!is_error(ret)) {
8844 for (i = 0; i < ret; i++) {
8845 target_ep[i].events = tswap32(ep[i].events);
8846 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8849 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8854 #ifdef TARGET_NR_prlimit64
8855 case TARGET_NR_prlimit64:
8857 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8858 struct target_rlimit64 *target_rnew, *target_rold;
8859 struct host_rlimit64 rnew, rold, *rnewp = 0;
/* If the guest supplied a new limit, convert it to host byte order. */
8861 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8864 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8865 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8866 unlock_user_struct(target_rnew, arg3, 0);
/* rold is only filled in when the guest asked for the old limit. */
8870 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8871 if (!is_error(ret) && arg4) {
8872 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8875 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8876 target_rold->rlim_max = tswap64(rold.rlim_max);
8877 unlock_user_struct(target_rold, arg4, 1);
8882 #ifdef TARGET_NR_gethostname
8883 case TARGET_NR_gethostname:
/* Let the host write the hostname directly into the locked guest
 * buffer of arg2 bytes. */
8885 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8887 ret = get_errno(gethostname(name, arg2));
8888 unlock_user(name, arg1, arg2);
8890 ret = -TARGET_EFAULT;
/* Fallback for syscalls with no emulation: log a warning and return
 * ENOSYS.  The unimplemented_nowarn label is the quiet variant used by
 * cases above that deliberately skip the log message. */
8897 gemu_log("qemu: Unsupported syscall: %d\n", num);
8898 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8899 unimplemented_nowarn:
8901 ret = -TARGET_ENOSYS;
8906 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8909 print_syscall_ret(num, ret);
8912 ret = -TARGET_EFAULT;