 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
19 #define _ATFILE_SOURCE
31 #include <sys/types.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
#ifdef __ia64__
int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);
#endif
48 #include <sys/socket.h>
52 #include <sys/times.h>
55 #include <sys/statfs.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include <qemu-common.h>
68 #include <sys/eventfd.h>
71 #include <sys/epoll.h>
74 #define termios host_termios
75 #define winsize host_winsize
76 #define termio host_termio
77 #define sgttyb host_sgttyb /* same as target */
78 #define tchars host_tchars /* same as target */
79 #define ltchars host_ltchars /* same as target */
81 #include <linux/termios.h>
82 #include <linux/unistd.h>
83 #include <linux/utsname.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
88 #include <linux/mtio.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include "linux_loop.h"
96 #include "cpu-uname.h"
99 #include "qemu-common.h"
#if defined(CONFIG_USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
/* XXX: Hardcode the above values.  */
#define CLONE_NPTL_FLAGS2 0
#endif
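
/*
 * CLONE_NPTL_FLAGS2 collects the thread-library clone() flags that the
 * clone() emulation is prepared to pass through to the host.  For instance,
 * a guest pthread_create() might issue something like
 *
 *     clone(CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 *           CLONE_THREAD | CLONE_SYSVSEM | CLONE_NPTL_FLAGS2, ...);
 *
 * and without CONFIG_USE_NPTL those extra flags are simply masked to 0.
 */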
111 //#include <linux/msdos_fs.h>
112 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
113 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
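
/*
 * For example, a declaration such as
 *
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp,
 *               uint, count);
 *
 * expands to a static wrapper
 *
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 *
 * which is why each sys_* wrapper name needs a matching __NR_sys_* alias
 * below that points back at the real host syscall number.
 */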
171 #define __NR_sys_uname __NR_uname
172 #define __NR_sys_faccessat __NR_faccessat
173 #define __NR_sys_fchmodat __NR_fchmodat
174 #define __NR_sys_fchownat __NR_fchownat
175 #define __NR_sys_fstatat64 __NR_fstatat64
176 #define __NR_sys_futimesat __NR_futimesat
177 #define __NR_sys_getcwd1 __NR_getcwd
178 #define __NR_sys_getdents __NR_getdents
179 #define __NR_sys_getdents64 __NR_getdents64
180 #define __NR_sys_getpriority __NR_getpriority
181 #define __NR_sys_linkat __NR_linkat
182 #define __NR_sys_mkdirat __NR_mkdirat
183 #define __NR_sys_mknodat __NR_mknodat
184 #define __NR_sys_newfstatat __NR_newfstatat
185 #define __NR_sys_openat __NR_openat
186 #define __NR_sys_readlinkat __NR_readlinkat
187 #define __NR_sys_renameat __NR_renameat
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_symlinkat __NR_symlinkat
190 #define __NR_sys_syslog __NR_syslog
191 #define __NR_sys_tgkill __NR_tgkill
192 #define __NR_sys_tkill __NR_tkill
193 #define __NR_sys_unlinkat __NR_unlinkat
194 #define __NR_sys_utimensat __NR_utimensat
195 #define __NR_sys_futex __NR_futex
196 #define __NR_sys_inotify_init __NR_inotify_init
197 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
198 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
200 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
202 #define __NR__llseek __NR_lseek
206 _syscall0(int, gettid)
/* This is a replacement for the host gettid() and must return a host
   errno. */
210 static int gettid(void) {
214 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
215 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
216 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
218 _syscall2(int, sys_getpriority, int, which, int, who);
219 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
220 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
221 loff_t *, res, uint, wh);
223 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
224 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
225 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
226 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
228 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
229 _syscall2(int,sys_tkill,int,tid,int,sig)
231 #ifdef __NR_exit_group
232 _syscall1(int,exit_group,int,error_code)
234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
235 _syscall1(int,set_tid_address,int *,tidptr)
237 #if defined(CONFIG_USE_NPTL)
238 #if defined(TARGET_NR_futex) && defined(__NR_futex)
239 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
240 const struct timespec *,timeout,int *,uaddr2,int,val3)
243 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
244 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
245 unsigned long *, user_mask_ptr);
246 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
247 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
248 unsigned long *, user_mask_ptr);
250 static bitmask_transtbl fcntl_flags_tbl[] = {
251 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
252 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
253 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
254 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
255 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
256 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
257 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
258 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
259 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
260 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
261 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
262 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
263 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
264 #if defined(O_DIRECT)
265 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
#define COPY_UTSNAME_FIELD(dest, src) \
do { \
    /* __NEW_UTS_LEN doesn't include terminating null */ \
    (void) strncpy((dest), (src), __NEW_UTS_LEN); \
    (dest)[__NEW_UTS_LEN] = '\0'; \
} while (0)
277 static int sys_uname(struct new_utsname *buf)
279 struct utsname uts_buf;
281 if (uname(&uts_buf) < 0)
    /*
     * Just in case these have some differences, we
     * translate utsname to new_utsname (which is the
     * struct the Linux kernel uses).
     */
290 memset(buf, 0, sizeof(*buf));
291 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
292 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
293 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
294 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
295 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
297 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
301 #undef COPY_UTSNAME_FIELD
304 static int sys_getcwd1(char *buf, size_t size)
306 if (getcwd(buf, size) == NULL) {
307 /* getcwd() sets errno */
310 return strlen(buf)+1;
/*
 * The host system seems to have atfile syscall stubs available.  We
 * now enable them one by one as specified by the target's syscall_nr.h.
 */
319 #ifdef TARGET_NR_faccessat
320 static int sys_faccessat(int dirfd, const char *pathname, int mode)
322 return (faccessat(dirfd, pathname, mode, 0));
325 #ifdef TARGET_NR_fchmodat
326 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
328 return (fchmodat(dirfd, pathname, mode, 0));
331 #if defined(TARGET_NR_fchownat)
332 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
333 gid_t group, int flags)
335 return (fchownat(dirfd, pathname, owner, group, flags));
338 #ifdef __NR_fstatat64
339 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
342 return (fstatat(dirfd, pathname, buf, flags));
345 #ifdef __NR_newfstatat
346 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
349 return (fstatat(dirfd, pathname, buf, flags));
352 #ifdef TARGET_NR_futimesat
353 static int sys_futimesat(int dirfd, const char *pathname,
354 const struct timeval times[2])
356 return (futimesat(dirfd, pathname, times));
359 #ifdef TARGET_NR_linkat
360 static int sys_linkat(int olddirfd, const char *oldpath,
361 int newdirfd, const char *newpath, int flags)
363 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
366 #ifdef TARGET_NR_mkdirat
367 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
369 return (mkdirat(dirfd, pathname, mode));
372 #ifdef TARGET_NR_mknodat
373 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
376 return (mknodat(dirfd, pathname, mode, dev));
#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, ...)
{
    /* open(2) takes an extra 'mode' parameter when called with O_CREAT. */
    if ((flags & O_CREAT) != 0) {
        va_list ap;
        mode_t mode;
        /* Get the 'mode' parameter and translate it to host format. */
        va_start(ap, flags);
        mode = va_arg(ap, mode_t);
        mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
        va_end(ap);
        return (openat(dirfd, pathname, flags, mode));
    }
    return (openat(dirfd, pathname, flags));
}
#endif
404 #ifdef TARGET_NR_readlinkat
405 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
407 return (readlinkat(dirfd, pathname, buf, bufsiz));
410 #ifdef TARGET_NR_renameat
411 static int sys_renameat(int olddirfd, const char *oldpath,
412 int newdirfd, const char *newpath)
414 return (renameat(olddirfd, oldpath, newdirfd, newpath));
417 #ifdef TARGET_NR_symlinkat
418 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
420 return (symlinkat(oldpath, newdirfd, newpath));
423 #ifdef TARGET_NR_unlinkat
424 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
426 return (unlinkat(dirfd, pathname, flags));
429 #else /* !CONFIG_ATFILE */
432 * Try direct syscalls instead
434 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
435 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
437 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
438 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
440 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
441 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
442 uid_t,owner,gid_t,group,int,flags)
444 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
445 defined(__NR_fstatat64)
446 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
447 struct stat *,buf,int,flags)
449 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
450 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
451 const struct timeval *,times)
453 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
454 defined(__NR_newfstatat)
455 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
456 struct stat *,buf,int,flags)
458 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
459 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
460 int,newdirfd,const char *,newpath,int,flags)
462 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
463 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
465 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
466 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
467 mode_t,mode,dev_t,dev)
469 #if defined(TARGET_NR_openat) && defined(__NR_openat)
470 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
472 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
473 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
474 char *,buf,size_t,bufsize)
476 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
477 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
478 int,newdirfd,const char *,newpath)
480 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
481 _syscall3(int,sys_symlinkat,const char *,oldpath,
482 int,newdirfd,const char *,newpath)
484 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
485 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
488 #endif /* CONFIG_ATFILE */
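
/*
 * Both halves of the CONFIG_ATFILE block above provide the same sys_*at()
 * helpers: the first half goes through the host libc wrappers (openat(),
 * fstatat(), ...), while the second half falls back to raw syscall(2)
 * invocations for hosts whose kernel, but not libc, exposes the *at calls.
 */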
490 #ifdef CONFIG_UTIMENSAT
491 static int sys_utimensat(int dirfd, const char *pathname,
492 const struct timespec times[2], int flags)
494 if (pathname == NULL)
495 return futimens(dirfd, times);
497 return utimensat(dirfd, pathname, times, flags);
500 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
501 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
502 const struct timespec *,tsp,int,flags)
504 #endif /* CONFIG_UTIMENSAT */
506 #ifdef CONFIG_INOTIFY
507 #include <sys/inotify.h>
509 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
510 static int sys_inotify_init(void)
512 return (inotify_init());
515 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
516 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
518 return (inotify_add_watch(fd, pathname, mask));
521 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
522 static int sys_inotify_rm_watch(int fd, int32_t wd)
524 return (inotify_rm_watch(fd, wd));
527 #ifdef CONFIG_INOTIFY1
528 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
529 static int sys_inotify_init1(int flags)
531 return (inotify_init1(flags));
536 /* Userspace can usually survive runtime without inotify */
537 #undef TARGET_NR_inotify_init
538 #undef TARGET_NR_inotify_init1
539 #undef TARGET_NR_inotify_add_watch
540 #undef TARGET_NR_inotify_rm_watch
541 #endif /* CONFIG_INOTIFY */
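
/*
 * Undefining the TARGET_NR_inotify_* numbers above removes their cases from
 * the main syscall dispatch switch, so a guest that calls them is reported
 * the usual unimplemented-syscall error instead of a broken emulation.
 */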
#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const __sigset_t *, sigmask,
          size_t, sigsetsize)
#endif
#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif
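
/*
 * When the host headers lack the syscall, __NR_ppoll / __NR_pselect6 are
 * defined to -1 above so the generated wrappers still compile; at run time
 * syscall(-1, ...) then fails cleanly with ENOSYS, which ends up being
 * reported to the guest as an unsupported syscall.
 */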
562 extern int personality(int);
563 extern int flock(int, int);
564 extern int setfsuid(int);
565 extern int setfsgid(int);
566 extern int setgroups(int, gid_t *);
568 #define ERRNO_TABLE_SIZE 1200
570 /* target_to_host_errno_table[] is initialized from
571 * host_to_target_errno_table[] in syscall_init(). */
572 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
579 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
580 [EIDRM] = TARGET_EIDRM,
581 [ECHRNG] = TARGET_ECHRNG,
582 [EL2NSYNC] = TARGET_EL2NSYNC,
583 [EL3HLT] = TARGET_EL3HLT,
584 [EL3RST] = TARGET_EL3RST,
585 [ELNRNG] = TARGET_ELNRNG,
586 [EUNATCH] = TARGET_EUNATCH,
587 [ENOCSI] = TARGET_ENOCSI,
588 [EL2HLT] = TARGET_EL2HLT,
589 [EDEADLK] = TARGET_EDEADLK,
590 [ENOLCK] = TARGET_ENOLCK,
591 [EBADE] = TARGET_EBADE,
592 [EBADR] = TARGET_EBADR,
593 [EXFULL] = TARGET_EXFULL,
594 [ENOANO] = TARGET_ENOANO,
595 [EBADRQC] = TARGET_EBADRQC,
596 [EBADSLT] = TARGET_EBADSLT,
597 [EBFONT] = TARGET_EBFONT,
598 [ENOSTR] = TARGET_ENOSTR,
599 [ENODATA] = TARGET_ENODATA,
600 [ETIME] = TARGET_ETIME,
601 [ENOSR] = TARGET_ENOSR,
602 [ENONET] = TARGET_ENONET,
603 [ENOPKG] = TARGET_ENOPKG,
604 [EREMOTE] = TARGET_EREMOTE,
605 [ENOLINK] = TARGET_ENOLINK,
606 [EADV] = TARGET_EADV,
607 [ESRMNT] = TARGET_ESRMNT,
608 [ECOMM] = TARGET_ECOMM,
609 [EPROTO] = TARGET_EPROTO,
610 [EDOTDOT] = TARGET_EDOTDOT,
611 [EMULTIHOP] = TARGET_EMULTIHOP,
612 [EBADMSG] = TARGET_EBADMSG,
613 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
614 [EOVERFLOW] = TARGET_EOVERFLOW,
615 [ENOTUNIQ] = TARGET_ENOTUNIQ,
616 [EBADFD] = TARGET_EBADFD,
617 [EREMCHG] = TARGET_EREMCHG,
618 [ELIBACC] = TARGET_ELIBACC,
619 [ELIBBAD] = TARGET_ELIBBAD,
620 [ELIBSCN] = TARGET_ELIBSCN,
621 [ELIBMAX] = TARGET_ELIBMAX,
622 [ELIBEXEC] = TARGET_ELIBEXEC,
623 [EILSEQ] = TARGET_EILSEQ,
624 [ENOSYS] = TARGET_ENOSYS,
625 [ELOOP] = TARGET_ELOOP,
626 [ERESTART] = TARGET_ERESTART,
627 [ESTRPIPE] = TARGET_ESTRPIPE,
628 [ENOTEMPTY] = TARGET_ENOTEMPTY,
629 [EUSERS] = TARGET_EUSERS,
630 [ENOTSOCK] = TARGET_ENOTSOCK,
631 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
632 [EMSGSIZE] = TARGET_EMSGSIZE,
633 [EPROTOTYPE] = TARGET_EPROTOTYPE,
634 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
635 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
636 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
637 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
638 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
639 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
640 [EADDRINUSE] = TARGET_EADDRINUSE,
641 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
642 [ENETDOWN] = TARGET_ENETDOWN,
643 [ENETUNREACH] = TARGET_ENETUNREACH,
644 [ENETRESET] = TARGET_ENETRESET,
645 [ECONNABORTED] = TARGET_ECONNABORTED,
646 [ECONNRESET] = TARGET_ECONNRESET,
647 [ENOBUFS] = TARGET_ENOBUFS,
648 [EISCONN] = TARGET_EISCONN,
649 [ENOTCONN] = TARGET_ENOTCONN,
650 [EUCLEAN] = TARGET_EUCLEAN,
651 [ENOTNAM] = TARGET_ENOTNAM,
652 [ENAVAIL] = TARGET_ENAVAIL,
653 [EISNAM] = TARGET_EISNAM,
654 [EREMOTEIO] = TARGET_EREMOTEIO,
655 [ESHUTDOWN] = TARGET_ESHUTDOWN,
656 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
657 [ETIMEDOUT] = TARGET_ETIMEDOUT,
658 [ECONNREFUSED] = TARGET_ECONNREFUSED,
659 [EHOSTDOWN] = TARGET_EHOSTDOWN,
660 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
661 [EALREADY] = TARGET_EALREADY,
662 [EINPROGRESS] = TARGET_EINPROGRESS,
663 [ESTALE] = TARGET_ESTALE,
664 [ECANCELED] = TARGET_ECANCELED,
665 [ENOMEDIUM] = TARGET_ENOMEDIUM,
666 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
668 [ENOKEY] = TARGET_ENOKEY,
671 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
674 [EKEYREVOKED] = TARGET_EKEYREVOKED,
677 [EKEYREJECTED] = TARGET_EKEYREJECTED,
680 [EOWNERDEAD] = TARGET_EOWNERDEAD,
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
};
static inline int host_to_target_errno(int err)
{
    if (host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}
static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}
701 static inline abi_long get_errno(abi_long ret)
704 return -host_to_target_errno(errno);
709 static inline int is_error(abi_long ret)
711 return (abi_ulong)ret >= (abi_ulong)(-4096);
714 char *target_strerror(int err)
716 return strerror(target_to_host_errno(err));
719 static abi_ulong target_brk;
720 static abi_ulong target_original_brk;
721 static abi_ulong brk_page;
723 void target_set_brk(abi_ulong new_brk)
725 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
726 brk_page = HOST_PAGE_ALIGN(target_brk);
729 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
730 #define DEBUGF_BRK(message, args...)
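
/*
 * Error-handling convention used throughout this file: host results are
 * wrapped in get_errno(), so failures come back as negative *target* errno
 * values, and is_error() recognises them by the Linux rule that returns in
 * the range [-4095, -1] are error codes.  For example (illustrative only):
 *
 *     abi_long ret = get_errno(open("/no/such/file", O_RDONLY));
 *     if (is_error(ret)) {
 *         // ret is -TARGET_ENOENT here
 *     }
 */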
732 /* do_brk() must return target values and target errnos. */
733 abi_long do_brk(abi_ulong new_brk)
735 abi_long mapped_addr;
738 DEBUGF_BRK("do_brk(%#010x) -> ", new_brk);
741 DEBUGF_BRK("%#010x (!new_brk)\n", target_brk);
744 if (new_brk < target_original_brk) {
745 DEBUGF_BRK("%#010x (new_brk < target_original_brk)\n", target_brk);
749 /* If the new brk is less than the highest page reserved to the
750 * target heap allocation, set it and we're almost done... */
751 if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
754 if (new_brk > target_brk) {
755 memset(g2h(target_brk), 0, new_brk - target_brk);
757 target_brk = new_brk;
758 DEBUGF_BRK("%#010x (new_brk <= brk_page)\n", target_brk);
762 /* We need to allocate more memory after the brk... Note that
763 * we don't use MAP_FIXED because that will map over the top of
764 * any existing mapping (like the one with the host libc or qemu
765 * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
768 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
769 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
770 PROT_READ|PROT_WRITE,
771 MAP_ANON|MAP_PRIVATE, 0, 0));
773 if (mapped_addr == brk_page) {
774 target_brk = new_brk;
775 brk_page = HOST_PAGE_ALIGN(target_brk);
776 DEBUGF_BRK("%#010x (mapped_addr == brk_page)\n", target_brk);
778 } else if (mapped_addr != -1) {
779 /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
782 target_munmap(mapped_addr, new_alloc_size);
784 DEBUGF_BRK("%#010x (mapped_addr != -1)\n", target_brk);
787 DEBUGF_BRK("%#010x (otherwise)\n", target_brk);
790 #if defined(TARGET_ALPHA)
791 /* We (partially) emulate OSF/1 on Alpha, which requires we
792 return a proper errno, not an unchanged brk value. */
793 return -TARGET_ENOMEM;
795 /* For everything else, return the previous break. */
799 static inline abi_long copy_from_user_fdset(fd_set *fds,
800 abi_ulong target_fds_addr,
804 abi_ulong b, *target_fds;
806 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
807 if (!(target_fds = lock_user(VERIFY_READ,
809 sizeof(abi_ulong) * nw,
811 return -TARGET_EFAULT;
815 for (i = 0; i < nw; i++) {
816 /* grab the abi_ulong */
817 __get_user(b, &target_fds[i]);
818 for (j = 0; j < TARGET_ABI_BITS; j++) {
819 /* check the bit inside the abi_ulong */
826 unlock_user(target_fds, target_fds_addr, 0);
831 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
832 abi_ulong target_fds_addr,
835 if (target_fds_addr) {
836 if (copy_from_user_fdset(fds, target_fds_addr, n))
837 return -TARGET_EFAULT;
845 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
851 abi_ulong *target_fds;
853 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
854 if (!(target_fds = lock_user(VERIFY_WRITE,
856 sizeof(abi_ulong) * nw,
858 return -TARGET_EFAULT;
861 for (i = 0; i < nw; i++) {
863 for (j = 0; j < TARGET_ABI_BITS; j++) {
864 v |= ((FD_ISSET(k, fds) != 0) << j);
867 __put_user(v, &target_fds[i]);
870 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
875 #if defined(__alpha__)
881 static inline abi_long host_to_target_clock_t(long ticks)
883 #if HOST_HZ == TARGET_HZ
886 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
890 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
891 const struct rusage *rusage)
893 struct target_rusage *target_rusage;
895 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
896 return -TARGET_EFAULT;
897 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
898 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
899 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
900 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
901 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
902 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
903 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
904 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
905 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
906 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
907 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
908 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
909 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
910 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
911 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
912 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
913 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
914 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
915 unlock_user_struct(target_rusage, target_addr, 1);
920 static inline rlim_t target_to_host_rlim(target_ulong target_rlim)
922 if (target_rlim == TARGET_RLIM_INFINITY)
923 return RLIM_INFINITY;
925 return tswapl(target_rlim);
928 static inline target_ulong host_to_target_rlim(rlim_t rlim)
930 if (rlim == RLIM_INFINITY || rlim != (target_long)rlim)
931 return TARGET_RLIM_INFINITY;
936 static inline abi_long copy_from_user_timeval(struct timeval *tv,
937 abi_ulong target_tv_addr)
939 struct target_timeval *target_tv;
941 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
942 return -TARGET_EFAULT;
944 __get_user(tv->tv_sec, &target_tv->tv_sec);
945 __get_user(tv->tv_usec, &target_tv->tv_usec);
947 unlock_user_struct(target_tv, target_tv_addr, 0);
952 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
953 const struct timeval *tv)
955 struct target_timeval *target_tv;
957 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
958 return -TARGET_EFAULT;
960 __put_user(tv->tv_sec, &target_tv->tv_sec);
961 __put_user(tv->tv_usec, &target_tv->tv_usec);
963 unlock_user_struct(target_tv, target_tv_addr, 1);
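
/*
 * __get_user()/__put_user() byte-swap between guest and host byte order as
 * part of the access, which is why the timeval helpers above (and the
 * mq_attr helpers below) need no explicit tswapl() on individual fields.
 */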
968 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
971 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
972 abi_ulong target_mq_attr_addr)
974 struct target_mq_attr *target_mq_attr;
976 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
977 target_mq_attr_addr, 1))
978 return -TARGET_EFAULT;
980 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
981 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
982 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
983 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
985 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
990 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
991 const struct mq_attr *attr)
993 struct target_mq_attr *target_mq_attr;
995 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
996 target_mq_attr_addr, 0))
997 return -TARGET_EFAULT;
999 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1000 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1001 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1002 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1004 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1010 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1011 /* do_select() must return target values and target errnos. */
1012 static abi_long do_select(int n,
1013 abi_ulong rfd_addr, abi_ulong wfd_addr,
1014 abi_ulong efd_addr, abi_ulong target_tv_addr)
1016 fd_set rfds, wfds, efds;
1017 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1018 struct timeval tv, *tv_ptr;
1021 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1025 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1029 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1034 if (target_tv_addr) {
1035 if (copy_from_user_timeval(&tv, target_tv_addr))
1036 return -TARGET_EFAULT;
1042 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1044 if (!is_error(ret)) {
1045 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1046 return -TARGET_EFAULT;
1047 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1048 return -TARGET_EFAULT;
1049 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1050 return -TARGET_EFAULT;
1052 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1053 return -TARGET_EFAULT;
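
/*
 * The fd_set helpers used by do_select() repack the bitmaps bit by bit
 * rather than copying them wholesale, because the guest's abi_ulong word
 * size and byte order may differ from the host's and a plain memcpy would
 * scramble the file descriptor numbers.
 */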
1060 static abi_long do_pipe2(int host_pipe[], int flags)
1063 return pipe2(host_pipe, flags);
1069 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1070 int flags, int is_pipe2)
1074 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1077 return get_errno(ret);
1079 /* Several targets have special calling conventions for the original
1080 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1082 #if defined(TARGET_ALPHA)
1083 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1084 return host_pipe[0];
1085 #elif defined(TARGET_MIPS)
1086 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1087 return host_pipe[0];
1088 #elif defined(TARGET_SH4)
1089 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1090 return host_pipe[0];
1094 if (put_user_s32(host_pipe[0], pipedes)
1095 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1096 return -TARGET_EFAULT;
1097 return get_errno(ret);
1100 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1101 abi_ulong target_addr,
1104 struct target_ip_mreqn *target_smreqn;
1106 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1108 return -TARGET_EFAULT;
1109 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1110 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1111 if (len == sizeof(struct target_ip_mreqn))
1112 mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
1113 unlock_user(target_smreqn, target_addr, 0);
1118 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1119 abi_ulong target_addr,
1122 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1123 sa_family_t sa_family;
1124 struct target_sockaddr *target_saddr;
1126 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1128 return -TARGET_EFAULT;
1130 sa_family = tswap16(target_saddr->sa_family);
    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * The Linux kernel has a similar feature.
     */
1140 if (sa_family == AF_UNIX) {
1141 if (len < unix_maxlen && len > 0) {
1142 char *cp = (char*)target_saddr;
1144 if ( cp[len-1] && !cp[len] )
1147 if (len > unix_maxlen)
1151 memcpy(addr, target_saddr, len);
1152 addr->sa_family = sa_family;
1153 unlock_user(target_saddr, target_addr, 0);
1158 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1159 struct sockaddr *addr,
1162 struct target_sockaddr *target_saddr;
1164 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1166 return -TARGET_EFAULT;
1167 memcpy(target_saddr, addr, len);
1168 target_saddr->sa_family = tswap16(addr->sa_family);
1169 unlock_user(target_saddr, target_addr, len);
1174 /* ??? Should this also swap msgh->name? */
1175 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1176 struct target_msghdr *target_msgh)
1178 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1179 abi_long msg_controllen;
1180 abi_ulong target_cmsg_addr;
1181 struct target_cmsghdr *target_cmsg;
1182 socklen_t space = 0;
1184 msg_controllen = tswapl(target_msgh->msg_controllen);
1185 if (msg_controllen < sizeof (struct target_cmsghdr))
1187 target_cmsg_addr = tswapl(target_msgh->msg_control);
1188 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1190 return -TARGET_EFAULT;
1192 while (cmsg && target_cmsg) {
1193 void *data = CMSG_DATA(cmsg);
1194 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1196 int len = tswapl(target_cmsg->cmsg_len)
1197 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1199 space += CMSG_SPACE(len);
1200 if (space > msgh->msg_controllen) {
1201 space -= CMSG_SPACE(len);
1202 gemu_log("Host cmsg overflow\n");
1206 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1207 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1208 cmsg->cmsg_len = CMSG_LEN(len);
1210 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1211 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1212 memcpy(data, target_data, len);
1214 int *fd = (int *)data;
1215 int *target_fd = (int *)target_data;
1216 int i, numfds = len / sizeof(int);
1218 for (i = 0; i < numfds; i++)
1219 fd[i] = tswap32(target_fd[i]);
1222 cmsg = CMSG_NXTHDR(msgh, cmsg);
1223 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1225 unlock_user(target_cmsg, target_cmsg_addr, 0);
1227 msgh->msg_controllen = space;
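
/*
 * Only SCM_RIGHTS ancillary payloads are actually converted here: the file
 * descriptors are 32-bit on both sides and just get byte-swapped.  Any
 * other control message type is copied through verbatim after logging an
 * "Unsupported ancillary data" warning.
 */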
1231 /* ??? Should this also swap msgh->name? */
1232 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1233 struct msghdr *msgh)
1235 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1236 abi_long msg_controllen;
1237 abi_ulong target_cmsg_addr;
1238 struct target_cmsghdr *target_cmsg;
1239 socklen_t space = 0;
1241 msg_controllen = tswapl(target_msgh->msg_controllen);
1242 if (msg_controllen < sizeof (struct target_cmsghdr))
1244 target_cmsg_addr = tswapl(target_msgh->msg_control);
1245 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1247 return -TARGET_EFAULT;
1249 while (cmsg && target_cmsg) {
1250 void *data = CMSG_DATA(cmsg);
1251 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1253 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1255 space += TARGET_CMSG_SPACE(len);
1256 if (space > msg_controllen) {
1257 space -= TARGET_CMSG_SPACE(len);
1258 gemu_log("Target cmsg overflow\n");
1262 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1263 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1264 target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
1266 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1267 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1268 memcpy(target_data, data, len);
1270 int *fd = (int *)data;
1271 int *target_fd = (int *)target_data;
1272 int i, numfds = len / sizeof(int);
1274 for (i = 0; i < numfds; i++)
1275 target_fd[i] = tswap32(fd[i]);
1278 cmsg = CMSG_NXTHDR(msgh, cmsg);
1279 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1281 unlock_user(target_cmsg, target_cmsg_addr, space);
1283 target_msgh->msg_controllen = tswapl(space);
1287 /* do_setsockopt() Must return target values and target errnos. */
1288 static abi_long do_setsockopt(int sockfd, int level, int optname,
1289 abi_ulong optval_addr, socklen_t optlen)
1293 struct ip_mreqn *ip_mreq;
1294 struct ip_mreq_source *ip_mreq_source;
1298 /* TCP options all take an 'int' value. */
1299 if (optlen < sizeof(uint32_t))
1300 return -TARGET_EINVAL;
1302 if (get_user_u32(val, optval_addr))
1303 return -TARGET_EFAULT;
1304 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1311 case IP_ROUTER_ALERT:
1315 case IP_MTU_DISCOVER:
1321 case IP_MULTICAST_TTL:
1322 case IP_MULTICAST_LOOP:
1324 if (optlen >= sizeof(uint32_t)) {
1325 if (get_user_u32(val, optval_addr))
1326 return -TARGET_EFAULT;
1327 } else if (optlen >= 1) {
1328 if (get_user_u8(val, optval_addr))
1329 return -TARGET_EFAULT;
1331 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1333 case IP_ADD_MEMBERSHIP:
1334 case IP_DROP_MEMBERSHIP:
1335 if (optlen < sizeof (struct target_ip_mreq) ||
1336 optlen > sizeof (struct target_ip_mreqn))
1337 return -TARGET_EINVAL;
1339 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1340 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1341 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1344 case IP_BLOCK_SOURCE:
1345 case IP_UNBLOCK_SOURCE:
1346 case IP_ADD_SOURCE_MEMBERSHIP:
1347 case IP_DROP_SOURCE_MEMBERSHIP:
1348 if (optlen != sizeof (struct target_ip_mreq_source))
1349 return -TARGET_EINVAL;
1351 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1352 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1353 unlock_user (ip_mreq_source, optval_addr, 0);
1360 case TARGET_SOL_SOCKET:
1362 /* Options with 'int' argument. */
1363 case TARGET_SO_DEBUG:
1366 case TARGET_SO_REUSEADDR:
1367 optname = SO_REUSEADDR;
1369 case TARGET_SO_TYPE:
1372 case TARGET_SO_ERROR:
1375 case TARGET_SO_DONTROUTE:
1376 optname = SO_DONTROUTE;
1378 case TARGET_SO_BROADCAST:
1379 optname = SO_BROADCAST;
1381 case TARGET_SO_SNDBUF:
1382 optname = SO_SNDBUF;
1384 case TARGET_SO_RCVBUF:
1385 optname = SO_RCVBUF;
1387 case TARGET_SO_KEEPALIVE:
1388 optname = SO_KEEPALIVE;
1390 case TARGET_SO_OOBINLINE:
1391 optname = SO_OOBINLINE;
1393 case TARGET_SO_NO_CHECK:
1394 optname = SO_NO_CHECK;
1396 case TARGET_SO_PRIORITY:
1397 optname = SO_PRIORITY;
1400 case TARGET_SO_BSDCOMPAT:
1401 optname = SO_BSDCOMPAT;
1404 case TARGET_SO_PASSCRED:
1405 optname = SO_PASSCRED;
1407 case TARGET_SO_TIMESTAMP:
1408 optname = SO_TIMESTAMP;
1410 case TARGET_SO_RCVLOWAT:
1411 optname = SO_RCVLOWAT;
1413 case TARGET_SO_RCVTIMEO:
1414 optname = SO_RCVTIMEO;
1416 case TARGET_SO_SNDTIMEO:
1417 optname = SO_SNDTIMEO;
1423 if (optlen < sizeof(uint32_t))
1424 return -TARGET_EINVAL;
1426 if (get_user_u32(val, optval_addr))
1427 return -TARGET_EFAULT;
1428 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1432 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
1433 ret = -TARGET_ENOPROTOOPT;
1438 /* do_getsockopt() Must return target values and target errnos. */
1439 static abi_long do_getsockopt(int sockfd, int level, int optname,
1440 abi_ulong optval_addr, abi_ulong optlen)
1447 case TARGET_SOL_SOCKET:
1450 /* These don't just return a single integer */
1451 case TARGET_SO_LINGER:
1452 case TARGET_SO_RCVTIMEO:
1453 case TARGET_SO_SNDTIMEO:
1454 case TARGET_SO_PEERCRED:
1455 case TARGET_SO_PEERNAME:
1457 /* Options with 'int' argument. */
1458 case TARGET_SO_DEBUG:
1461 case TARGET_SO_REUSEADDR:
1462 optname = SO_REUSEADDR;
1464 case TARGET_SO_TYPE:
1467 case TARGET_SO_ERROR:
1470 case TARGET_SO_DONTROUTE:
1471 optname = SO_DONTROUTE;
1473 case TARGET_SO_BROADCAST:
1474 optname = SO_BROADCAST;
1476 case TARGET_SO_SNDBUF:
1477 optname = SO_SNDBUF;
1479 case TARGET_SO_RCVBUF:
1480 optname = SO_RCVBUF;
1482 case TARGET_SO_KEEPALIVE:
1483 optname = SO_KEEPALIVE;
1485 case TARGET_SO_OOBINLINE:
1486 optname = SO_OOBINLINE;
1488 case TARGET_SO_NO_CHECK:
1489 optname = SO_NO_CHECK;
1491 case TARGET_SO_PRIORITY:
1492 optname = SO_PRIORITY;
1495 case TARGET_SO_BSDCOMPAT:
1496 optname = SO_BSDCOMPAT;
1499 case TARGET_SO_PASSCRED:
1500 optname = SO_PASSCRED;
1502 case TARGET_SO_TIMESTAMP:
1503 optname = SO_TIMESTAMP;
1505 case TARGET_SO_RCVLOWAT:
1506 optname = SO_RCVLOWAT;
1513 /* TCP options all take an 'int' value. */
1515 if (get_user_u32(len, optlen))
1516 return -TARGET_EFAULT;
1518 return -TARGET_EINVAL;
1520 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1526 if (put_user_u32(val, optval_addr))
1527 return -TARGET_EFAULT;
1529 if (put_user_u8(val, optval_addr))
1530 return -TARGET_EFAULT;
1532 if (put_user_u32(len, optlen))
1533 return -TARGET_EFAULT;
1540 case IP_ROUTER_ALERT:
1544 case IP_MTU_DISCOVER:
1550 case IP_MULTICAST_TTL:
1551 case IP_MULTICAST_LOOP:
1552 if (get_user_u32(len, optlen))
1553 return -TARGET_EFAULT;
1555 return -TARGET_EINVAL;
1557 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1560 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1562 if (put_user_u32(len, optlen)
1563 || put_user_u8(val, optval_addr))
1564 return -TARGET_EFAULT;
1566 if (len > sizeof(int))
1568 if (put_user_u32(len, optlen)
1569 || put_user_u32(val, optval_addr))
1570 return -TARGET_EFAULT;
1574 ret = -TARGET_ENOPROTOOPT;
1580 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1582 ret = -TARGET_EOPNOTSUPP;
/*
 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
 * other lock functions have a return code of 0 for failure.
 */
1592 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1593 int count, int copy)
1595 struct target_iovec *target_vec;
1599 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1601 return -TARGET_EFAULT;
1602 for(i = 0;i < count; i++) {
1603 base = tswapl(target_vec[i].iov_base);
1604 vec[i].iov_len = tswapl(target_vec[i].iov_len);
1605 if (vec[i].iov_len != 0) {
1606 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
            /* Don't check the lock_user return value. We must call writev
               even if an element has an invalid base address. */
1610 /* zero length pointer is ignored */
1611 vec[i].iov_base = NULL;
1614 unlock_user (target_vec, target_addr, 0);
1618 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1619 int count, int copy)
1621 struct target_iovec *target_vec;
1625 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1627 return -TARGET_EFAULT;
1628 for(i = 0;i < count; i++) {
1629 if (target_vec[i].iov_base) {
1630 base = tswapl(target_vec[i].iov_base);
1631 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1634 unlock_user (target_vec, target_addr, 0);
1639 /* do_socket() Must return target values and target errnos. */
1640 static abi_long do_socket(int domain, int type, int protocol)
1642 #if defined(TARGET_MIPS)
1644 case TARGET_SOCK_DGRAM:
1647 case TARGET_SOCK_STREAM:
1650 case TARGET_SOCK_RAW:
1653 case TARGET_SOCK_RDM:
1656 case TARGET_SOCK_SEQPACKET:
1657 type = SOCK_SEQPACKET;
1659 case TARGET_SOCK_PACKET:
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* NETLINK socket connections are not supported */
1666 return get_errno(socket(domain, type, protocol));
1669 /* do_bind() Must return target values and target errnos. */
1670 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1676 if ((int)addrlen < 0) {
1677 return -TARGET_EINVAL;
1680 addr = alloca(addrlen+1);
1682 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1686 return get_errno(bind(sockfd, addr, addrlen));
1689 /* do_connect() Must return target values and target errnos. */
1690 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1696 if ((int)addrlen < 0) {
1697 return -TARGET_EINVAL;
1700 addr = alloca(addrlen);
1702 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1706 return get_errno(connect(sockfd, addr, addrlen));
1709 /* do_sendrecvmsg() Must return target values and target errnos. */
1710 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1711 int flags, int send)
1714 struct target_msghdr *msgp;
1718 abi_ulong target_vec;
1721 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1725 return -TARGET_EFAULT;
1726 if (msgp->msg_name) {
1727 msg.msg_namelen = tswap32(msgp->msg_namelen);
1728 msg.msg_name = alloca(msg.msg_namelen);
1729 ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1732 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1736 msg.msg_name = NULL;
1737 msg.msg_namelen = 0;
1739 msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1740 msg.msg_control = alloca(msg.msg_controllen);
1741 msg.msg_flags = tswap32(msgp->msg_flags);
1743 count = tswapl(msgp->msg_iovlen);
1744 vec = alloca(count * sizeof(struct iovec));
1745 target_vec = tswapl(msgp->msg_iov);
1746 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1747 msg.msg_iovlen = count;
1751 ret = target_to_host_cmsg(&msg, msgp);
1753 ret = get_errno(sendmsg(fd, &msg, flags));
1755 ret = get_errno(recvmsg(fd, &msg, flags));
1756 if (!is_error(ret)) {
1758 ret = host_to_target_cmsg(msgp, &msg);
1763 unlock_iovec(vec, target_vec, count, !send);
1764 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1768 /* do_accept() Must return target values and target errnos. */
1769 static abi_long do_accept(int fd, abi_ulong target_addr,
1770 abi_ulong target_addrlen_addr)
1776 if (target_addr == 0)
1777 return get_errno(accept(fd, NULL, NULL));
1779 /* linux returns EINVAL if addrlen pointer is invalid */
1780 if (get_user_u32(addrlen, target_addrlen_addr))
1781 return -TARGET_EINVAL;
1783 if ((int)addrlen < 0) {
1784 return -TARGET_EINVAL;
1787 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1788 return -TARGET_EINVAL;
1790 addr = alloca(addrlen);
1792 ret = get_errno(accept(fd, addr, &addrlen));
1793 if (!is_error(ret)) {
1794 host_to_target_sockaddr(target_addr, addr, addrlen);
1795 if (put_user_u32(addrlen, target_addrlen_addr))
1796 ret = -TARGET_EFAULT;
1801 /* do_getpeername() Must return target values and target errnos. */
1802 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1803 abi_ulong target_addrlen_addr)
1809 if (get_user_u32(addrlen, target_addrlen_addr))
1810 return -TARGET_EFAULT;
1812 if ((int)addrlen < 0) {
1813 return -TARGET_EINVAL;
1816 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1817 return -TARGET_EFAULT;
1819 addr = alloca(addrlen);
1821 ret = get_errno(getpeername(fd, addr, &addrlen));
1822 if (!is_error(ret)) {
1823 host_to_target_sockaddr(target_addr, addr, addrlen);
1824 if (put_user_u32(addrlen, target_addrlen_addr))
1825 ret = -TARGET_EFAULT;
1830 /* do_getsockname() Must return target values and target errnos. */
1831 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1832 abi_ulong target_addrlen_addr)
1838 if (get_user_u32(addrlen, target_addrlen_addr))
1839 return -TARGET_EFAULT;
1841 if ((int)addrlen < 0) {
1842 return -TARGET_EINVAL;
1845 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1846 return -TARGET_EFAULT;
1848 addr = alloca(addrlen);
1850 ret = get_errno(getsockname(fd, addr, &addrlen));
1851 if (!is_error(ret)) {
1852 host_to_target_sockaddr(target_addr, addr, addrlen);
1853 if (put_user_u32(addrlen, target_addrlen_addr))
1854 ret = -TARGET_EFAULT;
1859 /* do_socketpair() Must return target values and target errnos. */
1860 static abi_long do_socketpair(int domain, int type, int protocol,
1861 abi_ulong target_tab_addr)
1866 ret = get_errno(socketpair(domain, type, protocol, tab));
1867 if (!is_error(ret)) {
1868 if (put_user_s32(tab[0], target_tab_addr)
1869 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1870 ret = -TARGET_EFAULT;
1875 /* do_sendto() Must return target values and target errnos. */
1876 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1877 abi_ulong target_addr, socklen_t addrlen)
1883 if ((int)addrlen < 0) {
1884 return -TARGET_EINVAL;
1887 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1889 return -TARGET_EFAULT;
1891 addr = alloca(addrlen);
1892 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1894 unlock_user(host_msg, msg, 0);
1897 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1899 ret = get_errno(send(fd, host_msg, len, flags));
1901 unlock_user(host_msg, msg, 0);
1905 /* do_recvfrom() Must return target values and target errnos. */
1906 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1907 abi_ulong target_addr,
1908 abi_ulong target_addrlen)
1915 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
1917 return -TARGET_EFAULT;
1919 if (get_user_u32(addrlen, target_addrlen)) {
1920 ret = -TARGET_EFAULT;
1923 if ((int)addrlen < 0) {
1924 ret = -TARGET_EINVAL;
1927 addr = alloca(addrlen);
1928 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
1930 addr = NULL; /* To keep compiler quiet. */
1931 ret = get_errno(recv(fd, host_msg, len, flags));
1933 if (!is_error(ret)) {
1935 host_to_target_sockaddr(target_addr, addr, addrlen);
1936 if (put_user_u32(addrlen, target_addrlen)) {
1937 ret = -TARGET_EFAULT;
1941 unlock_user(host_msg, msg, len);
1944 unlock_user(host_msg, msg, 0);
1949 #ifdef TARGET_NR_socketcall
1950 /* do_socketcall() Must return target values and target errnos. */
1951 static abi_long do_socketcall(int num, abi_ulong vptr)
1954 const int n = sizeof(abi_ulong);
1959 abi_ulong domain, type, protocol;
1961 if (get_user_ual(domain, vptr)
1962 || get_user_ual(type, vptr + n)
1963 || get_user_ual(protocol, vptr + 2 * n))
1964 return -TARGET_EFAULT;
1966 ret = do_socket(domain, type, protocol);
1972 abi_ulong target_addr;
1975 if (get_user_ual(sockfd, vptr)
1976 || get_user_ual(target_addr, vptr + n)
1977 || get_user_ual(addrlen, vptr + 2 * n))
1978 return -TARGET_EFAULT;
1980 ret = do_bind(sockfd, target_addr, addrlen);
1983 case SOCKOP_connect:
1986 abi_ulong target_addr;
1989 if (get_user_ual(sockfd, vptr)
1990 || get_user_ual(target_addr, vptr + n)
1991 || get_user_ual(addrlen, vptr + 2 * n))
1992 return -TARGET_EFAULT;
1994 ret = do_connect(sockfd, target_addr, addrlen);
1999 abi_ulong sockfd, backlog;
2001 if (get_user_ual(sockfd, vptr)
2002 || get_user_ual(backlog, vptr + n))
2003 return -TARGET_EFAULT;
2005 ret = get_errno(listen(sockfd, backlog));
2011 abi_ulong target_addr, target_addrlen;
2013 if (get_user_ual(sockfd, vptr)
2014 || get_user_ual(target_addr, vptr + n)
2015 || get_user_ual(target_addrlen, vptr + 2 * n))
2016 return -TARGET_EFAULT;
2018 ret = do_accept(sockfd, target_addr, target_addrlen);
2021 case SOCKOP_getsockname:
2024 abi_ulong target_addr, target_addrlen;
2026 if (get_user_ual(sockfd, vptr)
2027 || get_user_ual(target_addr, vptr + n)
2028 || get_user_ual(target_addrlen, vptr + 2 * n))
2029 return -TARGET_EFAULT;
2031 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2034 case SOCKOP_getpeername:
2037 abi_ulong target_addr, target_addrlen;
2039 if (get_user_ual(sockfd, vptr)
2040 || get_user_ual(target_addr, vptr + n)
2041 || get_user_ual(target_addrlen, vptr + 2 * n))
2042 return -TARGET_EFAULT;
2044 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2047 case SOCKOP_socketpair:
2049 abi_ulong domain, type, protocol;
2052 if (get_user_ual(domain, vptr)
2053 || get_user_ual(type, vptr + n)
2054 || get_user_ual(protocol, vptr + 2 * n)
2055 || get_user_ual(tab, vptr + 3 * n))
2056 return -TARGET_EFAULT;
2058 ret = do_socketpair(domain, type, protocol, tab);
2068 if (get_user_ual(sockfd, vptr)
2069 || get_user_ual(msg, vptr + n)
2070 || get_user_ual(len, vptr + 2 * n)
2071 || get_user_ual(flags, vptr + 3 * n))
2072 return -TARGET_EFAULT;
2074 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2084 if (get_user_ual(sockfd, vptr)
2085 || get_user_ual(msg, vptr + n)
2086 || get_user_ual(len, vptr + 2 * n)
2087 || get_user_ual(flags, vptr + 3 * n))
2088 return -TARGET_EFAULT;
2090 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2102 if (get_user_ual(sockfd, vptr)
2103 || get_user_ual(msg, vptr + n)
2104 || get_user_ual(len, vptr + 2 * n)
2105 || get_user_ual(flags, vptr + 3 * n)
2106 || get_user_ual(addr, vptr + 4 * n)
2107 || get_user_ual(addrlen, vptr + 5 * n))
2108 return -TARGET_EFAULT;
2110 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2113 case SOCKOP_recvfrom:
2122 if (get_user_ual(sockfd, vptr)
2123 || get_user_ual(msg, vptr + n)
2124 || get_user_ual(len, vptr + 2 * n)
2125 || get_user_ual(flags, vptr + 3 * n)
2126 || get_user_ual(addr, vptr + 4 * n)
2127 || get_user_ual(addrlen, vptr + 5 * n))
2128 return -TARGET_EFAULT;
2130 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2133 case SOCKOP_shutdown:
2135 abi_ulong sockfd, how;
2137 if (get_user_ual(sockfd, vptr)
2138 || get_user_ual(how, vptr + n))
2139 return -TARGET_EFAULT;
2141 ret = get_errno(shutdown(sockfd, how));
2144 case SOCKOP_sendmsg:
2145 case SOCKOP_recvmsg:
2148 abi_ulong target_msg;
2151 if (get_user_ual(fd, vptr)
2152 || get_user_ual(target_msg, vptr + n)
2153 || get_user_ual(flags, vptr + 2 * n))
2154 return -TARGET_EFAULT;
2156 ret = do_sendrecvmsg(fd, target_msg, flags,
2157 (num == SOCKOP_sendmsg));
2160 case SOCKOP_setsockopt:
2168 if (get_user_ual(sockfd, vptr)
2169 || get_user_ual(level, vptr + n)
2170 || get_user_ual(optname, vptr + 2 * n)
2171 || get_user_ual(optval, vptr + 3 * n)
2172 || get_user_ual(optlen, vptr + 4 * n))
2173 return -TARGET_EFAULT;
2175 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2178 case SOCKOP_getsockopt:
2186 if (get_user_ual(sockfd, vptr)
2187 || get_user_ual(level, vptr + n)
2188 || get_user_ual(optname, vptr + 2 * n)
2189 || get_user_ual(optval, vptr + 3 * n)
2190 || get_user_ual(optlen, vptr + 4 * n))
2191 return -TARGET_EFAULT;
2193 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2197 gemu_log("Unsupported socketcall: %d\n", num);
2198 ret = -TARGET_ENOSYS;
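
/*
 * socketcall(2) multiplexes every socket operation through a single syscall:
 * each SOCKOP_* case above just fetches the packed abi_ulong arguments from
 * guest memory with get_user_ual() and forwards them to the same do_*()
 * helpers that the direct socket syscalls use.
 */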
2205 #define N_SHM_REGIONS 32
2207 static struct shm_region {
2210 } shm_regions[N_SHM_REGIONS];
2212 struct target_ipc_perm
2219 unsigned short int mode;
2220 unsigned short int __pad1;
2221 unsigned short int __seq;
2222 unsigned short int __pad2;
2223 abi_ulong __unused1;
2224 abi_ulong __unused2;
2227 struct target_semid_ds
2229 struct target_ipc_perm sem_perm;
2230 abi_ulong sem_otime;
2231 abi_ulong __unused1;
2232 abi_ulong sem_ctime;
2233 abi_ulong __unused2;
2234 abi_ulong sem_nsems;
2235 abi_ulong __unused3;
2236 abi_ulong __unused4;
2239 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2240 abi_ulong target_addr)
2242 struct target_ipc_perm *target_ip;
2243 struct target_semid_ds *target_sd;
2245 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2246 return -TARGET_EFAULT;
2247 target_ip = &(target_sd->sem_perm);
2248 host_ip->__key = tswapl(target_ip->__key);
2249 host_ip->uid = tswapl(target_ip->uid);
2250 host_ip->gid = tswapl(target_ip->gid);
2251 host_ip->cuid = tswapl(target_ip->cuid);
2252 host_ip->cgid = tswapl(target_ip->cgid);
2253 host_ip->mode = tswapl(target_ip->mode);
2254 unlock_user_struct(target_sd, target_addr, 0);
2258 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2259 struct ipc_perm *host_ip)
2261 struct target_ipc_perm *target_ip;
2262 struct target_semid_ds *target_sd;
2264 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2265 return -TARGET_EFAULT;
2266 target_ip = &(target_sd->sem_perm);
2267 target_ip->__key = tswapl(host_ip->__key);
2268 target_ip->uid = tswapl(host_ip->uid);
2269 target_ip->gid = tswapl(host_ip->gid);
2270 target_ip->cuid = tswapl(host_ip->cuid);
2271 target_ip->cgid = tswapl(host_ip->cgid);
2272 target_ip->mode = tswapl(host_ip->mode);
2273 unlock_user_struct(target_sd, target_addr, 1);
2277 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2278 abi_ulong target_addr)
2280 struct target_semid_ds *target_sd;
2282 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2283 return -TARGET_EFAULT;
2284 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2285 return -TARGET_EFAULT;
2286 host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
2287 host_sd->sem_otime = tswapl(target_sd->sem_otime);
2288 host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
2289 unlock_user_struct(target_sd, target_addr, 0);
2293 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2294 struct semid_ds *host_sd)
2296 struct target_semid_ds *target_sd;
2298 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2299 return -TARGET_EFAULT;
2300 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
2302 target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
2303 target_sd->sem_otime = tswapl(host_sd->sem_otime);
2304 target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
2305 unlock_user_struct(target_sd, target_addr, 1);
2309 struct target_seminfo {
2322 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2323 struct seminfo *host_seminfo)
2325 struct target_seminfo *target_seminfo;
2326 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2327 return -TARGET_EFAULT;
2328 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2329 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2330 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2331 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2332 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2333 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2334 __put_user(host_seminfo->semume, &target_seminfo->semume);
2335 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2336 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2337 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2338 unlock_user_struct(target_seminfo, target_addr, 1);
2344 struct semid_ds *buf;
2345 unsigned short *array;
2346 struct seminfo *__buf;
2349 union target_semun {
2356 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2357 abi_ulong target_addr)
2360 unsigned short *array;
2362 struct semid_ds semid_ds;
2365 semun.buf = &semid_ds;
2367 ret = semctl(semid, 0, IPC_STAT, semun);
2369 return get_errno(ret);
2371 nsems = semid_ds.sem_nsems;
2373 *host_array = malloc(nsems*sizeof(unsigned short));
2374 array = lock_user(VERIFY_READ, target_addr,
2375 nsems*sizeof(unsigned short), 1);
2377 return -TARGET_EFAULT;
2379 for(i=0; i<nsems; i++) {
2380 __get_user((*host_array)[i], &array[i]);
2382 unlock_user(array, target_addr, 0);
2387 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2388 unsigned short **host_array)
2391 unsigned short *array;
2393 struct semid_ds semid_ds;
2396 semun.buf = &semid_ds;
2398 ret = semctl(semid, 0, IPC_STAT, semun);
2400 return get_errno(ret);
2402 nsems = semid_ds.sem_nsems;
2404 array = lock_user(VERIFY_WRITE, target_addr,
2405 nsems*sizeof(unsigned short), 0);
2407 return -TARGET_EFAULT;
2409 for(i=0; i<nsems; i++) {
2410 __put_user((*host_array)[i], &array[i]);
2413 unlock_user(array, target_addr, 1);
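/* do_semctl(): convert the semun argument for the given command, call the
 * host semctl(), and convert any result back.  Plain-value commands
 * (GETVAL/SETVAL) byte-swap target_su.val, the array commands go through
 * the semarray helpers above, IPC_STAT/IPC_SET use the semid_ds
 * converters, the info commands fill a target struct seminfo, and other
 * commands are issued with a NULL argument. */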
2418 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2419 union target_semun target_su)
2422 struct semid_ds dsarg;
2423 unsigned short *array = NULL;
2424 struct seminfo seminfo;
2425 abi_long ret = -TARGET_EINVAL;
2432 arg.val = tswapl(target_su.val);
2433 ret = get_errno(semctl(semid, semnum, cmd, arg));
2434 target_su.val = tswapl(arg.val);
2438 err = target_to_host_semarray(semid, &array, target_su.array);
2442 ret = get_errno(semctl(semid, semnum, cmd, arg));
2443 err = host_to_target_semarray(semid, target_su.array, &array);
2450 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2454 ret = get_errno(semctl(semid, semnum, cmd, arg));
2455 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2461 arg.__buf = &seminfo;
2462 ret = get_errno(semctl(semid, semnum, cmd, arg));
2463 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2471 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2478 struct target_sembuf {
2479 unsigned short sem_num;
2484 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2485 abi_ulong target_addr,
2488 struct target_sembuf *target_sembuf;
2491 target_sembuf = lock_user(VERIFY_READ, target_addr,
2492 nsops*sizeof(struct target_sembuf), 1);
2494 return -TARGET_EFAULT;
2496 for(i=0; i<nsops; i++) {
2497 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2498 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2499 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2502 unlock_user(target_sembuf, target_addr, 0);
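/* do_semop(): convert the guest's array of sembuf operations to host
 * format and issue the host semop() call. */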
2507 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2509 struct sembuf sops[nsops];
2511 if (target_to_host_sembuf(sops, ptr, nsops))
2512 return -TARGET_EFAULT;
2514 return semop(semid, sops, nsops);
2517 struct target_msqid_ds
2519 struct target_ipc_perm msg_perm;
2520 abi_ulong msg_stime;
2521 #if TARGET_ABI_BITS == 32
2522 abi_ulong __unused1;
2524 abi_ulong msg_rtime;
2525 #if TARGET_ABI_BITS == 32
2526 abi_ulong __unused2;
2528 abi_ulong msg_ctime;
2529 #if TARGET_ABI_BITS == 32
2530 abi_ulong __unused3;
2532 abi_ulong __msg_cbytes;
2534 abi_ulong msg_qbytes;
2535 abi_ulong msg_lspid;
2536 abi_ulong msg_lrpid;
2537 abi_ulong __unused4;
2538 abi_ulong __unused5;
2541 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2542 abi_ulong target_addr)
2544 struct target_msqid_ds *target_md;
2546 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2547 return -TARGET_EFAULT;
2548 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2549 return -TARGET_EFAULT;
2550 host_md->msg_stime = tswapl(target_md->msg_stime);
2551 host_md->msg_rtime = tswapl(target_md->msg_rtime);
2552 host_md->msg_ctime = tswapl(target_md->msg_ctime);
2553 host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
2554 host_md->msg_qnum = tswapl(target_md->msg_qnum);
2555 host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
2556 host_md->msg_lspid = tswapl(target_md->msg_lspid);
2557 host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
2558 unlock_user_struct(target_md, target_addr, 0);
2562 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2563 struct msqid_ds *host_md)
2565 struct target_msqid_ds *target_md;
2567 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2568 return -TARGET_EFAULT;
2569 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2570 return -TARGET_EFAULT;
2571 target_md->msg_stime = tswapl(host_md->msg_stime);
2572 target_md->msg_rtime = tswapl(host_md->msg_rtime);
2573 target_md->msg_ctime = tswapl(host_md->msg_ctime);
2574 target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
2575 target_md->msg_qnum = tswapl(host_md->msg_qnum);
2576 target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
2577 target_md->msg_lspid = tswapl(host_md->msg_lspid);
2578 target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
2579 unlock_user_struct(target_md, target_addr, 1);
2583 struct target_msginfo {
2591 unsigned short int msgseg;
2594 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2595 struct msginfo *host_msginfo)
2597 struct target_msginfo *target_msginfo;
2598 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2599 return -TARGET_EFAULT;
2600 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2601 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2602 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2603 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2604 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2605 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2606 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2607 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2608 unlock_user_struct(target_msginfo, target_addr, 1);
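/* do_msgctl(): IPC_STAT and IPC_SET round-trip a msqid_ds through the
 * converters above, the info commands copy a struct msginfo back to the
 * guest, and commands that take no buffer (such as IPC_RMID) are passed
 * through with a NULL argument. */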
2612 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2614 struct msqid_ds dsarg;
2615 struct msginfo msginfo;
2616 abi_long ret = -TARGET_EINVAL;
2624 if (target_to_host_msqid_ds(&dsarg,ptr))
2625 return -TARGET_EFAULT;
2626 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2627 if (host_to_target_msqid_ds(ptr,&dsarg))
2628 return -TARGET_EFAULT;
2631 ret = get_errno(msgctl(msgid, cmd, NULL));
2635 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2636 if (host_to_target_msginfo(ptr, &msginfo))
2637 return -TARGET_EFAULT;
2644 struct target_msgbuf {
2649 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2650 unsigned int msgsz, int msgflg)
2652 struct target_msgbuf *target_mb;
2653 struct msgbuf *host_mb;
2656 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2657 return -TARGET_EFAULT;
2658 host_mb = malloc(msgsz+sizeof(long));
2659 host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
2660 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2661 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2663 unlock_user_struct(target_mb, msgp, 0);
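/* do_msgrcv(): receive into a temporary host buffer, then copy the
 * message text into the guest's mtext area (locked separately, since its
 * length is only known once msgrcv() returns) and store the byte-swapped
 * mtype in the guest header. */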
2668 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2669 unsigned int msgsz, abi_long msgtyp,
2672 struct target_msgbuf *target_mb;
2674 struct msgbuf *host_mb;
2677 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2678 return -TARGET_EFAULT;
2680 host_mb = malloc(msgsz+sizeof(long));
2681 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
2684 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2685 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2686 if (!target_mtext) {
2687 ret = -TARGET_EFAULT;
2690 memcpy(target_mb->mtext, host_mb->mtext, ret);
2691 unlock_user(target_mtext, target_mtext_addr, ret);
2694 target_mb->mtype = tswapl(host_mb->mtype);
2699 unlock_user_struct(target_mb, msgp, 1);
2703 struct target_shmid_ds
2705 struct target_ipc_perm shm_perm;
2706 abi_ulong shm_segsz;
2707 abi_ulong shm_atime;
2708 #if TARGET_ABI_BITS == 32
2709 abi_ulong __unused1;
2711 abi_ulong shm_dtime;
2712 #if TARGET_ABI_BITS == 32
2713 abi_ulong __unused2;
2715 abi_ulong shm_ctime;
2716 #if TARGET_ABI_BITS == 32
2717 abi_ulong __unused3;
2721 abi_ulong shm_nattch;
2722 unsigned long int __unused4;
2723 unsigned long int __unused5;
2726 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2727 abi_ulong target_addr)
2729 struct target_shmid_ds *target_sd;
2731 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2732 return -TARGET_EFAULT;
2733 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2734 return -TARGET_EFAULT;
2735 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2736 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2737 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2738 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2739 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2740 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2741 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2742 unlock_user_struct(target_sd, target_addr, 0);
2746 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2747 struct shmid_ds *host_sd)
2749 struct target_shmid_ds *target_sd;
2751 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2752 return -TARGET_EFAULT;
2753 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2754 return -TARGET_EFAULT;
2755 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2756 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2757 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2758 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2759 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2760 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2761 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2762 unlock_user_struct(target_sd, target_addr, 1);
2766 struct target_shminfo {
2774 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2775 struct shminfo *host_shminfo)
2777 struct target_shminfo *target_shminfo;
2778 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2779 return -TARGET_EFAULT;
2780 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2781 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2782 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2783 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2784 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2785 unlock_user_struct(target_shminfo, target_addr, 1);
2789 struct target_shm_info {
2794 abi_ulong swap_attempts;
2795 abi_ulong swap_successes;
2798 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2799 struct shm_info *host_shm_info)
2801 struct target_shm_info *target_shm_info;
2802 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2803 return -TARGET_EFAULT;
2804 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2805 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2806 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2807 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2808 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2809 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2810 unlock_user_struct(target_shm_info, target_addr, 1);
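/* do_shmctl(): IPC_STAT/IPC_SET round-trip a shmid_ds through the
 * converters above, IPC_INFO copies a struct shminfo and SHM_INFO a
 * struct shm_info back to the guest, and the remaining commands are
 * issued with a NULL buffer. */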
2814 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2816 struct shmid_ds dsarg;
2817 struct shminfo shminfo;
2818 struct shm_info shm_info;
2819 abi_long ret = -TARGET_EINVAL;
2827 if (target_to_host_shmid_ds(&dsarg, buf))
2828 return -TARGET_EFAULT;
2829 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2830 if (host_to_target_shmid_ds(buf, &dsarg))
2831 return -TARGET_EFAULT;
2834 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2835 if (host_to_target_shminfo(buf, &shminfo))
2836 return -TARGET_EFAULT;
2839 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2840 if (host_to_target_shm_info(buf, &shm_info))
2841 return -TARGET_EFAULT;
2846 ret = get_errno(shmctl(shmid, cmd, NULL));
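/* do_shmat(): query the segment size with IPC_STAT, attach either at the
 * guest-supplied address or at a free area found with mmap_find_vma(),
 * mark the guest pages valid and readable (and writable unless
 * SHM_RDONLY), and record the mapping in shm_regions[] so do_shmdt()
 * can undo it later. */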
2853 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2857 struct shmid_ds shm_info;
2860 /* find out the length of the shared memory segment */
2861 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2862 if (is_error(ret)) {
2863 /* can't get length, bail out */
2870 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2872 abi_ulong mmap_start;
2874 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2876 if (mmap_start == -1) {
2878 host_raddr = (void *)-1;
2880 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2883 if (host_raddr == (void *)-1) {
2885 return get_errno((long)host_raddr);
2887 raddr = h2g((unsigned long)host_raddr);
2889 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2890 PAGE_VALID | PAGE_READ |
2891 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2893 for (i = 0; i < N_SHM_REGIONS; i++) {
2894 if (shm_regions[i].start == 0) {
2895 shm_regions[i].start = raddr;
2896 shm_regions[i].size = shm_info.shm_segsz;
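/* do_shmdt(): clear the page flags for the region recorded by do_shmat()
 * above and detach the mapping with the host shmdt(). */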
2906 static inline abi_long do_shmdt(abi_ulong shmaddr)
2910 for (i = 0; i < N_SHM_REGIONS; ++i) {
2911 if (shm_regions[i].start == shmaddr) {
2912 shm_regions[i].start = 0;
2913 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
2918 return get_errno(shmdt(g2h(shmaddr)));
2921 #ifdef TARGET_NR_ipc
2922 /* ??? This only works with linear mappings. */
2923 /* do_ipc() must return target values and target errnos. */
2924 static abi_long do_ipc(unsigned int call, int first,
2925 int second, int third,
2926 abi_long ptr, abi_long fifth)
2931 version = call >> 16;
2936 ret = do_semop(first, ptr, second);
2940 ret = get_errno(semget(first, second, third));
2944 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
2948 ret = get_errno(msgget(first, second));
2952 ret = do_msgsnd(first, ptr, second, third);
2956 ret = do_msgctl(first, second, ptr);
2963 struct target_ipc_kludge {
2968 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
2969 ret = -TARGET_EFAULT;
2973 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
2975 unlock_user_struct(tmp, ptr, 0);
2979 ret = do_msgrcv(first, ptr, second, fifth, third);
2988 raddr = do_shmat(first, ptr, second);
2989 if (is_error(raddr))
2990 return get_errno(raddr);
2991 if (put_user_ual(raddr, third))
2992 return -TARGET_EFAULT;
2996 ret = -TARGET_EINVAL;
3001 ret = do_shmdt(ptr);
3005 /* IPC_* flag values are the same on all linux platforms */
3006 ret = get_errno(shmget(first, second, third));
3009 /* IPC_* and SHM_* command values are the same on all linux platforms */
3011 ret = do_shmctl(first, second, third);
3014 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3015 ret = -TARGET_ENOSYS;
3022 /* kernel structure types definitions */
3024 #define STRUCT(name, ...) STRUCT_ ## name,
3025 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3027 #include "syscall_types.h"
3030 #undef STRUCT_SPECIAL
3032 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3033 #define STRUCT_SPECIAL(name)
3034 #include "syscall_types.h"
3036 #undef STRUCT_SPECIAL
3038 typedef struct IOCTLEntry IOCTLEntry;
3040 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3041 int fd, abi_long cmd, abi_long arg);
3044 unsigned int target_cmd;
3045 unsigned int host_cmd;
3048 do_ioctl_fn *do_ioctl;
3049 const argtype arg_type[5];
3052 #define IOC_R 0x0001
3053 #define IOC_W 0x0002
3054 #define IOC_RW (IOC_R | IOC_W)
3056 #define MAX_STRUCT_SIZE 4096
3058 #ifdef CONFIG_FIEMAP
3059 /* So fiemap access checks don't overflow on 32 bit systems.
3060 * This is very slightly smaller than the limit imposed by
3061 * the underlying kernel.
3063 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3064 / sizeof(struct fiemap_extent))
3066 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3067 int fd, abi_long cmd, abi_long arg)
3069 /* The parameter for this ioctl is a struct fiemap followed
3070 * by an array of struct fiemap_extent whose size is set
3071 * in fiemap->fm_extent_count. The array is filled in by the
3074 int target_size_in, target_size_out;
3076 const argtype *arg_type = ie->arg_type;
3077 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3080 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3084 assert(arg_type[0] == TYPE_PTR);
3085 assert(ie->access == IOC_RW);
3087 target_size_in = thunk_type_size(arg_type, 0);
3088 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3090 return -TARGET_EFAULT;
3092 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3093 unlock_user(argptr, arg, 0);
3094 fm = (struct fiemap *)buf_temp;
3095 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3096 return -TARGET_EINVAL;
3099 outbufsz = sizeof (*fm) +
3100 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3102 if (outbufsz > MAX_STRUCT_SIZE) {
3103 /* We can't fit all the extents into the fixed size buffer.
3104 * Allocate one that is large enough and use it instead.
3106 fm = malloc(outbufsz);
3108 return -TARGET_ENOMEM;
3110 memcpy(fm, buf_temp, sizeof(struct fiemap));
3113 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3114 if (!is_error(ret)) {
3115 target_size_out = target_size_in;
3116 /* An extent_count of 0 means we were only counting the extents
3117 * so there are no structs to copy
3119 if (fm->fm_extent_count != 0) {
3120 target_size_out += fm->fm_mapped_extents * extent_size;
3122 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3124 ret = -TARGET_EFAULT;
3126 /* Convert the struct fiemap */
3127 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3128 if (fm->fm_extent_count != 0) {
3129 p = argptr + target_size_in;
3130 /* ...and then all the struct fiemap_extents */
3131 for (i = 0; i < fm->fm_mapped_extents; i++) {
3132 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3137 unlock_user(argptr, arg, target_size_out);
3147 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3148 int fd, abi_long cmd, abi_long arg)
3150 const argtype *arg_type = ie->arg_type;
3154 struct ifconf *host_ifconf;
3156 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3157 int target_ifreq_size;
3162 abi_long target_ifc_buf;
3166 assert(arg_type[0] == TYPE_PTR);
3167 assert(ie->access == IOC_RW);
3170 target_size = thunk_type_size(arg_type, 0);
3172 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3174 return -TARGET_EFAULT;
3175 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3176 unlock_user(argptr, arg, 0);
3178 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3179 target_ifc_len = host_ifconf->ifc_len;
3180 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3182 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3183 nb_ifreq = target_ifc_len / target_ifreq_size;
3184 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3186 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3187 if (outbufsz > MAX_STRUCT_SIZE) {
3188 /* We can't fit all the ifreq entries into the fixed size buffer.
3189 * Allocate one that is large enough and use it instead.
3191 host_ifconf = malloc(outbufsz);
3193 return -TARGET_ENOMEM;
3195 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3198 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3200 host_ifconf->ifc_len = host_ifc_len;
3201 host_ifconf->ifc_buf = host_ifc_buf;
3203 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3204 if (!is_error(ret)) {
3205 /* convert host ifc_len to target ifc_len */
3207 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3208 target_ifc_len = nb_ifreq * target_ifreq_size;
3209 host_ifconf->ifc_len = target_ifc_len;
3211 /* restore target ifc_buf */
3213 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3215 /* copy struct ifconf to target user */
3217 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3219 return -TARGET_EFAULT;
3220 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3221 unlock_user(argptr, arg, target_size);
3223 /* copy ifreq[] to target user */
3225 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3226 for (i = 0; i < nb_ifreq ; i++) {
3227 thunk_convert(argptr + i * target_ifreq_size,
3228 host_ifc_buf + i * sizeof(struct ifreq),
3229 ifreq_arg_type, THUNK_TARGET);
3231 unlock_user(argptr, target_ifc_buf, target_ifc_len);
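/* The ioctl table maps target ioctl numbers to host numbers along with
 * an access mode and an argument type description.  Entries that name a
 * do_ioctl handler (such as the fiemap and ifconf helpers above) do
 * their own argument conversion; everything else is converted
 * generically by the thunk code in do_ioctl() below. */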
3241 static IOCTLEntry ioctl_entries[] = {
3242 #define IOCTL(cmd, access, ...) \
3243 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3244 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3245 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3250 /* ??? Implement proper locking for ioctls. */
3251 /* do_ioctl() must return target values and target errnos. */
3252 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3254 const IOCTLEntry *ie;
3255 const argtype *arg_type;
3257 uint8_t buf_temp[MAX_STRUCT_SIZE];
3263 if (ie->target_cmd == 0) {
3264 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3265 return -TARGET_ENOSYS;
3267 if (ie->target_cmd == cmd)
3271 arg_type = ie->arg_type;
3273 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3276 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3279 switch(arg_type[0]) {
3282 ret = get_errno(ioctl(fd, ie->host_cmd));
3287 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3291 target_size = thunk_type_size(arg_type, 0);
3292 switch(ie->access) {
3294 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3295 if (!is_error(ret)) {
3296 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3298 return -TARGET_EFAULT;
3299 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3300 unlock_user(argptr, arg, target_size);
3304 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3306 return -TARGET_EFAULT;
3307 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3308 unlock_user(argptr, arg, 0);
3309 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3313 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3315 return -TARGET_EFAULT;
3316 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3317 unlock_user(argptr, arg, 0);
3318 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3319 if (!is_error(ret)) {
3320 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3322 return -TARGET_EFAULT;
3323 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3324 unlock_user(argptr, arg, target_size);
3330 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3331 (long)cmd, arg_type[0]);
3332 ret = -TARGET_ENOSYS;
3338 static const bitmask_transtbl iflag_tbl[] = {
3339 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3340 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3341 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3342 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3343 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3344 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3345 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3346 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3347 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3348 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3349 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3350 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3351 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3352 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3356 static const bitmask_transtbl oflag_tbl[] = {
3357 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3358 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3359 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3360 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3361 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3362 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3363 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3364 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3365 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3366 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3367 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3368 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3369 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3370 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3371 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3372 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3373 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3374 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3375 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3376 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3377 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3378 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3379 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3380 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3384 static const bitmask_transtbl cflag_tbl[] = {
3385 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3386 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3387 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3388 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3389 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3390 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3391 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3392 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3393 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3394 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3395 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3396 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3397 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3398 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3399 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3400 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3401 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3402 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3403 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3404 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3405 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3406 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3407 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3408 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3409 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3410 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3411 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3412 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3413 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3414 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3415 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3419 static const bitmask_transtbl lflag_tbl[] = {
3420 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3421 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3422 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3423 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3424 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3425 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3426 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3427 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3428 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3429 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3430 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3431 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3432 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3433 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3434 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
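/* The termios conversion routines translate each flag word through the
 * bitmask tables above and copy the control characters individually,
 * since the TARGET_V* indices need not match the host V* indices. */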
3438 static void target_to_host_termios (void *dst, const void *src)
3440 struct host_termios *host = dst;
3441 const struct target_termios *target = src;
3444 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3446 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3448 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3450 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3451 host->c_line = target->c_line;
3453 memset(host->c_cc, 0, sizeof(host->c_cc));
3454 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3455 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3456 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3457 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3458 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3459 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3460 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3461 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3462 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3463 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3464 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3465 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3466 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3467 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3468 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3469 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3470 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3473 static void host_to_target_termios (void *dst, const void *src)
3475 struct target_termios *target = dst;
3476 const struct host_termios *host = src;
3479 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3481 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3483 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3485 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3486 target->c_line = host->c_line;
3488 memset(target->c_cc, 0, sizeof(target->c_cc));
3489 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3490 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3491 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3492 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3493 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3494 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3495 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3496 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3497 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3498 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3499 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3500 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3501 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3502 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3503 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3504 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3505 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3508 static const StructEntry struct_termios_def = {
3509 .convert = { host_to_target_termios, target_to_host_termios },
3510 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3511 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3514 static bitmask_transtbl mmap_flags_tbl[] = {
3515 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3516 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3517 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3518 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3519 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3520 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3521 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3522 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3526 #if defined(TARGET_I386)
3528 /* NOTE: there is really only one LDT, shared by all threads */
3529 static uint8_t *ldt_table;
3531 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3538 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3539 if (size > bytecount)
3541 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3543 return -TARGET_EFAULT;
3544 /* ??? Should this be byteswapped? */
3545 memcpy(p, ldt_table, size);
3546 unlock_user(p, ptr, size);
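/* write_ldt(): decode the guest's modify_ldt descriptor, allocate the
 * emulated LDT on first use, and install the two encoded descriptor
 * words into it in guest byte order. */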
3550 /* XXX: add locking support */
3551 static abi_long write_ldt(CPUX86State *env,
3552 abi_ulong ptr, unsigned long bytecount, int oldmode)
3554 struct target_modify_ldt_ldt_s ldt_info;
3555 struct target_modify_ldt_ldt_s *target_ldt_info;
3556 int seg_32bit, contents, read_exec_only, limit_in_pages;
3557 int seg_not_present, useable, lm;
3558 uint32_t *lp, entry_1, entry_2;
3560 if (bytecount != sizeof(ldt_info))
3561 return -TARGET_EINVAL;
3562 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3563 return -TARGET_EFAULT;
3564 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3565 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3566 ldt_info.limit = tswap32(target_ldt_info->limit);
3567 ldt_info.flags = tswap32(target_ldt_info->flags);
3568 unlock_user_struct(target_ldt_info, ptr, 0);
3570 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3571 return -TARGET_EINVAL;
3572 seg_32bit = ldt_info.flags & 1;
3573 contents = (ldt_info.flags >> 1) & 3;
3574 read_exec_only = (ldt_info.flags >> 3) & 1;
3575 limit_in_pages = (ldt_info.flags >> 4) & 1;
3576 seg_not_present = (ldt_info.flags >> 5) & 1;
3577 useable = (ldt_info.flags >> 6) & 1;
3581 lm = (ldt_info.flags >> 7) & 1;
3583 if (contents == 3) {
3585 return -TARGET_EINVAL;
3586 if (seg_not_present == 0)
3587 return -TARGET_EINVAL;
3589 /* allocate the LDT */
3591 env->ldt.base = target_mmap(0,
3592 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3593 PROT_READ|PROT_WRITE,
3594 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3595 if (env->ldt.base == -1)
3596 return -TARGET_ENOMEM;
3597 memset(g2h(env->ldt.base), 0,
3598 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3599 env->ldt.limit = 0xffff;
3600 ldt_table = g2h(env->ldt.base);
3603 /* NOTE: same code as Linux kernel */
3604 /* Allow LDTs to be cleared by the user. */
3605 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3608 read_exec_only == 1 &&
3610 limit_in_pages == 0 &&
3611 seg_not_present == 1 &&
3619 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3620 (ldt_info.limit & 0x0ffff);
3621 entry_2 = (ldt_info.base_addr & 0xff000000) |
3622 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3623 (ldt_info.limit & 0xf0000) |
3624 ((read_exec_only ^ 1) << 9) |
3626 ((seg_not_present ^ 1) << 15) |
3628 (limit_in_pages << 23) |
3632 entry_2 |= (useable << 20);
3634 /* Install the new entry ... */
3636 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3637 lp[0] = tswap32(entry_1);
3638 lp[1] = tswap32(entry_2);
3642 /* specific and weird i386 syscalls */
3643 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3644 unsigned long bytecount)
3650 ret = read_ldt(ptr, bytecount);
3653 ret = write_ldt(env, ptr, bytecount, 1);
3656 ret = write_ldt(env, ptr, bytecount, 0);
3659 ret = -TARGET_ENOSYS;
3665 #if defined(TARGET_I386) && defined(TARGET_ABI32)
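/* do_set_thread_area()/do_get_thread_area() operate on the TLS slots of
 * the emulated GDT: an entry_number of -1 selects the first free slot
 * (and the chosen index is written back to the guest), and descriptors
 * are encoded/decoded with the same bit layout as write_ldt() above. */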
3666 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3668 uint64_t *gdt_table = g2h(env->gdt.base);
3669 struct target_modify_ldt_ldt_s ldt_info;
3670 struct target_modify_ldt_ldt_s *target_ldt_info;
3671 int seg_32bit, contents, read_exec_only, limit_in_pages;
3672 int seg_not_present, useable, lm;
3673 uint32_t *lp, entry_1, entry_2;
3676 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3677 if (!target_ldt_info)
3678 return -TARGET_EFAULT;
3679 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3680 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3681 ldt_info.limit = tswap32(target_ldt_info->limit);
3682 ldt_info.flags = tswap32(target_ldt_info->flags);
3683 if (ldt_info.entry_number == -1) {
3684 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3685 if (gdt_table[i] == 0) {
3686 ldt_info.entry_number = i;
3687 target_ldt_info->entry_number = tswap32(i);
3692 unlock_user_struct(target_ldt_info, ptr, 1);
3694 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3695 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3696 return -TARGET_EINVAL;
3697 seg_32bit = ldt_info.flags & 1;
3698 contents = (ldt_info.flags >> 1) & 3;
3699 read_exec_only = (ldt_info.flags >> 3) & 1;
3700 limit_in_pages = (ldt_info.flags >> 4) & 1;
3701 seg_not_present = (ldt_info.flags >> 5) & 1;
3702 useable = (ldt_info.flags >> 6) & 1;
3706 lm = (ldt_info.flags >> 7) & 1;
3709 if (contents == 3) {
3710 if (seg_not_present == 0)
3711 return -TARGET_EINVAL;
3714 /* NOTE: same code as Linux kernel */
3715 /* Allow LDTs to be cleared by the user. */
3716 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3717 if ((contents == 0 &&
3718 read_exec_only == 1 &&
3720 limit_in_pages == 0 &&
3721 seg_not_present == 1 &&
3729 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3730 (ldt_info.limit & 0x0ffff);
3731 entry_2 = (ldt_info.base_addr & 0xff000000) |
3732 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3733 (ldt_info.limit & 0xf0000) |
3734 ((read_exec_only ^ 1) << 9) |
3736 ((seg_not_present ^ 1) << 15) |
3738 (limit_in_pages << 23) |
3743 /* Install the new entry ... */
3745 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3746 lp[0] = tswap32(entry_1);
3747 lp[1] = tswap32(entry_2);
3751 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3753 struct target_modify_ldt_ldt_s *target_ldt_info;
3754 uint64_t *gdt_table = g2h(env->gdt.base);
3755 uint32_t base_addr, limit, flags;
3756 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3757 int seg_not_present, useable, lm;
3758 uint32_t *lp, entry_1, entry_2;
3760 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3761 if (!target_ldt_info)
3762 return -TARGET_EFAULT;
3763 idx = tswap32(target_ldt_info->entry_number);
3764 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3765 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3766 unlock_user_struct(target_ldt_info, ptr, 1);
3767 return -TARGET_EINVAL;
3769 lp = (uint32_t *)(gdt_table + idx);
3770 entry_1 = tswap32(lp[0]);
3771 entry_2 = tswap32(lp[1]);
3773 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3774 contents = (entry_2 >> 10) & 3;
3775 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3776 seg_32bit = (entry_2 >> 22) & 1;
3777 limit_in_pages = (entry_2 >> 23) & 1;
3778 useable = (entry_2 >> 20) & 1;
3782 lm = (entry_2 >> 21) & 1;
3784 flags = (seg_32bit << 0) | (contents << 1) |
3785 (read_exec_only << 3) | (limit_in_pages << 4) |
3786 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3787 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3788 base_addr = (entry_1 >> 16) |
3789 (entry_2 & 0xff000000) |
3790 ((entry_2 & 0xff) << 16);
3791 target_ldt_info->base_addr = tswapl(base_addr);
3792 target_ldt_info->limit = tswap32(limit);
3793 target_ldt_info->flags = tswap32(flags);
3794 unlock_user_struct(target_ldt_info, ptr, 1);
3797 #endif /* TARGET_I386 && TARGET_ABI32 */
3799 #ifndef TARGET_ABI32
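/* do_arch_prctl(): ARCH_SET_FS/ARCH_SET_GS set the segment base directly
 * in the CPU state; the GET variants write the current base back to the
 * guest address given in addr. */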
3800 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3807 case TARGET_ARCH_SET_GS:
3808 case TARGET_ARCH_SET_FS:
3809 if (code == TARGET_ARCH_SET_GS)
3813 cpu_x86_load_seg(env, idx, 0);
3814 env->segs[idx].base = addr;
3816 case TARGET_ARCH_GET_GS:
3817 case TARGET_ARCH_GET_FS:
3818 if (code == TARGET_ARCH_GET_GS)
3822 val = env->segs[idx].base;
3823 if (put_user(val, addr, abi_ulong))
3824 return -TARGET_EFAULT;
3827 ret = -TARGET_EINVAL;
3834 #endif /* defined(TARGET_I386) */
3836 #define NEW_STACK_SIZE 0x40000
3838 #if defined(CONFIG_USE_NPTL)
3840 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
3843 pthread_mutex_t mutex;
3844 pthread_cond_t cond;
3847 abi_ulong child_tidptr;
3848 abi_ulong parent_tidptr;
3852 static void *clone_func(void *arg)
3854 new_thread_info *info = arg;
3860 ts = (TaskState *)thread_env->opaque;
3861 info->tid = gettid();
3862 env->host_tid = info->tid;
3864 if (info->child_tidptr)
3865 put_user_u32(info->tid, info->child_tidptr);
3866 if (info->parent_tidptr)
3867 put_user_u32(info->tid, info->parent_tidptr);
3868 /* Enable signals. */
3869 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3870 /* Signal to the parent that we're ready. */
3871 pthread_mutex_lock(&info->mutex);
3872 pthread_cond_broadcast(&info->cond);
3873 pthread_mutex_unlock(&info->mutex);
3874 /* Wait until the parent has finished initializing the TLS state. */
3875 pthread_mutex_lock(&clone_lock);
3876 pthread_mutex_unlock(&clone_lock);
3883 static int clone_func(void *arg)
3885 CPUState *env = arg;
3892 /* do_fork() must return host values and target errnos (unlike most
3893 do_*() functions). */
3894 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3895 abi_ulong parent_tidptr, target_ulong newtls,
3896 abi_ulong child_tidptr)
3901 #if defined(CONFIG_USE_NPTL)
3902 unsigned int nptl_flags;
3908 /* Emulate vfork() with fork() */
3909 if (flags & CLONE_VFORK)
3910 flags &= ~(CLONE_VFORK | CLONE_VM);
3912 if (flags & CLONE_VM) {
3913 TaskState *parent_ts = (TaskState *)env->opaque;
3914 #if defined(CONFIG_USE_NPTL)
3915 new_thread_info info;
3916 pthread_attr_t attr;
3918 ts = qemu_mallocz(sizeof(TaskState));
3919 init_task_state(ts);
3920 /* we create a new CPU instance. */
3921 new_env = cpu_copy(env);
3922 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
3925 /* Init regs that differ from the parent. */
3926 cpu_clone_regs(new_env, newsp);
3927 new_env->opaque = ts;
3928 ts->bprm = parent_ts->bprm;
3929 ts->info = parent_ts->info;
3930 #if defined(CONFIG_USE_NPTL)
3932 flags &= ~CLONE_NPTL_FLAGS2;
3934 if (nptl_flags & CLONE_CHILD_CLEARTID) {
3935 ts->child_tidptr = child_tidptr;
3938 if (nptl_flags & CLONE_SETTLS)
3939 cpu_set_tls (new_env, newtls);
3941 /* Grab a mutex so that thread setup appears atomic. */
3942 pthread_mutex_lock(&clone_lock);
3944 memset(&info, 0, sizeof(info));
3945 pthread_mutex_init(&info.mutex, NULL);
3946 pthread_mutex_lock(&info.mutex);
3947 pthread_cond_init(&info.cond, NULL);
3949 if (nptl_flags & CLONE_CHILD_SETTID)
3950 info.child_tidptr = child_tidptr;
3951 if (nptl_flags & CLONE_PARENT_SETTID)
3952 info.parent_tidptr = parent_tidptr;
3954 ret = pthread_attr_init(&attr);
3955 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
3956 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
3957 /* It is not safe to deliver signals until the child has finished
3958 initializing, so temporarily block all signals. */
3959 sigfillset(&sigmask);
3960 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
3962 ret = pthread_create(&info.thread, &attr, clone_func, &info);
3963 /* TODO: Free new CPU state if thread creation failed. */
3965 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
3966 pthread_attr_destroy(&attr);
3968 /* Wait for the child to initialize. */
3969 pthread_cond_wait(&info.cond, &info.mutex);
3971 if (flags & CLONE_PARENT_SETTID)
3972 put_user_u32(ret, parent_tidptr);
3976 pthread_mutex_unlock(&info.mutex);
3977 pthread_cond_destroy(&info.cond);
3978 pthread_mutex_destroy(&info.mutex);
3979 pthread_mutex_unlock(&clone_lock);
3981 if (flags & CLONE_NPTL_FLAGS2)
3983 /* This is probably going to die very quickly, but do it anyway. */
3984 new_stack = qemu_mallocz (NEW_STACK_SIZE);
3986 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
3988 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3992 /* if no CLONE_VM, we consider it is a fork */
3993 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
3998 /* Child Process. */
3999 cpu_clone_regs(env, newsp);
4001 #if defined(CONFIG_USE_NPTL)
4002 /* There is a race condition here. The parent process could
4003 theoretically read the TID in the child process before the child
4004 tid is set. This would require using either ptrace
4005 (not implemented) or having *_tidptr point at a shared memory
4006 mapping. We can't repeat the spinlock hack used above because
4007 the child process gets its own copy of the lock. */
4008 if (flags & CLONE_CHILD_SETTID)
4009 put_user_u32(gettid(), child_tidptr);
4010 if (flags & CLONE_PARENT_SETTID)
4011 put_user_u32(gettid(), parent_tidptr);
4012 ts = (TaskState *)env->opaque;
4013 if (flags & CLONE_SETTLS)
4014 cpu_set_tls (env, newtls);
4015 if (flags & CLONE_CHILD_CLEARTID)
4016 ts->child_tidptr = child_tidptr;
4025 /* warning: doesn't handle Linux-specific flags... */
4026 static int target_to_host_fcntl_cmd(int cmd)
4029 case TARGET_F_DUPFD:
4030 case TARGET_F_GETFD:
4031 case TARGET_F_SETFD:
4032 case TARGET_F_GETFL:
4033 case TARGET_F_SETFL:
4035 case TARGET_F_GETLK:
4037 case TARGET_F_SETLK:
4039 case TARGET_F_SETLKW:
4041 case TARGET_F_GETOWN:
4043 case TARGET_F_SETOWN:
4045 case TARGET_F_GETSIG:
4047 case TARGET_F_SETSIG:
4049 #if TARGET_ABI_BITS == 32
4050 case TARGET_F_GETLK64:
4052 case TARGET_F_SETLK64:
4054 case TARGET_F_SETLKW64:
4057 case TARGET_F_SETLEASE:
4059 case TARGET_F_GETLEASE:
4061 #ifdef F_DUPFD_CLOEXEC
4062 case TARGET_F_DUPFD_CLOEXEC:
4063 return F_DUPFD_CLOEXEC;
4065 case TARGET_F_NOTIFY:
4068 return -TARGET_EINVAL;
4070 return -TARGET_EINVAL;
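/* do_fcntl(): map the command with target_to_host_fcntl_cmd(), convert
 * struct flock/flock64 arguments in both directions for the locking
 * commands, translate the open-flag bitmask for F_GETFL/F_SETFL, and
 * pass the remaining commands through unchanged. */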
4073 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4076 struct target_flock *target_fl;
4077 struct flock64 fl64;
4078 struct target_flock64 *target_fl64;
4080 int host_cmd = target_to_host_fcntl_cmd(cmd);
4082 if (host_cmd == -TARGET_EINVAL)
4086 case TARGET_F_GETLK:
4087 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4088 return -TARGET_EFAULT;
4089 fl.l_type = tswap16(target_fl->l_type);
4090 fl.l_whence = tswap16(target_fl->l_whence);
4091 fl.l_start = tswapl(target_fl->l_start);
4092 fl.l_len = tswapl(target_fl->l_len);
4093 fl.l_pid = tswap32(target_fl->l_pid);
4094 unlock_user_struct(target_fl, arg, 0);
4095 ret = get_errno(fcntl(fd, host_cmd, &fl));
4097 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4098 return -TARGET_EFAULT;
4099 target_fl->l_type = tswap16(fl.l_type);
4100 target_fl->l_whence = tswap16(fl.l_whence);
4101 target_fl->l_start = tswapl(fl.l_start);
4102 target_fl->l_len = tswapl(fl.l_len);
4103 target_fl->l_pid = tswap32(fl.l_pid);
4104 unlock_user_struct(target_fl, arg, 1);
4108 case TARGET_F_SETLK:
4109 case TARGET_F_SETLKW:
4110 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4111 return -TARGET_EFAULT;
4112 fl.l_type = tswap16(target_fl->l_type);
4113 fl.l_whence = tswap16(target_fl->l_whence);
4114 fl.l_start = tswapl(target_fl->l_start);
4115 fl.l_len = tswapl(target_fl->l_len);
4116 fl.l_pid = tswap32(target_fl->l_pid);
4117 unlock_user_struct(target_fl, arg, 0);
4118 ret = get_errno(fcntl(fd, host_cmd, &fl));
4121 case TARGET_F_GETLK64:
4122 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4123 return -TARGET_EFAULT;
4124 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4125 fl64.l_whence = tswap16(target_fl64->l_whence);
4126 fl64.l_start = tswapl(target_fl64->l_start);
4127 fl64.l_len = tswapl(target_fl64->l_len);
4128 fl64.l_pid = tswap32(target_fl64->l_pid);
4129 unlock_user_struct(target_fl64, arg, 0);
4130 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4132 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4133 return -TARGET_EFAULT;
4134 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
4135 target_fl64->l_whence = tswap16(fl64.l_whence);
4136 target_fl64->l_start = tswapl(fl64.l_start);
4137 target_fl64->l_len = tswapl(fl64.l_len);
4138 target_fl64->l_pid = tswap32(fl64.l_pid);
4139 unlock_user_struct(target_fl64, arg, 1);
4142 case TARGET_F_SETLK64:
4143 case TARGET_F_SETLKW64:
4144 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4145 return -TARGET_EFAULT;
4146 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4147 fl64.l_whence = tswap16(target_fl64->l_whence);
4148 fl64.l_start = tswapl(target_fl64->l_start);
4149 fl64.l_len = tswapl(target_fl64->l_len);
4150 fl64.l_pid = tswap32(target_fl64->l_pid);
4151 unlock_user_struct(target_fl64, arg, 0);
4152 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4155 case TARGET_F_GETFL:
4156 ret = get_errno(fcntl(fd, host_cmd, arg));
4158 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4162 case TARGET_F_SETFL:
4163 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4166 case TARGET_F_SETOWN:
4167 case TARGET_F_GETOWN:
4168 case TARGET_F_SETSIG:
4169 case TARGET_F_GETSIG:
4170 case TARGET_F_SETLEASE:
4171 case TARGET_F_GETLEASE:
4172 ret = get_errno(fcntl(fd, host_cmd, arg));
4176 ret = get_errno(fcntl(fd, cmd, arg));
4184 static inline int high2lowuid(int uid)
4192 static inline int high2lowgid(int gid)
4200 static inline int low2highuid(int uid)
4202 if ((int16_t)uid == -1)
4208 static inline int low2highgid(int gid)
4210 if ((int16_t)gid == -1)
4215 static inline int tswapid(int id)
4219 #else /* !USE_UID16 */
4220 static inline int high2lowuid(int uid)
4224 static inline int high2lowgid(int gid)
4228 static inline int low2highuid(int uid)
4232 static inline int low2highgid(int gid)
4236 static inline int tswapid(int id)
4240 #endif /* USE_UID16 */
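/* syscall_init(): register the structure descriptions from
 * syscall_types.h with the thunk code, patch the size field of target
 * ioctl numbers that were declared with an all-ones size, and build the
 * target-to-host errno translation table. */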
4242 void syscall_init(void)
4245 const argtype *arg_type;
4249 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4250 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4251 #include "syscall_types.h"
4253 #undef STRUCT_SPECIAL
4255 /* We patch the ioctl size if necessary. We rely on the fact that
4256 no ioctl has all the bits set to '1' in the size field. */
4258 while (ie->target_cmd != 0) {
4259 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4260 TARGET_IOC_SIZEMASK) {
4261 arg_type = ie->arg_type;
4262 if (arg_type[0] != TYPE_PTR) {
4263 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4268 size = thunk_type_size(arg_type, 0);
4269 ie->target_cmd = (ie->target_cmd &
4270 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4271 (size << TARGET_IOC_SIZESHIFT);
4274 /* Build target_to_host_errno_table[] table from
4275 * host_to_target_errno_table[]. */
4276 for (i=0; i < ERRNO_TABLE_SIZE; i++)
4277 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4279 /* automatic consistency check if same arch */
4280 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4281 (defined(__x86_64__) && defined(TARGET_X86_64))
4282 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4283 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4284 ie->name, ie->target_cmd, ie->host_cmd);
4291 #if TARGET_ABI_BITS == 32
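/* target_offset64(): combine the two registers a 32-bit guest uses to pass
 * a 64-bit file offset, in the guest's word order; on 64-bit ABIs the
 * offset already fits in a single argument. */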
4292 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4294 #ifdef TARGET_WORDS_BIGENDIAN
4295 return ((uint64_t)word0 << 32) | word1;
4297 return ((uint64_t)word1 << 32) | word0;
4300 #else /* TARGET_ABI_BITS == 32 */
4301 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4305 #endif /* TARGET_ABI_BITS != 32 */
4307 #ifdef TARGET_NR_truncate64
4308 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4314 if (((CPUARMState *)cpu_env)->eabi)
4320 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4324 #ifdef TARGET_NR_ftruncate64
4325 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4331 if (((CPUARMState *)cpu_env)->eabi)
4337 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
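/* The timespec converters byte-swap tv_sec/tv_nsec between the guest's
 * struct target_timespec and the host's struct timespec. */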
4341 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4342 abi_ulong target_addr)
4344 struct target_timespec *target_ts;
4346 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4347 return -TARGET_EFAULT;
4348 host_ts->tv_sec = tswapl(target_ts->tv_sec);
4349 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
4350 unlock_user_struct(target_ts, target_addr, 0);
4354 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4355 struct timespec *host_ts)
4357 struct target_timespec *target_ts;
4359 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4360 return -TARGET_EFAULT;
4361 target_ts->tv_sec = tswapl(host_ts->tv_sec);
4362 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
4363 unlock_user_struct(target_ts, target_addr, 1);
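/* host_to_target_stat64(): copy a host struct stat into the guest's
 * stat64 layout field by field.  ARM EABI guests use the separate
 * target_eabi_stat64 layout and are handled first; other targets use
 * target_stat64 (or target_stat on 64-bit ABIs). */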
4367 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4368 static inline abi_long host_to_target_stat64(void *cpu_env,
4369 abi_ulong target_addr,
4370 struct stat *host_st)
4373 if (((CPUARMState *)cpu_env)->eabi) {
4374 struct target_eabi_stat64 *target_st;
4376 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4377 return -TARGET_EFAULT;
4378 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4379 __put_user(host_st->st_dev, &target_st->st_dev);
4380 __put_user(host_st->st_ino, &target_st->st_ino);
4381 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4382 __put_user(host_st->st_ino, &target_st->__st_ino);
4384 __put_user(host_st->st_mode, &target_st->st_mode);
4385 __put_user(host_st->st_nlink, &target_st->st_nlink);
4386 __put_user(host_st->st_uid, &target_st->st_uid);
4387 __put_user(host_st->st_gid, &target_st->st_gid);
4388 __put_user(host_st->st_rdev, &target_st->st_rdev);
4389 __put_user(host_st->st_size, &target_st->st_size);
4390 __put_user(host_st->st_blksize, &target_st->st_blksize);
4391 __put_user(host_st->st_blocks, &target_st->st_blocks);
4392 __put_user(host_st->st_atime, &target_st->target_st_atime);
4393 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4394 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4395 unlock_user_struct(target_st, target_addr, 1);
4399 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4400 struct target_stat *target_st;
4402 struct target_stat64 *target_st;
4405 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4406 return -TARGET_EFAULT;
4407 memset(target_st, 0, sizeof(*target_st));
4408 __put_user(host_st->st_dev, &target_st->st_dev);
4409 __put_user(host_st->st_ino, &target_st->st_ino);
4410 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4411 __put_user(host_st->st_ino, &target_st->__st_ino);
4413 __put_user(host_st->st_mode, &target_st->st_mode);
4414 __put_user(host_st->st_nlink, &target_st->st_nlink);
4415 __put_user(host_st->st_uid, &target_st->st_uid);
4416 __put_user(host_st->st_gid, &target_st->st_gid);
4417 __put_user(host_st->st_rdev, &target_st->st_rdev);
4418 /* XXX: better use of kernel struct */
4419 __put_user(host_st->st_size, &target_st->st_size);
4420 __put_user(host_st->st_blksize, &target_st->st_blksize);
4421 __put_user(host_st->st_blocks, &target_st->st_blocks);
4422 __put_user(host_st->st_atime, &target_st->target_st_atime);
4423 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4424 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4425 unlock_user_struct(target_st, target_addr, 1);
4432 #if defined(CONFIG_USE_NPTL)
4433 /* ??? Using host futex calls even when target atomic operations
4434 are not really atomic probably breaks things. However, implementing
4435 futexes locally would make futexes shared between multiple processes
4436 tricky, and in any case they're probably useless because guest atomic
4437 operations won't work either. */
4438 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4439 target_ulong uaddr2, int val3)
4441 struct timespec ts, *pts;
4444 /* ??? We assume FUTEX_* constants are the same on both host
4446 #ifdef FUTEX_CMD_MASK
4447 base_op = op & FUTEX_CMD_MASK;
4455 target_to_host_timespec(pts, timeout);
4459 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4462 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4464 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4466 case FUTEX_CMP_REQUEUE:
4468 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4469 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4470 But the prototype takes a `struct timespec *'; insert casts
4471 to satisfy the compiler. We do not need to tswap TIMEOUT
4472 since it's not compared to guest memory. */
4473 pts = (struct timespec *)(uintptr_t) timeout;
4474 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4476 (base_op == FUTEX_CMP_REQUEUE
4480 return -TARGET_ENOSYS;
4485 /* Map host to target signal numbers for the wait family of syscalls.
4486 Assume all other status bits are the same. */
4487 static int host_to_target_waitstatus(int status)
4489 if (WIFSIGNALED(status)) {
4490 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4492 if (WIFSTOPPED(status)) {
4493 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4499 int get_osversion(void)
4501 static int osversion;
4502 struct new_utsname buf;
4507 if (qemu_uname_release && *qemu_uname_release) {
4508 s = qemu_uname_release;
4510 if (sys_uname(&buf))
4515 for (i = 0; i < 3; i++) {
4517 while (*s >= '0' && *s <= '9') {
4522 tmp = (tmp << 8) + n;
4530 /* do_syscall() should always have a single exit point at the end so
4531 that actions, such as logging of syscall results, can be performed.
4532 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4533 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
4534 abi_long arg2, abi_long arg3, abi_long arg4,
4535 abi_long arg5, abi_long arg6)
4543 gemu_log("syscall %d", num);
4546 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
4549 case TARGET_NR_exit:
4550 #ifdef CONFIG_USE_NPTL
4551 /* In old applications this may be used to implement _exit(2).
4552 However in threaded applications it is used for thread termination,
4553 and _exit_group is used for application termination.
4554 Do thread termination if we have more than one thread. */
4555 /* FIXME: This probably breaks if a signal arrives. We should probably
4556 be disabling signals. */
4557 if (first_cpu->next_cpu) {
4565 while (p && p != (CPUState *)cpu_env) {
4566 lastp = &p->next_cpu;
4569 /* If we didn't find the CPU for this thread then something is horribly wrong. */
4573 /* Remove the CPU from the list. */
4574 *lastp = p->next_cpu;
4576 ts = ((CPUState *)cpu_env)->opaque;
4577 if (ts->child_tidptr) {
4578 put_user_u32(0, ts->child_tidptr);
4579 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
4591 gdb_exit(cpu_env, arg1);
4593 ret = 0; /* avoid warning */
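/* The simple fd syscalls below all follow the same guest-memory pattern
 * (roughly): lock_user(VERIFY_WRITE/VERIFY_READ, addr, len, copy) validates
 * the guest buffer and returns a host pointer, copying guest data in when
 * copy != 0; the host call then runs on that pointer, and unlock_user(p,
 * addr, len) copies up to len bytes of output back to the guest (when a
 * copy is needed) before releasing it.  read() therefore unlocks with the
 * number of bytes actually read, while write() unlocks with 0 since nothing
 * has to be copied back. */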
4595 case TARGET_NR_read:
4599 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
4601 ret = get_errno(read(arg1, p, arg3));
4602 unlock_user(p, arg2, ret);
4605 case TARGET_NR_write:
4606 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
4608 ret = get_errno(write(arg1, p, arg3));
4609 unlock_user(p, arg2, 0);
4611 case TARGET_NR_open:
4612 if (!(p = lock_user_string(arg1)))
4614 ret = get_errno(open(path(p),
4615 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4617 unlock_user(p, arg1, 0);
4619 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4620 case TARGET_NR_openat:
4621 if (!(p = lock_user_string(arg2)))
4623 ret = get_errno(sys_openat(arg1,
4625 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4627 unlock_user(p, arg2, 0);
4630 case TARGET_NR_close:
4631 ret = get_errno(close(arg1));
4636 case TARGET_NR_fork:
4637 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4639 #ifdef TARGET_NR_waitpid
4640 case TARGET_NR_waitpid:
4643 ret = get_errno(waitpid(arg1, &status, arg3));
4644 if (!is_error(ret) && arg2
4645 && put_user_s32(host_to_target_waitstatus(status), arg2))
4650 #ifdef TARGET_NR_waitid
4651 case TARGET_NR_waitid:
4655 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4656 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4657 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4659 host_to_target_siginfo(p, &info);
4660 unlock_user(p, arg3, sizeof(target_siginfo_t));
4665 #ifdef TARGET_NR_creat /* not on alpha */
4666 case TARGET_NR_creat:
4667 if (!(p = lock_user_string(arg1)))
4669 ret = get_errno(creat(p, arg2));
4670 unlock_user(p, arg1, 0);
4673 case TARGET_NR_link:
4676 p = lock_user_string(arg1);
4677 p2 = lock_user_string(arg2);
4679 ret = -TARGET_EFAULT;
4681 ret = get_errno(link(p, p2));
4682 unlock_user(p2, arg2, 0);
4683 unlock_user(p, arg1, 0);
4686 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4687 case TARGET_NR_linkat:
4692 p = lock_user_string(arg2);
4693 p2 = lock_user_string(arg4);
4695 ret = -TARGET_EFAULT;
4697 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4698 unlock_user(p, arg2, 0);
4699 unlock_user(p2, arg4, 0);
4703 case TARGET_NR_unlink:
4704 if (!(p = lock_user_string(arg1)))
4706 ret = get_errno(unlink(p));
4707 unlock_user(p, arg1, 0);
4709 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4710 case TARGET_NR_unlinkat:
4711 if (!(p = lock_user_string(arg2)))
4713 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4714 unlock_user(p, arg2, 0);
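/* execve() marshalling, roughly: the guest argv and envp are arrays of
 * abi_ulong pointers terminated by a NULL entry.  They are walked once to
 * count the entries, host char * arrays are allocated with alloca(), each
 * guest string is then locked to obtain a host pointer, and everything is
 * unlocked again after the host execve() returns (it only returns on
 * failure). */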
4717 case TARGET_NR_execve:
4719 char **argp, **envp;
4722 abi_ulong guest_argp;
4723 abi_ulong guest_envp;
4729 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4730 if (get_user_ual(addr, gp))
4738 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4739 if (get_user_ual(addr, gp))
4746 argp = alloca((argc + 1) * sizeof(void *));
4747 envp = alloca((envc + 1) * sizeof(void *));
4749 for (gp = guest_argp, q = argp; gp;
4750 gp += sizeof(abi_ulong), q++) {
4751 if (get_user_ual(addr, gp))
4755 if (!(*q = lock_user_string(addr)))
4760 for (gp = guest_envp, q = envp; gp;
4761 gp += sizeof(abi_ulong), q++) {
4762 if (get_user_ual(addr, gp))
4766 if (!(*q = lock_user_string(addr)))
4771 if (!(p = lock_user_string(arg1)))
4773 ret = get_errno(execve(p, argp, envp));
4774 unlock_user(p, arg1, 0);
4779 ret = -TARGET_EFAULT;
4782 for (gp = guest_argp, q = argp; *q;
4783 gp += sizeof(abi_ulong), q++) {
4784 if (get_user_ual(addr, gp)
4787 unlock_user(*q, addr, 0);
4789 for (gp = guest_envp, q = envp; *q;
4790 gp += sizeof(abi_ulong), q++) {
4791 if (get_user_ual(addr, gp)
4794 unlock_user(*q, addr, 0);
4798 case TARGET_NR_chdir:
4799 if (!(p = lock_user_string(arg1)))
4801 ret = get_errno(chdir(p));
4802 unlock_user(p, arg1, 0);
4804 #ifdef TARGET_NR_time
4805 case TARGET_NR_time:
4808 ret = get_errno(time(&host_time));
4811 && put_user_sal(host_time, arg1))
4816 case TARGET_NR_mknod:
4817 if (!(p = lock_user_string(arg1)))
4819 ret = get_errno(mknod(p, arg2, arg3));
4820 unlock_user(p, arg1, 0);
4822 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4823 case TARGET_NR_mknodat:
4824 if (!(p = lock_user_string(arg2)))
4826 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
4827 unlock_user(p, arg2, 0);
4830 case TARGET_NR_chmod:
4831 if (!(p = lock_user_string(arg1)))
4833 ret = get_errno(chmod(p, arg2));
4834 unlock_user(p, arg1, 0);
4836 #ifdef TARGET_NR_break
4837 case TARGET_NR_break:
4840 #ifdef TARGET_NR_oldstat
4841 case TARGET_NR_oldstat:
4844 case TARGET_NR_lseek:
4845 ret = get_errno(lseek(arg1, arg2, arg3));
4847 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
4848 /* Alpha specific */
4849 case TARGET_NR_getxpid:
4850 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
4851 ret = get_errno(getpid());
4854 #ifdef TARGET_NR_getpid
4855 case TARGET_NR_getpid:
4856 ret = get_errno(getpid());
4859 case TARGET_NR_mount:
4861 /* need to look at the data field */
4863 p = lock_user_string(arg1);
4864 p2 = lock_user_string(arg2);
4865 p3 = lock_user_string(arg3);
4866 if (!p || !p2 || !p3)
4867 ret = -TARGET_EFAULT;
4869 /* FIXME - arg5 should be locked, but it isn't clear how to
4870 * do that since it's not guaranteed to be a NULL-terminated string. */
4874 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
4876 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
4878 unlock_user(p, arg1, 0);
4879 unlock_user(p2, arg2, 0);
4880 unlock_user(p3, arg3, 0);
4883 #ifdef TARGET_NR_umount
4884 case TARGET_NR_umount:
4885 if (!(p = lock_user_string(arg1)))
4887 ret = get_errno(umount(p));
4888 unlock_user(p, arg1, 0);
4891 #ifdef TARGET_NR_stime /* not on alpha */
4892 case TARGET_NR_stime:
4895 if (get_user_sal(host_time, arg1))
4897 ret = get_errno(stime(&host_time));
4901 case TARGET_NR_ptrace:
4903 #ifdef TARGET_NR_alarm /* not on alpha */
4904 case TARGET_NR_alarm:
4908 #ifdef TARGET_NR_oldfstat
4909 case TARGET_NR_oldfstat:
4912 #ifdef TARGET_NR_pause /* not on alpha */
4913 case TARGET_NR_pause:
4914 ret = get_errno(pause());
4917 #ifdef TARGET_NR_utime
4918 case TARGET_NR_utime:
4920 struct utimbuf tbuf, *host_tbuf;
4921 struct target_utimbuf *target_tbuf;
4923 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
4925 tbuf.actime = tswapl(target_tbuf->actime);
4926 tbuf.modtime = tswapl(target_tbuf->modtime);
4927 unlock_user_struct(target_tbuf, arg2, 0);
4932 if (!(p = lock_user_string(arg1)))
4934 ret = get_errno(utime(p, host_tbuf));
4935 unlock_user(p, arg1, 0);
4939 case TARGET_NR_utimes:
4941 struct timeval *tvp, tv[2];
4943 if (copy_from_user_timeval(&tv[0], arg2)
4944 || copy_from_user_timeval(&tv[1],
4945 arg2 + sizeof(struct target_timeval)))
4951 if (!(p = lock_user_string(arg1)))
4953 ret = get_errno(utimes(p, tvp));
4954 unlock_user(p, arg1, 0);
4957 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4958 case TARGET_NR_futimesat:
4960 struct timeval *tvp, tv[2];
4962 if (copy_from_user_timeval(&tv[0], arg3)
4963 || copy_from_user_timeval(&tv[1],
4964 arg3 + sizeof(struct target_timeval)))
4970 if (!(p = lock_user_string(arg2)))
4972 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
4973 unlock_user(p, arg2, 0);
4977 #ifdef TARGET_NR_stty
4978 case TARGET_NR_stty:
4981 #ifdef TARGET_NR_gtty
4982 case TARGET_NR_gtty:
4985 case TARGET_NR_access:
4986 if (!(p = lock_user_string(arg1)))
4988 ret = get_errno(access(path(p), arg2));
4989 unlock_user(p, arg1, 0);
4991 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4992 case TARGET_NR_faccessat:
4993 if (!(p = lock_user_string(arg2)))
4995 ret = get_errno(sys_faccessat(arg1, p, arg3));
4996 unlock_user(p, arg2, 0);
4999 #ifdef TARGET_NR_nice /* not on alpha */
5000 case TARGET_NR_nice:
5001 ret = get_errno(nice(arg1));
5004 #ifdef TARGET_NR_ftime
5005 case TARGET_NR_ftime:
5008 case TARGET_NR_sync:
5012 case TARGET_NR_kill:
5013 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5015 case TARGET_NR_rename:
5018 p = lock_user_string(arg1);
5019 p2 = lock_user_string(arg2);
5021 ret = -TARGET_EFAULT;
5023 ret = get_errno(rename(p, p2));
5024 unlock_user(p2, arg2, 0);
5025 unlock_user(p, arg1, 0);
5028 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5029 case TARGET_NR_renameat:
5032 p = lock_user_string(arg2);
5033 p2 = lock_user_string(arg4);
5035 ret = -TARGET_EFAULT;
5037 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5038 unlock_user(p2, arg4, 0);
5039 unlock_user(p, arg2, 0);
5043 case TARGET_NR_mkdir:
5044 if (!(p = lock_user_string(arg1)))
5046 ret = get_errno(mkdir(p, arg2));
5047 unlock_user(p, arg1, 0);
5049 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5050 case TARGET_NR_mkdirat:
5051 if (!(p = lock_user_string(arg2)))
5053 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5054 unlock_user(p, arg2, 0);
5057 case TARGET_NR_rmdir:
5058 if (!(p = lock_user_string(arg1)))
5060 ret = get_errno(rmdir(p));
5061 unlock_user(p, arg1, 0);
5064 ret = get_errno(dup(arg1));
5066 case TARGET_NR_pipe:
5067 ret = do_pipe(cpu_env, arg1, 0, 0);
5069 #ifdef TARGET_NR_pipe2
5070 case TARGET_NR_pipe2:
5071 ret = do_pipe(cpu_env, arg1, arg2, 1);
5074 case TARGET_NR_times:
5076 struct target_tms *tmsp;
5078 ret = get_errno(times(&tms));
5080 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5083 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
5084 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
5085 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
5086 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
5089 ret = host_to_target_clock_t(ret);
5092 #ifdef TARGET_NR_prof
5093 case TARGET_NR_prof:
5096 #ifdef TARGET_NR_signal
5097 case TARGET_NR_signal:
5100 case TARGET_NR_acct:
5102 ret = get_errno(acct(NULL));
5104 if (!(p = lock_user_string(arg1)))
5106 ret = get_errno(acct(path(p)));
5107 unlock_user(p, arg1, 0);
5110 #ifdef TARGET_NR_umount2 /* not on alpha */
5111 case TARGET_NR_umount2:
5112 if (!(p = lock_user_string(arg1)))
5114 ret = get_errno(umount2(p, arg2));
5115 unlock_user(p, arg1, 0);
5118 #ifdef TARGET_NR_lock
5119 case TARGET_NR_lock:
5122 case TARGET_NR_ioctl:
5123 ret = do_ioctl(arg1, arg2, arg3);
5125 case TARGET_NR_fcntl:
5126 ret = do_fcntl(arg1, arg2, arg3);
5128 #ifdef TARGET_NR_mpx
5132 case TARGET_NR_setpgid:
5133 ret = get_errno(setpgid(arg1, arg2));
5135 #ifdef TARGET_NR_ulimit
5136 case TARGET_NR_ulimit:
5139 #ifdef TARGET_NR_oldolduname
5140 case TARGET_NR_oldolduname:
5143 case TARGET_NR_umask:
5144 ret = get_errno(umask(arg1));
5146 case TARGET_NR_chroot:
5147 if (!(p = lock_user_string(arg1)))
5149 ret = get_errno(chroot(p));
5150 unlock_user(p, arg1, 0);
5152 case TARGET_NR_ustat:
5154 case TARGET_NR_dup2:
5155 ret = get_errno(dup2(arg1, arg2));
5157 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5158 case TARGET_NR_dup3:
5159 ret = get_errno(dup3(arg1, arg2, arg3));
5162 #ifdef TARGET_NR_getppid /* not on alpha */
5163 case TARGET_NR_getppid:
5164 ret = get_errno(getppid());
5167 case TARGET_NR_getpgrp:
5168 ret = get_errno(getpgrp());
5170 case TARGET_NR_setsid:
5171 ret = get_errno(setsid());
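/* sigaction()/rt_sigaction() conversion: the old-style sigaction layout
 * differs per target (Alpha and MIPS get their own branches below), so each
 * branch copies the target struct into a common struct target_sigaction,
 * calls do_sigaction(), and converts the previous action back on success.
 * In the old ABI sa_mask is a single word, which is why only sig[0] of the
 * returned mask is written back. */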
5173 #ifdef TARGET_NR_sigaction
5174 case TARGET_NR_sigaction:
5176 #if defined(TARGET_ALPHA)
5177 struct target_sigaction act, oact, *pact = 0;
5178 struct target_old_sigaction *old_act;
5180 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5182 act._sa_handler = old_act->_sa_handler;
5183 target_siginitset(&act.sa_mask, old_act->sa_mask);
5184 act.sa_flags = old_act->sa_flags;
5185 act.sa_restorer = 0;
5186 unlock_user_struct(old_act, arg2, 0);
5189 ret = get_errno(do_sigaction(arg1, pact, &oact));
5190 if (!is_error(ret) && arg3) {
5191 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5193 old_act->_sa_handler = oact._sa_handler;
5194 old_act->sa_mask = oact.sa_mask.sig[0];
5195 old_act->sa_flags = oact.sa_flags;
5196 unlock_user_struct(old_act, arg3, 1);
5198 #elif defined(TARGET_MIPS)
5199 struct target_sigaction act, oact, *pact, *old_act;
5202 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5204 act._sa_handler = old_act->_sa_handler;
5205 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5206 act.sa_flags = old_act->sa_flags;
5207 unlock_user_struct(old_act, arg2, 0);
5213 ret = get_errno(do_sigaction(arg1, pact, &oact));
5215 if (!is_error(ret) && arg3) {
5216 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5218 old_act->_sa_handler = oact._sa_handler;
5219 old_act->sa_flags = oact.sa_flags;
5220 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5221 old_act->sa_mask.sig[1] = 0;
5222 old_act->sa_mask.sig[2] = 0;
5223 old_act->sa_mask.sig[3] = 0;
5224 unlock_user_struct(old_act, arg3, 1);
5227 struct target_old_sigaction *old_act;
5228 struct target_sigaction act, oact, *pact;
5230 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5232 act._sa_handler = old_act->_sa_handler;
5233 target_siginitset(&act.sa_mask, old_act->sa_mask);
5234 act.sa_flags = old_act->sa_flags;
5235 act.sa_restorer = old_act->sa_restorer;
5236 unlock_user_struct(old_act, arg2, 0);
5241 ret = get_errno(do_sigaction(arg1, pact, &oact));
5242 if (!is_error(ret) && arg3) {
5243 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5245 old_act->_sa_handler = oact._sa_handler;
5246 old_act->sa_mask = oact.sa_mask.sig[0];
5247 old_act->sa_flags = oact.sa_flags;
5248 old_act->sa_restorer = oact.sa_restorer;
5249 unlock_user_struct(old_act, arg3, 1);
5255 case TARGET_NR_rt_sigaction:
5257 #if defined(TARGET_ALPHA)
5258 struct target_sigaction act, oact, *pact = 0;
5259 struct target_rt_sigaction *rt_act;
5260 /* ??? arg4 == sizeof(sigset_t). */
5262 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5264 act._sa_handler = rt_act->_sa_handler;
5265 act.sa_mask = rt_act->sa_mask;
5266 act.sa_flags = rt_act->sa_flags;
5267 act.sa_restorer = arg5;
5268 unlock_user_struct(rt_act, arg2, 0);
5271 ret = get_errno(do_sigaction(arg1, pact, &oact));
5272 if (!is_error(ret) && arg3) {
5273 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5275 rt_act->_sa_handler = oact._sa_handler;
5276 rt_act->sa_mask = oact.sa_mask;
5277 rt_act->sa_flags = oact.sa_flags;
5278 unlock_user_struct(rt_act, arg3, 1);
5281 struct target_sigaction *act;
5282 struct target_sigaction *oact;
5285 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5290 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5291 ret = -TARGET_EFAULT;
5292 goto rt_sigaction_fail;
5296 ret = get_errno(do_sigaction(arg1, act, oact));
5299 unlock_user_struct(act, arg2, 0);
5301 unlock_user_struct(oact, arg3, 1);
5305 #ifdef TARGET_NR_sgetmask /* not on alpha */
5306 case TARGET_NR_sgetmask:
5309 abi_ulong target_set;
5310 sigprocmask(0, NULL, &cur_set);
5311 host_to_target_old_sigset(&target_set, &cur_set);
5316 #ifdef TARGET_NR_ssetmask /* not on alpha */
5317 case TARGET_NR_ssetmask:
5319 sigset_t set, oset, cur_set;
5320 abi_ulong target_set = arg1;
5321 sigprocmask(0, NULL, &cur_set);
5322 target_to_host_old_sigset(&set, &target_set);
5323 sigorset(&set, &set, &cur_set);
5324 sigprocmask(SIG_SETMASK, &set, &oset);
5325 host_to_target_old_sigset(&target_set, &oset);
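/* sigprocmask(): on Alpha the mask is passed by value in a register rather
 * than through memory, so it gets a dedicated branch below in which the
 * previous mask is converted back for return and the error indication is
 * forced clear; other targets read and write the (old-style, single word)
 * sigset through guest memory with lock_user()/unlock_user(). */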
5330 #ifdef TARGET_NR_sigprocmask
5331 case TARGET_NR_sigprocmask:
5333 #if defined(TARGET_ALPHA)
5334 sigset_t set, oldset;
5339 case TARGET_SIG_BLOCK:
5342 case TARGET_SIG_UNBLOCK:
5345 case TARGET_SIG_SETMASK:
5349 ret = -TARGET_EINVAL;
5353 target_to_host_old_sigset(&set, &mask);
5355 ret = get_errno(sigprocmask(how, &set, &oldset));
5357 if (!is_error(ret)) {
5358 host_to_target_old_sigset(&mask, &oldset);
5360 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5363 sigset_t set, oldset, *set_ptr;
5368 case TARGET_SIG_BLOCK:
5371 case TARGET_SIG_UNBLOCK:
5374 case TARGET_SIG_SETMASK:
5378 ret = -TARGET_EINVAL;
5381 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5383 target_to_host_old_sigset(&set, p);
5384 unlock_user(p, arg2, 0);
5390 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5391 if (!is_error(ret) && arg3) {
5392 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5394 host_to_target_old_sigset(p, &oldset);
5395 unlock_user(p, arg3, sizeof(target_sigset_t));
5401 case TARGET_NR_rt_sigprocmask:
5404 sigset_t set, oldset, *set_ptr;
5408 case TARGET_SIG_BLOCK:
5411 case TARGET_SIG_UNBLOCK:
5414 case TARGET_SIG_SETMASK:
5418 ret = -TARGET_EINVAL;
5421 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5423 target_to_host_sigset(&set, p);
5424 unlock_user(p, arg2, 0);
5430 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5431 if (!is_error(ret) && arg3) {
5432 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5434 host_to_target_sigset(p, &oldset);
5435 unlock_user(p, arg3, sizeof(target_sigset_t));
5439 #ifdef TARGET_NR_sigpending
5440 case TARGET_NR_sigpending:
5443 ret = get_errno(sigpending(&set));
5444 if (!is_error(ret)) {
5445 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5447 host_to_target_old_sigset(p, &set);
5448 unlock_user(p, arg1, sizeof(target_sigset_t));
5453 case TARGET_NR_rt_sigpending:
5456 ret = get_errno(sigpending(&set));
5457 if (!is_error(ret)) {
5458 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5460 host_to_target_sigset(p, &set);
5461 unlock_user(p, arg1, sizeof(target_sigset_t));
5465 #ifdef TARGET_NR_sigsuspend
5466 case TARGET_NR_sigsuspend:
5469 #if defined(TARGET_ALPHA)
5470 abi_ulong mask = arg1;
5471 target_to_host_old_sigset(&set, &mask);
5473 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5475 target_to_host_old_sigset(&set, p);
5476 unlock_user(p, arg1, 0);
5478 ret = get_errno(sigsuspend(&set));
5482 case TARGET_NR_rt_sigsuspend:
5485 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5487 target_to_host_sigset(&set, p);
5488 unlock_user(p, arg1, 0);
5489 ret = get_errno(sigsuspend(&set));
5492 case TARGET_NR_rt_sigtimedwait:
5495 struct timespec uts, *puts;
5498 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5500 target_to_host_sigset(&set, p);
5501 unlock_user(p, arg1, 0);
5504 target_to_host_timespec(puts, arg3);
5508 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
5509 if (!is_error(ret) && arg2) {
5510 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
5512 host_to_target_siginfo(p, &uinfo);
5513 unlock_user(p, arg2, sizeof(target_siginfo_t));
5517 case TARGET_NR_rt_sigqueueinfo:
5520 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
5522 target_to_host_siginfo(&uinfo, p);
5523 unlock_user(p, arg3, 0);
5524 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
5527 #ifdef TARGET_NR_sigreturn
5528 case TARGET_NR_sigreturn:
5529 /* NOTE: ret is eax, so no transcoding must be done */
5530 ret = do_sigreturn(cpu_env);
5533 case TARGET_NR_rt_sigreturn:
5534 /* NOTE: ret is eax, so no transcoding must be done */
5535 ret = do_rt_sigreturn(cpu_env);
5537 case TARGET_NR_sethostname:
5538 if (!(p = lock_user_string(arg1)))
5540 ret = get_errno(sethostname(p, arg2));
5541 unlock_user(p, arg1, 0);
5543 case TARGET_NR_setrlimit:
5545 int resource = arg1;
5546 struct target_rlimit *target_rlim;
5548 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
5550 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
5551 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
5552 unlock_user_struct(target_rlim, arg2, 0);
5553 ret = get_errno(setrlimit(resource, &rlim));
5556 case TARGET_NR_getrlimit:
5558 int resource = arg1;
5559 struct target_rlimit *target_rlim;
5562 ret = get_errno(getrlimit(resource, &rlim));
5563 if (!is_error(ret)) {
5564 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5566 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
5567 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
5568 unlock_user_struct(target_rlim, arg2, 1);
5572 case TARGET_NR_getrusage:
5574 struct rusage rusage;
5575 ret = get_errno(getrusage(arg1, &rusage));
5576 if (!is_error(ret)) {
5577 host_to_target_rusage(arg2, &rusage);
5581 case TARGET_NR_gettimeofday:
5584 ret = get_errno(gettimeofday(&tv, NULL));
5585 if (!is_error(ret)) {
5586 if (copy_to_user_timeval(arg1, &tv))
5591 case TARGET_NR_settimeofday:
5594 if (copy_from_user_timeval(&tv, arg1))
5596 ret = get_errno(settimeofday(&tv, NULL));
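/* Old-style select(): on the targets that still use it, select() takes a
 * single pointer to a struct target_sel_arg_struct holding { n, inp, outp,
 * exp, tvp } as abi_longs, so the block below unpacks the five fields with
 * tswapl() and forwards them to the common do_select() helper. */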
5599 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
5600 case TARGET_NR_select:
5602 struct target_sel_arg_struct *sel;
5603 abi_ulong inp, outp, exp, tvp;
5606 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
5608 nsel = tswapl(sel->n);
5609 inp = tswapl(sel->inp);
5610 outp = tswapl(sel->outp);
5611 exp = tswapl(sel->exp);
5612 tvp = tswapl(sel->tvp);
5613 unlock_user_struct(sel, arg1, 0);
5614 ret = do_select(nsel, inp, outp, exp, tvp);
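/* pselect6(): the kernel packs the signal mask argument as a pair
 * { const sigset_t *ss; size_t ss_len } pointed to by the sixth argument,
 * which is why the C library cannot be used directly.  A rough sketch of
 * what happens below: both words are read from guest memory, the guest
 * sigset they point to is converted to a host sigset, and the host is told
 * the native mask size, _NSIG / 8. */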
5618 #ifdef TARGET_NR_pselect6
5619 case TARGET_NR_pselect6:
5621 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
5622 fd_set rfds, wfds, efds;
5623 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
5624 struct timespec ts, *ts_ptr;
5627 * The 6th arg is actually two args smashed together,
5628 * so we cannot use the C library.
5636 abi_ulong arg_sigset, arg_sigsize, *arg7;
5637 target_sigset_t *target_sigset;
5645 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
5649 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
5653 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
5659 * This takes a timespec, and not a timeval, so we cannot
5660 * use the do_select() helper ...
5663 if (target_to_host_timespec(&ts, ts_addr)) {
5671 /* Extract the two packed args for the sigset */
5674 sig.size = _NSIG / 8;
5676 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
5680 arg_sigset = tswapl(arg7[0]);
5681 arg_sigsize = tswapl(arg7[1]);
5682 unlock_user(arg7, arg6, 0);
5686 target_sigset = lock_user(VERIFY_READ, arg_sigset,
5687 sizeof(*target_sigset), 1);
5688 if (!target_sigset) {
5691 target_to_host_sigset(&set, target_sigset);
5692 unlock_user(target_sigset, arg_sigset, 0);
5700 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
5703 if (!is_error(ret)) {
5704 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
5706 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
5708 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
5711 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
5717 case TARGET_NR_symlink:
5720 p = lock_user_string(arg1);
5721 p2 = lock_user_string(arg2);
5723 ret = -TARGET_EFAULT;
5725 ret = get_errno(symlink(p, p2));
5726 unlock_user(p2, arg2, 0);
5727 unlock_user(p, arg1, 0);
5730 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5731 case TARGET_NR_symlinkat:
5734 p = lock_user_string(arg1);
5735 p2 = lock_user_string(arg3);
5737 ret = -TARGET_EFAULT;
5739 ret = get_errno(sys_symlinkat(p, arg2, p2));
5740 unlock_user(p2, arg3, 0);
5741 unlock_user(p, arg1, 0);
5745 #ifdef TARGET_NR_oldlstat
5746 case TARGET_NR_oldlstat:
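/* readlink("/proc/self/exe") is special-cased: letting it through would
 * report the QEMU binary itself, so the guest is instead given the resolved
 * path of the program being emulated (exec_path). */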
5749 case TARGET_NR_readlink:
5752 p = lock_user_string(arg1);
5753 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
5755 ret = -TARGET_EFAULT;
5757 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
5758 char real[PATH_MAX];
5759 temp = realpath(exec_path, real);
5760 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
5761 snprintf((char *)p2, arg3, "%s", real);
5764 ret = get_errno(readlink(path(p), p2, arg3));
5766 unlock_user(p2, arg2, ret);
5767 unlock_user(p, arg1, 0);
5770 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5771 case TARGET_NR_readlinkat:
5774 p = lock_user_string(arg2);
5775 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
5777 ret = -TARGET_EFAULT;
5779 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
5780 unlock_user(p2, arg3, ret);
5781 unlock_user(p, arg2, 0);
5785 #ifdef TARGET_NR_uselib
5786 case TARGET_NR_uselib:
5789 #ifdef TARGET_NR_swapon
5790 case TARGET_NR_swapon:
5791 if (!(p = lock_user_string(arg1)))
5793 ret = get_errno(swapon(p, arg2));
5794 unlock_user(p, arg1, 0);
5797 case TARGET_NR_reboot:
5799 #ifdef TARGET_NR_readdir
5800 case TARGET_NR_readdir:
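/* mmap()/mmap2(): the targets listed in the #if below use the old mmap
 * calling convention in which a single pointer to an array of six arguments
 * in guest memory is passed; that array is read with lock_user() and
 * unpacked into v1..v6.  The remaining targets pass the six arguments in
 * registers.  mmap2() differs only in that the file offset is given in
 * page-sized units, hence the arg6 << MMAP_SHIFT (4096-byte units assumed
 * by the MMAP_SHIFT definition). */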
5803 #ifdef TARGET_NR_mmap
5804 case TARGET_NR_mmap:
5805 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
5806 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
5807 || defined(TARGET_S390X)
5810 abi_ulong v1, v2, v3, v4, v5, v6;
5811 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
5819 unlock_user(v, arg1, 0);
5820 ret = get_errno(target_mmap(v1, v2, v3,
5821 target_to_host_bitmask(v4, mmap_flags_tbl),
5825 ret = get_errno(target_mmap(arg1, arg2, arg3,
5826 target_to_host_bitmask(arg4, mmap_flags_tbl),
5832 #ifdef TARGET_NR_mmap2
5833 case TARGET_NR_mmap2:
5835 #define MMAP_SHIFT 12
5837 ret = get_errno(target_mmap(arg1, arg2, arg3,
5838 target_to_host_bitmask(arg4, mmap_flags_tbl),
5840 arg6 << MMAP_SHIFT));
5843 case TARGET_NR_munmap:
5844 ret = get_errno(target_munmap(arg1, arg2));
5846 case TARGET_NR_mprotect:
5848 TaskState *ts = ((CPUState *)cpu_env)->opaque;
5849 /* Special hack to detect libc making the stack executable. */
5850 if ((arg3 & PROT_GROWSDOWN)
5851 && arg1 >= ts->info->stack_limit
5852 && arg1 <= ts->info->start_stack) {
5853 arg3 &= ~PROT_GROWSDOWN;
5854 arg2 = arg2 + arg1 - ts->info->stack_limit;
5855 arg1 = ts->info->stack_limit;
5858 ret = get_errno(target_mprotect(arg1, arg2, arg3));
5860 #ifdef TARGET_NR_mremap
5861 case TARGET_NR_mremap:
5862 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
5865 /* ??? msync/mlock/munlock are broken for softmmu. */
5866 #ifdef TARGET_NR_msync
5867 case TARGET_NR_msync:
5868 ret = get_errno(msync(g2h(arg1), arg2, arg3));
5871 #ifdef TARGET_NR_mlock
5872 case TARGET_NR_mlock:
5873 ret = get_errno(mlock(g2h(arg1), arg2));
5876 #ifdef TARGET_NR_munlock
5877 case TARGET_NR_munlock:
5878 ret = get_errno(munlock(g2h(arg1), arg2));
5881 #ifdef TARGET_NR_mlockall
5882 case TARGET_NR_mlockall:
5883 ret = get_errno(mlockall(arg1));
5886 #ifdef TARGET_NR_munlockall
5887 case TARGET_NR_munlockall:
5888 ret = get_errno(munlockall());
5891 case TARGET_NR_truncate:
5892 if (!(p = lock_user_string(arg1)))
5894 ret = get_errno(truncate(p, arg2));
5895 unlock_user(p, arg1, 0);
5897 case TARGET_NR_ftruncate:
5898 ret = get_errno(ftruncate(arg1, arg2));
5900 case TARGET_NR_fchmod:
5901 ret = get_errno(fchmod(arg1, arg2));
5903 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5904 case TARGET_NR_fchmodat:
5905 if (!(p = lock_user_string(arg2)))
5907 ret = get_errno(sys_fchmodat(arg1, p, arg3));
5908 unlock_user(p, arg2, 0);
5911 case TARGET_NR_getpriority:
5912 /* libc does special remapping of the return value of
5913 * sys_getpriority() so it's just easiest to call
5914 * sys_getpriority() directly rather than through libc. */
5915 ret = get_errno(sys_getpriority(arg1, arg2));
5917 case TARGET_NR_setpriority:
5918 ret = get_errno(setpriority(arg1, arg2, arg3));
5920 #ifdef TARGET_NR_profil
5921 case TARGET_NR_profil:
5924 case TARGET_NR_statfs:
5925 if (!(p = lock_user_string(arg1)))
5927 ret = get_errno(statfs(path(p), &stfs));
5928 unlock_user(p, arg1, 0);
5930 if (!is_error(ret)) {
5931 struct target_statfs *target_stfs;
5933 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
5935 __put_user(stfs.f_type, &target_stfs->f_type);
5936 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5937 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5938 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5939 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5940 __put_user(stfs.f_files, &target_stfs->f_files);
5941 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5942 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5943 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5944 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5945 unlock_user_struct(target_stfs, arg2, 1);
5948 case TARGET_NR_fstatfs:
5949 ret = get_errno(fstatfs(arg1, &stfs));
5950 goto convert_statfs;
5951 #ifdef TARGET_NR_statfs64
5952 case TARGET_NR_statfs64:
5953 if (!(p = lock_user_string(arg1)))
5955 ret = get_errno(statfs(path(p), &stfs));
5956 unlock_user(p, arg1, 0);
5958 if (!is_error(ret)) {
5959 struct target_statfs64 *target_stfs;
5961 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
5963 __put_user(stfs.f_type, &target_stfs->f_type);
5964 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5965 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5966 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5967 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5968 __put_user(stfs.f_files, &target_stfs->f_files);
5969 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5970 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5971 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5972 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5973 unlock_user_struct(target_stfs, arg3, 1);
5976 case TARGET_NR_fstatfs64:
5977 ret = get_errno(fstatfs(arg1, &stfs));
5978 goto convert_statfs64;
5980 #ifdef TARGET_NR_ioperm
5981 case TARGET_NR_ioperm:
5984 #ifdef TARGET_NR_socketcall
5985 case TARGET_NR_socketcall:
5986 ret = do_socketcall(arg1, arg2);
5989 #ifdef TARGET_NR_accept
5990 case TARGET_NR_accept:
5991 ret = do_accept(arg1, arg2, arg3);
5994 #ifdef TARGET_NR_bind
5995 case TARGET_NR_bind:
5996 ret = do_bind(arg1, arg2, arg3);
5999 #ifdef TARGET_NR_connect
6000 case TARGET_NR_connect:
6001 ret = do_connect(arg1, arg2, arg3);
6004 #ifdef TARGET_NR_getpeername
6005 case TARGET_NR_getpeername:
6006 ret = do_getpeername(arg1, arg2, arg3);
6009 #ifdef TARGET_NR_getsockname
6010 case TARGET_NR_getsockname:
6011 ret = do_getsockname(arg1, arg2, arg3);
6014 #ifdef TARGET_NR_getsockopt
6015 case TARGET_NR_getsockopt:
6016 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6019 #ifdef TARGET_NR_listen
6020 case TARGET_NR_listen:
6021 ret = get_errno(listen(arg1, arg2));
6024 #ifdef TARGET_NR_recv
6025 case TARGET_NR_recv:
6026 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6029 #ifdef TARGET_NR_recvfrom
6030 case TARGET_NR_recvfrom:
6031 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6034 #ifdef TARGET_NR_recvmsg
6035 case TARGET_NR_recvmsg:
6036 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6039 #ifdef TARGET_NR_send
6040 case TARGET_NR_send:
6041 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6044 #ifdef TARGET_NR_sendmsg
6045 case TARGET_NR_sendmsg:
6046 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6049 #ifdef TARGET_NR_sendto
6050 case TARGET_NR_sendto:
6051 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6054 #ifdef TARGET_NR_shutdown
6055 case TARGET_NR_shutdown:
6056 ret = get_errno(shutdown(arg1, arg2));
6059 #ifdef TARGET_NR_socket
6060 case TARGET_NR_socket:
6061 ret = do_socket(arg1, arg2, arg3);
6064 #ifdef TARGET_NR_socketpair
6065 case TARGET_NR_socketpair:
6066 ret = do_socketpair(arg1, arg2, arg3, arg4);
6069 #ifdef TARGET_NR_setsockopt
6070 case TARGET_NR_setsockopt:
6071 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6075 case TARGET_NR_syslog:
6076 if (!(p = lock_user_string(arg2)))
6078 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6079 unlock_user(p, arg2, 0);
6082 case TARGET_NR_setitimer:
6084 struct itimerval value, ovalue, *pvalue;
6088 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6089 || copy_from_user_timeval(&pvalue->it_value,
6090 arg2 + sizeof(struct target_timeval)))
6095 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6096 if (!is_error(ret) && arg3) {
6097 if (copy_to_user_timeval(arg3,
6098 &ovalue.it_interval)
6099 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6105 case TARGET_NR_getitimer:
6107 struct itimerval value;
6109 ret = get_errno(getitimer(arg1, &value));
6110 if (!is_error(ret) && arg2) {
6111 if (copy_to_user_timeval(arg2,
6113 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6119 case TARGET_NR_stat:
6120 if (!(p = lock_user_string(arg1)))
6122 ret = get_errno(stat(path(p), &st));
6123 unlock_user(p, arg1, 0);
6125 case TARGET_NR_lstat:
6126 if (!(p = lock_user_string(arg1)))
6128 ret = get_errno(lstat(path(p), &st));
6129 unlock_user(p, arg1, 0);
6131 case TARGET_NR_fstat:
6133 ret = get_errno(fstat(arg1, &st));
6135 if (!is_error(ret)) {
6136 struct target_stat *target_st;
6138 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6140 memset(target_st, 0, sizeof(*target_st));
6141 __put_user(st.st_dev, &target_st->st_dev);
6142 __put_user(st.st_ino, &target_st->st_ino);
6143 __put_user(st.st_mode, &target_st->st_mode);
6144 __put_user(st.st_uid, &target_st->st_uid);
6145 __put_user(st.st_gid, &target_st->st_gid);
6146 __put_user(st.st_nlink, &target_st->st_nlink);
6147 __put_user(st.st_rdev, &target_st->st_rdev);
6148 __put_user(st.st_size, &target_st->st_size);
6149 __put_user(st.st_blksize, &target_st->st_blksize);
6150 __put_user(st.st_blocks, &target_st->st_blocks);
6151 __put_user(st.st_atime, &target_st->target_st_atime);
6152 __put_user(st.st_mtime, &target_st->target_st_mtime);
6153 __put_user(st.st_ctime, &target_st->target_st_ctime);
6154 unlock_user_struct(target_st, arg2, 1);
6158 #ifdef TARGET_NR_olduname
6159 case TARGET_NR_olduname:
6162 #ifdef TARGET_NR_iopl
6163 case TARGET_NR_iopl:
6166 case TARGET_NR_vhangup:
6167 ret = get_errno(vhangup());
6169 #ifdef TARGET_NR_idle
6170 case TARGET_NR_idle:
6173 #ifdef TARGET_NR_syscall
6174 case TARGET_NR_syscall:
6175 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, arg6, 0);
6178 case TARGET_NR_wait4:
6181 abi_long status_ptr = arg2;
6182 struct rusage rusage, *rusage_ptr;
6183 abi_ulong target_rusage = arg4;
6185 rusage_ptr = &rusage;
6188 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6189 if (!is_error(ret)) {
6191 status = host_to_target_waitstatus(status);
6192 if (put_user_s32(status, status_ptr))
6196 host_to_target_rusage(target_rusage, &rusage);
6200 #ifdef TARGET_NR_swapoff
6201 case TARGET_NR_swapoff:
6202 if (!(p = lock_user_string(arg1)))
6204 ret = get_errno(swapoff(p));
6205 unlock_user(p, arg1, 0);
6208 case TARGET_NR_sysinfo:
6210 struct target_sysinfo *target_value;
6211 struct sysinfo value;
6212 ret = get_errno(sysinfo(&value));
6213 if (!is_error(ret) && arg1)
6215 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6217 __put_user(value.uptime, &target_value->uptime);
6218 __put_user(value.loads[0], &target_value->loads[0]);
6219 __put_user(value.loads[1], &target_value->loads[1]);
6220 __put_user(value.loads[2], &target_value->loads[2]);
6221 __put_user(value.totalram, &target_value->totalram);
6222 __put_user(value.freeram, &target_value->freeram);
6223 __put_user(value.sharedram, &target_value->sharedram);
6224 __put_user(value.bufferram, &target_value->bufferram);
6225 __put_user(value.totalswap, &target_value->totalswap);
6226 __put_user(value.freeswap, &target_value->freeswap);
6227 __put_user(value.procs, &target_value->procs);
6228 __put_user(value.totalhigh, &target_value->totalhigh);
6229 __put_user(value.freehigh, &target_value->freehigh);
6230 __put_user(value.mem_unit, &target_value->mem_unit);
6231 unlock_user_struct(target_value, arg1, 1);
6235 #ifdef TARGET_NR_ipc
6237 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6240 #ifdef TARGET_NR_semget
6241 case TARGET_NR_semget:
6242 ret = get_errno(semget(arg1, arg2, arg3));
6245 #ifdef TARGET_NR_semop
6246 case TARGET_NR_semop:
6247 ret = get_errno(do_semop(arg1, arg2, arg3));
6250 #ifdef TARGET_NR_semctl
6251 case TARGET_NR_semctl:
6252 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6255 #ifdef TARGET_NR_msgctl
6256 case TARGET_NR_msgctl:
6257 ret = do_msgctl(arg1, arg2, arg3);
6260 #ifdef TARGET_NR_msgget
6261 case TARGET_NR_msgget:
6262 ret = get_errno(msgget(arg1, arg2));
6265 #ifdef TARGET_NR_msgrcv
6266 case TARGET_NR_msgrcv:
6267 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6270 #ifdef TARGET_NR_msgsnd
6271 case TARGET_NR_msgsnd:
6272 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6275 #ifdef TARGET_NR_shmget
6276 case TARGET_NR_shmget:
6277 ret = get_errno(shmget(arg1, arg2, arg3));
6280 #ifdef TARGET_NR_shmctl
6281 case TARGET_NR_shmctl:
6282 ret = do_shmctl(arg1, arg2, arg3);
6285 #ifdef TARGET_NR_shmat
6286 case TARGET_NR_shmat:
6287 ret = do_shmat(arg1, arg2, arg3);
6290 #ifdef TARGET_NR_shmdt
6291 case TARGET_NR_shmdt:
6292 ret = do_shmdt(arg1);
6295 case TARGET_NR_fsync:
6296 ret = get_errno(fsync(arg1));
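/* clone(): the kernel's clone() argument order is architecture specific, so
 * the register arguments are permuted per target before calling the common
 * do_fork() helper.  (The parameter order assumed here is the one used by
 * the default branch: flags, new stack pointer, and then the tid/tls
 * pointers.) */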
6298 case TARGET_NR_clone:
6299 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6300 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6301 #elif defined(TARGET_CRIS)
6302 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6303 #elif defined(TARGET_S390X)
6304 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6306 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6309 #ifdef __NR_exit_group
6310 /* new thread calls */
6311 case TARGET_NR_exit_group:
6315 gdb_exit(cpu_env, arg1);
6316 ret = get_errno(exit_group(arg1));
6319 case TARGET_NR_setdomainname:
6320 if (!(p = lock_user_string(arg1)))
6322 ret = get_errno(setdomainname(p, arg2));
6323 unlock_user(p, arg1, 0);
6325 case TARGET_NR_uname:
6326 /* no need to transcode because we use the linux syscall */
6328 struct new_utsname * buf;
6330 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6332 ret = get_errno(sys_uname(buf));
6333 if (!is_error(ret)) {
6334 /* Overwrite the native machine name with whatever is being emulated. */
6336 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6337 /* Allow the user to override the reported release. */
6338 if (qemu_uname_release && *qemu_uname_release)
6339 strcpy (buf->release, qemu_uname_release);
6341 unlock_user_struct(buf, arg1, 1);
6345 case TARGET_NR_modify_ldt:
6346 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6348 #if !defined(TARGET_X86_64)
6349 case TARGET_NR_vm86old:
6351 case TARGET_NR_vm86:
6352 ret = do_vm86(cpu_env, arg1, arg2);
6356 case TARGET_NR_adjtimex:
6358 #ifdef TARGET_NR_create_module
6359 case TARGET_NR_create_module:
6361 case TARGET_NR_init_module:
6362 case TARGET_NR_delete_module:
6363 #ifdef TARGET_NR_get_kernel_syms
6364 case TARGET_NR_get_kernel_syms:
6367 case TARGET_NR_quotactl:
6369 case TARGET_NR_getpgid:
6370 ret = get_errno(getpgid(arg1));
6372 case TARGET_NR_fchdir:
6373 ret = get_errno(fchdir(arg1));
6375 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6376 case TARGET_NR_bdflush:
6379 #ifdef TARGET_NR_sysfs
6380 case TARGET_NR_sysfs:
6383 case TARGET_NR_personality:
6384 ret = get_errno(personality(arg1));
6386 #ifdef TARGET_NR_afs_syscall
6387 case TARGET_NR_afs_syscall:
6390 #ifdef TARGET_NR__llseek /* Not on alpha */
6391 case TARGET_NR__llseek:
6394 #if !defined(__NR_llseek)
6395 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6397 ret = get_errno(res);
6402 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6404 if ((ret == 0) && put_user_s64(res, arg4)) {
6410 case TARGET_NR_getdents:
6411 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
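/* When the target ABI is 32-bit but the host is 64-bit, the host's
 * struct linux_dirent carries 8-byte d_ino/d_off fields while the target
 * expects 4-byte ones, so each record shrinks by
 * 2 * (sizeof(long) - sizeof(abi_long)) bytes.  The records are therefore
 * read into a temporary host buffer and rewritten one by one into the guest
 * buffer with an adjusted d_reclen and byte-swapped fields. */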
6413 struct target_dirent *target_dirp;
6414 struct linux_dirent *dirp;
6415 abi_long count = arg3;
6417 dirp = malloc(count);
6419 ret = -TARGET_ENOMEM;
6423 ret = get_errno(sys_getdents(arg1, dirp, count));
6424 if (!is_error(ret)) {
6425 struct linux_dirent *de;
6426 struct target_dirent *tde;
6428 int reclen, treclen;
6429 int count1, tnamelen;
6433 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6437 reclen = de->d_reclen;
6438 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
6439 tde->d_reclen = tswap16(treclen);
6440 tde->d_ino = tswapl(de->d_ino);
6441 tde->d_off = tswapl(de->d_off);
6442 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
6445 /* XXX: may not be correct */
6446 pstrcpy(tde->d_name, tnamelen, de->d_name);
6447 de = (struct linux_dirent *)((char *)de + reclen);
6449 tde = (struct target_dirent *)((char *)tde + treclen);
6453 unlock_user(target_dirp, arg2, ret);
6459 struct linux_dirent *dirp;
6460 abi_long count = arg3;
6462 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6464 ret = get_errno(sys_getdents(arg1, dirp, count));
6465 if (!is_error(ret)) {
6466 struct linux_dirent *de;
6471 reclen = de->d_reclen;
6474 de->d_reclen = tswap16(reclen);
6475 tswapls(&de->d_ino);
6476 tswapls(&de->d_off);
6477 de = (struct linux_dirent *)((char *)de + reclen);
6481 unlock_user(dirp, arg2, ret);
6485 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6486 case TARGET_NR_getdents64:
6488 struct linux_dirent64 *dirp;
6489 abi_long count = arg3;
6490 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6492 ret = get_errno(sys_getdents64(arg1, dirp, count));
6493 if (!is_error(ret)) {
6494 struct linux_dirent64 *de;
6499 reclen = de->d_reclen;
6502 de->d_reclen = tswap16(reclen);
6503 tswap64s((uint64_t *)&de->d_ino);
6504 tswap64s((uint64_t *)&de->d_off);
6505 de = (struct linux_dirent64 *)((char *)de + reclen);
6509 unlock_user(dirp, arg2, ret);
6512 #endif /* TARGET_NR_getdents64 */
6513 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6515 case TARGET_NR_select:
6517 case TARGET_NR__newselect:
6519 ret = do_select(arg1, arg2, arg3, arg4, arg5);
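/* poll()/ppoll(): the guest struct pollfd array is converted element by
 * element (fd with tswap32(), events/revents with tswap16()).  For ppoll()
 * the timeout is a timespec and the optional sigset is converted to a host
 * sigset and handed to sys_ppoll() together with the native mask size,
 * _NSIG / 8. */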
6522 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6523 # ifdef TARGET_NR_poll
6524 case TARGET_NR_poll:
6526 # ifdef TARGET_NR_ppoll
6527 case TARGET_NR_ppoll:
6530 struct target_pollfd *target_pfd;
6531 unsigned int nfds = arg2;
6536 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
6540 pfd = alloca(sizeof(struct pollfd) * nfds);
6541 for(i = 0; i < nfds; i++) {
6542 pfd[i].fd = tswap32(target_pfd[i].fd);
6543 pfd[i].events = tswap16(target_pfd[i].events);
6546 # ifdef TARGET_NR_ppoll
6547 if (num == TARGET_NR_ppoll) {
6548 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
6549 target_sigset_t *target_set;
6550 sigset_t _set, *set = &_set;
6553 if (target_to_host_timespec(timeout_ts, arg3)) {
6554 unlock_user(target_pfd, arg1, 0);
6562 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
6564 unlock_user(target_pfd, arg1, 0);
6567 target_to_host_sigset(set, target_set);
6572 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
6574 if (!is_error(ret) && arg3) {
6575 host_to_target_timespec(arg3, timeout_ts);
6578 unlock_user(target_set, arg4, 0);
6582 ret = get_errno(poll(pfd, nfds, timeout));
6584 if (!is_error(ret)) {
6585 for(i = 0; i < nfds; i++) {
6586 target_pfd[i].revents = tswap16(pfd[i].revents);
6589 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
6593 case TARGET_NR_flock:
6594 /* NOTE: the flock constant seems to be the same for every Linux platform */
6596 ret = get_errno(flock(arg1, arg2));
6598 case TARGET_NR_readv:
6603 vec = alloca(count * sizeof(struct iovec));
6604 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
6606 ret = get_errno(readv(arg1, vec, count));
6607 unlock_iovec(vec, arg2, count, 1);
6610 case TARGET_NR_writev:
6615 vec = alloca(count * sizeof(struct iovec));
6616 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
6618 ret = get_errno(writev(arg1, vec, count));
6619 unlock_iovec(vec, arg2, count, 0);
6622 case TARGET_NR_getsid:
6623 ret = get_errno(getsid(arg1));
6625 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6626 case TARGET_NR_fdatasync:
6627 ret = get_errno(fdatasync(arg1));
6630 case TARGET_NR__sysctl:
6631 /* We don't implement this, but ENOTDIR is always a safe return value. */
6633 ret = -TARGET_ENOTDIR;
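/* sched_{get,set}affinity(): the kernel wants the CPU mask length in
 * multiples of the host unsigned long, so the guest-supplied length is
 * checked for abi_ulong alignment and then rounded up, e.g. (sketch):
 *
 *   mask_size = (arg2 + sizeof(*mask) - 1) & ~(sizeof(*mask) - 1);
 *
 * so a 4-byte mask from a 32-bit guest becomes an 8-byte mask on a 64-bit
 * host. */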
6635 case TARGET_NR_sched_getaffinity:
6637 unsigned int mask_size;
6638 unsigned long *mask;
6641 * sched_getaffinity needs multiples of ulong, so we need to take
6642 * care of mismatches between target ulong and host ulong sizes.
6644 if (arg2 & (sizeof(abi_ulong) - 1)) {
6645 ret = -TARGET_EINVAL;
6648 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6650 mask = alloca(mask_size);
6651 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
6653 if (!is_error(ret)) {
6654 if (copy_to_user(arg3, mask, ret)) {
6660 case TARGET_NR_sched_setaffinity:
6662 unsigned int mask_size;
6663 unsigned long *mask;
6666 * sched_setaffinity needs multiples of ulong, so we need to take
6667 * care of mismatches between target ulong and host ulong sizes.
6669 if (arg2 & (sizeof(abi_ulong) - 1)) {
6670 ret = -TARGET_EINVAL;
6673 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6675 mask = alloca(mask_size);
6676 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
6679 memcpy(mask, p, arg2);
6680 unlock_user_struct(p, arg3, 0);
6682 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
6685 case TARGET_NR_sched_setparam:
6687 struct sched_param *target_schp;
6688 struct sched_param schp;
6690 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
6692 schp.sched_priority = tswap32(target_schp->sched_priority);
6693 unlock_user_struct(target_schp, arg2, 0);
6694 ret = get_errno(sched_setparam(arg1, &schp));
6697 case TARGET_NR_sched_getparam:
6699 struct sched_param *target_schp;
6700 struct sched_param schp;
6701 ret = get_errno(sched_getparam(arg1, &schp));
6702 if (!is_error(ret)) {
6703 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
6705 target_schp->sched_priority = tswap32(schp.sched_priority);
6706 unlock_user_struct(target_schp, arg2, 1);
6710 case TARGET_NR_sched_setscheduler:
6712 struct sched_param *target_schp;
6713 struct sched_param schp;
6714 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
6716 schp.sched_priority = tswap32(target_schp->sched_priority);
6717 unlock_user_struct(target_schp, arg3, 0);
6718 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
6721 case TARGET_NR_sched_getscheduler:
6722 ret = get_errno(sched_getscheduler(arg1));
6724 case TARGET_NR_sched_yield:
6725 ret = get_errno(sched_yield());
6727 case TARGET_NR_sched_get_priority_max:
6728 ret = get_errno(sched_get_priority_max(arg1));
6730 case TARGET_NR_sched_get_priority_min:
6731 ret = get_errno(sched_get_priority_min(arg1));
6733 case TARGET_NR_sched_rr_get_interval:
6736 ret = get_errno(sched_rr_get_interval(arg1, &ts));
6737 if (!is_error(ret)) {
6738 host_to_target_timespec(arg2, &ts);
6742 case TARGET_NR_nanosleep:
6744 struct timespec req, rem;
6745 target_to_host_timespec(&req, arg1);
6746 ret = get_errno(nanosleep(&req, &rem));
6747 if (is_error(ret) && arg2) {
6748 host_to_target_timespec(arg2, &rem);
6752 #ifdef TARGET_NR_query_module
6753 case TARGET_NR_query_module:
6756 #ifdef TARGET_NR_nfsservctl
6757 case TARGET_NR_nfsservctl:
6760 case TARGET_NR_prctl:
6763 case PR_GET_PDEATHSIG:
6766 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
6767 if (!is_error(ret) && arg2
6768 && put_user_ual(deathsig, arg2))
6773 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
6777 #ifdef TARGET_NR_arch_prctl
6778 case TARGET_NR_arch_prctl:
6779 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6780 ret = do_arch_prctl(cpu_env, arg1, arg2);
6786 #ifdef TARGET_NR_pread
6787 case TARGET_NR_pread:
6789 if (((CPUARMState *)cpu_env)->eabi)
6792 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6794 ret = get_errno(pread(arg1, p, arg3, arg4));
6795 unlock_user(p, arg2, ret);
6797 case TARGET_NR_pwrite:
6799 if (((CPUARMState *)cpu_env)->eabi)
6802 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6804 ret = get_errno(pwrite(arg1, p, arg3, arg4));
6805 unlock_user(p, arg2, 0);
6808 #ifdef TARGET_NR_pread64
6809 case TARGET_NR_pread64:
6810 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6812 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
6813 unlock_user(p, arg2, ret);
6815 case TARGET_NR_pwrite64:
6816 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6818 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
6819 unlock_user(p, arg2, 0);
6822 case TARGET_NR_getcwd:
6823 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
6825 ret = get_errno(sys_getcwd1(p, arg2));
6826 unlock_user(p, arg1, ret);
6828 case TARGET_NR_capget:
6830 case TARGET_NR_capset:
6832 case TARGET_NR_sigaltstack:
6833 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6834 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6835 defined(TARGET_M68K) || defined(TARGET_S390X)
6836 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
6841 case TARGET_NR_sendfile:
6843 #ifdef TARGET_NR_getpmsg
6844 case TARGET_NR_getpmsg:
6847 #ifdef TARGET_NR_putpmsg
6848 case TARGET_NR_putpmsg:
6851 #ifdef TARGET_NR_vfork
6852 case TARGET_NR_vfork:
6853 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
6857 #ifdef TARGET_NR_ugetrlimit
6858 case TARGET_NR_ugetrlimit:
6861 ret = get_errno(getrlimit(arg1, &rlim));
6862 if (!is_error(ret)) {
6863 struct target_rlimit *target_rlim;
6864 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6866 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6867 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6868 unlock_user_struct(target_rlim, arg2, 1);
6873 #ifdef TARGET_NR_truncate64
6874 case TARGET_NR_truncate64:
6875 if (!(p = lock_user_string(arg1)))
6877 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
6878 unlock_user(p, arg1, 0);
6881 #ifdef TARGET_NR_ftruncate64
6882 case TARGET_NR_ftruncate64:
6883 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
6886 #ifdef TARGET_NR_stat64
6887 case TARGET_NR_stat64:
6888 if (!(p = lock_user_string(arg1)))
6890 ret = get_errno(stat(path(p), &st));
6891 unlock_user(p, arg1, 0);
6893 ret = host_to_target_stat64(cpu_env, arg2, &st);
6896 #ifdef TARGET_NR_lstat64
6897 case TARGET_NR_lstat64:
6898 if (!(p = lock_user_string(arg1)))
6900 ret = get_errno(lstat(path(p), &st));
6901 unlock_user(p, arg1, 0);
6903 ret = host_to_target_stat64(cpu_env, arg2, &st);
6906 #ifdef TARGET_NR_fstat64
6907 case TARGET_NR_fstat64:
6908 ret = get_errno(fstat(arg1, &st));
6910 ret = host_to_target_stat64(cpu_env, arg2, &st);
6913 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6914 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6915 #ifdef TARGET_NR_fstatat64
6916 case TARGET_NR_fstatat64:
6918 #ifdef TARGET_NR_newfstatat
6919 case TARGET_NR_newfstatat:
6921 if (!(p = lock_user_string(arg2)))
6923 #ifdef __NR_fstatat64
6924 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
6926 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
6929 ret = host_to_target_stat64(cpu_env, arg3, &st);
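/* Legacy 16-bit ID syscalls: on targets whose original uid_t/gid_t were 16
 * bits wide, the plain chown/getuid/setgroups/... calls below convert IDs
 * with low2highuid()/high2lowuid() (IDs that do not fit in 16 bits are
 * presumably mapped to an overflow value).  The *32 variants further down
 * pass 32-bit IDs through unchanged. */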
6932 case TARGET_NR_lchown:
6933 if (!(p = lock_user_string(arg1)))
6935 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
6936 unlock_user(p, arg1, 0);
6938 #ifdef TARGET_NR_getuid
6939 case TARGET_NR_getuid:
6940 ret = get_errno(high2lowuid(getuid()));
6943 #ifdef TARGET_NR_getgid
6944 case TARGET_NR_getgid:
6945 ret = get_errno(high2lowgid(getgid()));
6948 #ifdef TARGET_NR_geteuid
6949 case TARGET_NR_geteuid:
6950 ret = get_errno(high2lowuid(geteuid()));
6953 #ifdef TARGET_NR_getegid
6954 case TARGET_NR_getegid:
6955 ret = get_errno(high2lowgid(getegid()));
6958 case TARGET_NR_setreuid:
6959 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
6961 case TARGET_NR_setregid:
6962 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
6964 case TARGET_NR_getgroups:
6966 int gidsetsize = arg1;
6967 target_id *target_grouplist;
6971 grouplist = alloca(gidsetsize * sizeof(gid_t));
6972 ret = get_errno(getgroups(gidsetsize, grouplist));
6973 if (gidsetsize == 0)
6975 if (!is_error(ret)) {
6976 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
6977 if (!target_grouplist)
6979 for (i = 0; i < ret; i++)
6980 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
6981 unlock_user(target_grouplist, arg2, gidsetsize * 2);
6985 case TARGET_NR_setgroups:
6987 int gidsetsize = arg1;
6988 target_id *target_grouplist;
6992 grouplist = alloca(gidsetsize * sizeof(gid_t));
6993 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
6994 if (!target_grouplist) {
6995 ret = -TARGET_EFAULT;
6998 for (i = 0; i < gidsetsize; i++)
6999 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7000 unlock_user(target_grouplist, arg2, 0);
7001 ret = get_errno(setgroups(gidsetsize, grouplist));
7004 case TARGET_NR_fchown:
7005 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7007 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7008 case TARGET_NR_fchownat:
7009 if (!(p = lock_user_string(arg2)))
7011 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7012 unlock_user(p, arg2, 0);
7015 #ifdef TARGET_NR_setresuid
7016 case TARGET_NR_setresuid:
7017 ret = get_errno(setresuid(low2highuid(arg1),
7019 low2highuid(arg3)));
7022 #ifdef TARGET_NR_getresuid
7023 case TARGET_NR_getresuid:
7025 uid_t ruid, euid, suid;
7026 ret = get_errno(getresuid(&ruid, &euid, &suid));
7027 if (!is_error(ret)) {
7028 if (put_user_u16(high2lowuid(ruid), arg1)
7029 || put_user_u16(high2lowuid(euid), arg2)
7030 || put_user_u16(high2lowuid(suid), arg3))
7036 #ifdef TARGET_NR_getresgid
7037 case TARGET_NR_setresgid:
7038 ret = get_errno(setresgid(low2highgid(arg1),
7040 low2highgid(arg3)));
7043 #ifdef TARGET_NR_getresgid
7044 case TARGET_NR_getresgid:
7046 gid_t rgid, egid, sgid;
7047 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7048 if (!is_error(ret)) {
7049 if (put_user_u16(high2lowgid(rgid), arg1)
7050 || put_user_u16(high2lowgid(egid), arg2)
7051 || put_user_u16(high2lowgid(sgid), arg3))
7057 case TARGET_NR_chown:
7058 if (!(p = lock_user_string(arg1)))
7060 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7061 unlock_user(p, arg1, 0);
7063 case TARGET_NR_setuid:
7064 ret = get_errno(setuid(low2highuid(arg1)));
7066 case TARGET_NR_setgid:
7067 ret = get_errno(setgid(low2highgid(arg1)));
7069 case TARGET_NR_setfsuid:
7070 ret = get_errno(setfsuid(arg1));
7072 case TARGET_NR_setfsgid:
7073 ret = get_errno(setfsgid(arg1));
7076 #ifdef TARGET_NR_lchown32
7077 case TARGET_NR_lchown32:
7078 if (!(p = lock_user_string(arg1)))
7080 ret = get_errno(lchown(p, arg2, arg3));
7081 unlock_user(p, arg1, 0);
7084 #ifdef TARGET_NR_getuid32
7085 case TARGET_NR_getuid32:
7086 ret = get_errno(getuid());
7090 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7091 /* Alpha specific */
7092 case TARGET_NR_getxuid:
7096 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
7098 ret = get_errno(getuid());
7101 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7102 /* Alpha specific */
7103 case TARGET_NR_getxgid:
7107 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
7109 ret = get_errno(getgid());
7112 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7113 /* Alpha specific */
7114 case TARGET_NR_osf_getsysinfo:
7115 ret = -TARGET_EOPNOTSUPP;
7117 case TARGET_GSI_IEEE_FP_CONTROL:
7119 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7121 /* Copied from linux ieee_fpcr_to_swcr. */
7122 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7123 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7124 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7125 | SWCR_TRAP_ENABLE_DZE
7126 | SWCR_TRAP_ENABLE_OVF);
7127 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7128 | SWCR_TRAP_ENABLE_INE);
7129 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7130 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7132 if (put_user_u64 (swcr, arg2))
7138 /* case GSI_IEEE_STATE_AT_SIGNAL:
7139 -- Not implemented in linux kernel.
7141 -- Retrieves current unaligned access state; not much used.
7143 -- Retrieves implver information; surely not used.
7145 -- Grabs a copy of the HWRPB; surely not used.
7150 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7151 /* Alpha specific */
7152 case TARGET_NR_osf_setsysinfo:
7153 ret = -TARGET_EOPNOTSUPP;
7155 case TARGET_SSI_IEEE_FP_CONTROL:
7156 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7158 uint64_t swcr, fpcr, orig_fpcr;
7160 if (get_user_u64 (swcr, arg2))
7162 orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
7163 fpcr = orig_fpcr & FPCR_DYN_MASK;
7165 /* Copied from linux ieee_swcr_to_fpcr. */
7166 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7167 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7168 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7169 | SWCR_TRAP_ENABLE_DZE
7170 | SWCR_TRAP_ENABLE_OVF)) << 48;
7171 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7172 | SWCR_TRAP_ENABLE_INE)) << 57;
7173 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7174 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7176 cpu_alpha_store_fpcr (cpu_env, fpcr);
7179 if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
7180 /* Old exceptions are not signaled. */
7181 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7183 /* If any exceptions set by this call, and are unmasked,
7184 send a signal. */
7190 /* case SSI_NVPAIRS:
7191 -- Used with SSIN_UACPROC to enable unaligned accesses.
7192 case SSI_IEEE_STATE_AT_SIGNAL:
7193 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7194 -- Not implemented in linux kernel
7199 #ifdef TARGET_NR_osf_sigprocmask
7200 /* Alpha specific. */
7201 case TARGET_NR_osf_sigprocmask:
7205 sigset_t set, oldset;
7208 case TARGET_SIG_BLOCK:
7211 case TARGET_SIG_UNBLOCK:
7214 case TARGET_SIG_SETMASK:
7218 ret = -TARGET_EINVAL;
7222 target_to_host_old_sigset(&set, &mask);
7223 sigprocmask(arg1, &set, &oldset);
7224 host_to_target_old_sigset(&mask, &oldset);
7230 #ifdef TARGET_NR_getgid32
7231 case TARGET_NR_getgid32:
7232 ret = get_errno(getgid());
7235 #ifdef TARGET_NR_geteuid32
7236 case TARGET_NR_geteuid32:
7237 ret = get_errno(geteuid());
7240 #ifdef TARGET_NR_getegid32
7241 case TARGET_NR_getegid32:
7242 ret = get_errno(getegid());
7245 #ifdef TARGET_NR_setreuid32
7246 case TARGET_NR_setreuid32:
7247 ret = get_errno(setreuid(arg1, arg2));
7250 #ifdef TARGET_NR_setregid32
7251 case TARGET_NR_setregid32:
7252 ret = get_errno(setregid(arg1, arg2));
7255 #ifdef TARGET_NR_getgroups32
7256 case TARGET_NR_getgroups32:
7258 int gidsetsize = arg1;
7259 uint32_t *target_grouplist;
7263 grouplist = alloca(gidsetsize * sizeof(gid_t));
7264 ret = get_errno(getgroups(gidsetsize, grouplist));
7265 if (gidsetsize == 0)
7267 if (!is_error(ret)) {
7268 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7269 if (!target_grouplist) {
7270 ret = -TARGET_EFAULT;
7273 for(i = 0;i < ret; i++)
7274 target_grouplist[i] = tswap32(grouplist[i]);
7275 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7280 #ifdef TARGET_NR_setgroups32
7281 case TARGET_NR_setgroups32:
7283 int gidsetsize = arg1;
7284 uint32_t *target_grouplist;
7288 grouplist = alloca(gidsetsize * sizeof(gid_t));
7289 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7290 if (!target_grouplist) {
7291 ret = -TARGET_EFAULT;
7294 for(i = 0;i < gidsetsize; i++)
7295 grouplist[i] = tswap32(target_grouplist[i]);
7296 unlock_user(target_grouplist, arg2, 0);
7297 ret = get_errno(setgroups(gidsetsize, grouplist));
7301 #ifdef TARGET_NR_fchown32
7302 case TARGET_NR_fchown32:
7303 ret = get_errno(fchown(arg1, arg2, arg3));
7306 #ifdef TARGET_NR_setresuid32
7307 case TARGET_NR_setresuid32:
7308 ret = get_errno(setresuid(arg1, arg2, arg3));
7311 #ifdef TARGET_NR_getresuid32
7312 case TARGET_NR_getresuid32:
7314 uid_t ruid, euid, suid;
7315 ret = get_errno(getresuid(&ruid, &euid, &suid));
7316 if (!is_error(ret)) {
7317 if (put_user_u32(ruid, arg1)
7318 || put_user_u32(euid, arg2)
7319 || put_user_u32(suid, arg3))
7325 #ifdef TARGET_NR_setresgid32
7326 case TARGET_NR_setresgid32:
7327 ret = get_errno(setresgid(arg1, arg2, arg3));
7330 #ifdef TARGET_NR_getresgid32
7331 case TARGET_NR_getresgid32:
7333 gid_t rgid, egid, sgid;
7334 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7335 if (!is_error(ret)) {
7336 if (put_user_u32(rgid, arg1)
7337 || put_user_u32(egid, arg2)
7338 || put_user_u32(sgid, arg3))
7344 #ifdef TARGET_NR_chown32
7345 case TARGET_NR_chown32:
7346 if (!(p = lock_user_string(arg1)))
7348 ret = get_errno(chown(p, arg2, arg3));
7349 unlock_user(p, arg1, 0);
7352 #ifdef TARGET_NR_setuid32
7353 case TARGET_NR_setuid32:
7354 ret = get_errno(setuid(arg1));
7357 #ifdef TARGET_NR_setgid32
7358 case TARGET_NR_setgid32:
7359 ret = get_errno(setgid(arg1));
7362 #ifdef TARGET_NR_setfsuid32
7363 case TARGET_NR_setfsuid32:
7364 ret = get_errno(setfsuid(arg1));
7367 #ifdef TARGET_NR_setfsgid32
7368 case TARGET_NR_setfsgid32:
7369 ret = get_errno(setfsgid(arg1));
7373 case TARGET_NR_pivot_root:
7375 #ifdef TARGET_NR_mincore
7376 case TARGET_NR_mincore:
7377 {
7378 void *a;
7379 ret = -TARGET_EFAULT;
7380 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
7382 if (!(p = lock_user_string(arg3)))
7384 ret = get_errno(mincore(a, arg2, p));
7385 unlock_user(p, arg3, ret);
7387 unlock_user(a, arg1, 0);
7391 #ifdef TARGET_NR_arm_fadvise64_64
7392 case TARGET_NR_arm_fadvise64_64:
7395 * arm_fadvise64_64 looks like fadvise64_64 but
7396 * with different argument order
7397 */
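/* For reference (an assumption based on the ARM kernel ABI): ARM moves the
 * advice argument ahead of the 64-bit offset/length pair so that the 64-bit
 * halves stay in aligned register pairs, i.e. roughly
 *   fadvise64_64(fd, offset, len, advice)      -- common form handled below
 *   arm_fadvise64_64(fd, advice, offset, len)  -- ARM form
 * so the arguments have to be shuffled back before the common code runs. */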
7404 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7405 #ifdef TARGET_NR_fadvise64_64
7406 case TARGET_NR_fadvise64_64:
7408 #ifdef TARGET_NR_fadvise64
7409 case TARGET_NR_fadvise64:
7413 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
7414 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
7415 case 6: arg4 = POSIX_FADV_DONTNEED; break;
7416 case 7: arg4 = POSIX_FADV_NOREUSE; break;
7420 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
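/* The switch above appears to translate the target's POSIX_FADV_* numbering
 * into the host's: the guard is not visible here, but this matches the s390
 * numbering where DONTNEED/NOREUSE are 6/7 instead of the usual 4/5, which
 * is why 4 and 5 are deliberately mapped to out-of-range advice values. Note
 * that posix_fadvise() returns a positive error number rather than setting
 * errno, hence the plain negation instead of get_errno(). */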
7423 #ifdef TARGET_NR_madvise
7424 case TARGET_NR_madvise:
7425 /* A straight passthrough may not be safe because qemu sometimes
7426 turns private file-backed mappings into anonymous mappings.
7427 This will break MADV_DONTNEED.
7428 This is a hint, so ignoring and returning success is ok. */
7432 #if TARGET_ABI_BITS == 32
7433 case TARGET_NR_fcntl64:
7434 {
7435 int cmd;
7436 struct flock64 fl;
7437 struct target_flock64 *target_fl;
7438 #ifdef TARGET_ARM
7439 struct target_eabi_flock64 *target_efl;
7440 #endif
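/* Two target-side layouts are needed here (assumption, consistent with the
 * rest of this file): ARM EABI aligns the 64-bit l_start/l_len members of
 * struct flock64 to 8 bytes, inserting padding that the old ABI does not
 * have, so EABI guests use target_eabi_flock64 and everything else uses
 * target_flock64; both are converted into the host's struct flock64. */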
7442 cmd = target_to_host_fcntl_cmd(arg2);
7443 if (cmd == -TARGET_EINVAL)
7447 case TARGET_F_GETLK64:
7449 if (((CPUARMState *)cpu_env)->eabi) {
7450 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7452 fl.l_type = tswap16(target_efl->l_type);
7453 fl.l_whence = tswap16(target_efl->l_whence);
7454 fl.l_start = tswap64(target_efl->l_start);
7455 fl.l_len = tswap64(target_efl->l_len);
7456 fl.l_pid = tswap32(target_efl->l_pid);
7457 unlock_user_struct(target_efl, arg3, 0);
7461 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7463 fl.l_type = tswap16(target_fl->l_type);
7464 fl.l_whence = tswap16(target_fl->l_whence);
7465 fl.l_start = tswap64(target_fl->l_start);
7466 fl.l_len = tswap64(target_fl->l_len);
7467 fl.l_pid = tswap32(target_fl->l_pid);
7468 unlock_user_struct(target_fl, arg3, 0);
7470 ret = get_errno(fcntl(arg1, cmd, &fl));
7473 if (((CPUARMState *)cpu_env)->eabi) {
7474 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
7476 target_efl->l_type = tswap16(fl.l_type);
7477 target_efl->l_whence = tswap16(fl.l_whence);
7478 target_efl->l_start = tswap64(fl.l_start);
7479 target_efl->l_len = tswap64(fl.l_len);
7480 target_efl->l_pid = tswap32(fl.l_pid);
7481 unlock_user_struct(target_efl, arg3, 1);
7485 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
7487 target_fl->l_type = tswap16(fl.l_type);
7488 target_fl->l_whence = tswap16(fl.l_whence);
7489 target_fl->l_start = tswap64(fl.l_start);
7490 target_fl->l_len = tswap64(fl.l_len);
7491 target_fl->l_pid = tswap32(fl.l_pid);
7492 unlock_user_struct(target_fl, arg3, 1);
7497 case TARGET_F_SETLK64:
7498 case TARGET_F_SETLKW64:
7500 if (((CPUARMState *)cpu_env)->eabi) {
7501 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7503 fl.l_type = tswap16(target_efl->l_type);
7504 fl.l_whence = tswap16(target_efl->l_whence);
7505 fl.l_start = tswap64(target_efl->l_start);
7506 fl.l_len = tswap64(target_efl->l_len);
7507 fl.l_pid = tswap32(target_efl->l_pid);
7508 unlock_user_struct(target_efl, arg3, 0);
7512 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7514 fl.l_type = tswap16(target_fl->l_type);
7515 fl.l_whence = tswap16(target_fl->l_whence);
7516 fl.l_start = tswap64(target_fl->l_start);
7517 fl.l_len = tswap64(target_fl->l_len);
7518 fl.l_pid = tswap32(target_fl->l_pid);
7519 unlock_user_struct(target_fl, arg3, 0);
7521 ret = get_errno(fcntl(arg1, cmd, &fl));
7524 ret = do_fcntl(arg1, arg2, arg3);
7530 #ifdef TARGET_NR_cacheflush
7531 case TARGET_NR_cacheflush:
7532 /* self-modifying code is handled automatically, so nothing needed */
7536 #ifdef TARGET_NR_security
7537 case TARGET_NR_security:
7540 #ifdef TARGET_NR_getpagesize
7541 case TARGET_NR_getpagesize:
7542 ret = TARGET_PAGE_SIZE;
7545 case TARGET_NR_gettid:
7546 ret = get_errno(gettid());
7548 #ifdef TARGET_NR_readahead
7549 case TARGET_NR_readahead:
7550 #if TARGET_ABI_BITS == 32
7552 if (((CPUARMState *)cpu_env)->eabi)
7559 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
7561 ret = get_errno(readahead(arg1, arg2, arg3));
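/* On 32-bit ABIs the 64-bit readahead offset arrives split across two
 * registers and is reassembled above as ((off64_t)arg3 << 32) | arg2; the
 * ARM EABI check presumably exists because EABI aligns 64-bit arguments to
 * an even register pair, shifting the argument positions by one. */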
7565 #ifdef TARGET_NR_setxattr
7566 case TARGET_NR_setxattr:
7567 case TARGET_NR_lsetxattr:
7568 case TARGET_NR_fsetxattr:
7569 case TARGET_NR_getxattr:
7570 case TARGET_NR_lgetxattr:
7571 case TARGET_NR_fgetxattr:
7572 case TARGET_NR_listxattr:
7573 case TARGET_NR_llistxattr:
7574 case TARGET_NR_flistxattr:
7575 case TARGET_NR_removexattr:
7576 case TARGET_NR_lremovexattr:
7577 case TARGET_NR_fremovexattr:
7578 ret = -TARGET_EOPNOTSUPP;
7581 #ifdef TARGET_NR_set_thread_area
7582 case TARGET_NR_set_thread_area:
7583 #if defined(TARGET_MIPS)
7584 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
7587 #elif defined(TARGET_CRIS)
7589 ret = -TARGET_EINVAL;
7591 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
7595 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
7596 ret = do_set_thread_area(cpu_env, arg1);
7599 goto unimplemented_nowarn;
7602 #ifdef TARGET_NR_get_thread_area
7603 case TARGET_NR_get_thread_area:
7604 #if defined(TARGET_I386) && defined(TARGET_ABI32)
7605 ret = do_get_thread_area(cpu_env, arg1);
7607 goto unimplemented_nowarn;
7610 #ifdef TARGET_NR_getdomainname
7611 case TARGET_NR_getdomainname:
7612 goto unimplemented_nowarn;
7615 #ifdef TARGET_NR_clock_gettime
7616 case TARGET_NR_clock_gettime:
7619 ret = get_errno(clock_gettime(arg1, &ts));
7620 if (!is_error(ret)) {
7621 host_to_target_timespec(arg2, &ts);
7626 #ifdef TARGET_NR_clock_getres
7627 case TARGET_NR_clock_getres:
7630 ret = get_errno(clock_getres(arg1, &ts));
7631 if (!is_error(ret)) {
7632 host_to_target_timespec(arg2, &ts);
7637 #ifdef TARGET_NR_clock_nanosleep
7638 case TARGET_NR_clock_nanosleep:
7641 target_to_host_timespec(&ts, arg3);
7642 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
7644 host_to_target_timespec(arg4, &ts);
7649 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
7650 case TARGET_NR_set_tid_address:
7651 ret = get_errno(set_tid_address((int *)g2h(arg1)));
7655 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
7656 case TARGET_NR_tkill:
7657 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
7661 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
7662 case TARGET_NR_tgkill:
7663 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
7664 target_to_host_signal(arg3)));
7668 #ifdef TARGET_NR_set_robust_list
7669 case TARGET_NR_set_robust_list:
7670 goto unimplemented_nowarn;
7673 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
7674 case TARGET_NR_utimensat:
7676 struct timespec *tsp, ts[2];
7680 target_to_host_timespec(ts, arg3);
7681 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
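/* Per utimensat(2), ts[0] is the access time and ts[1] the modification
 * time; a NULL times pointer from the guest (tsp == NULL) means "set both to
 * the current time", and a NULL pathname falls through to the fd-only call
 * just below. */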
7685 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
7687 if (!(p = lock_user_string(arg2))) {
7688 ret = -TARGET_EFAULT;
7691 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
7692 unlock_user(p, arg2, 0);
7697 #if defined(CONFIG_USE_NPTL)
7698 case TARGET_NR_futex:
7699 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
7702 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
7703 case TARGET_NR_inotify_init:
7704 ret = get_errno(sys_inotify_init());
7707 #ifdef CONFIG_INOTIFY1
7708 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
7709 case TARGET_NR_inotify_init1:
7710 ret = get_errno(sys_inotify_init1(arg1));
7714 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
7715 case TARGET_NR_inotify_add_watch:
7716 p = lock_user_string(arg2);
7717 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
7718 unlock_user(p, arg2, 0);
7721 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
7722 case TARGET_NR_inotify_rm_watch:
7723 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
7727 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
7728 case TARGET_NR_mq_open:
7730 struct mq_attr posix_mq_attr;
7732 p = lock_user_string(arg1 - 1);
7734 copy_from_user_mq_attr (&posix_mq_attr, arg4);
7735 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
7736 unlock_user (p, arg1, 0);
7740 case TARGET_NR_mq_unlink:
7741 p = lock_user_string(arg1 - 1);
7742 ret = get_errno(mq_unlink(p));
7743 unlock_user (p, arg1, 0);
7746 case TARGET_NR_mq_timedsend:
7747 {
7748 struct timespec ts;
7750 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7752 target_to_host_timespec(&ts, arg5);
7753 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
7754 host_to_target_timespec(arg5, &ts);
7757 ret = get_errno(mq_send(arg1, p, arg3, arg4));
7758 unlock_user (p, arg2, arg3);
7762 case TARGET_NR_mq_timedreceive:
7763 {
7764 struct timespec ts;
7765 unsigned int prio;
7767 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7769 target_to_host_timespec(&ts, arg5);
7770 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
7771 host_to_target_timespec(arg5, &ts);
7774 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
7775 unlock_user (p, arg2, arg3);
7777 put_user_u32(prio, arg4);
7781 /* Not implemented for now... */
7782 /* case TARGET_NR_mq_notify: */
7785 case TARGET_NR_mq_getsetattr:
7787 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
7790 ret = mq_getattr(arg1, &posix_mq_attr_out);
7791 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
7794 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
7795 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
7802 #ifdef CONFIG_SPLICE
7803 #ifdef TARGET_NR_tee
7806 ret = get_errno(tee(arg1,arg2,arg3,arg4));
7810 #ifdef TARGET_NR_splice
7811 case TARGET_NR_splice:
7813 loff_t loff_in, loff_out;
7814 loff_t *ploff_in = NULL, *ploff_out = NULL;
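/* As with the host splice(2), a NULL offset pointer tells the kernel to use
 * and update the descriptor's own file offset, so the copied-in loff_in /
 * loff_out values are presumably only hooked up when the guest actually
 * passed offset pointers in arg2/arg4. */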
7816 get_user_u64(loff_in, arg2);
7817 ploff_in = &loff_in;
7820 get_user_u64(loff_out, arg4);
7821 ploff_out = &loff_out;
7823 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
7827 #ifdef TARGET_NR_vmsplice
7828 case TARGET_NR_vmsplice:
7829 {
7830 int count = arg3;
7831 struct iovec *vec;
7833 vec = alloca(count * sizeof(struct iovec));
7834 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
7836 ret = get_errno(vmsplice(arg1, vec, count, arg4));
7837 unlock_iovec(vec, arg2, count, 0);
7841 #endif /* CONFIG_SPLICE */
7842 #ifdef CONFIG_EVENTFD
7843 #if defined(TARGET_NR_eventfd)
7844 case TARGET_NR_eventfd:
7845 ret = get_errno(eventfd(arg1, 0));
7848 #if defined(TARGET_NR_eventfd2)
7849 case TARGET_NR_eventfd2:
7850 ret = get_errno(eventfd(arg1, arg2));
7853 #endif /* CONFIG_EVENTFD */
7854 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
7855 case TARGET_NR_fallocate:
7856 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
7859 #if defined(CONFIG_SYNC_FILE_RANGE)
7860 #if defined(TARGET_NR_sync_file_range)
7861 case TARGET_NR_sync_file_range:
7862 #if TARGET_ABI_BITS == 32
7863 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
7864 target_offset64(arg4, arg5), arg6));
7866 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
7870 #if defined(TARGET_NR_sync_file_range2)
7871 case TARGET_NR_sync_file_range2:
7872 /* This is like sync_file_range but the arguments are reordered */
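/* The reordering (an assumption, matching ARM and PowerPC) moves the flags
 * up to arg2 so that the two 64-bit offset/length values sit in aligned
 * register pairs; target_offset64() then reassembles each value from its two
 * 32-bit halves on 32-bit ABIs. */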
7873 #if TARGET_ABI_BITS == 32
7874 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
7875 target_offset64(arg5, arg6), arg2));
7877 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
7882 #if defined(CONFIG_EPOLL)
7883 #if defined(TARGET_NR_epoll_create)
7884 case TARGET_NR_epoll_create:
7885 ret = get_errno(epoll_create(arg1));
7888 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
7889 case TARGET_NR_epoll_create1:
7890 ret = get_errno(epoll_create1(arg1));
7893 #if defined(TARGET_NR_epoll_ctl)
7894 case TARGET_NR_epoll_ctl:
7896 struct epoll_event ep;
7897 struct epoll_event *epp = 0;
7899 struct target_epoll_event *target_ep;
7900 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
7903 ep.events = tswap32(target_ep->events);
7904 /* The epoll_data_t union is just opaque data to the kernel,
7905 * so we transfer all 64 bits across and need not worry what
7906 * actual data type it is.
7908 ep.data.u64 = tswap64(target_ep->data.u64);
7909 unlock_user_struct(target_ep, arg4, 0);
7910 epp = &ep;
7912 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
7917 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
7918 #define IMPLEMENT_EPOLL_PWAIT
7920 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
7921 #if defined(TARGET_NR_epoll_wait)
7922 case TARGET_NR_epoll_wait:
7924 #if defined(IMPLEMENT_EPOLL_PWAIT)
7925 case TARGET_NR_epoll_pwait:
7928 struct target_epoll_event *target_ep;
7929 struct epoll_event *ep;
7930 int epfd = arg1;
7931 int maxevents = arg3;
7932 int timeout = arg4;
7934 target_ep = lock_user(VERIFY_WRITE, arg2,
7935 maxevents * sizeof(struct target_epoll_event), 1);
7940 ep = alloca(maxevents * sizeof(struct epoll_event));
7943 #if defined(IMPLEMENT_EPOLL_PWAIT)
7944 case TARGET_NR_epoll_pwait:
7946 target_sigset_t *target_set;
7947 sigset_t _set, *set = &_set;
7950 target_set = lock_user(VERIFY_READ, arg5,
7951 sizeof(target_sigset_t), 1);
7953 unlock_user(target_ep, arg2, 0);
7956 target_to_host_sigset(set, target_set);
7957 unlock_user(target_set, arg5, 0);
7962 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
7966 #if defined(TARGET_NR_epoll_wait)
7967 case TARGET_NR_epoll_wait:
7968 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
7972 ret = -TARGET_ENOSYS;
7974 if (!is_error(ret)) {
7976 for (i = 0; i < ret; i++) {
7977 target_ep[i].events = tswap32(ep[i].events);
7978 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
7981 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
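/* Only the first `ret` events were filled in, so the unlock above reports
 * ret * sizeof(struct target_epoll_event) as the length actually written;
 * as far as the lock_user/unlock_user contract goes, that bounds how much is
 * copied back to guest memory when a bounce buffer was used. */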
7988 gemu_log("qemu: Unsupported syscall: %d\n", num);
7989 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
7990 unimplemented_nowarn:
7992 ret = -TARGET_ENOSYS;
7997 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8000 print_syscall_ret(num, ret);
8003 ret = -TARGET_EFAULT;