4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
31 #include <sys/types.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
45 int __clone2(int (*fn)(void *), void *child_stack_base,
46 size_t stack_size, int flags, void *arg, ...);
48 #include <sys/socket.h>
52 #include <sys/times.h>
55 #include <sys/statfs.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include "qemu-common.h"
68 #include <sys/eventfd.h>
71 #include <sys/epoll.h>
74 #define termios host_termios
75 #define winsize host_winsize
76 #define termio host_termio
77 #define sgttyb host_sgttyb /* same as target */
78 #define tchars host_tchars /* same as target */
79 #define ltchars host_ltchars /* same as target */
81 #include <linux/termios.h>
82 #include <linux/unistd.h>
83 #include <linux/utsname.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
88 #include <linux/mtio.h>
90 #if defined(CONFIG_FIEMAP)
91 #include <linux/fiemap.h>
95 #include "linux_loop.h"
96 #include "cpu-uname.h"
99 #include "qemu-common.h"
101 #if defined(CONFIG_USE_NPTL)
102 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
103 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
105 /* XXX: Hardcode the above values. */
106 #define CLONE_NPTL_FLAGS2 0
111 //#include <linux/msdos_fs.h>
112 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
113 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
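/*
 * The _syscallN() macros below generate small static wrappers around the
 * raw syscall(2) interface, so host system calls can be issued even when
 * the host libc does not provide a wrapper for them.  For instance, the
 * later _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp,
 * uint, count) expands roughly to:
 *
 *   static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *   {
 *       return syscall(__NR_sys_getdents, fd, dirp, count);
 *   }
 *
 * with __NR_sys_getdents aliased to the host __NR_getdents below.
 */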
124 #define _syscall0(type,name) \
125 static type name (void) \
127 return syscall(__NR_##name); \
130 #define _syscall1(type,name,type1,arg1) \
131 static type name (type1 arg1) \
133 return syscall(__NR_##name, arg1); \
136 #define _syscall2(type,name,type1,arg1,type2,arg2) \
137 static type name (type1 arg1,type2 arg2) \
139 return syscall(__NR_##name, arg1, arg2); \
142 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
143 static type name (type1 arg1,type2 arg2,type3 arg3) \
145 return syscall(__NR_##name, arg1, arg2, arg3); \
148 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
149 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
151 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
154 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
156 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
158 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
162 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
163 type5,arg5,type6,arg6) \
164 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
167 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
171 #define __NR_sys_uname __NR_uname
172 #define __NR_sys_faccessat __NR_faccessat
173 #define __NR_sys_fchmodat __NR_fchmodat
174 #define __NR_sys_fchownat __NR_fchownat
175 #define __NR_sys_fstatat64 __NR_fstatat64
176 #define __NR_sys_futimesat __NR_futimesat
177 #define __NR_sys_getcwd1 __NR_getcwd
178 #define __NR_sys_getdents __NR_getdents
179 #define __NR_sys_getdents64 __NR_getdents64
180 #define __NR_sys_getpriority __NR_getpriority
181 #define __NR_sys_linkat __NR_linkat
182 #define __NR_sys_mkdirat __NR_mkdirat
183 #define __NR_sys_mknodat __NR_mknodat
184 #define __NR_sys_newfstatat __NR_newfstatat
185 #define __NR_sys_openat __NR_openat
186 #define __NR_sys_readlinkat __NR_readlinkat
187 #define __NR_sys_renameat __NR_renameat
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_symlinkat __NR_symlinkat
190 #define __NR_sys_syslog __NR_syslog
191 #define __NR_sys_tgkill __NR_tgkill
192 #define __NR_sys_tkill __NR_tkill
193 #define __NR_sys_unlinkat __NR_unlinkat
194 #define __NR_sys_utimensat __NR_utimensat
195 #define __NR_sys_futex __NR_futex
196 #define __NR_sys_inotify_init __NR_inotify_init
197 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
198 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
200 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
202 #define __NR__llseek __NR_lseek
206 _syscall0(int, gettid)
208 /* This is a replacement for the host gettid() and must return a host
209 errno. */
210 static int gettid(void) {
211 return -ENOSYS;
212 }
214 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
215 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
216 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
218 _syscall2(int, sys_getpriority, int, which, int, who);
219 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
220 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
221 loff_t *, res, uint, wh);
223 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
224 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
225 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
226 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
228 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
229 _syscall2(int,sys_tkill,int,tid,int,sig)
231 #ifdef __NR_exit_group
232 _syscall1(int,exit_group,int,error_code)
234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
235 _syscall1(int,set_tid_address,int *,tidptr)
237 #if defined(CONFIG_USE_NPTL)
238 #if defined(TARGET_NR_futex) && defined(__NR_futex)
239 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
240 const struct timespec *,timeout,int *,uaddr2,int,val3)
243 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
244 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
245 unsigned long *, user_mask_ptr);
246 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
247 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
248 unsigned long *, user_mask_ptr);
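/*
 * Translation table for open(2)/fcntl(2) status flags.  Each entry gives a
 * (target mask, target bits, host mask, host bits) tuple and is consumed by
 * target_to_host_bitmask() (see e.g. sys_openat() below) and its
 * host_to_target counterpart to rewrite flag words between the two ABIs.
 */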
250 static bitmask_transtbl fcntl_flags_tbl[] = {
251 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
252 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
253 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
254 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
255 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
256 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
257 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
258 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
259 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
260 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
261 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
262 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
263 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
264 #if defined(O_DIRECT)
265 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
270 #define COPY_UTSNAME_FIELD(dest, src) \
272 /* __NEW_UTS_LEN doesn't include terminating null */ \
273 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
274 (dest)[__NEW_UTS_LEN] = '\0'; \
277 static int sys_uname(struct new_utsname *buf)
279 struct utsname uts_buf;
281 if (uname(&uts_buf) < 0)
285 * Just in case these have some differences, we
286 * translate utsname to new_utsname (which is the
287 * struct the Linux kernel uses).
290 memset(buf, 0, sizeof(*buf));
291 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
292 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
293 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
294 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
295 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
297 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
301 #undef COPY_UTSNAME_FIELD
304 static int sys_getcwd1(char *buf, size_t size)
306 if (getcwd(buf, size) == NULL) {
307 /* getcwd() sets errno */
310 return strlen(buf)+1;
315 * Host system seems to have atfile syscall stubs available. We
316 * now enable them one by one as specified by target syscall_nr.h.
319 #ifdef TARGET_NR_faccessat
320 static int sys_faccessat(int dirfd, const char *pathname, int mode)
322 return (faccessat(dirfd, pathname, mode, 0));
325 #ifdef TARGET_NR_fchmodat
326 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
328 return (fchmodat(dirfd, pathname, mode, 0));
331 #if defined(TARGET_NR_fchownat)
332 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
333 gid_t group, int flags)
335 return (fchownat(dirfd, pathname, owner, group, flags));
338 #ifdef __NR_fstatat64
339 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
342 return (fstatat(dirfd, pathname, buf, flags));
345 #ifdef __NR_newfstatat
346 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
349 return (fstatat(dirfd, pathname, buf, flags));
352 #ifdef TARGET_NR_futimesat
353 static int sys_futimesat(int dirfd, const char *pathname,
354 const struct timeval times[2])
356 return (futimesat(dirfd, pathname, times));
359 #ifdef TARGET_NR_linkat
360 static int sys_linkat(int olddirfd, const char *oldpath,
361 int newdirfd, const char *newpath, int flags)
363 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
366 #ifdef TARGET_NR_mkdirat
367 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
369 return (mkdirat(dirfd, pathname, mode));
372 #ifdef TARGET_NR_mknodat
373 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
376 return (mknodat(dirfd, pathname, mode, dev));
379 #ifdef TARGET_NR_openat
380 static int sys_openat(int dirfd, const char *pathname, int flags, ...)
383 * open(2) has extra parameter 'mode' when called with
386 if ((flags & O_CREAT) != 0) {
391 * Get the 'mode' parameter and translate it to
395 mode = va_arg(ap, mode_t);
396 mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
399 return (openat(dirfd, pathname, flags, mode));
401 return (openat(dirfd, pathname, flags));
404 #ifdef TARGET_NR_readlinkat
405 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
407 return (readlinkat(dirfd, pathname, buf, bufsiz));
410 #ifdef TARGET_NR_renameat
411 static int sys_renameat(int olddirfd, const char *oldpath,
412 int newdirfd, const char *newpath)
414 return (renameat(olddirfd, oldpath, newdirfd, newpath));
417 #ifdef TARGET_NR_symlinkat
418 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
420 return (symlinkat(oldpath, newdirfd, newpath));
423 #ifdef TARGET_NR_unlinkat
424 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
426 return (unlinkat(dirfd, pathname, flags));
429 #else /* !CONFIG_ATFILE */
432 * Try direct syscalls instead
434 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
435 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
437 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
438 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
440 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
441 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
442 uid_t,owner,gid_t,group,int,flags)
444 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
445 defined(__NR_fstatat64)
446 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
447 struct stat *,buf,int,flags)
449 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
450 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
451 const struct timeval *,times)
453 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
454 defined(__NR_newfstatat)
455 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
456 struct stat *,buf,int,flags)
458 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
459 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
460 int,newdirfd,const char *,newpath,int,flags)
462 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
463 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
465 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
466 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
467 mode_t,mode,dev_t,dev)
469 #if defined(TARGET_NR_openat) && defined(__NR_openat)
470 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
472 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
473 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
474 char *,buf,size_t,bufsize)
476 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
477 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
478 int,newdirfd,const char *,newpath)
480 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
481 _syscall3(int,sys_symlinkat,const char *,oldpath,
482 int,newdirfd,const char *,newpath)
484 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
485 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
488 #endif /* CONFIG_ATFILE */
490 #ifdef CONFIG_UTIMENSAT
491 static int sys_utimensat(int dirfd, const char *pathname,
492 const struct timespec times[2], int flags)
494 if (pathname == NULL)
495 return futimens(dirfd, times);
497 return utimensat(dirfd, pathname, times, flags);
500 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
501 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
502 const struct timespec *,tsp,int,flags)
504 #endif /* CONFIG_UTIMENSAT */
506 #ifdef CONFIG_INOTIFY
507 #include <sys/inotify.h>
509 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
510 static int sys_inotify_init(void)
512 return (inotify_init());
515 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
516 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
518 return (inotify_add_watch(fd, pathname, mask));
521 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
522 static int sys_inotify_rm_watch(int fd, int32_t wd)
524 return (inotify_rm_watch(fd, wd));
527 #ifdef CONFIG_INOTIFY1
528 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
529 static int sys_inotify_init1(int flags)
531 return (inotify_init1(flags));
536 /* Userspace can usually survive runtime without inotify */
537 #undef TARGET_NR_inotify_init
538 #undef TARGET_NR_inotify_init1
539 #undef TARGET_NR_inotify_add_watch
540 #undef TARGET_NR_inotify_rm_watch
541 #endif /* CONFIG_INOTIFY */
543 #if defined(TARGET_NR_ppoll)
545 # define __NR_ppoll -1
547 #define __NR_sys_ppoll __NR_ppoll
548 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
549 struct timespec *, timeout, const __sigset_t *, sigmask,
553 #if defined(TARGET_NR_pselect6)
554 #ifndef __NR_pselect6
555 # define __NR_pselect6 -1
557 #define __NR_sys_pselect6 __NR_pselect6
558 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
559 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
562 extern int personality(int);
563 extern int flock(int, int);
564 extern int setfsuid(int);
565 extern int setfsgid(int);
566 extern int setgroups(int, gid_t *);
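/*
 * Errno translation: host errno values index host_to_target_errno_table[]
 * to obtain the target value, and target_to_host_errno_table[] provides the
 * reverse mapping (filled in by syscall_init()).  Errnos with no table
 * entry are assumed to have the same value on host and target and are
 * passed through unchanged.
 */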
568 #define ERRNO_TABLE_SIZE 1200
570 /* target_to_host_errno_table[] is initialized from
571 * host_to_target_errno_table[] in syscall_init(). */
572 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
576 * This list is the union of errno values overridden in asm-<arch>/errno.h
577 * minus the errnos that are not actually generic to all archs.
579 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
580 [EIDRM] = TARGET_EIDRM,
581 [ECHRNG] = TARGET_ECHRNG,
582 [EL2NSYNC] = TARGET_EL2NSYNC,
583 [EL3HLT] = TARGET_EL3HLT,
584 [EL3RST] = TARGET_EL3RST,
585 [ELNRNG] = TARGET_ELNRNG,
586 [EUNATCH] = TARGET_EUNATCH,
587 [ENOCSI] = TARGET_ENOCSI,
588 [EL2HLT] = TARGET_EL2HLT,
589 [EDEADLK] = TARGET_EDEADLK,
590 [ENOLCK] = TARGET_ENOLCK,
591 [EBADE] = TARGET_EBADE,
592 [EBADR] = TARGET_EBADR,
593 [EXFULL] = TARGET_EXFULL,
594 [ENOANO] = TARGET_ENOANO,
595 [EBADRQC] = TARGET_EBADRQC,
596 [EBADSLT] = TARGET_EBADSLT,
597 [EBFONT] = TARGET_EBFONT,
598 [ENOSTR] = TARGET_ENOSTR,
599 [ENODATA] = TARGET_ENODATA,
600 [ETIME] = TARGET_ETIME,
601 [ENOSR] = TARGET_ENOSR,
602 [ENONET] = TARGET_ENONET,
603 [ENOPKG] = TARGET_ENOPKG,
604 [EREMOTE] = TARGET_EREMOTE,
605 [ENOLINK] = TARGET_ENOLINK,
606 [EADV] = TARGET_EADV,
607 [ESRMNT] = TARGET_ESRMNT,
608 [ECOMM] = TARGET_ECOMM,
609 [EPROTO] = TARGET_EPROTO,
610 [EDOTDOT] = TARGET_EDOTDOT,
611 [EMULTIHOP] = TARGET_EMULTIHOP,
612 [EBADMSG] = TARGET_EBADMSG,
613 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
614 [EOVERFLOW] = TARGET_EOVERFLOW,
615 [ENOTUNIQ] = TARGET_ENOTUNIQ,
616 [EBADFD] = TARGET_EBADFD,
617 [EREMCHG] = TARGET_EREMCHG,
618 [ELIBACC] = TARGET_ELIBACC,
619 [ELIBBAD] = TARGET_ELIBBAD,
620 [ELIBSCN] = TARGET_ELIBSCN,
621 [ELIBMAX] = TARGET_ELIBMAX,
622 [ELIBEXEC] = TARGET_ELIBEXEC,
623 [EILSEQ] = TARGET_EILSEQ,
624 [ENOSYS] = TARGET_ENOSYS,
625 [ELOOP] = TARGET_ELOOP,
626 [ERESTART] = TARGET_ERESTART,
627 [ESTRPIPE] = TARGET_ESTRPIPE,
628 [ENOTEMPTY] = TARGET_ENOTEMPTY,
629 [EUSERS] = TARGET_EUSERS,
630 [ENOTSOCK] = TARGET_ENOTSOCK,
631 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
632 [EMSGSIZE] = TARGET_EMSGSIZE,
633 [EPROTOTYPE] = TARGET_EPROTOTYPE,
634 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
635 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
636 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
637 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
638 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
639 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
640 [EADDRINUSE] = TARGET_EADDRINUSE,
641 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
642 [ENETDOWN] = TARGET_ENETDOWN,
643 [ENETUNREACH] = TARGET_ENETUNREACH,
644 [ENETRESET] = TARGET_ENETRESET,
645 [ECONNABORTED] = TARGET_ECONNABORTED,
646 [ECONNRESET] = TARGET_ECONNRESET,
647 [ENOBUFS] = TARGET_ENOBUFS,
648 [EISCONN] = TARGET_EISCONN,
649 [ENOTCONN] = TARGET_ENOTCONN,
650 [EUCLEAN] = TARGET_EUCLEAN,
651 [ENOTNAM] = TARGET_ENOTNAM,
652 [ENAVAIL] = TARGET_ENAVAIL,
653 [EISNAM] = TARGET_EISNAM,
654 [EREMOTEIO] = TARGET_EREMOTEIO,
655 [ESHUTDOWN] = TARGET_ESHUTDOWN,
656 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
657 [ETIMEDOUT] = TARGET_ETIMEDOUT,
658 [ECONNREFUSED] = TARGET_ECONNREFUSED,
659 [EHOSTDOWN] = TARGET_EHOSTDOWN,
660 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
661 [EALREADY] = TARGET_EALREADY,
662 [EINPROGRESS] = TARGET_EINPROGRESS,
663 [ESTALE] = TARGET_ESTALE,
664 [ECANCELED] = TARGET_ECANCELED,
665 [ENOMEDIUM] = TARGET_ENOMEDIUM,
666 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
668 [ENOKEY] = TARGET_ENOKEY,
671 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
674 [EKEYREVOKED] = TARGET_EKEYREVOKED,
677 [EKEYREJECTED] = TARGET_EKEYREJECTED,
680 [EOWNERDEAD] = TARGET_EOWNERDEAD,
682 #ifdef ENOTRECOVERABLE
683 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
687 static inline int host_to_target_errno(int err)
689 if(host_to_target_errno_table[err])
690 return host_to_target_errno_table[err];
694 static inline int target_to_host_errno(int err)
696 if (target_to_host_errno_table[err])
697 return target_to_host_errno_table[err];
701 static inline abi_long get_errno(abi_long ret)
704 return -host_to_target_errno(errno);
709 static inline int is_error(abi_long ret)
711 return (abi_ulong)ret >= (abi_ulong)(-4096);
714 char *target_strerror(int err)
716 return strerror(target_to_host_errno(err));
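/*
 * Emulated program break.  target_original_brk records the initial break,
 * target_brk the current one, and brk_page the end of the highest host page
 * already mapped for the heap; do_brk() grows the mapping on demand.
 */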
719 static abi_ulong target_brk;
720 static abi_ulong target_original_brk;
721 static abi_ulong brk_page;
723 void target_set_brk(abi_ulong new_brk)
725 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
726 brk_page = HOST_PAGE_ALIGN(target_brk);
729 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
730 #define DEBUGF_BRK(message, args...)
732 /* do_brk() must return target values and target errnos. */
733 abi_long do_brk(abi_ulong new_brk)
735 abi_long mapped_addr;
738 DEBUGF_BRK("do_brk(%#010x) -> ", new_brk);
741 DEBUGF_BRK("%#010x (!new_brk)\n", target_brk);
744 if (new_brk < target_original_brk) {
745 DEBUGF_BRK("%#010x (new_brk < target_original_brk)\n", target_brk);
749 /* If the new brk is less than the highest page reserved to the
750 * target heap allocation, set it and we're almost done... */
751 if (new_brk <= brk_page) {
752 /* Heap contents are initialized to zero, as for anonymous
754 if (new_brk > target_brk) {
755 memset(g2h(target_brk), 0, new_brk - target_brk);
757 target_brk = new_brk;
758 DEBUGF_BRK("%#010x (new_brk <= brk_page)\n", target_brk);
762 /* We need to allocate more memory after the brk... Note that
763 * we don't use MAP_FIXED because that will map over the top of
764 * any existing mapping (like the one with the host libc or qemu
765 * itself); instead we treat "mapped but at wrong address" as
766 * a failure and unmap again.
768 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
769 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
770 PROT_READ|PROT_WRITE,
771 MAP_ANON|MAP_PRIVATE, 0, 0));
773 if (mapped_addr == brk_page) {
774 target_brk = new_brk;
775 brk_page = HOST_PAGE_ALIGN(target_brk);
776 DEBUGF_BRK("%#010x (mapped_addr == brk_page)\n", target_brk);
778 } else if (mapped_addr != -1) {
779 /* Mapped but at wrong address, meaning there wasn't actually
780 * enough space for this brk.
782 target_munmap(mapped_addr, new_alloc_size);
784 DEBUGF_BRK("%#010x (mapped_addr != -1)\n", target_brk);
787 DEBUGF_BRK("%#010x (otherwise)\n", target_brk);
790 #if defined(TARGET_ALPHA)
791 /* We (partially) emulate OSF/1 on Alpha, which requires we
792 return a proper errno, not an unchanged brk value. */
793 return -TARGET_ENOMEM;
795 /* For everything else, return the previous break. */
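/*
 * fd_set marshalling for select(): target fd sets are stored as arrays of
 * abi_ulong words, so they are converted bit by bit to and from the host
 * fd_set representation.
 */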
799 static inline abi_long copy_from_user_fdset(fd_set *fds,
800 abi_ulong target_fds_addr,
804 abi_ulong b, *target_fds;
806 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
807 if (!(target_fds = lock_user(VERIFY_READ,
809 sizeof(abi_ulong) * nw,
811 return -TARGET_EFAULT;
815 for (i = 0; i < nw; i++) {
816 /* grab the abi_ulong */
817 __get_user(b, &target_fds[i]);
818 for (j = 0; j < TARGET_ABI_BITS; j++) {
819 /* check the bit inside the abi_ulong */
826 unlock_user(target_fds, target_fds_addr, 0);
831 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
832 abi_ulong target_fds_addr,
835 if (target_fds_addr) {
836 if (copy_from_user_fdset(fds, target_fds_addr, n))
837 return -TARGET_EFAULT;
845 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
851 abi_ulong *target_fds;
853 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
854 if (!(target_fds = lock_user(VERIFY_WRITE,
856 sizeof(abi_ulong) * nw,
858 return -TARGET_EFAULT;
861 for (i = 0; i < nw; i++) {
863 for (j = 0; j < TARGET_ABI_BITS; j++) {
864 v |= ((FD_ISSET(k, fds) != 0) << j);
867 __put_user(v, &target_fds[i]);
870 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
875 #if defined(__alpha__)
881 static inline abi_long host_to_target_clock_t(long ticks)
883 #if HOST_HZ == TARGET_HZ
886 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
890 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
891 const struct rusage *rusage)
893 struct target_rusage *target_rusage;
895 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
896 return -TARGET_EFAULT;
897 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
898 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
899 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
900 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
901 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
902 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
903 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
904 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
905 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
906 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
907 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
908 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
909 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
910 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
911 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
912 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
913 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
914 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
915 unlock_user_struct(target_rusage, target_addr, 1);
920 static inline rlim_t target_to_host_rlim(target_ulong target_rlim)
922 if (target_rlim == TARGET_RLIM_INFINITY)
923 return RLIM_INFINITY;
925 return tswapl(target_rlim);
928 static inline target_ulong host_to_target_rlim(rlim_t rlim)
930 if (rlim == RLIM_INFINITY || rlim != (target_long)rlim)
931 return TARGET_RLIM_INFINITY;
936 static inline abi_long copy_from_user_timeval(struct timeval *tv,
937 abi_ulong target_tv_addr)
939 struct target_timeval *target_tv;
941 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
942 return -TARGET_EFAULT;
944 __get_user(tv->tv_sec, &target_tv->tv_sec);
945 __get_user(tv->tv_usec, &target_tv->tv_usec);
947 unlock_user_struct(target_tv, target_tv_addr, 0);
952 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
953 const struct timeval *tv)
955 struct target_timeval *target_tv;
957 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
958 return -TARGET_EFAULT;
960 __put_user(tv->tv_sec, &target_tv->tv_sec);
961 __put_user(tv->tv_usec, &target_tv->tv_usec);
963 unlock_user_struct(target_tv, target_tv_addr, 1);
968 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
971 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
972 abi_ulong target_mq_attr_addr)
974 struct target_mq_attr *target_mq_attr;
976 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
977 target_mq_attr_addr, 1))
978 return -TARGET_EFAULT;
980 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
981 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
982 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
983 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
985 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
990 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
991 const struct mq_attr *attr)
993 struct target_mq_attr *target_mq_attr;
995 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
996 target_mq_attr_addr, 0))
997 return -TARGET_EFAULT;
999 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1000 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1001 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1002 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1004 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
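/*
 * do_select() copies the three fd sets and the optional timeout in from
 * target memory, issues the host select(), and then writes the surviving
 * descriptors (and, when present, the updated timeout) back out.
 */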
1010 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1011 /* do_select() must return target values and target errnos. */
1012 static abi_long do_select(int n,
1013 abi_ulong rfd_addr, abi_ulong wfd_addr,
1014 abi_ulong efd_addr, abi_ulong target_tv_addr)
1016 fd_set rfds, wfds, efds;
1017 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1018 struct timeval tv, *tv_ptr;
1021 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1025 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1029 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1034 if (target_tv_addr) {
1035 if (copy_from_user_timeval(&tv, target_tv_addr))
1036 return -TARGET_EFAULT;
1042 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1044 if (!is_error(ret)) {
1045 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1046 return -TARGET_EFAULT;
1047 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1048 return -TARGET_EFAULT;
1049 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1050 return -TARGET_EFAULT;
1052 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1053 return -TARGET_EFAULT;
1060 static abi_long do_pipe2(int host_pipe[], int flags)
1063 return pipe2(host_pipe, flags);
1069 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1070 int flags, int is_pipe2)
1074 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1077 return get_errno(ret);
1079 /* Several targets have special calling conventions for the original
1080 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1082 #if defined(TARGET_ALPHA)
1083 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1084 return host_pipe[0];
1085 #elif defined(TARGET_MIPS)
1086 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1087 return host_pipe[0];
1088 #elif defined(TARGET_SH4)
1089 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1090 return host_pipe[0];
1094 if (put_user_s32(host_pipe[0], pipedes)
1095 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1096 return -TARGET_EFAULT;
1097 return get_errno(ret);
1100 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1101 abi_ulong target_addr,
1104 struct target_ip_mreqn *target_smreqn;
1106 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1108 return -TARGET_EFAULT;
1109 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1110 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1111 if (len == sizeof(struct target_ip_mreqn))
1112 mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
1113 unlock_user(target_smreqn, target_addr, 0);
1118 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1119 abi_ulong target_addr,
1122 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1123 sa_family_t sa_family;
1124 struct target_sockaddr *target_saddr;
1126 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1128 return -TARGET_EFAULT;
1130 sa_family = tswap16(target_saddr->sa_family);
1132 /* Oops. The caller might send an incomplete sun_path; sun_path
1133 * must be terminated by \0 (see the manual page), but
1134 * unfortunately it is quite common to specify sockaddr_un
1135 * length as "strlen(x->sun_path)" while it should be
1136 * "strlen(...) + 1". We'll fix that here if needed.
1137 * Linux kernel has a similar feature.
1140 if (sa_family == AF_UNIX) {
1141 if (len < unix_maxlen && len > 0) {
1142 char *cp = (char*)target_saddr;
1144 if ( cp[len-1] && !cp[len] )
1147 if (len > unix_maxlen)
1151 memcpy(addr, target_saddr, len);
1152 addr->sa_family = sa_family;
1153 unlock_user(target_saddr, target_addr, 0);
1158 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1159 struct sockaddr *addr,
1162 struct target_sockaddr *target_saddr;
1164 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1166 return -TARGET_EFAULT;
1167 memcpy(target_saddr, addr, len);
1168 target_saddr->sa_family = tswap16(addr->sa_family);
1169 unlock_user(target_saddr, target_addr, len);
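/*
 * Control-message (cmsg) translation for sendmsg()/recvmsg().  Only
 * SCM_RIGHTS payloads (arrays of file descriptors) are byte-swapped
 * element by element; any other ancillary data is copied through verbatim
 * with a warning.
 */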
1174 /* ??? Should this also swap msgh->name? */
1175 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1176 struct target_msghdr *target_msgh)
1178 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1179 abi_long msg_controllen;
1180 abi_ulong target_cmsg_addr;
1181 struct target_cmsghdr *target_cmsg;
1182 socklen_t space = 0;
1184 msg_controllen = tswapl(target_msgh->msg_controllen);
1185 if (msg_controllen < sizeof (struct target_cmsghdr))
1187 target_cmsg_addr = tswapl(target_msgh->msg_control);
1188 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1190 return -TARGET_EFAULT;
1192 while (cmsg && target_cmsg) {
1193 void *data = CMSG_DATA(cmsg);
1194 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1196 int len = tswapl(target_cmsg->cmsg_len)
1197 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1199 space += CMSG_SPACE(len);
1200 if (space > msgh->msg_controllen) {
1201 space -= CMSG_SPACE(len);
1202 gemu_log("Host cmsg overflow\n");
1206 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1207 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1208 cmsg->cmsg_len = CMSG_LEN(len);
1210 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1211 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1212 memcpy(data, target_data, len);
1214 int *fd = (int *)data;
1215 int *target_fd = (int *)target_data;
1216 int i, numfds = len / sizeof(int);
1218 for (i = 0; i < numfds; i++)
1219 fd[i] = tswap32(target_fd[i]);
1222 cmsg = CMSG_NXTHDR(msgh, cmsg);
1223 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1225 unlock_user(target_cmsg, target_cmsg_addr, 0);
1227 msgh->msg_controllen = space;
1231 /* ??? Should this also swap msgh->name? */
1232 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1233 struct msghdr *msgh)
1235 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1236 abi_long msg_controllen;
1237 abi_ulong target_cmsg_addr;
1238 struct target_cmsghdr *target_cmsg;
1239 socklen_t space = 0;
1241 msg_controllen = tswapl(target_msgh->msg_controllen);
1242 if (msg_controllen < sizeof (struct target_cmsghdr))
1244 target_cmsg_addr = tswapl(target_msgh->msg_control);
1245 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1247 return -TARGET_EFAULT;
1249 while (cmsg && target_cmsg) {
1250 void *data = CMSG_DATA(cmsg);
1251 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1253 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1255 space += TARGET_CMSG_SPACE(len);
1256 if (space > msg_controllen) {
1257 space -= TARGET_CMSG_SPACE(len);
1258 gemu_log("Target cmsg overflow\n");
1262 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1263 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1264 target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
1266 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1267 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1268 memcpy(target_data, data, len);
1270 int *fd = (int *)data;
1271 int *target_fd = (int *)target_data;
1272 int i, numfds = len / sizeof(int);
1274 for (i = 0; i < numfds; i++)
1275 target_fd[i] = tswap32(fd[i]);
1278 cmsg = CMSG_NXTHDR(msgh, cmsg);
1279 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1281 unlock_user(target_cmsg, target_cmsg_addr, space);
1283 target_msgh->msg_controllen = tswapl(space);
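/*
 * Socket option handling.  Most options carry a plain int: the TARGET_SO_*
 * (or IP/TCP level) option name is mapped onto the host constant and the
 * value copied across; options with structured arguments (multicast
 * membership, source filters) are marshalled separately.
 */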
1287 /* do_setsockopt() must return target values and target errnos. */
1288 static abi_long do_setsockopt(int sockfd, int level, int optname,
1289 abi_ulong optval_addr, socklen_t optlen)
1293 struct ip_mreqn *ip_mreq;
1294 struct ip_mreq_source *ip_mreq_source;
1298 /* TCP options all take an 'int' value. */
1299 if (optlen < sizeof(uint32_t))
1300 return -TARGET_EINVAL;
1302 if (get_user_u32(val, optval_addr))
1303 return -TARGET_EFAULT;
1304 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1311 case IP_ROUTER_ALERT:
1315 case IP_MTU_DISCOVER:
1321 case IP_MULTICAST_TTL:
1322 case IP_MULTICAST_LOOP:
1324 if (optlen >= sizeof(uint32_t)) {
1325 if (get_user_u32(val, optval_addr))
1326 return -TARGET_EFAULT;
1327 } else if (optlen >= 1) {
1328 if (get_user_u8(val, optval_addr))
1329 return -TARGET_EFAULT;
1331 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1333 case IP_ADD_MEMBERSHIP:
1334 case IP_DROP_MEMBERSHIP:
1335 if (optlen < sizeof (struct target_ip_mreq) ||
1336 optlen > sizeof (struct target_ip_mreqn))
1337 return -TARGET_EINVAL;
1339 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1340 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1341 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1344 case IP_BLOCK_SOURCE:
1345 case IP_UNBLOCK_SOURCE:
1346 case IP_ADD_SOURCE_MEMBERSHIP:
1347 case IP_DROP_SOURCE_MEMBERSHIP:
1348 if (optlen != sizeof (struct target_ip_mreq_source))
1349 return -TARGET_EINVAL;
1351 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1352 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1353 unlock_user (ip_mreq_source, optval_addr, 0);
1360 case TARGET_SOL_SOCKET:
1362 /* Options with 'int' argument. */
1363 case TARGET_SO_DEBUG:
1366 case TARGET_SO_REUSEADDR:
1367 optname = SO_REUSEADDR;
1369 case TARGET_SO_TYPE:
1372 case TARGET_SO_ERROR:
1375 case TARGET_SO_DONTROUTE:
1376 optname = SO_DONTROUTE;
1378 case TARGET_SO_BROADCAST:
1379 optname = SO_BROADCAST;
1381 case TARGET_SO_SNDBUF:
1382 optname = SO_SNDBUF;
1384 case TARGET_SO_RCVBUF:
1385 optname = SO_RCVBUF;
1387 case TARGET_SO_KEEPALIVE:
1388 optname = SO_KEEPALIVE;
1390 case TARGET_SO_OOBINLINE:
1391 optname = SO_OOBINLINE;
1393 case TARGET_SO_NO_CHECK:
1394 optname = SO_NO_CHECK;
1396 case TARGET_SO_PRIORITY:
1397 optname = SO_PRIORITY;
1400 case TARGET_SO_BSDCOMPAT:
1401 optname = SO_BSDCOMPAT;
1404 case TARGET_SO_PASSCRED:
1405 optname = SO_PASSCRED;
1407 case TARGET_SO_TIMESTAMP:
1408 optname = SO_TIMESTAMP;
1410 case TARGET_SO_RCVLOWAT:
1411 optname = SO_RCVLOWAT;
1413 case TARGET_SO_RCVTIMEO:
1414 optname = SO_RCVTIMEO;
1416 case TARGET_SO_SNDTIMEO:
1417 optname = SO_SNDTIMEO;
1423 if (optlen < sizeof(uint32_t))
1424 return -TARGET_EINVAL;
1426 if (get_user_u32(val, optval_addr))
1427 return -TARGET_EFAULT;
1428 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1432 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
1433 ret = -TARGET_ENOPROTOOPT;
1438 /* do_getsockopt() must return target values and target errnos. */
1439 static abi_long do_getsockopt(int sockfd, int level, int optname,
1440 abi_ulong optval_addr, abi_ulong optlen)
1447 case TARGET_SOL_SOCKET:
1450 /* These don't just return a single integer */
1451 case TARGET_SO_LINGER:
1452 case TARGET_SO_RCVTIMEO:
1453 case TARGET_SO_SNDTIMEO:
1454 case TARGET_SO_PEERCRED:
1455 case TARGET_SO_PEERNAME:
1457 /* Options with 'int' argument. */
1458 case TARGET_SO_DEBUG:
1461 case TARGET_SO_REUSEADDR:
1462 optname = SO_REUSEADDR;
1464 case TARGET_SO_TYPE:
1467 case TARGET_SO_ERROR:
1470 case TARGET_SO_DONTROUTE:
1471 optname = SO_DONTROUTE;
1473 case TARGET_SO_BROADCAST:
1474 optname = SO_BROADCAST;
1476 case TARGET_SO_SNDBUF:
1477 optname = SO_SNDBUF;
1479 case TARGET_SO_RCVBUF:
1480 optname = SO_RCVBUF;
1482 case TARGET_SO_KEEPALIVE:
1483 optname = SO_KEEPALIVE;
1485 case TARGET_SO_OOBINLINE:
1486 optname = SO_OOBINLINE;
1488 case TARGET_SO_NO_CHECK:
1489 optname = SO_NO_CHECK;
1491 case TARGET_SO_PRIORITY:
1492 optname = SO_PRIORITY;
1495 case TARGET_SO_BSDCOMPAT:
1496 optname = SO_BSDCOMPAT;
1499 case TARGET_SO_PASSCRED:
1500 optname = SO_PASSCRED;
1502 case TARGET_SO_TIMESTAMP:
1503 optname = SO_TIMESTAMP;
1505 case TARGET_SO_RCVLOWAT:
1506 optname = SO_RCVLOWAT;
1513 /* TCP options all take an 'int' value. */
1515 if (get_user_u32(len, optlen))
1516 return -TARGET_EFAULT;
1518 return -TARGET_EINVAL;
1520 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1526 if (put_user_u32(val, optval_addr))
1527 return -TARGET_EFAULT;
1529 if (put_user_u8(val, optval_addr))
1530 return -TARGET_EFAULT;
1532 if (put_user_u32(len, optlen))
1533 return -TARGET_EFAULT;
1540 case IP_ROUTER_ALERT:
1544 case IP_MTU_DISCOVER:
1550 case IP_MULTICAST_TTL:
1551 case IP_MULTICAST_LOOP:
1552 if (get_user_u32(len, optlen))
1553 return -TARGET_EFAULT;
1555 return -TARGET_EINVAL;
1557 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1560 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1562 if (put_user_u32(len, optlen)
1563 || put_user_u8(val, optval_addr))
1564 return -TARGET_EFAULT;
1566 if (len > sizeof(int))
1568 if (put_user_u32(len, optlen)
1569 || put_user_u32(val, optval_addr))
1570 return -TARGET_EFAULT;
1574 ret = -TARGET_ENOPROTOOPT;
1580 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1582 ret = -TARGET_EOPNOTSUPP;
1589 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1590 * other lock functions have a return code of 0 for failure.
1592 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1593 int count, int copy)
1595 struct target_iovec *target_vec;
1599 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1601 return -TARGET_EFAULT;
1602 for(i = 0;i < count; i++) {
1603 base = tswapl(target_vec[i].iov_base);
1604 vec[i].iov_len = tswapl(target_vec[i].iov_len);
1605 if (vec[i].iov_len != 0) {
1606 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1607 /* Don't check lock_user return value. We must call writev even
1608 if an element has an invalid base address. */
1610 /* zero length pointer is ignored */
1611 vec[i].iov_base = NULL;
1614 unlock_user (target_vec, target_addr, 0);
1618 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1619 int count, int copy)
1621 struct target_iovec *target_vec;
1625 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1627 return -TARGET_EFAULT;
1628 for(i = 0;i < count; i++) {
1629 if (target_vec[i].iov_base) {
1630 base = tswapl(target_vec[i].iov_base);
1631 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1634 unlock_user (target_vec, target_addr, 0);
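/*
 * The do_*() helpers below implement the BSD socket calls: each one copies
 * sockaddr buffers and lengths between target and host representations,
 * invokes the host syscall, and returns target errnos.
 */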
1639 /* do_socket() must return target values and target errnos. */
1640 static abi_long do_socket(int domain, int type, int protocol)
1642 #if defined(TARGET_MIPS)
1644 case TARGET_SOCK_DGRAM:
1647 case TARGET_SOCK_STREAM:
1650 case TARGET_SOCK_RAW:
1653 case TARGET_SOCK_RDM:
1656 case TARGET_SOCK_SEQPACKET:
1657 type = SOCK_SEQPACKET;
1659 case TARGET_SOCK_PACKET:
1664 if (domain == PF_NETLINK)
1665 return -TARGET_EAFNOSUPPORT; /* NETLINK sockets are not supported */
1666 return get_errno(socket(domain, type, protocol));
1669 /* do_bind() must return target values and target errnos. */
1670 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1676 if ((int)addrlen < 0) {
1677 return -TARGET_EINVAL;
1680 addr = alloca(addrlen+1);
1682 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1686 return get_errno(bind(sockfd, addr, addrlen));
1689 /* do_connect() must return target values and target errnos. */
1690 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1696 if ((int)addrlen < 0) {
1697 return -TARGET_EINVAL;
1700 addr = alloca(addrlen);
1702 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1706 return get_errno(connect(sockfd, addr, addrlen));
1709 /* do_sendrecvmsg() must return target values and target errnos. */
1710 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1711 int flags, int send)
1714 struct target_msghdr *msgp;
1718 abi_ulong target_vec;
1721 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1725 return -TARGET_EFAULT;
1726 if (msgp->msg_name) {
1727 msg.msg_namelen = tswap32(msgp->msg_namelen);
1728 msg.msg_name = alloca(msg.msg_namelen);
1729 ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1732 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1736 msg.msg_name = NULL;
1737 msg.msg_namelen = 0;
1739 msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1740 msg.msg_control = alloca(msg.msg_controllen);
1741 msg.msg_flags = tswap32(msgp->msg_flags);
1743 count = tswapl(msgp->msg_iovlen);
1744 vec = alloca(count * sizeof(struct iovec));
1745 target_vec = tswapl(msgp->msg_iov);
1746 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1747 msg.msg_iovlen = count;
1751 ret = target_to_host_cmsg(&msg, msgp);
1753 ret = get_errno(sendmsg(fd, &msg, flags));
1755 ret = get_errno(recvmsg(fd, &msg, flags));
1756 if (!is_error(ret)) {
1758 ret = host_to_target_cmsg(msgp, &msg);
1763 unlock_iovec(vec, target_vec, count, !send);
1764 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1768 /* do_accept() must return target values and target errnos. */
1769 static abi_long do_accept(int fd, abi_ulong target_addr,
1770 abi_ulong target_addrlen_addr)
1776 if (target_addr == 0)
1777 return get_errno(accept(fd, NULL, NULL));
1779 /* linux returns EINVAL if addrlen pointer is invalid */
1780 if (get_user_u32(addrlen, target_addrlen_addr))
1781 return -TARGET_EINVAL;
1783 if ((int)addrlen < 0) {
1784 return -TARGET_EINVAL;
1787 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1788 return -TARGET_EINVAL;
1790 addr = alloca(addrlen);
1792 ret = get_errno(accept(fd, addr, &addrlen));
1793 if (!is_error(ret)) {
1794 host_to_target_sockaddr(target_addr, addr, addrlen);
1795 if (put_user_u32(addrlen, target_addrlen_addr))
1796 ret = -TARGET_EFAULT;
1801 /* do_getpeername() must return target values and target errnos. */
1802 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1803 abi_ulong target_addrlen_addr)
1809 if (get_user_u32(addrlen, target_addrlen_addr))
1810 return -TARGET_EFAULT;
1812 if ((int)addrlen < 0) {
1813 return -TARGET_EINVAL;
1816 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1817 return -TARGET_EFAULT;
1819 addr = alloca(addrlen);
1821 ret = get_errno(getpeername(fd, addr, &addrlen));
1822 if (!is_error(ret)) {
1823 host_to_target_sockaddr(target_addr, addr, addrlen);
1824 if (put_user_u32(addrlen, target_addrlen_addr))
1825 ret = -TARGET_EFAULT;
1830 /* do_getsockname() must return target values and target errnos. */
1831 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1832 abi_ulong target_addrlen_addr)
1838 if (get_user_u32(addrlen, target_addrlen_addr))
1839 return -TARGET_EFAULT;
1841 if ((int)addrlen < 0) {
1842 return -TARGET_EINVAL;
1845 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1846 return -TARGET_EFAULT;
1848 addr = alloca(addrlen);
1850 ret = get_errno(getsockname(fd, addr, &addrlen));
1851 if (!is_error(ret)) {
1852 host_to_target_sockaddr(target_addr, addr, addrlen);
1853 if (put_user_u32(addrlen, target_addrlen_addr))
1854 ret = -TARGET_EFAULT;
1859 /* do_socketpair() must return target values and target errnos. */
1860 static abi_long do_socketpair(int domain, int type, int protocol,
1861 abi_ulong target_tab_addr)
1866 ret = get_errno(socketpair(domain, type, protocol, tab));
1867 if (!is_error(ret)) {
1868 if (put_user_s32(tab[0], target_tab_addr)
1869 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1870 ret = -TARGET_EFAULT;
1875 /* do_sendto() must return target values and target errnos. */
1876 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1877 abi_ulong target_addr, socklen_t addrlen)
1883 if ((int)addrlen < 0) {
1884 return -TARGET_EINVAL;
1887 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1889 return -TARGET_EFAULT;
1891 addr = alloca(addrlen);
1892 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1894 unlock_user(host_msg, msg, 0);
1897 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1899 ret = get_errno(send(fd, host_msg, len, flags));
1901 unlock_user(host_msg, msg, 0);
1905 /* do_recvfrom() must return target values and target errnos. */
1906 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1907 abi_ulong target_addr,
1908 abi_ulong target_addrlen)
1915 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
1917 return -TARGET_EFAULT;
1919 if (get_user_u32(addrlen, target_addrlen)) {
1920 ret = -TARGET_EFAULT;
1923 if ((int)addrlen < 0) {
1924 ret = -TARGET_EINVAL;
1927 addr = alloca(addrlen);
1928 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
1930 addr = NULL; /* To keep compiler quiet. */
1931 ret = get_errno(recv(fd, host_msg, len, flags));
1933 if (!is_error(ret)) {
1935 host_to_target_sockaddr(target_addr, addr, addrlen);
1936 if (put_user_u32(addrlen, target_addrlen)) {
1937 ret = -TARGET_EFAULT;
1941 unlock_user(host_msg, msg, len);
1944 unlock_user(host_msg, msg, 0);
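/*
 * On targets that multiplex the socket API through a single socketcall
 * syscall, the argument block at vptr is an array of abi_ulong values;
 * each one is fetched with get_user_ual() and the request is dispatched
 * to the matching do_*() helper above.
 */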
1949 #ifdef TARGET_NR_socketcall
1950 /* do_socketcall() must return target values and target errnos. */
1951 static abi_long do_socketcall(int num, abi_ulong vptr)
1954 const int n = sizeof(abi_ulong);
1959 abi_ulong domain, type, protocol;
1961 if (get_user_ual(domain, vptr)
1962 || get_user_ual(type, vptr + n)
1963 || get_user_ual(protocol, vptr + 2 * n))
1964 return -TARGET_EFAULT;
1966 ret = do_socket(domain, type, protocol);
1972 abi_ulong target_addr;
1975 if (get_user_ual(sockfd, vptr)
1976 || get_user_ual(target_addr, vptr + n)
1977 || get_user_ual(addrlen, vptr + 2 * n))
1978 return -TARGET_EFAULT;
1980 ret = do_bind(sockfd, target_addr, addrlen);
1983 case SOCKOP_connect:
1986 abi_ulong target_addr;
1989 if (get_user_ual(sockfd, vptr)
1990 || get_user_ual(target_addr, vptr + n)
1991 || get_user_ual(addrlen, vptr + 2 * n))
1992 return -TARGET_EFAULT;
1994 ret = do_connect(sockfd, target_addr, addrlen);
1999 abi_ulong sockfd, backlog;
2001 if (get_user_ual(sockfd, vptr)
2002 || get_user_ual(backlog, vptr + n))
2003 return -TARGET_EFAULT;
2005 ret = get_errno(listen(sockfd, backlog));
2011 abi_ulong target_addr, target_addrlen;
2013 if (get_user_ual(sockfd, vptr)
2014 || get_user_ual(target_addr, vptr + n)
2015 || get_user_ual(target_addrlen, vptr + 2 * n))
2016 return -TARGET_EFAULT;
2018 ret = do_accept(sockfd, target_addr, target_addrlen);
2021 case SOCKOP_getsockname:
2024 abi_ulong target_addr, target_addrlen;
2026 if (get_user_ual(sockfd, vptr)
2027 || get_user_ual(target_addr, vptr + n)
2028 || get_user_ual(target_addrlen, vptr + 2 * n))
2029 return -TARGET_EFAULT;
2031 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2034 case SOCKOP_getpeername:
2037 abi_ulong target_addr, target_addrlen;
2039 if (get_user_ual(sockfd, vptr)
2040 || get_user_ual(target_addr, vptr + n)
2041 || get_user_ual(target_addrlen, vptr + 2 * n))
2042 return -TARGET_EFAULT;
2044 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2047 case SOCKOP_socketpair:
2049 abi_ulong domain, type, protocol;
2052 if (get_user_ual(domain, vptr)
2053 || get_user_ual(type, vptr + n)
2054 || get_user_ual(protocol, vptr + 2 * n)
2055 || get_user_ual(tab, vptr + 3 * n))
2056 return -TARGET_EFAULT;
2058 ret = do_socketpair(domain, type, protocol, tab);
2068 if (get_user_ual(sockfd, vptr)
2069 || get_user_ual(msg, vptr + n)
2070 || get_user_ual(len, vptr + 2 * n)
2071 || get_user_ual(flags, vptr + 3 * n))
2072 return -TARGET_EFAULT;
2074 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2084 if (get_user_ual(sockfd, vptr)
2085 || get_user_ual(msg, vptr + n)
2086 || get_user_ual(len, vptr + 2 * n)
2087 || get_user_ual(flags, vptr + 3 * n))
2088 return -TARGET_EFAULT;
2090 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2102 if (get_user_ual(sockfd, vptr)
2103 || get_user_ual(msg, vptr + n)
2104 || get_user_ual(len, vptr + 2 * n)
2105 || get_user_ual(flags, vptr + 3 * n)
2106 || get_user_ual(addr, vptr + 4 * n)
2107 || get_user_ual(addrlen, vptr + 5 * n))
2108 return -TARGET_EFAULT;
2110 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2113 case SOCKOP_recvfrom:
2122 if (get_user_ual(sockfd, vptr)
2123 || get_user_ual(msg, vptr + n)
2124 || get_user_ual(len, vptr + 2 * n)
2125 || get_user_ual(flags, vptr + 3 * n)
2126 || get_user_ual(addr, vptr + 4 * n)
2127 || get_user_ual(addrlen, vptr + 5 * n))
2128 return -TARGET_EFAULT;
2130 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2133 case SOCKOP_shutdown:
2135 abi_ulong sockfd, how;
2137 if (get_user_ual(sockfd, vptr)
2138 || get_user_ual(how, vptr + n))
2139 return -TARGET_EFAULT;
2141 ret = get_errno(shutdown(sockfd, how));
2144 case SOCKOP_sendmsg:
2145 case SOCKOP_recvmsg:
2148 abi_ulong target_msg;
2151 if (get_user_ual(fd, vptr)
2152 || get_user_ual(target_msg, vptr + n)
2153 || get_user_ual(flags, vptr + 2 * n))
2154 return -TARGET_EFAULT;
2156 ret = do_sendrecvmsg(fd, target_msg, flags,
2157 (num == SOCKOP_sendmsg));
2160 case SOCKOP_setsockopt:
2168 if (get_user_ual(sockfd, vptr)
2169 || get_user_ual(level, vptr + n)
2170 || get_user_ual(optname, vptr + 2 * n)
2171 || get_user_ual(optval, vptr + 3 * n)
2172 || get_user_ual(optlen, vptr + 4 * n))
2173 return -TARGET_EFAULT;
2175 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2178 case SOCKOP_getsockopt:
2186 if (get_user_ual(sockfd, vptr)
2187 || get_user_ual(level, vptr + n)
2188 || get_user_ual(optname, vptr + 2 * n)
2189 || get_user_ual(optval, vptr + 3 * n)
2190 || get_user_ual(optlen, vptr + 4 * n))
2191 return -TARGET_EFAULT;
2193 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2197 gemu_log("Unsupported socketcall: %d\n", num);
2198 ret = -TARGET_ENOSYS;
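/*
 * System V IPC emulation.  The target_* structures below mirror the guest
 * ABI layout of the corresponding kernel structures; the helpers convert
 * them field by field, byte-swapping with tswapl() as needed.
 */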
2205 #define N_SHM_REGIONS 32
2207 static struct shm_region {
2210 } shm_regions[N_SHM_REGIONS];
2212 struct target_ipc_perm
2219 unsigned short int mode;
2220 unsigned short int __pad1;
2221 unsigned short int __seq;
2222 unsigned short int __pad2;
2223 abi_ulong __unused1;
2224 abi_ulong __unused2;
2227 struct target_semid_ds
2229 struct target_ipc_perm sem_perm;
2230 abi_ulong sem_otime;
2231 abi_ulong __unused1;
2232 abi_ulong sem_ctime;
2233 abi_ulong __unused2;
2234 abi_ulong sem_nsems;
2235 abi_ulong __unused3;
2236 abi_ulong __unused4;
2239 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2240 abi_ulong target_addr)
2242 struct target_ipc_perm *target_ip;
2243 struct target_semid_ds *target_sd;
2245 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2246 return -TARGET_EFAULT;
2247 target_ip = &(target_sd->sem_perm);
2248 host_ip->__key = tswapl(target_ip->__key);
2249 host_ip->uid = tswapl(target_ip->uid);
2250 host_ip->gid = tswapl(target_ip->gid);
2251 host_ip->cuid = tswapl(target_ip->cuid);
2252 host_ip->cgid = tswapl(target_ip->cgid);
2253 host_ip->mode = tswapl(target_ip->mode);
2254 unlock_user_struct(target_sd, target_addr, 0);
2258 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2259 struct ipc_perm *host_ip)
2261 struct target_ipc_perm *target_ip;
2262 struct target_semid_ds *target_sd;
2264 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2265 return -TARGET_EFAULT;
2266 target_ip = &(target_sd->sem_perm);
2267 target_ip->__key = tswapl(host_ip->__key);
2268 target_ip->uid = tswapl(host_ip->uid);
2269 target_ip->gid = tswapl(host_ip->gid);
2270 target_ip->cuid = tswapl(host_ip->cuid);
2271 target_ip->cgid = tswapl(host_ip->cgid);
2272 target_ip->mode = tswapl(host_ip->mode);
2273 unlock_user_struct(target_sd, target_addr, 1);
2277 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2278 abi_ulong target_addr)
2280 struct target_semid_ds *target_sd;
2282 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2283 return -TARGET_EFAULT;
2284 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2285 return -TARGET_EFAULT;
2286 host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
2287 host_sd->sem_otime = tswapl(target_sd->sem_otime);
2288 host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
2289 unlock_user_struct(target_sd, target_addr, 0);
2293 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2294 struct semid_ds *host_sd)
2296 struct target_semid_ds *target_sd;
2298 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2299 return -TARGET_EFAULT;
2300 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2301 return -TARGET_EFAULT;
2302 target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
2303 target_sd->sem_otime = tswapl(host_sd->sem_otime);
2304 target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
2305 unlock_user_struct(target_sd, target_addr, 1);
2309 struct target_seminfo {
2322 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2323 struct seminfo *host_seminfo)
2325 struct target_seminfo *target_seminfo;
2326 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2327 return -TARGET_EFAULT;
2328 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2329 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2330 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2331 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2332 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2333 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2334 __put_user(host_seminfo->semume, &target_seminfo->semume);
2335 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2336 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2337 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2338 unlock_user_struct(target_seminfo, target_addr, 1);
2344 struct semid_ds *buf;
2345 unsigned short *array;
2346 struct seminfo *__buf;
2349 union target_semun {
2356 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2357 abi_ulong target_addr)
2360 unsigned short *array;
2362 struct semid_ds semid_ds;
2365 semun.buf = &semid_ds;
2367 ret = semctl(semid, 0, IPC_STAT, semun);
2369 return get_errno(ret);
2371 nsems = semid_ds.sem_nsems;
2373 *host_array = malloc(nsems*sizeof(unsigned short));
2374 array = lock_user(VERIFY_READ, target_addr,
2375 nsems*sizeof(unsigned short), 1);
2377 return -TARGET_EFAULT;
2379 for(i=0; i<nsems; i++) {
2380 __get_user((*host_array)[i], &array[i]);
2382 unlock_user(array, target_addr, 0);
2387 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2388 unsigned short **host_array)
2391 unsigned short *array;
2393 struct semid_ds semid_ds;
2396 semun.buf = &semid_ds;
2398 ret = semctl(semid, 0, IPC_STAT, semun);
2400 return get_errno(ret);
2402 nsems = semid_ds.sem_nsems;
2404 array = lock_user(VERIFY_WRITE, target_addr,
2405 nsems*sizeof(unsigned short), 0);
2407 return -TARGET_EFAULT;
2409 for(i=0; i<nsems; i++) {
2410 __put_user((*host_array)[i], &array[i]);
2413 unlock_user(array, target_addr, 1);
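/* Dispatch a guest semctl(): depending on the command, marshal the
 * relevant member of the target semun union (plain value, semaphore
 * array, semid_ds or seminfo buffer) to host format, issue the host
 * semctl(), and copy any results back to guest memory. */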
2418 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2419 union target_semun target_su)
2422 struct semid_ds dsarg;
2423 unsigned short *array = NULL;
2424 struct seminfo seminfo;
2425 abi_long ret = -TARGET_EINVAL;
2432 arg.val = tswapl(target_su.val);
2433 ret = get_errno(semctl(semid, semnum, cmd, arg));
2434 target_su.val = tswapl(arg.val);
2438 err = target_to_host_semarray(semid, &array, target_su.array);
2442 ret = get_errno(semctl(semid, semnum, cmd, arg));
2443 err = host_to_target_semarray(semid, target_su.array, &array);
2450 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2454 ret = get_errno(semctl(semid, semnum, cmd, arg));
2455 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2461 arg.__buf = &seminfo;
2462 ret = get_errno(semctl(semid, semnum, cmd, arg));
2463 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2471 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2478 struct target_sembuf {
2479 unsigned short sem_num;
2484 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2485 abi_ulong target_addr,
2488 struct target_sembuf *target_sembuf;
2491 target_sembuf = lock_user(VERIFY_READ, target_addr,
2492 nsops*sizeof(struct target_sembuf), 1);
2494 return -TARGET_EFAULT;
2496 for(i=0; i<nsops; i++) {
2497 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2498 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2499 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2502 unlock_user(target_sembuf, target_addr, 0);
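/* Copy the guest sembuf array into a host-format buffer on the stack
 * and forward it to the host semop(). */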
2507 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2509 struct sembuf sops[nsops];
2511 if (target_to_host_sembuf(sops, ptr, nsops))
2512 return -TARGET_EFAULT;
2514 return semop(semid, sops, nsops);
2517 struct target_msqid_ds
2519 struct target_ipc_perm msg_perm;
2520 abi_ulong msg_stime;
2521 #if TARGET_ABI_BITS == 32
2522 abi_ulong __unused1;
2524 abi_ulong msg_rtime;
2525 #if TARGET_ABI_BITS == 32
2526 abi_ulong __unused2;
2528 abi_ulong msg_ctime;
2529 #if TARGET_ABI_BITS == 32
2530 abi_ulong __unused3;
2532 abi_ulong __msg_cbytes;
2534 abi_ulong msg_qbytes;
2535 abi_ulong msg_lspid;
2536 abi_ulong msg_lrpid;
2537 abi_ulong __unused4;
2538 abi_ulong __unused5;
2541 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2542 abi_ulong target_addr)
2544 struct target_msqid_ds *target_md;
2546 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2547 return -TARGET_EFAULT;
2548 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2549 return -TARGET_EFAULT;
2550 host_md->msg_stime = tswapl(target_md->msg_stime);
2551 host_md->msg_rtime = tswapl(target_md->msg_rtime);
2552 host_md->msg_ctime = tswapl(target_md->msg_ctime);
2553 host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
2554 host_md->msg_qnum = tswapl(target_md->msg_qnum);
2555 host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
2556 host_md->msg_lspid = tswapl(target_md->msg_lspid);
2557 host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
2558 unlock_user_struct(target_md, target_addr, 0);
2562 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2563 struct msqid_ds *host_md)
2565 struct target_msqid_ds *target_md;
2567 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2568 return -TARGET_EFAULT;
2569 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2570 return -TARGET_EFAULT;
2571 target_md->msg_stime = tswapl(host_md->msg_stime);
2572 target_md->msg_rtime = tswapl(host_md->msg_rtime);
2573 target_md->msg_ctime = tswapl(host_md->msg_ctime);
2574 target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
2575 target_md->msg_qnum = tswapl(host_md->msg_qnum);
2576 target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
2577 target_md->msg_lspid = tswapl(host_md->msg_lspid);
2578 target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
2579 unlock_user_struct(target_md, target_addr, 1);
2583 struct target_msginfo {
2591 unsigned short int msgseg;
2594 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2595 struct msginfo *host_msginfo)
2597 struct target_msginfo *target_msginfo;
2598 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2599 return -TARGET_EFAULT;
2600 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2601 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2602 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2603 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2604 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2605 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2606 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2607 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2608 unlock_user_struct(target_msginfo, target_addr, 1);
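/* msgctl() wrapper: stat/set style commands go through the msqid_ds
 * converters above, info style commands return a msginfo structure,
 * and commands without a payload are passed straight through. */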
2612 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2614 struct msqid_ds dsarg;
2615 struct msginfo msginfo;
2616 abi_long ret = -TARGET_EINVAL;
2624 if (target_to_host_msqid_ds(&dsarg,ptr))
2625 return -TARGET_EFAULT;
2626 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2627 if (host_to_target_msqid_ds(ptr,&dsarg))
2628 return -TARGET_EFAULT;
2631 ret = get_errno(msgctl(msgid, cmd, NULL));
2635 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2636 if (host_to_target_msginfo(ptr, &msginfo))
2637 return -TARGET_EFAULT;
2644 struct target_msgbuf {
2649 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2650 unsigned int msgsz, int msgflg)
2652 struct target_msgbuf *target_mb;
2653 struct msgbuf *host_mb;
2656 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2657 return -TARGET_EFAULT;
2658 host_mb = malloc(msgsz+sizeof(long));
2659 host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
2660 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2661 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2663 unlock_user_struct(target_mb, msgp, 0);
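/* Receive a message into a temporary host buffer, then copy the mtype
 * and as many mtext bytes as were actually received back into the
 * guest msgbuf. */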
2668 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2669 unsigned int msgsz, abi_long msgtyp,
2672 struct target_msgbuf *target_mb;
2674 struct msgbuf *host_mb;
2677 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2678 return -TARGET_EFAULT;
2680 host_mb = malloc(msgsz+sizeof(long));
2681 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
2684 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2685 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2686 if (!target_mtext) {
2687 ret = -TARGET_EFAULT;
2690 memcpy(target_mb->mtext, host_mb->mtext, ret);
2691 unlock_user(target_mtext, target_mtext_addr, ret);
2694 target_mb->mtype = tswapl(host_mb->mtype);
2699 unlock_user_struct(target_mb, msgp, 1);
2703 struct target_shmid_ds
2705 struct target_ipc_perm shm_perm;
2706 abi_ulong shm_segsz;
2707 abi_ulong shm_atime;
2708 #if TARGET_ABI_BITS == 32
2709 abi_ulong __unused1;
2711 abi_ulong shm_dtime;
2712 #if TARGET_ABI_BITS == 32
2713 abi_ulong __unused2;
2715 abi_ulong shm_ctime;
2716 #if TARGET_ABI_BITS == 32
2717 abi_ulong __unused3;
2721 abi_ulong shm_nattch;
2722 unsigned long int __unused4;
2723 unsigned long int __unused5;
2726 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2727 abi_ulong target_addr)
2729 struct target_shmid_ds *target_sd;
2731 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2732 return -TARGET_EFAULT;
2733 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2734 return -TARGET_EFAULT;
2735 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2736 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2737 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2738 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2739 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2740 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2741 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2742 unlock_user_struct(target_sd, target_addr, 0);
2746 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2747 struct shmid_ds *host_sd)
2749 struct target_shmid_ds *target_sd;
2751 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2752 return -TARGET_EFAULT;
2753 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2754 return -TARGET_EFAULT;
2755 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2756 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2757 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2758 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2759 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2760 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2761 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2762 unlock_user_struct(target_sd, target_addr, 1);
2766 struct target_shminfo {
2774 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2775 struct shminfo *host_shminfo)
2777 struct target_shminfo *target_shminfo;
2778 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2779 return -TARGET_EFAULT;
2780 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2781 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2782 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2783 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2784 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2785 unlock_user_struct(target_shminfo, target_addr, 1);
2789 struct target_shm_info {
2794 abi_ulong swap_attempts;
2795 abi_ulong swap_successes;
2798 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2799 struct shm_info *host_shm_info)
2801 struct target_shm_info *target_shm_info;
2802 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2803 return -TARGET_EFAULT;
2804 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2805 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2806 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2807 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2808 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2809 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2810 unlock_user_struct(target_shm_info, target_addr, 1);
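/* shmctl() wrapper: stat/set commands use the shmid_ds converters,
 * IPC_INFO and SHM_INFO return shminfo/shm_info summaries, and the
 * remaining commands take no buffer at all. */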
2814 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2816 struct shmid_ds dsarg;
2817 struct shminfo shminfo;
2818 struct shm_info shm_info;
2819 abi_long ret = -TARGET_EINVAL;
2827 if (target_to_host_shmid_ds(&dsarg, buf))
2828 return -TARGET_EFAULT;
2829 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2830 if (host_to_target_shmid_ds(buf, &dsarg))
2831 return -TARGET_EFAULT;
2834 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2835 if (host_to_target_shminfo(buf, &shminfo))
2836 return -TARGET_EFAULT;
2839 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2840 if (host_to_target_shm_info(buf, &shm_info))
2841 return -TARGET_EFAULT;
2846 ret = get_errno(shmctl(shmid, cmd, NULL));
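/* Attach a SysV shared memory segment.  The segment size is queried
 * with IPC_STAT first so the guest mapping can be sized and its page
 * protection flags set; the resulting guest address is recorded in
 * shm_regions[] so that do_shmdt() can undo it later. */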
2853 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2857 struct shmid_ds shm_info;
2860 /* find out the length of the shared memory segment */
2861 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2862 if (is_error(ret)) {
2863 /* can't get length, bail out */
2870 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2872 abi_ulong mmap_start;
2874 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2876 if (mmap_start == -1) {
2878 host_raddr = (void *)-1;
2880 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2883 if (host_raddr == (void *)-1) {
2885 return get_errno((long)host_raddr);
2887 raddr=h2g((unsigned long)host_raddr);
2889 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2890 PAGE_VALID | PAGE_READ |
2891 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2893 for (i = 0; i < N_SHM_REGIONS; i++) {
2894 if (shm_regions[i].start == 0) {
2895 shm_regions[i].start = raddr;
2896 shm_regions[i].size = shm_info.shm_segsz;
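/* Detach a segment previously recorded by do_shmat(), clearing the
 * guest page flags for its address range. */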
2906 static inline abi_long do_shmdt(abi_ulong shmaddr)
2910 for (i = 0; i < N_SHM_REGIONS; ++i) {
2911 if (shm_regions[i].start == shmaddr) {
2912 shm_regions[i].start = 0;
2913 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
2918 return get_errno(shmdt(g2h(shmaddr)));
2921 #ifdef TARGET_NR_ipc
2922 /* ??? This only works with linear mappings. */
2923 /* do_ipc() must return target values and target errnos. */
2924 static abi_long do_ipc(unsigned int call, int first,
2925 int second, int third,
2926 abi_long ptr, abi_long fifth)
2931 version = call >> 16;
2936 ret = do_semop(first, ptr, second);
2940 ret = get_errno(semget(first, second, third));
2944 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
2948 ret = get_errno(msgget(first, second));
2952 ret = do_msgsnd(first, ptr, second, third);
2956 ret = do_msgctl(first, second, ptr);
2963 struct target_ipc_kludge {
2968 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
2969 ret = -TARGET_EFAULT;
2973 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
2975 unlock_user_struct(tmp, ptr, 0);
2979 ret = do_msgrcv(first, ptr, second, fifth, third);
2988 raddr = do_shmat(first, ptr, second);
2989 if (is_error(raddr))
2990 return get_errno(raddr);
2991 if (put_user_ual(raddr, third))
2992 return -TARGET_EFAULT;
2996 ret = -TARGET_EINVAL;
3001 ret = do_shmdt(ptr);
3005 /* IPC_* flag values are the same on all linux platforms */
3006 ret = get_errno(shmget(first, second, third));
3009 /* IPC_* and SHM_* command values are the same on all linux platforms */
3011 ret = do_shmctl(first, second, third);
3014 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3015 ret = -TARGET_ENOSYS;
3022 /* kernel structure types definitions */
3024 #define STRUCT(name, ...) STRUCT_ ## name,
3025 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3027 #include "syscall_types.h"
3030 #undef STRUCT_SPECIAL
3032 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3033 #define STRUCT_SPECIAL(name)
3034 #include "syscall_types.h"
3036 #undef STRUCT_SPECIAL
3038 typedef struct IOCTLEntry IOCTLEntry;
3040 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3041 int fd, abi_long cmd, abi_long arg);
3044 unsigned int target_cmd;
3045 unsigned int host_cmd;
3048 do_ioctl_fn *do_ioctl;
3049 const argtype arg_type[5];
3052 #define IOC_R 0x0001
3053 #define IOC_W 0x0002
3054 #define IOC_RW (IOC_R | IOC_W)
3056 #define MAX_STRUCT_SIZE 4096
3058 #ifdef CONFIG_FIEMAP
3059 /* So fiemap access checks don't overflow on 32 bit systems.
3060 * This is very slightly smaller than the limit imposed by
3061 * the underlying kernel.
3063 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3064 / sizeof(struct fiemap_extent))
3066 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3067 int fd, abi_long cmd, abi_long arg)
3069 /* The parameter for this ioctl is a struct fiemap followed
3070 * by an array of struct fiemap_extent whose size is set
3071 * in fiemap->fm_extent_count. The array is filled in by the
3074 int target_size_in, target_size_out;
3076 const argtype *arg_type = ie->arg_type;
3077 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3080 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3084 assert(arg_type[0] == TYPE_PTR);
3085 assert(ie->access == IOC_RW);
3087 target_size_in = thunk_type_size(arg_type, 0);
3088 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3090 return -TARGET_EFAULT;
3092 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3093 unlock_user(argptr, arg, 0);
3094 fm = (struct fiemap *)buf_temp;
3095 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3096 return -TARGET_EINVAL;
3099 outbufsz = sizeof (*fm) +
3100 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3102 if (outbufsz > MAX_STRUCT_SIZE) {
3103 /* We can't fit all the extents into the fixed size buffer.
3104 * Allocate one that is large enough and use it instead.
3106 fm = malloc(outbufsz);
3108 return -TARGET_ENOMEM;
3110 memcpy(fm, buf_temp, sizeof(struct fiemap));
3113 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3114 if (!is_error(ret)) {
3115 target_size_out = target_size_in;
3116 /* An extent_count of 0 means we were only counting the extents
3117 * so there are no structs to copy
3119 if (fm->fm_extent_count != 0) {
3120 target_size_out += fm->fm_mapped_extents * extent_size;
3122 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3124 ret = -TARGET_EFAULT;
3126 /* Convert the struct fiemap */
3127 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3128 if (fm->fm_extent_count != 0) {
3129 p = argptr + target_size_in;
3130 /* ...and then all the struct fiemap_extents */
3131 for (i = 0; i < fm->fm_mapped_extents; i++) {
3132 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3137 unlock_user(argptr, arg, target_size_out);
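/* ifconf ioctl handler (typically SIOCGIFCONF): the argument is a
 * struct ifconf whose ifc_buf points at an array of struct ifreq.
 * The outer struct and each array element are converted separately
 * because the target and host layouts differ in size. */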
3147 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3148 int fd, abi_long cmd, abi_long arg)
3150 const argtype *arg_type = ie->arg_type;
3154 struct ifconf *host_ifconf;
3156 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3157 int target_ifreq_size;
3162 abi_long target_ifc_buf;
3166 assert(arg_type[0] == TYPE_PTR);
3167 assert(ie->access == IOC_RW);
3170 target_size = thunk_type_size(arg_type, 0);
3172 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3174 return -TARGET_EFAULT;
3175 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3176 unlock_user(argptr, arg, 0);
3178 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3179 target_ifc_len = host_ifconf->ifc_len;
3180 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3182 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3183 nb_ifreq = target_ifc_len / target_ifreq_size;
3184 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3186 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3187 if (outbufsz > MAX_STRUCT_SIZE) {
3188 /* We can't fit all the ifreq entries into the fixed size buffer.
3189 * Allocate one that is large enough and use it instead.
3191 host_ifconf = malloc(outbufsz);
3193 return -TARGET_ENOMEM;
3195 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3198 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3200 host_ifconf->ifc_len = host_ifc_len;
3201 host_ifconf->ifc_buf = host_ifc_buf;
3203 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3204 if (!is_error(ret)) {
3205 /* convert host ifc_len to target ifc_len */
3207 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3208 target_ifc_len = nb_ifreq * target_ifreq_size;
3209 host_ifconf->ifc_len = target_ifc_len;
3211 /* restore target ifc_buf */
3213 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3215 /* copy struct ifconf to target user */
3217 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3219 return -TARGET_EFAULT;
3220 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3221 unlock_user(argptr, arg, target_size);
3223 /* copy ifreq[] to target user */
3225 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3226 for (i = 0; i < nb_ifreq ; i++) {
3227 thunk_convert(argptr + i * target_ifreq_size,
3228 host_ifc_buf + i * sizeof(struct ifreq),
3229 ifreq_arg_type, THUNK_TARGET);
3231 unlock_user(argptr, target_ifc_buf, target_ifc_len);
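/* Table of supported ioctls.  Each entry records the target and host
 * command numbers, the access direction, an optional special-case
 * handler and a description of the argument type used by the generic
 * thunk conversion in do_ioctl() below. */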
3241 static IOCTLEntry ioctl_entries[] = {
3242 #define IOCTL(cmd, access, ...) \
3243 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3244 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3245 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3250 /* ??? Implement proper locking for ioctls. */
3251 /* do_ioctl() must return target values and target errnos. */
3252 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3254 const IOCTLEntry *ie;
3255 const argtype *arg_type;
3257 uint8_t buf_temp[MAX_STRUCT_SIZE];
3263 if (ie->target_cmd == 0) {
3264 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3265 return -TARGET_ENOSYS;
3267 if (ie->target_cmd == cmd)
3271 arg_type = ie->arg_type;
3273 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3276 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3279 switch(arg_type[0]) {
3282 ret = get_errno(ioctl(fd, ie->host_cmd));
3287 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3291 target_size = thunk_type_size(arg_type, 0);
3292 switch(ie->access) {
3294 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3295 if (!is_error(ret)) {
3296 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3298 return -TARGET_EFAULT;
3299 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3300 unlock_user(argptr, arg, target_size);
3304 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3306 return -TARGET_EFAULT;
3307 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3308 unlock_user(argptr, arg, 0);
3309 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3313 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3315 return -TARGET_EFAULT;
3316 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3317 unlock_user(argptr, arg, 0);
3318 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3319 if (!is_error(ret)) {
3320 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3322 return -TARGET_EFAULT;
3323 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3324 unlock_user(argptr, arg, target_size);
3330 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3331 (long)cmd, arg_type[0]);
3332 ret = -TARGET_ENOSYS;
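/* Translation tables mapping the target termios flag bits onto the
 * host definitions; used by the termios converters below via
 * target_to_host_bitmask()/host_to_target_bitmask(). */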
3338 static const bitmask_transtbl iflag_tbl[] = {
3339 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3340 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3341 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3342 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3343 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3344 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3345 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3346 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3347 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3348 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3349 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3350 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3351 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3352 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3356 static const bitmask_transtbl oflag_tbl[] = {
3357 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3358 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3359 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3360 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3361 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3362 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3363 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3364 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3365 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3366 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3367 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3368 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3369 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3370 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3371 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3372 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3373 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3374 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3375 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3376 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3377 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3378 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3379 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3380 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3384 static const bitmask_transtbl cflag_tbl[] = {
3385 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3386 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3387 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3388 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3389 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3390 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3391 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3392 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3393 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3394 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3395 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3396 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3397 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3398 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3399 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3400 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3401 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3402 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3403 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3404 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3405 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3406 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3407 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3408 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3409 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3410 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3411 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3412 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3413 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3414 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3415 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3419 static const bitmask_transtbl lflag_tbl[] = {
3420 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3421 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3422 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3423 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3424 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3425 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3426 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3427 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3428 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3429 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3430 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3431 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3432 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3433 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3434 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
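/* Convert a target struct termios to the host layout: translate each
 * flag word through the tables above and copy the control characters
 * individually, since the c_cc indices may differ between target and
 * host. */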
3438 static void target_to_host_termios (void *dst, const void *src)
3440 struct host_termios *host = dst;
3441 const struct target_termios *target = src;
3444 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3446 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3448 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3450 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3451 host->c_line = target->c_line;
3453 memset(host->c_cc, 0, sizeof(host->c_cc));
3454 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3455 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3456 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3457 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3458 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3459 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3460 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3461 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3462 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3463 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3464 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3465 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3466 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3467 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3468 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3469 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3470 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3473 static void host_to_target_termios (void *dst, const void *src)
3475 struct target_termios *target = dst;
3476 const struct host_termios *host = src;
3479 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3481 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3483 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3485 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3486 target->c_line = host->c_line;
3488 memset(target->c_cc, 0, sizeof(target->c_cc));
3489 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3490 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3491 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3492 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3493 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3494 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3495 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3496 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3497 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3498 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3499 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3500 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3501 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3502 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3503 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3504 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3505 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3508 static const StructEntry struct_termios_def = {
3509 .convert = { host_to_target_termios, target_to_host_termios },
3510 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3511 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3514 static bitmask_transtbl mmap_flags_tbl[] = {
3515 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3516 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3517 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3518 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3519 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3520 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3521 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3522 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3526 #if defined(TARGET_I386)
3528 /* NOTE: there is really one LDT for all the threads */
3529 static uint8_t *ldt_table;
3531 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3538 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3539 if (size > bytecount)
3541 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3543 return -TARGET_EFAULT;
3544 /* ??? Should this be byteswapped? */
3545 memcpy(p, ldt_table, size);
3546 unlock_user(p, ptr, size);
3550 /* XXX: add locking support */
3551 static abi_long write_ldt(CPUX86State *env,
3552 abi_ulong ptr, unsigned long bytecount, int oldmode)
3554 struct target_modify_ldt_ldt_s ldt_info;
3555 struct target_modify_ldt_ldt_s *target_ldt_info;
3556 int seg_32bit, contents, read_exec_only, limit_in_pages;
3557 int seg_not_present, useable, lm;
3558 uint32_t *lp, entry_1, entry_2;
3560 if (bytecount != sizeof(ldt_info))
3561 return -TARGET_EINVAL;
3562 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3563 return -TARGET_EFAULT;
3564 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3565 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3566 ldt_info.limit = tswap32(target_ldt_info->limit);
3567 ldt_info.flags = tswap32(target_ldt_info->flags);
3568 unlock_user_struct(target_ldt_info, ptr, 0);
3570 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3571 return -TARGET_EINVAL;
3572 seg_32bit = ldt_info.flags & 1;
3573 contents = (ldt_info.flags >> 1) & 3;
3574 read_exec_only = (ldt_info.flags >> 3) & 1;
3575 limit_in_pages = (ldt_info.flags >> 4) & 1;
3576 seg_not_present = (ldt_info.flags >> 5) & 1;
3577 useable = (ldt_info.flags >> 6) & 1;
3581 lm = (ldt_info.flags >> 7) & 1;
3583 if (contents == 3) {
3585 return -TARGET_EINVAL;
3586 if (seg_not_present == 0)
3587 return -TARGET_EINVAL;
3589 /* allocate the LDT */
3591 env->ldt.base = target_mmap(0,
3592 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3593 PROT_READ|PROT_WRITE,
3594 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3595 if (env->ldt.base == -1)
3596 return -TARGET_ENOMEM;
3597 memset(g2h(env->ldt.base), 0,
3598 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3599 env->ldt.limit = 0xffff;
3600 ldt_table = g2h(env->ldt.base);
3603 /* NOTE: same code as Linux kernel */
3604 /* Allow LDTs to be cleared by the user. */
3605 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3608 read_exec_only == 1 &&
3610 limit_in_pages == 0 &&
3611 seg_not_present == 1 &&
3619 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3620 (ldt_info.limit & 0x0ffff);
3621 entry_2 = (ldt_info.base_addr & 0xff000000) |
3622 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3623 (ldt_info.limit & 0xf0000) |
3624 ((read_exec_only ^ 1) << 9) |
3626 ((seg_not_present ^ 1) << 15) |
3628 (limit_in_pages << 23) |
3632 entry_2 |= (useable << 20);
3634 /* Install the new entry ... */
3636 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3637 lp[0] = tswap32(entry_1);
3638 lp[1] = tswap32(entry_2);
3642 /* specific and weird i386 syscalls */
3643 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3644 unsigned long bytecount)
3650 ret = read_ldt(ptr, bytecount);
3653 ret = write_ldt(env, ptr, bytecount, 1);
3656 ret = write_ldt(env, ptr, bytecount, 0);
3659 ret = -TARGET_ENOSYS;
3665 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3666 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3668 uint64_t *gdt_table = g2h(env->gdt.base);
3669 struct target_modify_ldt_ldt_s ldt_info;
3670 struct target_modify_ldt_ldt_s *target_ldt_info;
3671 int seg_32bit, contents, read_exec_only, limit_in_pages;
3672 int seg_not_present, useable, lm;
3673 uint32_t *lp, entry_1, entry_2;
3676 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3677 if (!target_ldt_info)
3678 return -TARGET_EFAULT;
3679 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3680 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3681 ldt_info.limit = tswap32(target_ldt_info->limit);
3682 ldt_info.flags = tswap32(target_ldt_info->flags);
3683 if (ldt_info.entry_number == -1) {
3684 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3685 if (gdt_table[i] == 0) {
3686 ldt_info.entry_number = i;
3687 target_ldt_info->entry_number = tswap32(i);
3692 unlock_user_struct(target_ldt_info, ptr, 1);
3694 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3695 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3696 return -TARGET_EINVAL;
3697 seg_32bit = ldt_info.flags & 1;
3698 contents = (ldt_info.flags >> 1) & 3;
3699 read_exec_only = (ldt_info.flags >> 3) & 1;
3700 limit_in_pages = (ldt_info.flags >> 4) & 1;
3701 seg_not_present = (ldt_info.flags >> 5) & 1;
3702 useable = (ldt_info.flags >> 6) & 1;
3706 lm = (ldt_info.flags >> 7) & 1;
3709 if (contents == 3) {
3710 if (seg_not_present == 0)
3711 return -TARGET_EINVAL;
3714 /* NOTE: same code as Linux kernel */
3715 /* Allow LDTs to be cleared by the user. */
3716 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3717 if ((contents == 0 &&
3718 read_exec_only == 1 &&
3720 limit_in_pages == 0 &&
3721 seg_not_present == 1 &&
3729 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3730 (ldt_info.limit & 0x0ffff);
3731 entry_2 = (ldt_info.base_addr & 0xff000000) |
3732 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3733 (ldt_info.limit & 0xf0000) |
3734 ((read_exec_only ^ 1) << 9) |
3736 ((seg_not_present ^ 1) << 15) |
3738 (limit_in_pages << 23) |
3743 /* Install the new entry ... */
3745 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3746 lp[0] = tswap32(entry_1);
3747 lp[1] = tswap32(entry_2);
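/* Read a TLS descriptor back out of the guest GDT and reassemble the
 * user_desc style fields (base address, limit, flags) for the guest. */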
3751 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3753 struct target_modify_ldt_ldt_s *target_ldt_info;
3754 uint64_t *gdt_table = g2h(env->gdt.base);
3755 uint32_t base_addr, limit, flags;
3756 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3757 int seg_not_present, useable, lm;
3758 uint32_t *lp, entry_1, entry_2;
3760 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3761 if (!target_ldt_info)
3762 return -TARGET_EFAULT;
3763 idx = tswap32(target_ldt_info->entry_number);
3764 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3765 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3766 unlock_user_struct(target_ldt_info, ptr, 1);
3767 return -TARGET_EINVAL;
3769 lp = (uint32_t *)(gdt_table + idx);
3770 entry_1 = tswap32(lp[0]);
3771 entry_2 = tswap32(lp[1]);
3773 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3774 contents = (entry_2 >> 10) & 3;
3775 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3776 seg_32bit = (entry_2 >> 22) & 1;
3777 limit_in_pages = (entry_2 >> 23) & 1;
3778 useable = (entry_2 >> 20) & 1;
3782 lm = (entry_2 >> 21) & 1;
3784 flags = (seg_32bit << 0) | (contents << 1) |
3785 (read_exec_only << 3) | (limit_in_pages << 4) |
3786 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3787 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3788 base_addr = (entry_1 >> 16) |
3789 (entry_2 & 0xff000000) |
3790 ((entry_2 & 0xff) << 16);
3791 target_ldt_info->base_addr = tswapl(base_addr);
3792 target_ldt_info->limit = tswap32(limit);
3793 target_ldt_info->flags = tswap32(flags);
3794 unlock_user_struct(target_ldt_info, ptr, 1);
3797 #endif /* TARGET_I386 && TARGET_ABI32 */
3799 #ifndef TARGET_ABI32
3800 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3807 case TARGET_ARCH_SET_GS:
3808 case TARGET_ARCH_SET_FS:
3809 if (code == TARGET_ARCH_SET_GS)
3813 cpu_x86_load_seg(env, idx, 0);
3814 env->segs[idx].base = addr;
3816 case TARGET_ARCH_GET_GS:
3817 case TARGET_ARCH_GET_FS:
3818 if (code == TARGET_ARCH_GET_GS)
3822 val = env->segs[idx].base;
3823 if (put_user(val, addr, abi_ulong))
3824 ret = -TARGET_EFAULT;
3827 ret = -TARGET_EINVAL;
3834 #endif /* defined(TARGET_I386) */
3836 #define NEW_STACK_SIZE 0x40000
3838 #if defined(CONFIG_USE_NPTL)
3840 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
3843 pthread_mutex_t mutex;
3844 pthread_cond_t cond;
3847 abi_ulong child_tidptr;
3848 abi_ulong parent_tidptr;
3852 static void *clone_func(void *arg)
3854 new_thread_info *info = arg;
3860 ts = (TaskState *)thread_env->opaque;
3861 info->tid = gettid();
3862 env->host_tid = info->tid;
3864 if (info->child_tidptr)
3865 put_user_u32(info->tid, info->child_tidptr);
3866 if (info->parent_tidptr)
3867 put_user_u32(info->tid, info->parent_tidptr);
3868 /* Enable signals. */
3869 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3870 /* Signal to the parent that we're ready. */
3871 pthread_mutex_lock(&info->mutex);
3872 pthread_cond_broadcast(&info->cond);
3873 pthread_mutex_unlock(&info->mutex);
3874 /* Wait until the parent has finished initializing the TLS state. */
3875 pthread_mutex_lock(&clone_lock);
3876 pthread_mutex_unlock(&clone_lock);
3883 static int clone_func(void *arg)
3885 CPUState *env = arg;
3892 /* do_fork() must return host values and target errnos (unlike most
3893 do_*() functions). */
3894 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3895 abi_ulong parent_tidptr, target_ulong newtls,
3896 abi_ulong child_tidptr)
3901 #if defined(CONFIG_USE_NPTL)
3902 unsigned int nptl_flags;
3908 /* Emulate vfork() with fork() */
3909 if (flags & CLONE_VFORK)
3910 flags &= ~(CLONE_VFORK | CLONE_VM);
3912 if (flags & CLONE_VM) {
3913 TaskState *parent_ts = (TaskState *)env->opaque;
3914 #if defined(CONFIG_USE_NPTL)
3915 new_thread_info info;
3916 pthread_attr_t attr;
3918 ts = qemu_mallocz(sizeof(TaskState));
3919 init_task_state(ts);
3920 /* we create a new CPU instance. */
3921 new_env = cpu_copy(env);
3922 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
3925 /* Init regs that differ from the parent. */
3926 cpu_clone_regs(new_env, newsp);
3927 new_env->opaque = ts;
3928 ts->bprm = parent_ts->bprm;
3929 ts->info = parent_ts->info;
3930 #if defined(CONFIG_USE_NPTL)
3932 flags &= ~CLONE_NPTL_FLAGS2;
3934 if (nptl_flags & CLONE_CHILD_CLEARTID) {
3935 ts->child_tidptr = child_tidptr;
3938 if (nptl_flags & CLONE_SETTLS)
3939 cpu_set_tls (new_env, newtls);
3941 /* Grab a mutex so that thread setup appears atomic. */
3942 pthread_mutex_lock(&clone_lock);
3944 memset(&info, 0, sizeof(info));
3945 pthread_mutex_init(&info.mutex, NULL);
3946 pthread_mutex_lock(&info.mutex);
3947 pthread_cond_init(&info.cond, NULL);
3949 if (nptl_flags & CLONE_CHILD_SETTID)
3950 info.child_tidptr = child_tidptr;
3951 if (nptl_flags & CLONE_PARENT_SETTID)
3952 info.parent_tidptr = parent_tidptr;
3954 ret = pthread_attr_init(&attr);
3955 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
3956 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
3957 /* It is not safe to deliver signals until the child has finished
3958 initializing, so temporarily block all signals. */
3959 sigfillset(&sigmask);
3960 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
3962 ret = pthread_create(&info.thread, &attr, clone_func, &info);
3963 /* TODO: Free new CPU state if thread creation failed. */
3965 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
3966 pthread_attr_destroy(&attr);
3968 /* Wait for the child to initialize. */
3969 pthread_cond_wait(&info.cond, &info.mutex);
3971 if (flags & CLONE_PARENT_SETTID)
3972 put_user_u32(ret, parent_tidptr);
3976 pthread_mutex_unlock(&info.mutex);
3977 pthread_cond_destroy(&info.cond);
3978 pthread_mutex_destroy(&info.mutex);
3979 pthread_mutex_unlock(&clone_lock);
3981 if (flags & CLONE_NPTL_FLAGS2)
3983 /* This is probably going to die very quickly, but do it anyway. */
3984 new_stack = qemu_mallocz (NEW_STACK_SIZE);
3986 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
3988 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3992 /* if no CLONE_VM, we consider it is a fork */
3993 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
3998 /* Child Process. */
3999 cpu_clone_regs(env, newsp);
4001 #if defined(CONFIG_USE_NPTL)
4002 /* There is a race condition here. The parent process could
4003 theoretically read the TID in the child process before the child
4004 tid is set. This would require using either ptrace
4005 (not implemented) or having *_tidptr point at a shared memory
4006 mapping. We can't repeat the spinlock hack used above because
4007 the child process gets its own copy of the lock. */
4008 if (flags & CLONE_CHILD_SETTID)
4009 put_user_u32(gettid(), child_tidptr);
4010 if (flags & CLONE_PARENT_SETTID)
4011 put_user_u32(gettid(), parent_tidptr);
4012 ts = (TaskState *)env->opaque;
4013 if (flags & CLONE_SETTLS)
4014 cpu_set_tls (env, newtls);
4015 if (flags & CLONE_CHILD_CLEARTID)
4016 ts->child_tidptr = child_tidptr;
4025 /* warning: doesn't handle Linux-specific flags... */
4026 static int target_to_host_fcntl_cmd(int cmd)
4029 case TARGET_F_DUPFD:
4030 case TARGET_F_GETFD:
4031 case TARGET_F_SETFD:
4032 case TARGET_F_GETFL:
4033 case TARGET_F_SETFL:
4035 case TARGET_F_GETLK:
4037 case TARGET_F_SETLK:
4039 case TARGET_F_SETLKW:
4041 case TARGET_F_GETOWN:
4043 case TARGET_F_SETOWN:
4045 case TARGET_F_GETSIG:
4047 case TARGET_F_SETSIG:
4049 #if TARGET_ABI_BITS == 32
4050 case TARGET_F_GETLK64:
4052 case TARGET_F_SETLK64:
4054 case TARGET_F_SETLKW64:
4057 case TARGET_F_SETLEASE:
4059 case TARGET_F_GETLEASE:
4061 #ifdef F_DUPFD_CLOEXEC
4062 case TARGET_F_DUPFD_CLOEXEC:
4063 return F_DUPFD_CLOEXEC;
4065 case TARGET_F_NOTIFY:
4068 return -TARGET_EINVAL;
4070 return -TARGET_EINVAL;
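/* fcntl() wrapper: record-locking commands convert struct flock (or
 * flock64) between target and host layouts, F_GETFL/F_SETFL translate
 * the open-flag bitmask, and the remaining commands pass the argument
 * through unchanged. */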
4073 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4076 struct target_flock *target_fl;
4077 struct flock64 fl64;
4078 struct target_flock64 *target_fl64;
4080 int host_cmd = target_to_host_fcntl_cmd(cmd);
4082 if (host_cmd == -TARGET_EINVAL)
4086 case TARGET_F_GETLK:
4087 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4088 return -TARGET_EFAULT;
4089 fl.l_type = tswap16(target_fl->l_type);
4090 fl.l_whence = tswap16(target_fl->l_whence);
4091 fl.l_start = tswapl(target_fl->l_start);
4092 fl.l_len = tswapl(target_fl->l_len);
4093 fl.l_pid = tswap32(target_fl->l_pid);
4094 unlock_user_struct(target_fl, arg, 0);
4095 ret = get_errno(fcntl(fd, host_cmd, &fl));
4097 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4098 return -TARGET_EFAULT;
4099 target_fl->l_type = tswap16(fl.l_type);
4100 target_fl->l_whence = tswap16(fl.l_whence);
4101 target_fl->l_start = tswapl(fl.l_start);
4102 target_fl->l_len = tswapl(fl.l_len);
4103 target_fl->l_pid = tswap32(fl.l_pid);
4104 unlock_user_struct(target_fl, arg, 1);
4108 case TARGET_F_SETLK:
4109 case TARGET_F_SETLKW:
4110 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4111 return -TARGET_EFAULT;
4112 fl.l_type = tswap16(target_fl->l_type);
4113 fl.l_whence = tswap16(target_fl->l_whence);
4114 fl.l_start = tswapl(target_fl->l_start);
4115 fl.l_len = tswapl(target_fl->l_len);
4116 fl.l_pid = tswap32(target_fl->l_pid);
4117 unlock_user_struct(target_fl, arg, 0);
4118 ret = get_errno(fcntl(fd, host_cmd, &fl));
4121 case TARGET_F_GETLK64:
4122 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4123 return -TARGET_EFAULT;
4124 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4125 fl64.l_whence = tswap16(target_fl64->l_whence);
4126 fl64.l_start = tswapl(target_fl64->l_start);
4127 fl64.l_len = tswapl(target_fl64->l_len);
4128 fl64.l_pid = tswap32(target_fl64->l_pid);
4129 unlock_user_struct(target_fl64, arg, 0);
4130 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4132 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4133 return -TARGET_EFAULT;
4134 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
4135 target_fl64->l_whence = tswap16(fl64.l_whence);
4136 target_fl64->l_start = tswapl(fl64.l_start);
4137 target_fl64->l_len = tswapl(fl64.l_len);
4138 target_fl64->l_pid = tswap32(fl64.l_pid);
4139 unlock_user_struct(target_fl64, arg, 1);
4142 case TARGET_F_SETLK64:
4143 case TARGET_F_SETLKW64:
4144 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4145 return -TARGET_EFAULT;
4146 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4147 fl64.l_whence = tswap16(target_fl64->l_whence);
4148 fl64.l_start = tswapl(target_fl64->l_start);
4149 fl64.l_len = tswapl(target_fl64->l_len);
4150 fl64.l_pid = tswap32(target_fl64->l_pid);
4151 unlock_user_struct(target_fl64, arg, 0);
4152 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4155 case TARGET_F_GETFL:
4156 ret = get_errno(fcntl(fd, host_cmd, arg));
4158 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4162 case TARGET_F_SETFL:
4163 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4166 case TARGET_F_SETOWN:
4167 case TARGET_F_GETOWN:
4168 case TARGET_F_SETSIG:
4169 case TARGET_F_GETSIG:
4170 case TARGET_F_SETLEASE:
4171 case TARGET_F_GETLEASE:
4172 ret = get_errno(fcntl(fd, host_cmd, arg));
4176 ret = get_errno(fcntl(fd, cmd, arg));
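/* On targets using 16-bit uids/gids (USE_UID16) the legacy syscalls
 * carry truncated ids; the helpers below convert between the 16-bit
 * and full-width representations while keeping the special value -1
 * intact. */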
4184 static inline int high2lowuid(int uid)
4192 static inline int high2lowgid(int gid)
4200 static inline int low2highuid(int uid)
4202 if ((int16_t)uid == -1)
4208 static inline int low2highgid(int gid)
4210 if ((int16_t)gid == -1)
4215 static inline int tswapid(int id)
4219 #else /* !USE_UID16 */
4220 static inline int high2lowuid(int uid)
4224 static inline int high2lowgid(int gid)
4228 static inline int low2highuid(int uid)
4232 static inline int low2highgid(int gid)
4236 static inline int tswapid(int id)
4240 #endif /* USE_UID16 */
4242 void syscall_init(void)
4245 const argtype *arg_type;
4249 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4250 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4251 #include "syscall_types.h"
4253 #undef STRUCT_SPECIAL
4255 /* We patch the ioctl size if necessary. We rely on the fact that
4256 no ioctl has all the bits at '1' in the size field */
4258 while (ie->target_cmd != 0) {
4259 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4260 TARGET_IOC_SIZEMASK) {
4261 arg_type = ie->arg_type;
4262 if (arg_type[0] != TYPE_PTR) {
4263 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4268 size = thunk_type_size(arg_type, 0);
4269 ie->target_cmd = (ie->target_cmd &
4270 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4271 (size << TARGET_IOC_SIZESHIFT);
4274 /* Build target_to_host_errno_table[] table from
4275 * host_to_target_errno_table[]. */
4276 for (i=0; i < ERRNO_TABLE_SIZE; i++)
4277 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4279 /* automatic consistency check if same arch */
4280 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4281 (defined(__x86_64__) && defined(TARGET_X86_64))
4282 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4283 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4284 ie->name, ie->target_cmd, ie->host_cmd);
4291 #if TARGET_ABI_BITS == 32
4292 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4294 #ifdef TARGET_WORDS_BIGENDIAN
4295 return ((uint64_t)word0 << 32) | word1;
4297 return ((uint64_t)word1 << 32) | word0;
4300 #else /* TARGET_ABI_BITS == 32 */
4301 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4305 #endif /* TARGET_ABI_BITS != 32 */
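/* truncate64/ftruncate64 take a 64-bit length split across two
 * registers; target_offset64() above reassembles it in the correct
 * order.  On ARM EABI the pair is register-aligned, which shifts the
 * argument positions (see the eabi checks below). */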
4307 #ifdef TARGET_NR_truncate64
4308 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4314 if (((CPUARMState *)cpu_env)->eabi)
4320 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4324 #ifdef TARGET_NR_ftruncate64
4325 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4331 if (((CPUARMState *)cpu_env)->eabi)
4337 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4341 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4342 abi_ulong target_addr)
4344 struct target_timespec *target_ts;
4346 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4347 return -TARGET_EFAULT;
4348 host_ts->tv_sec = tswapl(target_ts->tv_sec);
4349 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
4350 unlock_user_struct(target_ts, target_addr, 0);
4354 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4355 struct timespec *host_ts)
4357 struct target_timespec *target_ts;
4359 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4360 return -TARGET_EFAULT;
4361 target_ts->tv_sec = tswapl(host_ts->tv_sec);
4362 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
4363 unlock_user_struct(target_ts, target_addr, 1);
4367 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4368 static inline abi_long host_to_target_stat64(void *cpu_env,
4369 abi_ulong target_addr,
4370 struct stat *host_st)
4373 if (((CPUARMState *)cpu_env)->eabi) {
4374 struct target_eabi_stat64 *target_st;
4376 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4377 return -TARGET_EFAULT;
4378 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4379 __put_user(host_st->st_dev, &target_st->st_dev);
4380 __put_user(host_st->st_ino, &target_st->st_ino);
4381 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4382 __put_user(host_st->st_ino, &target_st->__st_ino);
4384 __put_user(host_st->st_mode, &target_st->st_mode);
4385 __put_user(host_st->st_nlink, &target_st->st_nlink);
4386 __put_user(host_st->st_uid, &target_st->st_uid);
4387 __put_user(host_st->st_gid, &target_st->st_gid);
4388 __put_user(host_st->st_rdev, &target_st->st_rdev);
4389 __put_user(host_st->st_size, &target_st->st_size);
4390 __put_user(host_st->st_blksize, &target_st->st_blksize);
4391 __put_user(host_st->st_blocks, &target_st->st_blocks);
4392 __put_user(host_st->st_atime, &target_st->target_st_atime);
4393 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4394 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4395 unlock_user_struct(target_st, target_addr, 1);
4399 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4400 struct target_stat *target_st;
4402 struct target_stat64 *target_st;
4405 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4406 return -TARGET_EFAULT;
4407 memset(target_st, 0, sizeof(*target_st));
4408 __put_user(host_st->st_dev, &target_st->st_dev);
4409 __put_user(host_st->st_ino, &target_st->st_ino);
4410 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4411 __put_user(host_st->st_ino, &target_st->__st_ino);
4413 __put_user(host_st->st_mode, &target_st->st_mode);
4414 __put_user(host_st->st_nlink, &target_st->st_nlink);
4415 __put_user(host_st->st_uid, &target_st->st_uid);
4416 __put_user(host_st->st_gid, &target_st->st_gid);
4417 __put_user(host_st->st_rdev, &target_st->st_rdev);
4418 /* XXX: better use of kernel struct */
4419 __put_user(host_st->st_size, &target_st->st_size);
4420 __put_user(host_st->st_blksize, &target_st->st_blksize);
4421 __put_user(host_st->st_blocks, &target_st->st_blocks);
4422 __put_user(host_st->st_atime, &target_st->target_st_atime);
4423 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4424 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4425 unlock_user_struct(target_st, target_addr, 1);
4432 #if defined(CONFIG_USE_NPTL)
4433 /* ??? Using host futex calls even when target atomic operations
4434 are not really atomic probably breaks things. However, implementing
4435 futexes locally would make futexes shared between multiple processes
4436 tricky, and they would probably be useless anyway because guest
4437 atomic operations won't work either. */
4438 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4439 target_ulong uaddr2, int val3)
4441 struct timespec ts, *pts;
4444 /* ??? We assume FUTEX_* constants are the same on both host and target. */
4446 #ifdef FUTEX_CMD_MASK
4447 base_op = op & FUTEX_CMD_MASK;
4455 target_to_host_timespec(pts, timeout);
4459 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4462 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4464 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4466 case FUTEX_CMP_REQUEUE:
4468 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4469 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4470 But the prototype takes a `struct timespec *'; insert casts
4471 to satisfy the compiler. We do not need to tswap TIMEOUT
4472 since it's not compared to guest memory. */
4473 pts = (struct timespec *)(uintptr_t) timeout;
4474 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4476 (base_op == FUTEX_CMP_REQUEUE
4480 return -TARGET_ENOSYS;
4485 /* Map host to target signal numbers for the wait family of syscalls.
4486 Assume all other status bits are the same. */
4487 static int host_to_target_waitstatus(int status)
4489 if (WIFSIGNALED(status)) {
4490 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4492 if (WIFSTOPPED(status)) {
4493 return (host_to_target_signal(WSTOPSIG(status)) << 8)
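/* Parse the kernel release string (either from the host's uname or
 * from qemu_uname_release if it was set) as "major.minor.patch" and
 * pack it into a single comparable integer. */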
4499 int get_osversion(void)
4501 static int osversion;
4502 struct new_utsname buf;
4507 if (qemu_uname_release && *qemu_uname_release) {
4508 s = qemu_uname_release;
4510 if (sys_uname(&buf))
4515 for (i = 0; i < 3; i++) {
4517 while (*s >= '0' && *s <= '9') {
4522 tmp = (tmp << 8) + n;
4530 /* do_syscall() should always have a single exit point at the end so
4531 that actions, such as logging of syscall results, can be performed.
4532 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4533 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
4534 abi_long arg2, abi_long arg3, abi_long arg4,
4535 abi_long arg5, abi_long arg6, abi_long arg7,
4544 gemu_log("syscall %d", num);
4547 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
4550 case TARGET_NR_exit:
4551 #ifdef CONFIG_USE_NPTL
4552 /* In old applications this may be used to implement _exit(2).
4553 However in threaded applications it is used for thread termination,
4554 and _exit_group is used for application termination.
4555 Do thread termination if we have more than one thread. */
4556 /* FIXME: This probably breaks if a signal arrives. We should probably
4557 be disabling signals. */
4558 if (first_cpu->next_cpu) {
4566 while (p && p != (CPUState *)cpu_env) {
4567 lastp = &p->next_cpu;
4570 /* If we didn't find the CPU for this thread then something is
4574 /* Remove the CPU from the list. */
4575 *lastp = p->next_cpu;
4577 ts = ((CPUState *)cpu_env)->opaque;
4578 if (ts->child_tidptr) {
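/* Mimic the kernel's CLONE_CHILD_CLEARTID handling: clear the child tid word
   and wake any futex waiters so a guest pthread_join() can complete. */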
4579 put_user_u32(0, ts->child_tidptr);
4580 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
4592 gdb_exit(cpu_env, arg1);
4594 ret = 0; /* avoid warning */
4596 case TARGET_NR_read:
4600 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
4602 ret = get_errno(read(arg1, p, arg3));
4603 unlock_user(p, arg2, ret);
4606 case TARGET_NR_write:
4607 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
4609 ret = get_errno(write(arg1, p, arg3));
4610 unlock_user(p, arg2, 0);
4612 case TARGET_NR_open:
4613 if (!(p = lock_user_string(arg1)))
4615 ret = get_errno(open(path(p),
4616 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4618 unlock_user(p, arg1, 0);
4620 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4621 case TARGET_NR_openat:
4622 if (!(p = lock_user_string(arg2)))
4624 ret = get_errno(sys_openat(arg1,
4626 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4628 unlock_user(p, arg2, 0);
4631 case TARGET_NR_close:
4632 ret = get_errno(close(arg1));
4637 case TARGET_NR_fork:
4638 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4640 #ifdef TARGET_NR_waitpid
4641 case TARGET_NR_waitpid:
4644 ret = get_errno(waitpid(arg1, &status, arg3));
4645 if (!is_error(ret) && arg2
4646 && put_user_s32(host_to_target_waitstatus(status), arg2))
4651 #ifdef TARGET_NR_waitid
4652 case TARGET_NR_waitid:
4656 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4657 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4658 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4660 host_to_target_siginfo(p, &info);
4661 unlock_user(p, arg3, sizeof(target_siginfo_t));
4666 #ifdef TARGET_NR_creat /* not on alpha */
4667 case TARGET_NR_creat:
4668 if (!(p = lock_user_string(arg1)))
4670 ret = get_errno(creat(p, arg2));
4671 unlock_user(p, arg1, 0);
4674 case TARGET_NR_link:
4677 p = lock_user_string(arg1);
4678 p2 = lock_user_string(arg2);
4680 ret = -TARGET_EFAULT;
4682 ret = get_errno(link(p, p2));
4683 unlock_user(p2, arg2, 0);
4684 unlock_user(p, arg1, 0);
4687 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4688 case TARGET_NR_linkat:
4693 p = lock_user_string(arg2);
4694 p2 = lock_user_string(arg4);
4696 ret = -TARGET_EFAULT;
4698 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4699 unlock_user(p, arg2, 0);
4700 unlock_user(p2, arg4, 0);
4704 case TARGET_NR_unlink:
4705 if (!(p = lock_user_string(arg1)))
4707 ret = get_errno(unlink(p));
4708 unlock_user(p, arg1, 0);
4710 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4711 case TARGET_NR_unlinkat:
4712 if (!(p = lock_user_string(arg2)))
4714 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4715 unlock_user(p, arg2, 0);
4718 case TARGET_NR_execve:
4720 char **argp, **envp;
4723 abi_ulong guest_argp;
4724 abi_ulong guest_envp;
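/* Two passes over the guest argv/envp: first count the entries, then lock
   each guest string into a host pointer array for execve(). */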
4730 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4731 if (get_user_ual(addr, gp))
4739 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4740 if (get_user_ual(addr, gp))
4747 argp = alloca((argc + 1) * sizeof(void *));
4748 envp = alloca((envc + 1) * sizeof(void *));
4750 for (gp = guest_argp, q = argp; gp;
4751 gp += sizeof(abi_ulong), q++) {
4752 if (get_user_ual(addr, gp))
4756 if (!(*q = lock_user_string(addr)))
4761 for (gp = guest_envp, q = envp; gp;
4762 gp += sizeof(abi_ulong), q++) {
4763 if (get_user_ual(addr, gp))
4767 if (!(*q = lock_user_string(addr)))
4772 if (!(p = lock_user_string(arg1)))
4774 ret = get_errno(execve(p, argp, envp));
4775 unlock_user(p, arg1, 0);
4780 ret = -TARGET_EFAULT;
4783 for (gp = guest_argp, q = argp; *q;
4784 gp += sizeof(abi_ulong), q++) {
4785 if (get_user_ual(addr, gp)
4788 unlock_user(*q, addr, 0);
4790 for (gp = guest_envp, q = envp; *q;
4791 gp += sizeof(abi_ulong), q++) {
4792 if (get_user_ual(addr, gp)
4795 unlock_user(*q, addr, 0);
4799 case TARGET_NR_chdir:
4800 if (!(p = lock_user_string(arg1)))
4802 ret = get_errno(chdir(p));
4803 unlock_user(p, arg1, 0);
4805 #ifdef TARGET_NR_time
4806 case TARGET_NR_time:
4809 ret = get_errno(time(&host_time));
4812 && put_user_sal(host_time, arg1))
4817 case TARGET_NR_mknod:
4818 if (!(p = lock_user_string(arg1)))
4820 ret = get_errno(mknod(p, arg2, arg3));
4821 unlock_user(p, arg1, 0);
4823 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4824 case TARGET_NR_mknodat:
4825 if (!(p = lock_user_string(arg2)))
4827 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
4828 unlock_user(p, arg2, 0);
4831 case TARGET_NR_chmod:
4832 if (!(p = lock_user_string(arg1)))
4834 ret = get_errno(chmod(p, arg2));
4835 unlock_user(p, arg1, 0);
4837 #ifdef TARGET_NR_break
4838 case TARGET_NR_break:
4841 #ifdef TARGET_NR_oldstat
4842 case TARGET_NR_oldstat:
4845 case TARGET_NR_lseek:
4846 ret = get_errno(lseek(arg1, arg2, arg3));
4848 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
4849 /* Alpha specific */
4850 case TARGET_NR_getxpid:
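/* Alpha's getxpid returns the pid in v0 (the normal return value) and the
   parent pid in a4. */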
4851 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
4852 ret = get_errno(getpid());
4855 #ifdef TARGET_NR_getpid
4856 case TARGET_NR_getpid:
4857 ret = get_errno(getpid());
4860 case TARGET_NR_mount:
4862 /* need to look at the data field */
4864 p = lock_user_string(arg1);
4865 p2 = lock_user_string(arg2);
4866 p3 = lock_user_string(arg3);
4867 if (!p || !p2 || !p3)
4868 ret = -TARGET_EFAULT;
4870 /* FIXME - arg5 should be locked, but it isn't clear how to
4871 * do that since it's not guaranteed to be a NULL-terminated
4875 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
4877 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
4879 unlock_user(p, arg1, 0);
4880 unlock_user(p2, arg2, 0);
4881 unlock_user(p3, arg3, 0);
4884 #ifdef TARGET_NR_umount
4885 case TARGET_NR_umount:
4886 if (!(p = lock_user_string(arg1)))
4888 ret = get_errno(umount(p));
4889 unlock_user(p, arg1, 0);
4892 #ifdef TARGET_NR_stime /* not on alpha */
4893 case TARGET_NR_stime:
4896 if (get_user_sal(host_time, arg1))
4898 ret = get_errno(stime(&host_time));
4902 case TARGET_NR_ptrace:
4904 #ifdef TARGET_NR_alarm /* not on alpha */
4905 case TARGET_NR_alarm:
4909 #ifdef TARGET_NR_oldfstat
4910 case TARGET_NR_oldfstat:
4913 #ifdef TARGET_NR_pause /* not on alpha */
4914 case TARGET_NR_pause:
4915 ret = get_errno(pause());
4918 #ifdef TARGET_NR_utime
4919 case TARGET_NR_utime:
4921 struct utimbuf tbuf, *host_tbuf;
4922 struct target_utimbuf *target_tbuf;
4924 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
4926 tbuf.actime = tswapl(target_tbuf->actime);
4927 tbuf.modtime = tswapl(target_tbuf->modtime);
4928 unlock_user_struct(target_tbuf, arg2, 0);
4933 if (!(p = lock_user_string(arg1)))
4935 ret = get_errno(utime(p, host_tbuf));
4936 unlock_user(p, arg1, 0);
4940 case TARGET_NR_utimes:
4942 struct timeval *tvp, tv[2];
4944 if (copy_from_user_timeval(&tv[0], arg2)
4945 || copy_from_user_timeval(&tv[1],
4946 arg2 + sizeof(struct target_timeval)))
4952 if (!(p = lock_user_string(arg1)))
4954 ret = get_errno(utimes(p, tvp));
4955 unlock_user(p, arg1, 0);
4958 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4959 case TARGET_NR_futimesat:
4961 struct timeval *tvp, tv[2];
4963 if (copy_from_user_timeval(&tv[0], arg3)
4964 || copy_from_user_timeval(&tv[1],
4965 arg3 + sizeof(struct target_timeval)))
4971 if (!(p = lock_user_string(arg2)))
4973 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
4974 unlock_user(p, arg2, 0);
4978 #ifdef TARGET_NR_stty
4979 case TARGET_NR_stty:
4982 #ifdef TARGET_NR_gtty
4983 case TARGET_NR_gtty:
4986 case TARGET_NR_access:
4987 if (!(p = lock_user_string(arg1)))
4989 ret = get_errno(access(path(p), arg2));
4990 unlock_user(p, arg1, 0);
4992 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4993 case TARGET_NR_faccessat:
4994 if (!(p = lock_user_string(arg2)))
4996 ret = get_errno(sys_faccessat(arg1, p, arg3));
4997 unlock_user(p, arg2, 0);
5000 #ifdef TARGET_NR_nice /* not on alpha */
5001 case TARGET_NR_nice:
5002 ret = get_errno(nice(arg1));
5005 #ifdef TARGET_NR_ftime
5006 case TARGET_NR_ftime:
5009 case TARGET_NR_sync:
5013 case TARGET_NR_kill:
5014 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5016 case TARGET_NR_rename:
5019 p = lock_user_string(arg1);
5020 p2 = lock_user_string(arg2);
5022 ret = -TARGET_EFAULT;
5024 ret = get_errno(rename(p, p2));
5025 unlock_user(p2, arg2, 0);
5026 unlock_user(p, arg1, 0);
5029 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5030 case TARGET_NR_renameat:
5033 p = lock_user_string(arg2);
5034 p2 = lock_user_string(arg4);
5036 ret = -TARGET_EFAULT;
5038 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5039 unlock_user(p2, arg4, 0);
5040 unlock_user(p, arg2, 0);
5044 case TARGET_NR_mkdir:
5045 if (!(p = lock_user_string(arg1)))
5047 ret = get_errno(mkdir(p, arg2));
5048 unlock_user(p, arg1, 0);
5050 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5051 case TARGET_NR_mkdirat:
5052 if (!(p = lock_user_string(arg2)))
5054 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5055 unlock_user(p, arg2, 0);
5058 case TARGET_NR_rmdir:
5059 if (!(p = lock_user_string(arg1)))
5061 ret = get_errno(rmdir(p));
5062 unlock_user(p, arg1, 0);
5065 ret = get_errno(dup(arg1));
5067 case TARGET_NR_pipe:
5068 ret = do_pipe(cpu_env, arg1, 0, 0);
5070 #ifdef TARGET_NR_pipe2
5071 case TARGET_NR_pipe2:
5072 ret = do_pipe(cpu_env, arg1, arg2, 1);
5075 case TARGET_NR_times:
5077 struct target_tms *tmsp;
5079 ret = get_errno(times(&tms));
5081 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5084 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
5085 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
5086 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
5087 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
5090 ret = host_to_target_clock_t(ret);
5093 #ifdef TARGET_NR_prof
5094 case TARGET_NR_prof:
5097 #ifdef TARGET_NR_signal
5098 case TARGET_NR_signal:
5101 case TARGET_NR_acct:
5103 ret = get_errno(acct(NULL));
5105 if (!(p = lock_user_string(arg1)))
5107 ret = get_errno(acct(path(p)));
5108 unlock_user(p, arg1, 0);
5111 #ifdef TARGET_NR_umount2 /* not on alpha */
5112 case TARGET_NR_umount2:
5113 if (!(p = lock_user_string(arg1)))
5115 ret = get_errno(umount2(p, arg2));
5116 unlock_user(p, arg1, 0);
5119 #ifdef TARGET_NR_lock
5120 case TARGET_NR_lock:
5123 case TARGET_NR_ioctl:
5124 ret = do_ioctl(arg1, arg2, arg3);
5126 case TARGET_NR_fcntl:
5127 ret = do_fcntl(arg1, arg2, arg3);
5129 #ifdef TARGET_NR_mpx
5133 case TARGET_NR_setpgid:
5134 ret = get_errno(setpgid(arg1, arg2));
5136 #ifdef TARGET_NR_ulimit
5137 case TARGET_NR_ulimit:
5140 #ifdef TARGET_NR_oldolduname
5141 case TARGET_NR_oldolduname:
5144 case TARGET_NR_umask:
5145 ret = get_errno(umask(arg1));
5147 case TARGET_NR_chroot:
5148 if (!(p = lock_user_string(arg1)))
5150 ret = get_errno(chroot(p));
5151 unlock_user(p, arg1, 0);
5153 case TARGET_NR_ustat:
5155 case TARGET_NR_dup2:
5156 ret = get_errno(dup2(arg1, arg2));
5158 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5159 case TARGET_NR_dup3:
5160 ret = get_errno(dup3(arg1, arg2, arg3));
5163 #ifdef TARGET_NR_getppid /* not on alpha */
5164 case TARGET_NR_getppid:
5165 ret = get_errno(getppid());
5168 case TARGET_NR_getpgrp:
5169 ret = get_errno(getpgrp());
5171 case TARGET_NR_setsid:
5172 ret = get_errno(setsid());
5174 #ifdef TARGET_NR_sigaction
5175 case TARGET_NR_sigaction:
5177 #if defined(TARGET_ALPHA)
5178 struct target_sigaction act, oact, *pact = 0;
5179 struct target_old_sigaction *old_act;
5181 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5183 act._sa_handler = old_act->_sa_handler;
5184 target_siginitset(&act.sa_mask, old_act->sa_mask);
5185 act.sa_flags = old_act->sa_flags;
5186 act.sa_restorer = 0;
5187 unlock_user_struct(old_act, arg2, 0);
5190 ret = get_errno(do_sigaction(arg1, pact, &oact));
5191 if (!is_error(ret) && arg3) {
5192 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5194 old_act->_sa_handler = oact._sa_handler;
5195 old_act->sa_mask = oact.sa_mask.sig[0];
5196 old_act->sa_flags = oact.sa_flags;
5197 unlock_user_struct(old_act, arg3, 1);
5199 #elif defined(TARGET_MIPS)
5200 struct target_sigaction act, oact, *pact, *old_act;
5203 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5205 act._sa_handler = old_act->_sa_handler;
5206 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5207 act.sa_flags = old_act->sa_flags;
5208 unlock_user_struct(old_act, arg2, 0);
5214 ret = get_errno(do_sigaction(arg1, pact, &oact));
5216 if (!is_error(ret) && arg3) {
5217 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5219 old_act->_sa_handler = oact._sa_handler;
5220 old_act->sa_flags = oact.sa_flags;
5221 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5222 old_act->sa_mask.sig[1] = 0;
5223 old_act->sa_mask.sig[2] = 0;
5224 old_act->sa_mask.sig[3] = 0;
5225 unlock_user_struct(old_act, arg3, 1);
5228 struct target_old_sigaction *old_act;
5229 struct target_sigaction act, oact, *pact;
5231 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5233 act._sa_handler = old_act->_sa_handler;
5234 target_siginitset(&act.sa_mask, old_act->sa_mask);
5235 act.sa_flags = old_act->sa_flags;
5236 act.sa_restorer = old_act->sa_restorer;
5237 unlock_user_struct(old_act, arg2, 0);
5242 ret = get_errno(do_sigaction(arg1, pact, &oact));
5243 if (!is_error(ret) && arg3) {
5244 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5246 old_act->_sa_handler = oact._sa_handler;
5247 old_act->sa_mask = oact.sa_mask.sig[0];
5248 old_act->sa_flags = oact.sa_flags;
5249 old_act->sa_restorer = oact.sa_restorer;
5250 unlock_user_struct(old_act, arg3, 1);
5256 case TARGET_NR_rt_sigaction:
5258 #if defined(TARGET_ALPHA)
5259 struct target_sigaction act, oact, *pact = 0;
5260 struct target_rt_sigaction *rt_act;
5261 /* ??? arg4 == sizeof(sigset_t). */
5263 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5265 act._sa_handler = rt_act->_sa_handler;
5266 act.sa_mask = rt_act->sa_mask;
5267 act.sa_flags = rt_act->sa_flags;
5268 act.sa_restorer = arg5;
5269 unlock_user_struct(rt_act, arg2, 0);
5272 ret = get_errno(do_sigaction(arg1, pact, &oact));
5273 if (!is_error(ret) && arg3) {
5274 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5276 rt_act->_sa_handler = oact._sa_handler;
5277 rt_act->sa_mask = oact.sa_mask;
5278 rt_act->sa_flags = oact.sa_flags;
5279 unlock_user_struct(rt_act, arg3, 1);
5282 struct target_sigaction *act;
5283 struct target_sigaction *oact;
5286 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5291 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5292 ret = -TARGET_EFAULT;
5293 goto rt_sigaction_fail;
5297 ret = get_errno(do_sigaction(arg1, act, oact));
5300 unlock_user_struct(act, arg2, 0);
5302 unlock_user_struct(oact, arg3, 1);
5306 #ifdef TARGET_NR_sgetmask /* not on alpha */
5307 case TARGET_NR_sgetmask:
5310 abi_ulong target_set;
5311 sigprocmask(0, NULL, &cur_set);
5312 host_to_target_old_sigset(&target_set, &cur_set);
5317 #ifdef TARGET_NR_ssetmask /* not on alpha */
5318 case TARGET_NR_ssetmask:
5320 sigset_t set, oset, cur_set;
5321 abi_ulong target_set = arg1;
5322 sigprocmask(0, NULL, &cur_set);
5323 target_to_host_old_sigset(&set, &target_set);
5324 sigorset(&set, &set, &cur_set);
5325 sigprocmask(SIG_SETMASK, &set, &oset);
5326 host_to_target_old_sigset(&target_set, &oset);
5331 #ifdef TARGET_NR_sigprocmask
5332 case TARGET_NR_sigprocmask:
5334 #if defined(TARGET_ALPHA)
5335 sigset_t set, oldset;
5340 case TARGET_SIG_BLOCK:
5343 case TARGET_SIG_UNBLOCK:
5346 case TARGET_SIG_SETMASK:
5350 ret = -TARGET_EINVAL;
5354 target_to_host_old_sigset(&set, &mask);
5356 ret = get_errno(sigprocmask(how, &set, &oldset));
5358 if (!is_error(ret)) {
5359 host_to_target_old_sigset(&mask, &oldset);
5361            ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5364 sigset_t set, oldset, *set_ptr;
5369 case TARGET_SIG_BLOCK:
5372 case TARGET_SIG_UNBLOCK:
5375 case TARGET_SIG_SETMASK:
5379 ret = -TARGET_EINVAL;
5382 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5384 target_to_host_old_sigset(&set, p);
5385 unlock_user(p, arg2, 0);
5391 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5392 if (!is_error(ret) && arg3) {
5393 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5395 host_to_target_old_sigset(p, &oldset);
5396 unlock_user(p, arg3, sizeof(target_sigset_t));
5402 case TARGET_NR_rt_sigprocmask:
5405 sigset_t set, oldset, *set_ptr;
5409 case TARGET_SIG_BLOCK:
5412 case TARGET_SIG_UNBLOCK:
5415 case TARGET_SIG_SETMASK:
5419 ret = -TARGET_EINVAL;
5422 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5424 target_to_host_sigset(&set, p);
5425 unlock_user(p, arg2, 0);
5431 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5432 if (!is_error(ret) && arg3) {
5433 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5435 host_to_target_sigset(p, &oldset);
5436 unlock_user(p, arg3, sizeof(target_sigset_t));
5440 #ifdef TARGET_NR_sigpending
5441 case TARGET_NR_sigpending:
5444 ret = get_errno(sigpending(&set));
5445 if (!is_error(ret)) {
5446 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5448 host_to_target_old_sigset(p, &set);
5449 unlock_user(p, arg1, sizeof(target_sigset_t));
5454 case TARGET_NR_rt_sigpending:
5457 ret = get_errno(sigpending(&set));
5458 if (!is_error(ret)) {
5459 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5461 host_to_target_sigset(p, &set);
5462 unlock_user(p, arg1, sizeof(target_sigset_t));
5466 #ifdef TARGET_NR_sigsuspend
5467 case TARGET_NR_sigsuspend:
5470 #if defined(TARGET_ALPHA)
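/* Alpha passes the old-style signal mask by value in the first argument
   rather than via a user-space pointer. */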
5471 abi_ulong mask = arg1;
5472 target_to_host_old_sigset(&set, &mask);
5474 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5476 target_to_host_old_sigset(&set, p);
5477 unlock_user(p, arg1, 0);
5479 ret = get_errno(sigsuspend(&set));
5483 case TARGET_NR_rt_sigsuspend:
5486 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5488 target_to_host_sigset(&set, p);
5489 unlock_user(p, arg1, 0);
5490 ret = get_errno(sigsuspend(&set));
5493 case TARGET_NR_rt_sigtimedwait:
5496 struct timespec uts, *puts;
5499 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5501 target_to_host_sigset(&set, p);
5502 unlock_user(p, arg1, 0);
5505 target_to_host_timespec(puts, arg3);
5509 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
5510 if (!is_error(ret) && arg2) {
5511 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
5513 host_to_target_siginfo(p, &uinfo);
5514 unlock_user(p, arg2, sizeof(target_siginfo_t));
5518 case TARGET_NR_rt_sigqueueinfo:
5521            if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
5523 target_to_host_siginfo(&uinfo, p);
5524            unlock_user(p, arg3, 0);
5525 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
5528 #ifdef TARGET_NR_sigreturn
5529 case TARGET_NR_sigreturn:
5530        /* NOTE: ret is eax, so no transcoding needs to be done */
5531 ret = do_sigreturn(cpu_env);
5534 case TARGET_NR_rt_sigreturn:
5535        /* NOTE: ret is eax, so no transcoding needs to be done */
5536 ret = do_rt_sigreturn(cpu_env);
5538 case TARGET_NR_sethostname:
5539 if (!(p = lock_user_string(arg1)))
5541 ret = get_errno(sethostname(p, arg2));
5542 unlock_user(p, arg1, 0);
5544 case TARGET_NR_setrlimit:
5546 int resource = arg1;
5547 struct target_rlimit *target_rlim;
5549 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
5551 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
5552 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
5553 unlock_user_struct(target_rlim, arg2, 0);
5554 ret = get_errno(setrlimit(resource, &rlim));
5557 case TARGET_NR_getrlimit:
5559 int resource = arg1;
5560 struct target_rlimit *target_rlim;
5563 ret = get_errno(getrlimit(resource, &rlim));
5564 if (!is_error(ret)) {
5565 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5567 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
5568 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
5569 unlock_user_struct(target_rlim, arg2, 1);
5573 case TARGET_NR_getrusage:
5575 struct rusage rusage;
5576 ret = get_errno(getrusage(arg1, &rusage));
5577 if (!is_error(ret)) {
5578 host_to_target_rusage(arg2, &rusage);
5582 case TARGET_NR_gettimeofday:
5585 ret = get_errno(gettimeofday(&tv, NULL));
5586 if (!is_error(ret)) {
5587 if (copy_to_user_timeval(arg1, &tv))
5592 case TARGET_NR_settimeofday:
5595 if (copy_from_user_timeval(&tv, arg1))
5597 ret = get_errno(settimeofday(&tv, NULL));
5600 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
5601 case TARGET_NR_select:
5603 struct target_sel_arg_struct *sel;
5604 abi_ulong inp, outp, exp, tvp;
5607 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
5609 nsel = tswapl(sel->n);
5610 inp = tswapl(sel->inp);
5611 outp = tswapl(sel->outp);
5612 exp = tswapl(sel->exp);
5613 tvp = tswapl(sel->tvp);
5614 unlock_user_struct(sel, arg1, 0);
5615 ret = do_select(nsel, inp, outp, exp, tvp);
5619 #ifdef TARGET_NR_pselect6
5620 case TARGET_NR_pselect6:
5622 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
5623 fd_set rfds, wfds, efds;
5624 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
5625 struct timespec ts, *ts_ptr;
5628 * The 6th arg is actually two args smashed together,
5629 * so we cannot use the C library.
5637 abi_ulong arg_sigset, arg_sigsize, *arg7;
5638 target_sigset_t *target_sigset;
5646 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
5650 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
5654 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
5660 * This takes a timespec, and not a timeval, so we cannot
5661 * use the do_select() helper ...
5664 if (target_to_host_timespec(&ts, ts_addr)) {
5672 /* Extract the two packed args for the sigset */
5675 sig.size = _NSIG / 8;
5677 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
5681 arg_sigset = tswapl(arg7[0]);
5682 arg_sigsize = tswapl(arg7[1]);
5683 unlock_user(arg7, arg6, 0);
5687 target_sigset = lock_user(VERIFY_READ, arg_sigset,
5688 sizeof(*target_sigset), 1);
5689 if (!target_sigset) {
5692 target_to_host_sigset(&set, target_sigset);
5693 unlock_user(target_sigset, arg_sigset, 0);
5701 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
5704 if (!is_error(ret)) {
5705 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
5707 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
5709 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
5712 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
5718 case TARGET_NR_symlink:
5721 p = lock_user_string(arg1);
5722 p2 = lock_user_string(arg2);
5724 ret = -TARGET_EFAULT;
5726 ret = get_errno(symlink(p, p2));
5727 unlock_user(p2, arg2, 0);
5728 unlock_user(p, arg1, 0);
5731 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5732 case TARGET_NR_symlinkat:
5735 p = lock_user_string(arg1);
5736 p2 = lock_user_string(arg3);
5738 ret = -TARGET_EFAULT;
5740 ret = get_errno(sys_symlinkat(p, arg2, p2));
5741 unlock_user(p2, arg3, 0);
5742 unlock_user(p, arg1, 0);
5746 #ifdef TARGET_NR_oldlstat
5747 case TARGET_NR_oldlstat:
5750 case TARGET_NR_readlink:
5753 p = lock_user_string(arg1);
5754 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
5756 ret = -TARGET_EFAULT;
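/* A guest reading /proc/self/exe would otherwise see the QEMU binary itself,
   so hand back the real path of the guest executable instead. */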
5758 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
5759 char real[PATH_MAX];
5760               temp = realpath(exec_path, real);
5761               ret = (temp == NULL) ? get_errno(-1) : strlen(real);
5762 snprintf((char *)p2, arg3, "%s", real);
5765 ret = get_errno(readlink(path(p), p2, arg3));
5767 unlock_user(p2, arg2, ret);
5768 unlock_user(p, arg1, 0);
5771 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5772 case TARGET_NR_readlinkat:
5775 p = lock_user_string(arg2);
5776 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
5778 ret = -TARGET_EFAULT;
5780 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
5781 unlock_user(p2, arg3, ret);
5782 unlock_user(p, arg2, 0);
5786 #ifdef TARGET_NR_uselib
5787 case TARGET_NR_uselib:
5790 #ifdef TARGET_NR_swapon
5791 case TARGET_NR_swapon:
5792 if (!(p = lock_user_string(arg1)))
5794 ret = get_errno(swapon(p, arg2));
5795 unlock_user(p, arg1, 0);
5798 case TARGET_NR_reboot:
5800 #ifdef TARGET_NR_readdir
5801 case TARGET_NR_readdir:
5804 #ifdef TARGET_NR_mmap
5805 case TARGET_NR_mmap:
5806 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
5807 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
5808 || defined(TARGET_S390X)
5811 abi_ulong v1, v2, v3, v4, v5, v6;
5812 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
5820 unlock_user(v, arg1, 0);
5821 ret = get_errno(target_mmap(v1, v2, v3,
5822 target_to_host_bitmask(v4, mmap_flags_tbl),
5826 ret = get_errno(target_mmap(arg1, arg2, arg3,
5827 target_to_host_bitmask(arg4, mmap_flags_tbl),
5833 #ifdef TARGET_NR_mmap2
5834 case TARGET_NR_mmap2:
5836 #define MMAP_SHIFT 12
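/* mmap2 passes its file offset in units of 1 << MMAP_SHIFT bytes, so it is
   shifted back into a byte offset before calling target_mmap(). */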
5838 ret = get_errno(target_mmap(arg1, arg2, arg3,
5839 target_to_host_bitmask(arg4, mmap_flags_tbl),
5841 arg6 << MMAP_SHIFT));
5844 case TARGET_NR_munmap:
5845 ret = get_errno(target_munmap(arg1, arg2));
5847 case TARGET_NR_mprotect:
5849 TaskState *ts = ((CPUState *)cpu_env)->opaque;
5850 /* Special hack to detect libc making the stack executable. */
5851 if ((arg3 & PROT_GROWSDOWN)
5852 && arg1 >= ts->info->stack_limit
5853 && arg1 <= ts->info->start_stack) {
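/* Grow the affected range downwards to the stack limit so the whole guest
   stack mapping picks up the new protection. */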
5854 arg3 &= ~PROT_GROWSDOWN;
5855 arg2 = arg2 + arg1 - ts->info->stack_limit;
5856 arg1 = ts->info->stack_limit;
5859 ret = get_errno(target_mprotect(arg1, arg2, arg3));
5861 #ifdef TARGET_NR_mremap
5862 case TARGET_NR_mremap:
5863 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
5866 /* ??? msync/mlock/munlock are broken for softmmu. */
5867 #ifdef TARGET_NR_msync
5868 case TARGET_NR_msync:
5869 ret = get_errno(msync(g2h(arg1), arg2, arg3));
5872 #ifdef TARGET_NR_mlock
5873 case TARGET_NR_mlock:
5874 ret = get_errno(mlock(g2h(arg1), arg2));
5877 #ifdef TARGET_NR_munlock
5878 case TARGET_NR_munlock:
5879 ret = get_errno(munlock(g2h(arg1), arg2));
5882 #ifdef TARGET_NR_mlockall
5883 case TARGET_NR_mlockall:
5884 ret = get_errno(mlockall(arg1));
5887 #ifdef TARGET_NR_munlockall
5888 case TARGET_NR_munlockall:
5889 ret = get_errno(munlockall());
5892 case TARGET_NR_truncate:
5893 if (!(p = lock_user_string(arg1)))
5895 ret = get_errno(truncate(p, arg2));
5896 unlock_user(p, arg1, 0);
5898 case TARGET_NR_ftruncate:
5899 ret = get_errno(ftruncate(arg1, arg2));
5901 case TARGET_NR_fchmod:
5902 ret = get_errno(fchmod(arg1, arg2));
5904 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5905 case TARGET_NR_fchmodat:
5906 if (!(p = lock_user_string(arg2)))
5908 ret = get_errno(sys_fchmodat(arg1, p, arg3));
5909 unlock_user(p, arg2, 0);
5912 case TARGET_NR_getpriority:
5913 /* libc does special remapping of the return value of
5914 * sys_getpriority() so it's just easiest to call
5915 * sys_getpriority() directly rather than through libc. */
5916 ret = get_errno(sys_getpriority(arg1, arg2));
5918 case TARGET_NR_setpriority:
5919 ret = get_errno(setpriority(arg1, arg2, arg3));
5921 #ifdef TARGET_NR_profil
5922 case TARGET_NR_profil:
5925 case TARGET_NR_statfs:
5926 if (!(p = lock_user_string(arg1)))
5928 ret = get_errno(statfs(path(p), &stfs));
5929 unlock_user(p, arg1, 0);
5931 if (!is_error(ret)) {
5932 struct target_statfs *target_stfs;
5934 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
5936 __put_user(stfs.f_type, &target_stfs->f_type);
5937 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5938 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5939 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5940 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5941 __put_user(stfs.f_files, &target_stfs->f_files);
5942 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5943 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5944 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5945 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5946 unlock_user_struct(target_stfs, arg2, 1);
5949 case TARGET_NR_fstatfs:
5950 ret = get_errno(fstatfs(arg1, &stfs));
5951 goto convert_statfs;
5952 #ifdef TARGET_NR_statfs64
5953 case TARGET_NR_statfs64:
5954 if (!(p = lock_user_string(arg1)))
5956 ret = get_errno(statfs(path(p), &stfs));
5957 unlock_user(p, arg1, 0);
5959 if (!is_error(ret)) {
5960 struct target_statfs64 *target_stfs;
5962 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
5964 __put_user(stfs.f_type, &target_stfs->f_type);
5965 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5966 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5967 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5968 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5969 __put_user(stfs.f_files, &target_stfs->f_files);
5970 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5971 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5972 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5973 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5974 unlock_user_struct(target_stfs, arg3, 1);
5977 case TARGET_NR_fstatfs64:
5978 ret = get_errno(fstatfs(arg1, &stfs));
5979 goto convert_statfs64;
5981 #ifdef TARGET_NR_ioperm
5982 case TARGET_NR_ioperm:
5985 #ifdef TARGET_NR_socketcall
5986 case TARGET_NR_socketcall:
5987 ret = do_socketcall(arg1, arg2);
5990 #ifdef TARGET_NR_accept
5991 case TARGET_NR_accept:
5992 ret = do_accept(arg1, arg2, arg3);
5995 #ifdef TARGET_NR_bind
5996 case TARGET_NR_bind:
5997 ret = do_bind(arg1, arg2, arg3);
6000 #ifdef TARGET_NR_connect
6001 case TARGET_NR_connect:
6002 ret = do_connect(arg1, arg2, arg3);
6005 #ifdef TARGET_NR_getpeername
6006 case TARGET_NR_getpeername:
6007 ret = do_getpeername(arg1, arg2, arg3);
6010 #ifdef TARGET_NR_getsockname
6011 case TARGET_NR_getsockname:
6012 ret = do_getsockname(arg1, arg2, arg3);
6015 #ifdef TARGET_NR_getsockopt
6016 case TARGET_NR_getsockopt:
6017 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6020 #ifdef TARGET_NR_listen
6021 case TARGET_NR_listen:
6022 ret = get_errno(listen(arg1, arg2));
6025 #ifdef TARGET_NR_recv
6026 case TARGET_NR_recv:
6027 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6030 #ifdef TARGET_NR_recvfrom
6031 case TARGET_NR_recvfrom:
6032 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6035 #ifdef TARGET_NR_recvmsg
6036 case TARGET_NR_recvmsg:
6037 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6040 #ifdef TARGET_NR_send
6041 case TARGET_NR_send:
6042 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6045 #ifdef TARGET_NR_sendmsg
6046 case TARGET_NR_sendmsg:
6047 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6050 #ifdef TARGET_NR_sendto
6051 case TARGET_NR_sendto:
6052 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6055 #ifdef TARGET_NR_shutdown
6056 case TARGET_NR_shutdown:
6057 ret = get_errno(shutdown(arg1, arg2));
6060 #ifdef TARGET_NR_socket
6061 case TARGET_NR_socket:
6062 ret = do_socket(arg1, arg2, arg3);
6065 #ifdef TARGET_NR_socketpair
6066 case TARGET_NR_socketpair:
6067 ret = do_socketpair(arg1, arg2, arg3, arg4);
6070 #ifdef TARGET_NR_setsockopt
6071 case TARGET_NR_setsockopt:
6072 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6076 case TARGET_NR_syslog:
6077 if (!(p = lock_user_string(arg2)))
6079 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6080 unlock_user(p, arg2, 0);
6083 case TARGET_NR_setitimer:
6085 struct itimerval value, ovalue, *pvalue;
6089 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6090 || copy_from_user_timeval(&pvalue->it_value,
6091 arg2 + sizeof(struct target_timeval)))
6096 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6097 if (!is_error(ret) && arg3) {
6098 if (copy_to_user_timeval(arg3,
6099 &ovalue.it_interval)
6100 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6106 case TARGET_NR_getitimer:
6108 struct itimerval value;
6110 ret = get_errno(getitimer(arg1, &value));
6111 if (!is_error(ret) && arg2) {
6112 if (copy_to_user_timeval(arg2,
6114 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6120 case TARGET_NR_stat:
6121 if (!(p = lock_user_string(arg1)))
6123 ret = get_errno(stat(path(p), &st));
6124 unlock_user(p, arg1, 0);
6126 case TARGET_NR_lstat:
6127 if (!(p = lock_user_string(arg1)))
6129 ret = get_errno(lstat(path(p), &st));
6130 unlock_user(p, arg1, 0);
6132 case TARGET_NR_fstat:
6134 ret = get_errno(fstat(arg1, &st));
6136 if (!is_error(ret)) {
6137 struct target_stat *target_st;
6139 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6141 memset(target_st, 0, sizeof(*target_st));
6142 __put_user(st.st_dev, &target_st->st_dev);
6143 __put_user(st.st_ino, &target_st->st_ino);
6144 __put_user(st.st_mode, &target_st->st_mode);
6145 __put_user(st.st_uid, &target_st->st_uid);
6146 __put_user(st.st_gid, &target_st->st_gid);
6147 __put_user(st.st_nlink, &target_st->st_nlink);
6148 __put_user(st.st_rdev, &target_st->st_rdev);
6149 __put_user(st.st_size, &target_st->st_size);
6150 __put_user(st.st_blksize, &target_st->st_blksize);
6151 __put_user(st.st_blocks, &target_st->st_blocks);
6152 __put_user(st.st_atime, &target_st->target_st_atime);
6153 __put_user(st.st_mtime, &target_st->target_st_mtime);
6154 __put_user(st.st_ctime, &target_st->target_st_ctime);
6155 unlock_user_struct(target_st, arg2, 1);
6159 #ifdef TARGET_NR_olduname
6160 case TARGET_NR_olduname:
6163 #ifdef TARGET_NR_iopl
6164 case TARGET_NR_iopl:
6167 case TARGET_NR_vhangup:
6168 ret = get_errno(vhangup());
6170 #ifdef TARGET_NR_idle
6171 case TARGET_NR_idle:
6174 #ifdef TARGET_NR_syscall
6175 case TARGET_NR_syscall:
6176 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6177 arg6, arg7, arg8, 0);
6180 case TARGET_NR_wait4:
6183 abi_long status_ptr = arg2;
6184 struct rusage rusage, *rusage_ptr;
6185 abi_ulong target_rusage = arg4;
6187 rusage_ptr = &rusage;
6190 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6191 if (!is_error(ret)) {
6193 status = host_to_target_waitstatus(status);
6194 if (put_user_s32(status, status_ptr))
6198 host_to_target_rusage(target_rusage, &rusage);
6202 #ifdef TARGET_NR_swapoff
6203 case TARGET_NR_swapoff:
6204 if (!(p = lock_user_string(arg1)))
6206 ret = get_errno(swapoff(p));
6207 unlock_user(p, arg1, 0);
6210 case TARGET_NR_sysinfo:
6212 struct target_sysinfo *target_value;
6213 struct sysinfo value;
6214 ret = get_errno(sysinfo(&value));
6215 if (!is_error(ret) && arg1)
6217 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6219 __put_user(value.uptime, &target_value->uptime);
6220 __put_user(value.loads[0], &target_value->loads[0]);
6221 __put_user(value.loads[1], &target_value->loads[1]);
6222 __put_user(value.loads[2], &target_value->loads[2]);
6223 __put_user(value.totalram, &target_value->totalram);
6224 __put_user(value.freeram, &target_value->freeram);
6225 __put_user(value.sharedram, &target_value->sharedram);
6226 __put_user(value.bufferram, &target_value->bufferram);
6227 __put_user(value.totalswap, &target_value->totalswap);
6228 __put_user(value.freeswap, &target_value->freeswap);
6229 __put_user(value.procs, &target_value->procs);
6230 __put_user(value.totalhigh, &target_value->totalhigh);
6231 __put_user(value.freehigh, &target_value->freehigh);
6232 __put_user(value.mem_unit, &target_value->mem_unit);
6233 unlock_user_struct(target_value, arg1, 1);
6237 #ifdef TARGET_NR_ipc
6239 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6242 #ifdef TARGET_NR_semget
6243 case TARGET_NR_semget:
6244 ret = get_errno(semget(arg1, arg2, arg3));
6247 #ifdef TARGET_NR_semop
6248 case TARGET_NR_semop:
6249 ret = get_errno(do_semop(arg1, arg2, arg3));
6252 #ifdef TARGET_NR_semctl
6253 case TARGET_NR_semctl:
6254 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6257 #ifdef TARGET_NR_msgctl
6258 case TARGET_NR_msgctl:
6259 ret = do_msgctl(arg1, arg2, arg3);
6262 #ifdef TARGET_NR_msgget
6263 case TARGET_NR_msgget:
6264 ret = get_errno(msgget(arg1, arg2));
6267 #ifdef TARGET_NR_msgrcv
6268 case TARGET_NR_msgrcv:
6269 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6272 #ifdef TARGET_NR_msgsnd
6273 case TARGET_NR_msgsnd:
6274 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6277 #ifdef TARGET_NR_shmget
6278 case TARGET_NR_shmget:
6279 ret = get_errno(shmget(arg1, arg2, arg3));
6282 #ifdef TARGET_NR_shmctl
6283 case TARGET_NR_shmctl:
6284 ret = do_shmctl(arg1, arg2, arg3);
6287 #ifdef TARGET_NR_shmat
6288 case TARGET_NR_shmat:
6289 ret = do_shmat(arg1, arg2, arg3);
6292 #ifdef TARGET_NR_shmdt
6293 case TARGET_NR_shmdt:
6294 ret = do_shmdt(arg1);
6297 case TARGET_NR_fsync:
6298 ret = get_errno(fsync(arg1));
6300 case TARGET_NR_clone:
6301 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6302 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6303 #elif defined(TARGET_CRIS)
6304 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6305 #elif defined(TARGET_S390X)
6306 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6308 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6311 #ifdef __NR_exit_group
6312 /* new thread calls */
6313 case TARGET_NR_exit_group:
6317 gdb_exit(cpu_env, arg1);
6318 ret = get_errno(exit_group(arg1));
6321 case TARGET_NR_setdomainname:
6322 if (!(p = lock_user_string(arg1)))
6324 ret = get_errno(setdomainname(p, arg2));
6325 unlock_user(p, arg1, 0);
6327 case TARGET_NR_uname:
6328 /* no need to transcode because we use the linux syscall */
6330 struct new_utsname * buf;
6332 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6334 ret = get_errno(sys_uname(buf));
6335 if (!is_error(ret)) {
6336            /* Overwrite the native machine name with whatever is being
6338 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6339 /* Allow the user to override the reported release. */
6340 if (qemu_uname_release && *qemu_uname_release)
6341 strcpy (buf->release, qemu_uname_release);
6343 unlock_user_struct(buf, arg1, 1);
6347 case TARGET_NR_modify_ldt:
6348 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6350 #if !defined(TARGET_X86_64)
6351 case TARGET_NR_vm86old:
6353 case TARGET_NR_vm86:
6354 ret = do_vm86(cpu_env, arg1, arg2);
6358 case TARGET_NR_adjtimex:
6360 #ifdef TARGET_NR_create_module
6361 case TARGET_NR_create_module:
6363 case TARGET_NR_init_module:
6364 case TARGET_NR_delete_module:
6365 #ifdef TARGET_NR_get_kernel_syms
6366 case TARGET_NR_get_kernel_syms:
6369 case TARGET_NR_quotactl:
6371 case TARGET_NR_getpgid:
6372 ret = get_errno(getpgid(arg1));
6374 case TARGET_NR_fchdir:
6375 ret = get_errno(fchdir(arg1));
6377 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6378 case TARGET_NR_bdflush:
6381 #ifdef TARGET_NR_sysfs
6382 case TARGET_NR_sysfs:
6385 case TARGET_NR_personality:
6386 ret = get_errno(personality(arg1));
6388 #ifdef TARGET_NR_afs_syscall
6389 case TARGET_NR_afs_syscall:
6392 #ifdef TARGET_NR__llseek /* Not on alpha */
6393 case TARGET_NR__llseek:
6396 #if !defined(__NR_llseek)
6397 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6399 ret = get_errno(res);
6404 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6406 if ((ret == 0) && put_user_s64(res, arg4)) {
6412 case TARGET_NR_getdents:
6413 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6415 struct target_dirent *target_dirp;
6416 struct linux_dirent *dirp;
6417 abi_long count = arg3;
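/* The host linux_dirent carries long-sized d_ino/d_off while the 32-bit
   target expects abi_long fields, so each record is converted and shrunk by
   hand into the guest buffer. */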
6419 dirp = malloc(count);
6421 ret = -TARGET_ENOMEM;
6425 ret = get_errno(sys_getdents(arg1, dirp, count));
6426 if (!is_error(ret)) {
6427 struct linux_dirent *de;
6428 struct target_dirent *tde;
6430 int reclen, treclen;
6431 int count1, tnamelen;
6435 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6439 reclen = de->d_reclen;
6440 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
6441 tde->d_reclen = tswap16(treclen);
6442 tde->d_ino = tswapl(de->d_ino);
6443 tde->d_off = tswapl(de->d_off);
6444 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
6447 /* XXX: may not be correct */
6448 pstrcpy(tde->d_name, tnamelen, de->d_name);
6449 de = (struct linux_dirent *)((char *)de + reclen);
6451 tde = (struct target_dirent *)((char *)tde + treclen);
6455 unlock_user(target_dirp, arg2, ret);
6461 struct linux_dirent *dirp;
6462 abi_long count = arg3;
6464 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6466 ret = get_errno(sys_getdents(arg1, dirp, count));
6467 if (!is_error(ret)) {
6468 struct linux_dirent *de;
6473 reclen = de->d_reclen;
6476 de->d_reclen = tswap16(reclen);
6477 tswapls(&de->d_ino);
6478 tswapls(&de->d_off);
6479 de = (struct linux_dirent *)((char *)de + reclen);
6483 unlock_user(dirp, arg2, ret);
6487 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6488 case TARGET_NR_getdents64:
6490 struct linux_dirent64 *dirp;
6491 abi_long count = arg3;
6492 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6494 ret = get_errno(sys_getdents64(arg1, dirp, count));
6495 if (!is_error(ret)) {
6496 struct linux_dirent64 *de;
6501 reclen = de->d_reclen;
6504 de->d_reclen = tswap16(reclen);
6505 tswap64s((uint64_t *)&de->d_ino);
6506 tswap64s((uint64_t *)&de->d_off);
6507 de = (struct linux_dirent64 *)((char *)de + reclen);
6511 unlock_user(dirp, arg2, ret);
6514 #endif /* TARGET_NR_getdents64 */
6515 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6517 case TARGET_NR_select:
6519 case TARGET_NR__newselect:
6521 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6524 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6525 # ifdef TARGET_NR_poll
6526 case TARGET_NR_poll:
6528 # ifdef TARGET_NR_ppoll
6529 case TARGET_NR_ppoll:
6532 struct target_pollfd *target_pfd;
6533 unsigned int nfds = arg2;
6538 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
6542 pfd = alloca(sizeof(struct pollfd) * nfds);
6543 for(i = 0; i < nfds; i++) {
6544 pfd[i].fd = tswap32(target_pfd[i].fd);
6545 pfd[i].events = tswap16(target_pfd[i].events);
6548 # ifdef TARGET_NR_ppoll
6549 if (num == TARGET_NR_ppoll) {
6550 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
6551 target_sigset_t *target_set;
6552 sigset_t _set, *set = &_set;
6555 if (target_to_host_timespec(timeout_ts, arg3)) {
6556 unlock_user(target_pfd, arg1, 0);
6564 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
6566 unlock_user(target_pfd, arg1, 0);
6569 target_to_host_sigset(set, target_set);
6574 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
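/* The raw ppoll syscall takes the size of the (already host-converted)
   sigset as its final argument, hence _NSIG / 8. */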
6576 if (!is_error(ret) && arg3) {
6577 host_to_target_timespec(arg3, timeout_ts);
6580 unlock_user(target_set, arg4, 0);
6584 ret = get_errno(poll(pfd, nfds, timeout));
6586 if (!is_error(ret)) {
6587 for(i = 0; i < nfds; i++) {
6588 target_pfd[i].revents = tswap16(pfd[i].revents);
6591 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
6595 case TARGET_NR_flock:
6596 /* NOTE: the flock constant seems to be the same for every
6598 ret = get_errno(flock(arg1, arg2));
6600 case TARGET_NR_readv:
6605 vec = alloca(count * sizeof(struct iovec));
6606 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
6608 ret = get_errno(readv(arg1, vec, count));
6609 unlock_iovec(vec, arg2, count, 1);
6612 case TARGET_NR_writev:
6617 vec = alloca(count * sizeof(struct iovec));
6618 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
6620 ret = get_errno(writev(arg1, vec, count));
6621 unlock_iovec(vec, arg2, count, 0);
6624 case TARGET_NR_getsid:
6625 ret = get_errno(getsid(arg1));
6627 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6628 case TARGET_NR_fdatasync:
6629 ret = get_errno(fdatasync(arg1));
6632 case TARGET_NR__sysctl:
6633 /* We don't implement this, but ENOTDIR is always a safe
6635 ret = -TARGET_ENOTDIR;
6637 case TARGET_NR_sched_getaffinity:
6639 unsigned int mask_size;
6640 unsigned long *mask;
6643         * sched_getaffinity needs the mask length to be a multiple of sizeof(ulong),
6644         * so we need to take care of mismatches between target and host ulong sizes.
6646 if (arg2 & (sizeof(abi_ulong) - 1)) {
6647 ret = -TARGET_EINVAL;
6650 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6652 mask = alloca(mask_size);
6653 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
6655 if (!is_error(ret)) {
6656 if (copy_to_user(arg3, mask, ret)) {
6662 case TARGET_NR_sched_setaffinity:
6664 unsigned int mask_size;
6665 unsigned long *mask;
6668         * sched_setaffinity needs the mask length to be a multiple of sizeof(ulong),
6669         * so we need to take care of mismatches between target and host ulong sizes.
6671 if (arg2 & (sizeof(abi_ulong) - 1)) {
6672 ret = -TARGET_EINVAL;
6675 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6677 mask = alloca(mask_size);
6678 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
6681 memcpy(mask, p, arg2);
6682        unlock_user_struct(p, arg3, 0);
6684 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
6687 case TARGET_NR_sched_setparam:
6689 struct sched_param *target_schp;
6690 struct sched_param schp;
6692 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
6694 schp.sched_priority = tswap32(target_schp->sched_priority);
6695 unlock_user_struct(target_schp, arg2, 0);
6696 ret = get_errno(sched_setparam(arg1, &schp));
6699 case TARGET_NR_sched_getparam:
6701 struct sched_param *target_schp;
6702 struct sched_param schp;
6703 ret = get_errno(sched_getparam(arg1, &schp));
6704 if (!is_error(ret)) {
6705 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
6707 target_schp->sched_priority = tswap32(schp.sched_priority);
6708 unlock_user_struct(target_schp, arg2, 1);
6712 case TARGET_NR_sched_setscheduler:
6714 struct sched_param *target_schp;
6715 struct sched_param schp;
6716 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
6718 schp.sched_priority = tswap32(target_schp->sched_priority);
6719 unlock_user_struct(target_schp, arg3, 0);
6720 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
6723 case TARGET_NR_sched_getscheduler:
6724 ret = get_errno(sched_getscheduler(arg1));
6726 case TARGET_NR_sched_yield:
6727 ret = get_errno(sched_yield());
6729 case TARGET_NR_sched_get_priority_max:
6730 ret = get_errno(sched_get_priority_max(arg1));
6732 case TARGET_NR_sched_get_priority_min:
6733 ret = get_errno(sched_get_priority_min(arg1));
6735 case TARGET_NR_sched_rr_get_interval:
6738 ret = get_errno(sched_rr_get_interval(arg1, &ts));
6739 if (!is_error(ret)) {
6740 host_to_target_timespec(arg2, &ts);
6744 case TARGET_NR_nanosleep:
6746 struct timespec req, rem;
6747 target_to_host_timespec(&req, arg1);
6748 ret = get_errno(nanosleep(&req, &rem));
6749 if (is_error(ret) && arg2) {
6750 host_to_target_timespec(arg2, &rem);
6754 #ifdef TARGET_NR_query_module
6755 case TARGET_NR_query_module:
6758 #ifdef TARGET_NR_nfsservctl
6759 case TARGET_NR_nfsservctl:
6762 case TARGET_NR_prctl:
6765 case PR_GET_PDEATHSIG:
6768 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
6769 if (!is_error(ret) && arg2
6770 && put_user_ual(deathsig, arg2))
6775 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
6779 #ifdef TARGET_NR_arch_prctl
6780 case TARGET_NR_arch_prctl:
6781 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6782 ret = do_arch_prctl(cpu_env, arg1, arg2);
6788 #ifdef TARGET_NR_pread
6789 case TARGET_NR_pread:
6791 if (((CPUARMState *)cpu_env)->eabi)
6794 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6796 ret = get_errno(pread(arg1, p, arg3, arg4));
6797 unlock_user(p, arg2, ret);
6799 case TARGET_NR_pwrite:
6801 if (((CPUARMState *)cpu_env)->eabi)
6804 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6806 ret = get_errno(pwrite(arg1, p, arg3, arg4));
6807 unlock_user(p, arg2, 0);
6810 #ifdef TARGET_NR_pread64
6811 case TARGET_NR_pread64:
6812 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6814 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
6815 unlock_user(p, arg2, ret);
6817 case TARGET_NR_pwrite64:
6818 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6820 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
6821 unlock_user(p, arg2, 0);
6824 case TARGET_NR_getcwd:
6825 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
6827 ret = get_errno(sys_getcwd1(p, arg2));
6828 unlock_user(p, arg1, ret);
6830 case TARGET_NR_capget:
6832 case TARGET_NR_capset:
6834 case TARGET_NR_sigaltstack:
6835 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6836 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6837 defined(TARGET_M68K) || defined(TARGET_S390X)
6838 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
6843 case TARGET_NR_sendfile:
6845 #ifdef TARGET_NR_getpmsg
6846 case TARGET_NR_getpmsg:
6849 #ifdef TARGET_NR_putpmsg
6850 case TARGET_NR_putpmsg:
6853 #ifdef TARGET_NR_vfork
6854 case TARGET_NR_vfork:
6855 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
6859 #ifdef TARGET_NR_ugetrlimit
6860 case TARGET_NR_ugetrlimit:
6863 ret = get_errno(getrlimit(arg1, &rlim));
6864 if (!is_error(ret)) {
6865 struct target_rlimit *target_rlim;
6866 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6868 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6869 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6870 unlock_user_struct(target_rlim, arg2, 1);
6875 #ifdef TARGET_NR_truncate64
6876 case TARGET_NR_truncate64:
6877 if (!(p = lock_user_string(arg1)))
6879 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
6880 unlock_user(p, arg1, 0);
6883 #ifdef TARGET_NR_ftruncate64
6884 case TARGET_NR_ftruncate64:
6885 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
6888 #ifdef TARGET_NR_stat64
6889 case TARGET_NR_stat64:
6890 if (!(p = lock_user_string(arg1)))
6892 ret = get_errno(stat(path(p), &st));
6893 unlock_user(p, arg1, 0);
6895 ret = host_to_target_stat64(cpu_env, arg2, &st);
6898 #ifdef TARGET_NR_lstat64
6899 case TARGET_NR_lstat64:
6900 if (!(p = lock_user_string(arg1)))
6902 ret = get_errno(lstat(path(p), &st));
6903 unlock_user(p, arg1, 0);
6905 ret = host_to_target_stat64(cpu_env, arg2, &st);
6908 #ifdef TARGET_NR_fstat64
6909 case TARGET_NR_fstat64:
6910 ret = get_errno(fstat(arg1, &st));
6912 ret = host_to_target_stat64(cpu_env, arg2, &st);
6915 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6916 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6917 #ifdef TARGET_NR_fstatat64
6918 case TARGET_NR_fstatat64:
6920 #ifdef TARGET_NR_newfstatat
6921 case TARGET_NR_newfstatat:
6923 if (!(p = lock_user_string(arg2)))
6925 #ifdef __NR_fstatat64
6926 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
6928 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
6931 ret = host_to_target_stat64(cpu_env, arg3, &st);
6934 case TARGET_NR_lchown:
6935 if (!(p = lock_user_string(arg1)))
6937 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
6938 unlock_user(p, arg1, 0);
6940 #ifdef TARGET_NR_getuid
6941 case TARGET_NR_getuid:
6942 ret = get_errno(high2lowuid(getuid()));
6945 #ifdef TARGET_NR_getgid
6946 case TARGET_NR_getgid:
6947 ret = get_errno(high2lowgid(getgid()));
6950 #ifdef TARGET_NR_geteuid
6951 case TARGET_NR_geteuid:
6952 ret = get_errno(high2lowuid(geteuid()));
6955 #ifdef TARGET_NR_getegid
6956 case TARGET_NR_getegid:
6957 ret = get_errno(high2lowgid(getegid()));
6960 case TARGET_NR_setreuid:
6961 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
6963 case TARGET_NR_setregid:
6964 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
6966 case TARGET_NR_getgroups:
6968 int gidsetsize = arg1;
6969 target_id *target_grouplist;
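/* This is the legacy getgroups variant: group IDs are narrowed with
   high2lowgid() into the target's short target_id format. */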
6973 grouplist = alloca(gidsetsize * sizeof(gid_t));
6974 ret = get_errno(getgroups(gidsetsize, grouplist));
6975 if (gidsetsize == 0)
6977 if (!is_error(ret)) {
6978 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
6979 if (!target_grouplist)
6981            for (i = 0; i < ret; i++)
6982 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
6983 unlock_user(target_grouplist, arg2, gidsetsize * 2);
6987 case TARGET_NR_setgroups:
6989 int gidsetsize = arg1;
6990 target_id *target_grouplist;
6994 grouplist = alloca(gidsetsize * sizeof(gid_t));
6995 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
6996 if (!target_grouplist) {
6997 ret = -TARGET_EFAULT;
7000            for (i = 0; i < gidsetsize; i++)
7001 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7002 unlock_user(target_grouplist, arg2, 0);
7003 ret = get_errno(setgroups(gidsetsize, grouplist));
7006 case TARGET_NR_fchown:
7007 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7009 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7010 case TARGET_NR_fchownat:
7011 if (!(p = lock_user_string(arg2)))
7013 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7014 unlock_user(p, arg2, 0);
7017 #ifdef TARGET_NR_setresuid
7018 case TARGET_NR_setresuid:
7019 ret = get_errno(setresuid(low2highuid(arg1),
7021 low2highuid(arg3)));
7024 #ifdef TARGET_NR_getresuid
7025 case TARGET_NR_getresuid:
7027 uid_t ruid, euid, suid;
7028 ret = get_errno(getresuid(&ruid, &euid, &suid));
7029 if (!is_error(ret)) {
7030 if (put_user_u16(high2lowuid(ruid), arg1)
7031 || put_user_u16(high2lowuid(euid), arg2)
7032 || put_user_u16(high2lowuid(suid), arg3))
7038 #ifdef TARGET_NR_getresgid
7039 case TARGET_NR_setresgid:
7040 ret = get_errno(setresgid(low2highgid(arg1),
7042 low2highgid(arg3)));
7045 #ifdef TARGET_NR_getresgid
7046 case TARGET_NR_getresgid:
7048 gid_t rgid, egid, sgid;
7049 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7050 if (!is_error(ret)) {
7051 if (put_user_u16(high2lowgid(rgid), arg1)
7052 || put_user_u16(high2lowgid(egid), arg2)
7053 || put_user_u16(high2lowgid(sgid), arg3))
7059 case TARGET_NR_chown:
7060 if (!(p = lock_user_string(arg1)))
7062 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7063 unlock_user(p, arg1, 0);
7065 case TARGET_NR_setuid:
7066 ret = get_errno(setuid(low2highuid(arg1)));
7068 case TARGET_NR_setgid:
7069 ret = get_errno(setgid(low2highgid(arg1)));
7071 case TARGET_NR_setfsuid:
7072 ret = get_errno(setfsuid(arg1));
7074 case TARGET_NR_setfsgid:
7075 ret = get_errno(setfsgid(arg1));
7078 #ifdef TARGET_NR_lchown32
7079 case TARGET_NR_lchown32:
7080 if (!(p = lock_user_string(arg1)))
7082 ret = get_errno(lchown(p, arg2, arg3));
7083 unlock_user(p, arg1, 0);
7086 #ifdef TARGET_NR_getuid32
7087 case TARGET_NR_getuid32:
7088 ret = get_errno(getuid());
7092 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7093 /* Alpha specific */
7094 case TARGET_NR_getxuid:
7098            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
7100 ret = get_errno(getuid());
7103 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7104 /* Alpha specific */
7105 case TARGET_NR_getxgid:
7109            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
7111 ret = get_errno(getgid());
7114 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7115 /* Alpha specific */
7116 case TARGET_NR_osf_getsysinfo:
7117 ret = -TARGET_EOPNOTSUPP;
7119 case TARGET_GSI_IEEE_FP_CONTROL:
7121 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7123 /* Copied from linux ieee_fpcr_to_swcr. */
7124 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7125 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7126 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7127 | SWCR_TRAP_ENABLE_DZE
7128 | SWCR_TRAP_ENABLE_OVF);
7129 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7130 | SWCR_TRAP_ENABLE_INE);
7131 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7132 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7134 if (put_user_u64 (swcr, arg2))
7140 /* case GSI_IEEE_STATE_AT_SIGNAL:
7141 -- Not implemented in linux kernel.
7143 -- Retrieves current unaligned access state; not much used.
7145 -- Retrieves implver information; surely not used.
7147 -- Grabs a copy of the HWRPB; surely not used.
7152 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7153 /* Alpha specific */
7154 case TARGET_NR_osf_setsysinfo:
7155 ret = -TARGET_EOPNOTSUPP;
7157 case TARGET_SSI_IEEE_FP_CONTROL:
7158 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7160 uint64_t swcr, fpcr, orig_fpcr;
7162 if (get_user_u64 (swcr, arg2))
7164 orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
7165 fpcr = orig_fpcr & FPCR_DYN_MASK;
7167 /* Copied from linux ieee_swcr_to_fpcr. */
7168 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7169 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7170 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7171 | SWCR_TRAP_ENABLE_DZE
7172 | SWCR_TRAP_ENABLE_OVF)) << 48;
7173 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7174 | SWCR_TRAP_ENABLE_INE)) << 57;
7175 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7176 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7178 cpu_alpha_store_fpcr (cpu_env, fpcr);
7181 if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
7182 /* Old exceptions are not signaled. */
7183 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7185 /* If any exceptions set by this call, and are unmasked, send a signal. */
7192 /* case SSI_NVPAIRS:
7193 -- Used with SSIN_UACPROC to enable unaligned accesses.
7194 case SSI_IEEE_STATE_AT_SIGNAL:
7195 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7196 -- Not implemented in linux kernel
7201 #ifdef TARGET_NR_osf_sigprocmask
7202 /* Alpha specific. */
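/* Unlike sigprocmask(2), the previous signal mask is returned as the syscall
   result rather than through a user pointer. */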
7203 case TARGET_NR_osf_sigprocmask:
7207 sigset_t set, oldset;
7210 case TARGET_SIG_BLOCK:
7213 case TARGET_SIG_UNBLOCK:
7216 case TARGET_SIG_SETMASK:
7220 ret = -TARGET_EINVAL;
7224 target_to_host_old_sigset(&set, &mask);
7225 sigprocmask(how, &set, &oldset);
7226 host_to_target_old_sigset(&mask, &oldset);
7232 #ifdef TARGET_NR_getgid32
7233 case TARGET_NR_getgid32:
7234 ret = get_errno(getgid());
7237 #ifdef TARGET_NR_geteuid32
7238 case TARGET_NR_geteuid32:
7239 ret = get_errno(geteuid());
7242 #ifdef TARGET_NR_getegid32
7243 case TARGET_NR_getegid32:
7244 ret = get_errno(getegid());
7247 #ifdef TARGET_NR_setreuid32
7248 case TARGET_NR_setreuid32:
7249 ret = get_errno(setreuid(arg1, arg2));
7252 #ifdef TARGET_NR_setregid32
7253 case TARGET_NR_setregid32:
7254 ret = get_errno(setregid(arg1, arg2));
7257 #ifdef TARGET_NR_getgroups32
7258 case TARGET_NR_getgroups32:
7260 int gidsetsize = arg1;
7261 uint32_t *target_grouplist;
7265 grouplist = alloca(gidsetsize * sizeof(gid_t));
7266 ret = get_errno(getgroups(gidsetsize, grouplist));
7267 if (gidsetsize == 0)
7269 if (!is_error(ret)) {
7270 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7271 if (!target_grouplist) {
7272 ret = -TARGET_EFAULT;
7275 for(i = 0;i < ret; i++)
7276 target_grouplist[i] = tswap32(grouplist[i]);
7277 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7282 #ifdef TARGET_NR_setgroups32
7283 case TARGET_NR_setgroups32:
7285 int gidsetsize = arg1;
7286 uint32_t *target_grouplist;
7290 grouplist = alloca(gidsetsize * sizeof(gid_t));
7291 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7292 if (!target_grouplist) {
7293 ret = -TARGET_EFAULT;
7296 for(i = 0;i < gidsetsize; i++)
7297 grouplist[i] = tswap32(target_grouplist[i]);
7298 unlock_user(target_grouplist, arg2, 0);
7299 ret = get_errno(setgroups(gidsetsize, grouplist));
7303 #ifdef TARGET_NR_fchown32
7304 case TARGET_NR_fchown32:
7305 ret = get_errno(fchown(arg1, arg2, arg3));
7308 #ifdef TARGET_NR_setresuid32
7309 case TARGET_NR_setresuid32:
7310 ret = get_errno(setresuid(arg1, arg2, arg3));
7313 #ifdef TARGET_NR_getresuid32
7314 case TARGET_NR_getresuid32:
7316 uid_t ruid, euid, suid;
7317 ret = get_errno(getresuid(&ruid, &euid, &suid));
7318 if (!is_error(ret)) {
7319 if (put_user_u32(ruid, arg1)
7320 || put_user_u32(euid, arg2)
7321 || put_user_u32(suid, arg3))
7327 #ifdef TARGET_NR_setresgid32
7328 case TARGET_NR_setresgid32:
7329 ret = get_errno(setresgid(arg1, arg2, arg3));
7332 #ifdef TARGET_NR_getresgid32
7333 case TARGET_NR_getresgid32:
7335 gid_t rgid, egid, sgid;
7336 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7337 if (!is_error(ret)) {
7338 if (put_user_u32(rgid, arg1)
7339 || put_user_u32(egid, arg2)
7340 || put_user_u32(sgid, arg3))
7346 #ifdef TARGET_NR_chown32
7347 case TARGET_NR_chown32:
7348 if (!(p = lock_user_string(arg1)))
7350 ret = get_errno(chown(p, arg2, arg3));
7351 unlock_user(p, arg1, 0);
7354 #ifdef TARGET_NR_setuid32
7355 case TARGET_NR_setuid32:
7356 ret = get_errno(setuid(arg1));
7359 #ifdef TARGET_NR_setgid32
7360 case TARGET_NR_setgid32:
7361 ret = get_errno(setgid(arg1));
7364 #ifdef TARGET_NR_setfsuid32
7365 case TARGET_NR_setfsuid32:
7366 ret = get_errno(setfsuid(arg1));
7369 #ifdef TARGET_NR_setfsgid32
7370 case TARGET_NR_setfsgid32:
7371 ret = get_errno(setfsgid(arg1));
7375 case TARGET_NR_pivot_root:
7377 #ifdef TARGET_NR_mincore
7378 case TARGET_NR_mincore:
7381 ret = -TARGET_EFAULT;
7382 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
7384 if (!(p = lock_user_string(arg3)))
7386 ret = get_errno(mincore(a, arg2, p));
7387 unlock_user(p, arg3, ret);
7389 unlock_user(a, arg1, 0);
7393 #ifdef TARGET_NR_arm_fadvise64_64
7394 case TARGET_NR_arm_fadvise64_64:
7397 * arm_fadvise64_64 looks like fadvise64_64 but
7398 * with different argument order
7406 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7407 #ifdef TARGET_NR_fadvise64_64
7408 case TARGET_NR_fadvise64_64:
7410 #ifdef TARGET_NR_fadvise64
7411 case TARGET_NR_fadvise64:
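/* Some targets number POSIX_FADV_DONTNEED/NOREUSE as 6/7; remap those to the
   host values and turn the conflicting 4/5 into deliberately invalid values. */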
7415 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
7416 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
7417 case 6: arg4 = POSIX_FADV_DONTNEED; break;
7418 case 7: arg4 = POSIX_FADV_NOREUSE; break;
7422 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
7425 #ifdef TARGET_NR_madvise
7426 case TARGET_NR_madvise:
7427 /* A straight passthrough may not be safe because qemu sometimes
7428 turns private file-backed mappings into anonymous mappings.
7429 This will break MADV_DONTNEED.
7430 This is a hint, so ignoring and returning success is ok. */
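/* fcntl64: on 32-bit targets the 64-bit flock layout differs between guest and
   host (and between ARM OABI and EABI), so each field is converted explicitly. */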
7434 #if TARGET_ABI_BITS == 32
7435 case TARGET_NR_fcntl64:
7439 struct target_flock64 *target_fl;
7441 struct target_eabi_flock64 *target_efl;
7444 cmd = target_to_host_fcntl_cmd(arg2);
7445 if (cmd == -TARGET_EINVAL)
7449 case TARGET_F_GETLK64:
7451 if (((CPUARMState *)cpu_env)->eabi) {
7452 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7454 fl.l_type = tswap16(target_efl->l_type);
7455 fl.l_whence = tswap16(target_efl->l_whence);
7456 fl.l_start = tswap64(target_efl->l_start);
7457 fl.l_len = tswap64(target_efl->l_len);
7458 fl.l_pid = tswap32(target_efl->l_pid);
7459 unlock_user_struct(target_efl, arg3, 0);
7463 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7465 fl.l_type = tswap16(target_fl->l_type);
7466 fl.l_whence = tswap16(target_fl->l_whence);
7467 fl.l_start = tswap64(target_fl->l_start);
7468 fl.l_len = tswap64(target_fl->l_len);
7469 fl.l_pid = tswap32(target_fl->l_pid);
7470 unlock_user_struct(target_fl, arg3, 0);
7472 ret = get_errno(fcntl(arg1, cmd, &fl));
7475 if (((CPUARMState *)cpu_env)->eabi) {
7476 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
7478 target_efl->l_type = tswap16(fl.l_type);
7479 target_efl->l_whence = tswap16(fl.l_whence);
7480 target_efl->l_start = tswap64(fl.l_start);
7481 target_efl->l_len = tswap64(fl.l_len);
7482 target_efl->l_pid = tswap32(fl.l_pid);
7483 unlock_user_struct(target_efl, arg3, 1);
7487 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
7489 target_fl->l_type = tswap16(fl.l_type);
7490 target_fl->l_whence = tswap16(fl.l_whence);
7491 target_fl->l_start = tswap64(fl.l_start);
7492 target_fl->l_len = tswap64(fl.l_len);
7493 target_fl->l_pid = tswap32(fl.l_pid);
7494 unlock_user_struct(target_fl, arg3, 1);
7499 case TARGET_F_SETLK64:
7500 case TARGET_F_SETLKW64:
7502 if (((CPUARMState *)cpu_env)->eabi) {
7503 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7505 fl.l_type = tswap16(target_efl->l_type);
7506 fl.l_whence = tswap16(target_efl->l_whence);
7507 fl.l_start = tswap64(target_efl->l_start);
7508 fl.l_len = tswap64(target_efl->l_len);
7509 fl.l_pid = tswap32(target_efl->l_pid);
7510 unlock_user_struct(target_efl, arg3, 0);
7514 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7516 fl.l_type = tswap16(target_fl->l_type);
7517 fl.l_whence = tswap16(target_fl->l_whence);
7518 fl.l_start = tswap64(target_fl->l_start);
7519 fl.l_len = tswap64(target_fl->l_len);
7520 fl.l_pid = tswap32(target_fl->l_pid);
7521 unlock_user_struct(target_fl, arg3, 0);
7523 ret = get_errno(fcntl(arg1, cmd, &fl));
7526 ret = do_fcntl(arg1, arg2, arg3);
7532 #ifdef TARGET_NR_cacheflush
7533 case TARGET_NR_cacheflush:
7534 /* self-modifying code is handled automatically, so nothing needed */
7538 #ifdef TARGET_NR_security
7539 case TARGET_NR_security:
7542 #ifdef TARGET_NR_getpagesize
7543 case TARGET_NR_getpagesize:
7544 ret = TARGET_PAGE_SIZE;
7547 case TARGET_NR_gettid:
7548 ret = get_errno(gettid());
7550 #ifdef TARGET_NR_readahead
7551 case TARGET_NR_readahead:
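/* On 32-bit ABIs the 64-bit offset is split across two argument registers;
   ARM EABI inserts an alignment register first, shifting the remaining
   arguments down by one. */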
7552 #if TARGET_ABI_BITS == 32
7554 if (((CPUARMState *)cpu_env)->eabi)
7561 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
7563 ret = get_errno(readahead(arg1, arg2, arg3));
7567 #ifdef TARGET_NR_setxattr
7568 case TARGET_NR_setxattr:
7569 case TARGET_NR_lsetxattr:
7570 case TARGET_NR_fsetxattr:
7571 case TARGET_NR_getxattr:
7572 case TARGET_NR_lgetxattr:
7573 case TARGET_NR_fgetxattr:
7574 case TARGET_NR_listxattr:
7575 case TARGET_NR_llistxattr:
7576 case TARGET_NR_flistxattr:
7577 case TARGET_NR_removexattr:
7578 case TARGET_NR_lremovexattr:
7579 case TARGET_NR_fremovexattr:
7580 ret = -TARGET_EOPNOTSUPP;
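/* set_thread_area/get_thread_area: each target keeps its TLS pointer in a
   different place (MIPS tls_value, the CRIS PID register, an x86 descriptor),
   so the handling is per-target. */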
7583 #ifdef TARGET_NR_set_thread_area
7584 case TARGET_NR_set_thread_area:
7585 #if defined(TARGET_MIPS)
7586 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
7589 #elif defined(TARGET_CRIS)
7591 ret = -TARGET_EINVAL;
7593 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
7597 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
7598 ret = do_set_thread_area(cpu_env, arg1);
7601 goto unimplemented_nowarn;
7604 #ifdef TARGET_NR_get_thread_area
7605 case TARGET_NR_get_thread_area:
7606 #if defined(TARGET_I386) && defined(TARGET_ABI32)
7607 ret = do_get_thread_area(cpu_env, arg1);
7609 goto unimplemented_nowarn;
7612 #ifdef TARGET_NR_getdomainname
7613 case TARGET_NR_getdomainname:
7614 goto unimplemented_nowarn;
7617 #ifdef TARGET_NR_clock_gettime
7618 case TARGET_NR_clock_gettime:
7621 ret = get_errno(clock_gettime(arg1, &ts));
7622 if (!is_error(ret)) {
7623 host_to_target_timespec(arg2, &ts);
7628 #ifdef TARGET_NR_clock_getres
7629 case TARGET_NR_clock_getres:
7632 ret = get_errno(clock_getres(arg1, &ts));
7633 if (!is_error(ret)) {
7634 host_to_target_timespec(arg2, &ts);
7639 #ifdef TARGET_NR_clock_nanosleep
7640 case TARGET_NR_clock_nanosleep:
7643 target_to_host_timespec(&ts, arg3);
7644 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
7646 host_to_target_timespec(arg4, &ts);
7651 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
7652 case TARGET_NR_set_tid_address:
7653 ret = get_errno(set_tid_address((int *)g2h(arg1)));
7657 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
7658 case TARGET_NR_tkill:
7659 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
7663 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
7664 case TARGET_NR_tgkill:
7665 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
7666 target_to_host_signal(arg3)));
7670 #ifdef TARGET_NR_set_robust_list
7671 case TARGET_NR_set_robust_list:
7672 goto unimplemented_nowarn;
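/* utimensat: a null guest timespec pointer means "set to the current time",
   and a null pathname applies the times to the descriptor itself. */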
7675 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
7676 case TARGET_NR_utimensat:
7678 struct timespec *tsp, ts[2];
7682 target_to_host_timespec(ts, arg3);
7683 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
7687 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
7689 if (!(p = lock_user_string(arg2))) {
7690 ret = -TARGET_EFAULT;
7693 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
7694 unlock_user(p, arg2, 0);
7699 #if defined(CONFIG_USE_NPTL)
7700 case TARGET_NR_futex:
7701 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
7704 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
7705 case TARGET_NR_inotify_init:
7706 ret = get_errno(sys_inotify_init());
7709 #ifdef CONFIG_INOTIFY1
7710 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
7711 case TARGET_NR_inotify_init1:
7712 ret = get_errno(sys_inotify_init1(arg1));
7716 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
7717 case TARGET_NR_inotify_add_watch:
7718 p = lock_user_string(arg2);
7719 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
7720 unlock_user(p, arg2, 0);
7723 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
7724 case TARGET_NR_inotify_rm_watch:
7725 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
7729 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
7730 case TARGET_NR_mq_open:
7732 struct mq_attr posix_mq_attr;
7734 p = lock_user_string(arg1 - 1);
7736 copy_from_user_mq_attr (&posix_mq_attr, arg4);
7737 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
7738 unlock_user (p, arg1, 0);
7742 case TARGET_NR_mq_unlink:
7743 p = lock_user_string(arg1 - 1);
7744 ret = get_errno(mq_unlink(p));
7745 unlock_user (p, arg1, 0);
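/* mq_timedsend/mq_timedreceive: when no timeout is supplied, fall back to the
   untimed mq_send/mq_receive. */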
7748 case TARGET_NR_mq_timedsend:
7752 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7754 target_to_host_timespec(&ts, arg5);
7755 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
7756 host_to_target_timespec(arg5, &ts);
7759 ret = get_errno(mq_send(arg1, p, arg3, arg4));
7760 unlock_user (p, arg2, arg3);
7764 case TARGET_NR_mq_timedreceive:
7769 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7771 target_to_host_timespec(&ts, arg5);
7772 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
7773 host_to_target_timespec(arg5, &ts);
7776 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
7777 unlock_user (p, arg2, arg3);
7779 put_user_u32(prio, arg4);
7783 /* Not implemented for now... */
7784 /* case TARGET_NR_mq_notify: */
7787 case TARGET_NR_mq_getsetattr:
7789 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
7792 ret = mq_getattr(arg1, &posix_mq_attr_out);
7793 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
7796 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
7797 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
7804 #ifdef CONFIG_SPLICE
7805 #ifdef TARGET_NR_tee
7808 ret = get_errno(tee(arg1, arg2, arg3, arg4));
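/* splice: the optional 64-bit offsets live in guest memory and are read into
   host loff_t variables before the call. */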
7812 #ifdef TARGET_NR_splice
7813 case TARGET_NR_splice:
7815 loff_t loff_in, loff_out;
7816 loff_t *ploff_in = NULL, *ploff_out = NULL;
7818 get_user_u64(loff_in, arg2);
7819 ploff_in = &loff_in;
7822 get_user_u64(loff_out, arg4);
7823 ploff_out = &loff_out;
7825 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
7829 #ifdef TARGET_NR_vmsplice
7830 case TARGET_NR_vmsplice:
7835 vec = alloca(count * sizeof(struct iovec));
7836 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
7838 ret = get_errno(vmsplice(arg1, vec, count, arg4));
7839 unlock_iovec(vec, arg2, count, 0);
7843 #endif /* CONFIG_SPLICE */
7844 #ifdef CONFIG_EVENTFD
7845 #if defined(TARGET_NR_eventfd)
7846 case TARGET_NR_eventfd:
7847 ret = get_errno(eventfd(arg1, 0));
7850 #if defined(TARGET_NR_eventfd2)
7851 case TARGET_NR_eventfd2:
7852 ret = get_errno(eventfd(arg1, arg2));
7855 #endif /* CONFIG_EVENTFD */
7856 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
7857 case TARGET_NR_fallocate:
7858 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
7861 #if defined(CONFIG_SYNC_FILE_RANGE)
7862 #if defined(TARGET_NR_sync_file_range)
7863 case TARGET_NR_sync_file_range:
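/* On 32-bit ABIs each 64-bit offset arrives as a register pair; MIPS inserts a
   padding register so the pairs start at arg3. */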
7864 #if TARGET_ABI_BITS == 32
7865 #if defined(TARGET_MIPS)
7866 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
7867 target_offset64(arg5, arg6), arg7));
7869 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
7870 target_offset64(arg4, arg5), arg6));
7871 #endif /* !TARGET_MIPS */
7873 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
7877 #if defined(TARGET_NR_sync_file_range2)
7878 case TARGET_NR_sync_file_range2:
7879 /* This is like sync_file_range but the arguments are reordered */
7880 #if TARGET_ABI_BITS == 32
7881 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
7882 target_offset64(arg5, arg6), arg2));
7884 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
7889 #if defined(CONFIG_EPOLL)
7890 #if defined(TARGET_NR_epoll_create)
7891 case TARGET_NR_epoll_create:
7892 ret = get_errno(epoll_create(arg1));
7895 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
7896 case TARGET_NR_epoll_create1:
7897 ret = get_errno(epoll_create1(arg1));
7900 #if defined(TARGET_NR_epoll_ctl)
7901 case TARGET_NR_epoll_ctl:
7903 struct epoll_event ep;
7904 struct epoll_event *epp = 0;
7906 struct target_epoll_event *target_ep;
7907 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
7910 ep.events = tswap32(target_ep->events);
7911 /* The epoll_data_t union is just opaque data to the kernel,
7912 * so we transfer all 64 bits across and need not worry what
7913 * actual data type it is.
7915 ep.data.u64 = tswap64(target_ep->data.u64);
7916 unlock_user_struct(target_ep, arg4, 0);
7919 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
7924 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
7925 #define IMPLEMENT_EPOLL_PWAIT
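/* epoll_wait and epoll_pwait share the event copy-out code below; epoll_pwait
   additionally converts the guest signal mask. */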
7927 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
7928 #if defined(TARGET_NR_epoll_wait)
7929 case TARGET_NR_epoll_wait:
7931 #if defined(IMPLEMENT_EPOLL_PWAIT)
7932 case TARGET_NR_epoll_pwait:
7935 struct target_epoll_event *target_ep;
7936 struct epoll_event *ep;
7938 int maxevents = arg3;
7941 target_ep = lock_user(VERIFY_WRITE, arg2,
7942 maxevents * sizeof(struct target_epoll_event), 1);
7947 ep = alloca(maxevents * sizeof(struct epoll_event));
7950 #if defined(IMPLEMENT_EPOLL_PWAIT)
7951 case TARGET_NR_epoll_pwait:
7953 target_sigset_t *target_set;
7954 sigset_t _set, *set = &_set;
7957 target_set = lock_user(VERIFY_READ, arg5,
7958 sizeof(target_sigset_t), 1);
7960 unlock_user(target_ep, arg2, 0);
7963 target_to_host_sigset(set, target_set);
7964 unlock_user(target_set, arg5, 0);
7969 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
7973 #if defined(TARGET_NR_epoll_wait)
7974 case TARGET_NR_epoll_wait:
7975 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
7979 ret = -TARGET_ENOSYS;
7981 if (!is_error(ret)) {
7983 for (i = 0; i < ret; i++) {
7984 target_ep[i].events = tswap32(ep[i].events);
7985 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
7988 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
7995 gemu_log("qemu: Unsupported syscall: %d\n", num);
7996 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
7997 unimplemented_nowarn:
7999 ret = -TARGET_ENOSYS;
8004 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8007 print_syscall_ret(num, ret);
8010 ret = -TARGET_EFAULT;