4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
31 #include <sys/types.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
45 int __clone2(int (*fn)(void *), void *child_stack_base,
46 size_t stack_size, int flags, void *arg, ...);
48 #include <sys/socket.h>
52 #include <sys/times.h>
55 #include <sys/statfs.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <qemu-common.h>
67 #include <sys/eventfd.h>
70 #define termios host_termios
71 #define winsize host_winsize
72 #define termio host_termio
73 #define sgttyb host_sgttyb /* same as target */
74 #define tchars host_tchars /* same as target */
75 #define ltchars host_ltchars /* same as target */
77 #include <linux/termios.h>
78 #include <linux/unistd.h>
79 #include <linux/utsname.h>
80 #include <linux/cdrom.h>
81 #include <linux/hdreg.h>
82 #include <linux/soundcard.h>
84 #include <linux/mtio.h>
88 #include "linux_loop.h"
89 #include "cpu-uname.h"
92 #include "qemu-common.h"
94 #if defined(CONFIG_USE_NPTL)
95 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
96 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
98 /* XXX: Hardcode the above values. */
99 #define CLONE_NPTL_FLAGS2 0
104 //#include <linux/msdos_fs.h>
105 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
106 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
117 #define _syscall0(type,name) \
118 static type name (void) \
120 return syscall(__NR_##name); \
123 #define _syscall1(type,name,type1,arg1) \
124 static type name (type1 arg1) \
126 return syscall(__NR_##name, arg1); \
129 #define _syscall2(type,name,type1,arg1,type2,arg2) \
130 static type name (type1 arg1,type2 arg2) \
132 return syscall(__NR_##name, arg1, arg2); \
135 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
136 static type name (type1 arg1,type2 arg2,type3 arg3) \
138 return syscall(__NR_##name, arg1, arg2, arg3); \
141 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
142 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
144 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
147 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
149 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
151 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
155 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
156 type5,arg5,type6,arg6) \
157 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
160 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
164 #define __NR_sys_uname __NR_uname
165 #define __NR_sys_faccessat __NR_faccessat
166 #define __NR_sys_fchmodat __NR_fchmodat
167 #define __NR_sys_fchownat __NR_fchownat
168 #define __NR_sys_fstatat64 __NR_fstatat64
169 #define __NR_sys_futimesat __NR_futimesat
170 #define __NR_sys_getcwd1 __NR_getcwd
171 #define __NR_sys_getdents __NR_getdents
172 #define __NR_sys_getdents64 __NR_getdents64
173 #define __NR_sys_getpriority __NR_getpriority
174 #define __NR_sys_linkat __NR_linkat
175 #define __NR_sys_mkdirat __NR_mkdirat
176 #define __NR_sys_mknodat __NR_mknodat
177 #define __NR_sys_newfstatat __NR_newfstatat
178 #define __NR_sys_openat __NR_openat
179 #define __NR_sys_readlinkat __NR_readlinkat
180 #define __NR_sys_renameat __NR_renameat
181 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
182 #define __NR_sys_symlinkat __NR_symlinkat
183 #define __NR_sys_syslog __NR_syslog
184 #define __NR_sys_tgkill __NR_tgkill
185 #define __NR_sys_tkill __NR_tkill
186 #define __NR_sys_unlinkat __NR_unlinkat
187 #define __NR_sys_utimensat __NR_utimensat
188 #define __NR_sys_futex __NR_futex
189 #define __NR_sys_inotify_init __NR_inotify_init
190 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
191 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
193 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
194 #define __NR__llseek __NR_lseek
198 _syscall0(int, gettid)
200 /* This is a replacement for the host gettid() and must return a host
202 static int gettid(void) {
206 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
207 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
208 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
210 _syscall2(int, sys_getpriority, int, which, int, who);
211 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
212 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
213 loff_t *, res, uint, wh);
215 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
216 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
217 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
218 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
220 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
221 _syscall2(int,sys_tkill,int,tid,int,sig)
223 #ifdef __NR_exit_group
224 _syscall1(int,exit_group,int,error_code)
226 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
227 _syscall1(int,set_tid_address,int *,tidptr)
229 #if defined(CONFIG_USE_NPTL)
230 #if defined(TARGET_NR_futex) && defined(__NR_futex)
231 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
232 const struct timespec *,timeout,int *,uaddr2,int,val3)
236 static bitmask_transtbl fcntl_flags_tbl[] = {
237 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
238 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
239 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
240 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
241 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
242 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
243 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
244 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
245 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
246 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
247 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
248 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
249 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
250 #if defined(O_DIRECT)
251 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
/*
 * Copy one field of the host struct utsname into the Linux
 * struct new_utsname layout, truncating to __NEW_UTS_LEN bytes
 * and always NUL-terminating the destination.
 */
#define COPY_UTSNAME_FIELD(dest, src) \
do { \
    /* __NEW_UTS_LEN doesn't include terminating null */ \
    (void) strncpy((dest), (src), __NEW_UTS_LEN); \
    (dest)[__NEW_UTS_LEN] = '\0'; \
} while (0)

/*
 * uname(2) replacement: fill a Linux struct new_utsname from the
 * host's struct utsname.  Returns 0 on success, -1 (errno set by
 * uname()) on failure.
 */
static int sys_uname(struct new_utsname *buf)
{
    struct utsname uts_buf;

    if (uname(&uts_buf) < 0)
        return (-1);

    /*
     * Just in case these have some differences, we
     * translate utsname to new_utsname (which is the
     * struct linux kernel uses).
     */

    /* memset instead of bzero: bzero is deprecated (POSIX.1-2008) */
    memset(buf, 0, sizeof(*buf));
    COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
    COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
    COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
    COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
    COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
    /* domainname only exists in struct utsname with _GNU_SOURCE */
    COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
    return (0);
}

#undef COPY_UTSNAME_FIELD
/*
 * getcwd(2) replacement following the kernel's return convention:
 * on success return the length of the path INCLUDING the
 * terminating NUL; on failure return -1 with errno set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf) + 1;
}
301 * Host system seems to have atfile syscall stubs available. We
302 * now enable them one by one as specified by target syscall_nr.h.
305 #ifdef TARGET_NR_faccessat
306 static int sys_faccessat(int dirfd, const char *pathname, int mode)
308 return (faccessat(dirfd, pathname, mode, 0));
311 #ifdef TARGET_NR_fchmodat
312 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
314 return (fchmodat(dirfd, pathname, mode, 0));
317 #if defined(TARGET_NR_fchownat) && defined(USE_UID16)
318 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
319 gid_t group, int flags)
321 return (fchownat(dirfd, pathname, owner, group, flags));
324 #ifdef __NR_fstatat64
325 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
328 return (fstatat(dirfd, pathname, buf, flags));
331 #ifdef __NR_newfstatat
332 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
335 return (fstatat(dirfd, pathname, buf, flags));
338 #ifdef TARGET_NR_futimesat
/* futimesat(2) wrapper; times == NULL means "set to current time". */
static int sys_futimesat(int dirfd, const char *pathname,
                         const struct timeval times[2])
{
    return (futimesat(dirfd, pathname, times));
}
345 #ifdef TARGET_NR_linkat
346 static int sys_linkat(int olddirfd, const char *oldpath,
347 int newdirfd, const char *newpath, int flags)
349 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
352 #ifdef TARGET_NR_mkdirat
353 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
355 return (mkdirat(dirfd, pathname, mode));
358 #ifdef TARGET_NR_mknodat
359 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
362 return (mknodat(dirfd, pathname, mode, dev));
365 #ifdef TARGET_NR_openat
366 static int sys_openat(int dirfd, const char *pathname, int flags, ...)
369 * open(2) has extra parameter 'mode' when called with
372 if ((flags & O_CREAT) != 0) {
377 * Get the 'mode' parameter and translate it to
381 mode = va_arg(ap, mode_t);
382 mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
385 return (openat(dirfd, pathname, flags, mode));
387 return (openat(dirfd, pathname, flags));
390 #ifdef TARGET_NR_readlinkat
391 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
393 return (readlinkat(dirfd, pathname, buf, bufsiz));
396 #ifdef TARGET_NR_renameat
397 static int sys_renameat(int olddirfd, const char *oldpath,
398 int newdirfd, const char *newpath)
400 return (renameat(olddirfd, oldpath, newdirfd, newpath));
403 #ifdef TARGET_NR_symlinkat
404 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
406 return (symlinkat(oldpath, newdirfd, newpath));
409 #ifdef TARGET_NR_unlinkat
410 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
412 return (unlinkat(dirfd, pathname, flags));
415 #else /* !CONFIG_ATFILE */
418 * Try direct syscalls instead
420 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
421 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
423 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
424 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
426 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
427 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
428 uid_t,owner,gid_t,group,int,flags)
430 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
431 defined(__NR_fstatat64)
432 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
433 struct stat *,buf,int,flags)
435 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
436 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
437 const struct timeval *,times)
439 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
440 defined(__NR_newfstatat)
441 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
442 struct stat *,buf,int,flags)
444 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
445 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
446 int,newdirfd,const char *,newpath,int,flags)
448 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
449 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
451 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
452 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
453 mode_t,mode,dev_t,dev)
455 #if defined(TARGET_NR_openat) && defined(__NR_openat)
456 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
458 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
459 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
460 char *,buf,size_t,bufsize)
462 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
463 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
464 int,newdirfd,const char *,newpath)
466 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
467 _syscall3(int,sys_symlinkat,const char *,oldpath,
468 int,newdirfd,const char *,newpath)
470 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
471 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
474 #endif /* CONFIG_ATFILE */
476 #ifdef CONFIG_UTIMENSAT
477 static int sys_utimensat(int dirfd, const char *pathname,
478 const struct timespec times[2], int flags)
480 if (pathname == NULL)
481 return futimens(dirfd, times);
483 return utimensat(dirfd, pathname, times, flags);
486 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
487 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
488 const struct timespec *,tsp,int,flags)
490 #endif /* CONFIG_UTIMENSAT */
492 #ifdef CONFIG_INOTIFY
493 #include <sys/inotify.h>
495 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* inotify_init(2) wrapper; returns a new inotify fd or -1. */
static int sys_inotify_init(void)
{
    return (inotify_init());
}
501 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* inotify_add_watch(2) wrapper; returns a watch descriptor or -1. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
507 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* inotify_rm_watch(2) wrapper; 0 on success, -1 on error. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
513 #ifdef CONFIG_INOTIFY1
514 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* inotify_init1(2) wrapper; 'flags' may carry IN_NONBLOCK/IN_CLOEXEC. */
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
522 /* Userspace can usually survive runtime without inotify */
523 #undef TARGET_NR_inotify_init
524 #undef TARGET_NR_inotify_init1
525 #undef TARGET_NR_inotify_add_watch
526 #undef TARGET_NR_inotify_rm_watch
527 #endif /* CONFIG_INOTIFY */
530 extern int personality(int);
531 extern int flock(int, int);
532 extern int setfsuid(int);
533 extern int setfsgid(int);
534 extern int setgroups(int, gid_t *);
536 #define ERRNO_TABLE_SIZE 1200
538 /* target_to_host_errno_table[] is initialized from
539 * host_to_target_errno_table[] in syscall_init(). */
540 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
544 * This list is the union of errno values overridden in asm-<arch>/errno.h
545 * minus the errnos that are not actually generic to all archs.
547 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
548 [EIDRM] = TARGET_EIDRM,
549 [ECHRNG] = TARGET_ECHRNG,
550 [EL2NSYNC] = TARGET_EL2NSYNC,
551 [EL3HLT] = TARGET_EL3HLT,
552 [EL3RST] = TARGET_EL3RST,
553 [ELNRNG] = TARGET_ELNRNG,
554 [EUNATCH] = TARGET_EUNATCH,
555 [ENOCSI] = TARGET_ENOCSI,
556 [EL2HLT] = TARGET_EL2HLT,
557 [EDEADLK] = TARGET_EDEADLK,
558 [ENOLCK] = TARGET_ENOLCK,
559 [EBADE] = TARGET_EBADE,
560 [EBADR] = TARGET_EBADR,
561 [EXFULL] = TARGET_EXFULL,
562 [ENOANO] = TARGET_ENOANO,
563 [EBADRQC] = TARGET_EBADRQC,
564 [EBADSLT] = TARGET_EBADSLT,
565 [EBFONT] = TARGET_EBFONT,
566 [ENOSTR] = TARGET_ENOSTR,
567 [ENODATA] = TARGET_ENODATA,
568 [ETIME] = TARGET_ETIME,
569 [ENOSR] = TARGET_ENOSR,
570 [ENONET] = TARGET_ENONET,
571 [ENOPKG] = TARGET_ENOPKG,
572 [EREMOTE] = TARGET_EREMOTE,
573 [ENOLINK] = TARGET_ENOLINK,
574 [EADV] = TARGET_EADV,
575 [ESRMNT] = TARGET_ESRMNT,
576 [ECOMM] = TARGET_ECOMM,
577 [EPROTO] = TARGET_EPROTO,
578 [EDOTDOT] = TARGET_EDOTDOT,
579 [EMULTIHOP] = TARGET_EMULTIHOP,
580 [EBADMSG] = TARGET_EBADMSG,
581 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
582 [EOVERFLOW] = TARGET_EOVERFLOW,
583 [ENOTUNIQ] = TARGET_ENOTUNIQ,
584 [EBADFD] = TARGET_EBADFD,
585 [EREMCHG] = TARGET_EREMCHG,
586 [ELIBACC] = TARGET_ELIBACC,
587 [ELIBBAD] = TARGET_ELIBBAD,
588 [ELIBSCN] = TARGET_ELIBSCN,
589 [ELIBMAX] = TARGET_ELIBMAX,
590 [ELIBEXEC] = TARGET_ELIBEXEC,
591 [EILSEQ] = TARGET_EILSEQ,
592 [ENOSYS] = TARGET_ENOSYS,
593 [ELOOP] = TARGET_ELOOP,
594 [ERESTART] = TARGET_ERESTART,
595 [ESTRPIPE] = TARGET_ESTRPIPE,
596 [ENOTEMPTY] = TARGET_ENOTEMPTY,
597 [EUSERS] = TARGET_EUSERS,
598 [ENOTSOCK] = TARGET_ENOTSOCK,
599 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
600 [EMSGSIZE] = TARGET_EMSGSIZE,
601 [EPROTOTYPE] = TARGET_EPROTOTYPE,
602 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
603 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
604 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
605 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
606 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
607 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
608 [EADDRINUSE] = TARGET_EADDRINUSE,
609 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
610 [ENETDOWN] = TARGET_ENETDOWN,
611 [ENETUNREACH] = TARGET_ENETUNREACH,
612 [ENETRESET] = TARGET_ENETRESET,
613 [ECONNABORTED] = TARGET_ECONNABORTED,
614 [ECONNRESET] = TARGET_ECONNRESET,
615 [ENOBUFS] = TARGET_ENOBUFS,
616 [EISCONN] = TARGET_EISCONN,
617 [ENOTCONN] = TARGET_ENOTCONN,
618 [EUCLEAN] = TARGET_EUCLEAN,
619 [ENOTNAM] = TARGET_ENOTNAM,
620 [ENAVAIL] = TARGET_ENAVAIL,
621 [EISNAM] = TARGET_EISNAM,
622 [EREMOTEIO] = TARGET_EREMOTEIO,
623 [ESHUTDOWN] = TARGET_ESHUTDOWN,
624 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
625 [ETIMEDOUT] = TARGET_ETIMEDOUT,
626 [ECONNREFUSED] = TARGET_ECONNREFUSED,
627 [EHOSTDOWN] = TARGET_EHOSTDOWN,
628 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
629 [EALREADY] = TARGET_EALREADY,
630 [EINPROGRESS] = TARGET_EINPROGRESS,
631 [ESTALE] = TARGET_ESTALE,
632 [ECANCELED] = TARGET_ECANCELED,
633 [ENOMEDIUM] = TARGET_ENOMEDIUM,
634 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
636 [ENOKEY] = TARGET_ENOKEY,
639 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
642 [EKEYREVOKED] = TARGET_EKEYREVOKED,
645 [EKEYREJECTED] = TARGET_EKEYREJECTED,
648 [EOWNERDEAD] = TARGET_EOWNERDEAD,
650 #ifdef ENOTRECOVERABLE
651 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
655 static inline int host_to_target_errno(int err)
657 if(host_to_target_errno_table[err])
658 return host_to_target_errno_table[err];
662 static inline int target_to_host_errno(int err)
664 if (target_to_host_errno_table[err])
665 return target_to_host_errno_table[err];
669 static inline abi_long get_errno(abi_long ret)
672 return -host_to_target_errno(errno);
677 static inline int is_error(abi_long ret)
679 return (abi_ulong)ret >= (abi_ulong)(-4096);
/* strerror() for a TARGET errno: translate to the host numbering
 * first, then use the host's message table. */
char *target_strerror(int err)
{
    return strerror(target_to_host_errno(err));
}
687 static abi_ulong target_brk;
688 static abi_ulong target_original_brk;
690 void target_set_brk(abi_ulong new_brk)
692 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
695 /* do_brk() must return target values and target errnos. */
696 abi_long do_brk(abi_ulong new_brk)
699 abi_long mapped_addr;
704 if (new_brk < target_original_brk)
707 brk_page = HOST_PAGE_ALIGN(target_brk);
709 /* If the new brk is less than this, set it and we're done... */
710 if (new_brk < brk_page) {
711 target_brk = new_brk;
715 /* We need to allocate more memory after the brk... */
716 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
717 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
718 PROT_READ|PROT_WRITE,
719 MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));
721 #if defined(TARGET_ALPHA)
722 /* We (partially) emulate OSF/1 on Alpha, which requires we
723 return a proper errno, not an unchanged brk value. */
724 if (is_error(mapped_addr)) {
725 return -TARGET_ENOMEM;
729 if (!is_error(mapped_addr)) {
730 target_brk = new_brk;
735 static inline abi_long copy_from_user_fdset(fd_set *fds,
736 abi_ulong target_fds_addr,
740 abi_ulong b, *target_fds;
742 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
743 if (!(target_fds = lock_user(VERIFY_READ,
745 sizeof(abi_ulong) * nw,
747 return -TARGET_EFAULT;
751 for (i = 0; i < nw; i++) {
752 /* grab the abi_ulong */
753 __get_user(b, &target_fds[i]);
754 for (j = 0; j < TARGET_ABI_BITS; j++) {
755 /* check the bit inside the abi_ulong */
762 unlock_user(target_fds, target_fds_addr, 0);
767 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
773 abi_ulong *target_fds;
775 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
776 if (!(target_fds = lock_user(VERIFY_WRITE,
778 sizeof(abi_ulong) * nw,
780 return -TARGET_EFAULT;
783 for (i = 0; i < nw; i++) {
785 for (j = 0; j < TARGET_ABI_BITS; j++) {
786 v |= ((FD_ISSET(k, fds) != 0) << j);
789 __put_user(v, &target_fds[i]);
792 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
797 #if defined(__alpha__)
803 static inline abi_long host_to_target_clock_t(long ticks)
805 #if HOST_HZ == TARGET_HZ
808 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
812 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
813 const struct rusage *rusage)
815 struct target_rusage *target_rusage;
817 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
818 return -TARGET_EFAULT;
819 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
820 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
821 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
822 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
823 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
824 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
825 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
826 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
827 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
828 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
829 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
830 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
831 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
832 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
833 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
834 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
835 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
836 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
837 unlock_user_struct(target_rusage, target_addr, 1);
842 static inline rlim_t target_to_host_rlim(target_ulong target_rlim)
844 if (target_rlim == TARGET_RLIM_INFINITY)
845 return RLIM_INFINITY;
847 return tswapl(target_rlim);
850 static inline target_ulong host_to_target_rlim(rlim_t rlim)
852 if (rlim == RLIM_INFINITY || rlim != (target_long)rlim)
853 return TARGET_RLIM_INFINITY;
858 static inline abi_long copy_from_user_timeval(struct timeval *tv,
859 abi_ulong target_tv_addr)
861 struct target_timeval *target_tv;
863 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
864 return -TARGET_EFAULT;
866 __get_user(tv->tv_sec, &target_tv->tv_sec);
867 __get_user(tv->tv_usec, &target_tv->tv_usec);
869 unlock_user_struct(target_tv, target_tv_addr, 0);
874 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
875 const struct timeval *tv)
877 struct target_timeval *target_tv;
879 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
880 return -TARGET_EFAULT;
882 __put_user(tv->tv_sec, &target_tv->tv_sec);
883 __put_user(tv->tv_usec, &target_tv->tv_usec);
885 unlock_user_struct(target_tv, target_tv_addr, 1);
890 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
893 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
894 abi_ulong target_mq_attr_addr)
896 struct target_mq_attr *target_mq_attr;
898 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
899 target_mq_attr_addr, 1))
900 return -TARGET_EFAULT;
902 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
903 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
904 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
905 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
907 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
912 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
913 const struct mq_attr *attr)
915 struct target_mq_attr *target_mq_attr;
917 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
918 target_mq_attr_addr, 0))
919 return -TARGET_EFAULT;
921 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
922 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
923 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
924 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
926 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
932 /* do_select() must return target values and target errnos. */
933 static abi_long do_select(int n,
934 abi_ulong rfd_addr, abi_ulong wfd_addr,
935 abi_ulong efd_addr, abi_ulong target_tv_addr)
937 fd_set rfds, wfds, efds;
938 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
939 struct timeval tv, *tv_ptr;
943 if (copy_from_user_fdset(&rfds, rfd_addr, n))
944 return -TARGET_EFAULT;
950 if (copy_from_user_fdset(&wfds, wfd_addr, n))
951 return -TARGET_EFAULT;
957 if (copy_from_user_fdset(&efds, efd_addr, n))
958 return -TARGET_EFAULT;
964 if (target_tv_addr) {
965 if (copy_from_user_timeval(&tv, target_tv_addr))
966 return -TARGET_EFAULT;
972 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
974 if (!is_error(ret)) {
975 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
976 return -TARGET_EFAULT;
977 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
978 return -TARGET_EFAULT;
979 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
980 return -TARGET_EFAULT;
982 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
983 return -TARGET_EFAULT;
989 static abi_long do_pipe2(int host_pipe[], int flags)
992 return pipe2(host_pipe, flags);
998 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
999 int flags, int is_pipe2)
1003 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1006 return get_errno(ret);
1008 /* Several targets have special calling conventions for the original
1009 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1011 #if defined(TARGET_ALPHA)
1012 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1013 return host_pipe[0];
1014 #elif defined(TARGET_MIPS)
1015 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1016 return host_pipe[0];
1017 #elif defined(TARGET_SH4)
1018 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1019 return host_pipe[0];
1023 if (put_user_s32(host_pipe[0], pipedes)
1024 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1025 return -TARGET_EFAULT;
1026 return get_errno(ret);
1029 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1030 abi_ulong target_addr,
1033 struct target_ip_mreqn *target_smreqn;
1035 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1037 return -TARGET_EFAULT;
1038 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1039 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1040 if (len == sizeof(struct target_ip_mreqn))
1041 mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
1042 unlock_user(target_smreqn, target_addr, 0);
1047 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1048 abi_ulong target_addr,
1051 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1052 sa_family_t sa_family;
1053 struct target_sockaddr *target_saddr;
1055 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1057 return -TARGET_EFAULT;
1059 sa_family = tswap16(target_saddr->sa_family);
1061 /* Oops. The caller might send a incomplete sun_path; sun_path
1062 * must be terminated by \0 (see the manual page), but
1063 * unfortunately it is quite common to specify sockaddr_un
1064 * length as "strlen(x->sun_path)" while it should be
1065 * "strlen(...) + 1". We'll fix that here if needed.
1066 * Linux kernel has a similar feature.
1069 if (sa_family == AF_UNIX) {
1070 if (len < unix_maxlen && len > 0) {
1071 char *cp = (char*)target_saddr;
1073 if ( cp[len-1] && !cp[len] )
1076 if (len > unix_maxlen)
1080 memcpy(addr, target_saddr, len);
1081 addr->sa_family = sa_family;
1082 unlock_user(target_saddr, target_addr, 0);
1087 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1088 struct sockaddr *addr,
1091 struct target_sockaddr *target_saddr;
1093 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1095 return -TARGET_EFAULT;
1096 memcpy(target_saddr, addr, len);
1097 target_saddr->sa_family = tswap16(addr->sa_family);
1098 unlock_user(target_saddr, target_addr, len);
1103 /* ??? Should this also swap msgh->name? */
1104 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1105 struct target_msghdr *target_msgh)
1107 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1108 abi_long msg_controllen;
1109 abi_ulong target_cmsg_addr;
1110 struct target_cmsghdr *target_cmsg;
1111 socklen_t space = 0;
1113 msg_controllen = tswapl(target_msgh->msg_controllen);
1114 if (msg_controllen < sizeof (struct target_cmsghdr))
1116 target_cmsg_addr = tswapl(target_msgh->msg_control);
1117 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1119 return -TARGET_EFAULT;
1121 while (cmsg && target_cmsg) {
1122 void *data = CMSG_DATA(cmsg);
1123 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1125 int len = tswapl(target_cmsg->cmsg_len)
1126 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1128 space += CMSG_SPACE(len);
1129 if (space > msgh->msg_controllen) {
1130 space -= CMSG_SPACE(len);
1131 gemu_log("Host cmsg overflow\n");
1135 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1136 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1137 cmsg->cmsg_len = CMSG_LEN(len);
1139 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1140 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1141 memcpy(data, target_data, len);
1143 int *fd = (int *)data;
1144 int *target_fd = (int *)target_data;
1145 int i, numfds = len / sizeof(int);
1147 for (i = 0; i < numfds; i++)
1148 fd[i] = tswap32(target_fd[i]);
1151 cmsg = CMSG_NXTHDR(msgh, cmsg);
1152 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1154 unlock_user(target_cmsg, target_cmsg_addr, 0);
1156 msgh->msg_controllen = space;
/*
 * Convert host ancillary data (control messages) received by recvmsg()
 * back into the guest's msghdr: byteswap cmsg headers, convert SCM_RIGHTS
 * file-descriptor arrays element-wise, and copy any other cmsg payload
 * verbatim (logging it as unsupported).  Returns -TARGET_EFAULT if the
 * guest's control buffer cannot be mapped.
 */
1160 /* ??? Should this also swap msgh->name? */
1161 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1162 struct msghdr *msgh)
1164 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1165 abi_long msg_controllen;
1166 abi_ulong target_cmsg_addr;
1167 struct target_cmsghdr *target_cmsg;
1168 socklen_t space = 0;
1170 msg_controllen = tswapl(target_msgh->msg_controllen);
/* Guest buffer too small to hold even one cmsg header: nothing to convert. */
1171 if (msg_controllen < sizeof (struct target_cmsghdr))
1173 target_cmsg_addr = tswapl(target_msgh->msg_control);
1174 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1176 return -TARGET_EFAULT;
1178 while (cmsg && target_cmsg) {
1179 void *data = CMSG_DATA(cmsg);
1180 void *target_data = TARGET_CMSG_DATA(target_cmsg);
/* Payload length = total cmsg length minus the (host) aligned header. */
1182 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1184 space += TARGET_CMSG_SPACE(len);
/* Stop before overflowing the guest-supplied control buffer. */
1185 if (space > msg_controllen) {
1186 space -= TARGET_CMSG_SPACE(len);
1187 gemu_log("Target cmsg overflow\n");
1191 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1192 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1193 target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
1195 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1196 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1197 memcpy(target_data, data, len);
/* SCM_RIGHTS: an array of host ints; swap each fd into guest byte order. */
1199 int *fd = (int *)data;
1200 int *target_fd = (int *)target_data;
1201 int i, numfds = len / sizeof(int);
1203 for (i = 0; i < numfds; i++)
1204 target_fd[i] = tswap32(fd[i]);
1207 cmsg = CMSG_NXTHDR(msgh, cmsg);
1208 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1210 unlock_user(target_cmsg, target_cmsg_addr, space);
/* Report back how much control data was actually produced. */
1212 target_msgh->msg_controllen = tswapl(space);
/*
 * Emulate setsockopt(2) for the guest: translate target-level option
 * names/levels and option payloads (int values, ip_mreq{,n,_source}
 * structures) into host form, then invoke the host setsockopt().
 * Returns a host result converted to a target errno on failure,
 * -TARGET_EINVAL for bad lengths, -TARGET_EFAULT for bad guest pointers,
 * and -TARGET_ENOPROTOOPT for unsupported level/option combinations.
 */
1216 /* do_setsockopt() Must return target values and target errnos. */
1217 static abi_long do_setsockopt(int sockfd, int level, int optname,
1218 abi_ulong optval_addr, socklen_t optlen)
1222 struct ip_mreqn *ip_mreq;
1223 struct ip_mreq_source *ip_mreq_source;
1227 /* TCP options all take an 'int' value. */
1228 if (optlen < sizeof(uint32_t))
1229 return -TARGET_EINVAL;
1231 if (get_user_u32(val, optval_addr))
1232 return -TARGET_EFAULT;
1233 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1240 case IP_ROUTER_ALERT:
1244 case IP_MTU_DISCOVER:
1250 case IP_MULTICAST_TTL:
1251 case IP_MULTICAST_LOOP:
/* These IP options accept either a full int or a single byte from the
 * guest; widen a byte-sized value before calling the host. */
1253 if (optlen >= sizeof(uint32_t)) {
1254 if (get_user_u32(val, optval_addr))
1255 return -TARGET_EFAULT;
1256 } else if (optlen >= 1) {
1257 if (get_user_u8(val, optval_addr))
1258 return -TARGET_EFAULT;
1260 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1262 case IP_ADD_MEMBERSHIP:
1263 case IP_DROP_MEMBERSHIP:
/* Accept either the short (ip_mreq) or long (ip_mreqn) layout. */
1264 if (optlen < sizeof (struct target_ip_mreq) ||
1265 optlen > sizeof (struct target_ip_mreqn))
1266 return -TARGET_EINVAL;
1268 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1269 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1270 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1273 case IP_BLOCK_SOURCE:
1274 case IP_UNBLOCK_SOURCE:
1275 case IP_ADD_SOURCE_MEMBERSHIP:
1276 case IP_DROP_SOURCE_MEMBERSHIP:
1277 if (optlen != sizeof (struct target_ip_mreq_source))
1278 return -TARGET_EINVAL;
/* NOTE(review): layout of target_ip_mreq_source is assumed to match the
 * host struct here (passed through without conversion) — confirm. */
1280 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1281 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1282 unlock_user (ip_mreq_source, optval_addr, 0);
1289 case TARGET_SOL_SOCKET:
/* Map each TARGET_SO_* constant onto the host SO_* value; all of these
 * take a plain int argument which is read below the switch. */
1291 /* Options with 'int' argument. */
1292 case TARGET_SO_DEBUG:
1295 case TARGET_SO_REUSEADDR:
1296 optname = SO_REUSEADDR;
1298 case TARGET_SO_TYPE:
1301 case TARGET_SO_ERROR:
1304 case TARGET_SO_DONTROUTE:
1305 optname = SO_DONTROUTE;
1307 case TARGET_SO_BROADCAST:
1308 optname = SO_BROADCAST;
1310 case TARGET_SO_SNDBUF:
1311 optname = SO_SNDBUF;
1313 case TARGET_SO_RCVBUF:
1314 optname = SO_RCVBUF;
1316 case TARGET_SO_KEEPALIVE:
1317 optname = SO_KEEPALIVE;
1319 case TARGET_SO_OOBINLINE:
1320 optname = SO_OOBINLINE;
1322 case TARGET_SO_NO_CHECK:
1323 optname = SO_NO_CHECK;
1325 case TARGET_SO_PRIORITY:
1326 optname = SO_PRIORITY;
1329 case TARGET_SO_BSDCOMPAT:
1330 optname = SO_BSDCOMPAT;
1333 case TARGET_SO_PASSCRED:
1334 optname = SO_PASSCRED;
1336 case TARGET_SO_TIMESTAMP:
1337 optname = SO_TIMESTAMP;
1339 case TARGET_SO_RCVLOWAT:
1340 optname = SO_RCVLOWAT;
1342 case TARGET_SO_RCVTIMEO:
1343 optname = SO_RCVTIMEO;
1345 case TARGET_SO_SNDTIMEO:
1346 optname = SO_SNDTIMEO;
1352 if (optlen < sizeof(uint32_t))
1353 return -TARGET_EINVAL;
1355 if (get_user_u32(val, optval_addr))
1356 return -TARGET_EFAULT;
1357 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1361 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
1362 ret = -TARGET_ENOPROTOOPT;
/*
 * Emulate getsockopt(2) for the guest: call the host getsockopt() for the
 * supported int-valued options, then write the value and its length back
 * into guest memory (narrowing to a byte when the guest asked for < int
 * and the value fits).  Options whose result is not a single integer
 * (SO_LINGER, timeouts, SO_PEERCRED/PEERNAME) are not yet handled.
 */
1367 /* do_getsockopt() Must return target values and target errnos. */
1368 static abi_long do_getsockopt(int sockfd, int level, int optname,
1369 abi_ulong optval_addr, abi_ulong optlen)
1376 case TARGET_SOL_SOCKET:
1379 case TARGET_SO_LINGER:
1380 case TARGET_SO_RCVTIMEO:
1381 case TARGET_SO_SNDTIMEO:
1382 case TARGET_SO_PEERCRED:
1383 case TARGET_SO_PEERNAME:
1384 /* These don't just return a single integer */
1391 /* TCP options all take an 'int' value. */
1393 if (get_user_u32(len, optlen))
1394 return -TARGET_EFAULT;
1396 return -TARGET_EINVAL;
1398 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv))
;
1404 if (put_user_u32(val, optval_addr))
1405 return -TARGET_EFAULT;
1407 if (put_user_u8(val, optval_addr))
1408 return -TARGET_EFAULT;
1410 if (put_user_u32(len, optlen))
1411 return -TARGET_EFAULT;
1418 case IP_ROUTER_ALERT:
1422 case IP_MTU_DISCOVER:
1428 case IP_MULTICAST_TTL:
1429 case IP_MULTICAST_LOOP:
1430 if (get_user_u32(len, optlen))
1431 return -TARGET_EFAULT;
1433 return -TARGET_EINVAL;
1435 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
/* Guest asked for less than an int: write a single byte if the value
 * fits in one (0..254 per the visible range check). */
1438 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1440 if (put_user_u32(len, optlen)
1441 || put_user_u8(val, optval_addr))
1442 return -TARGET_EFAULT;
1444 if (len > sizeof(int))
1446 if (put_user_u32(len, optlen)
1447 || put_user_u32(val, optval_addr))
1448 return -TARGET_EFAULT;
1452 ret = -TARGET_ENOPROTOOPT;
1458 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1460 ret = -TARGET_EOPNOTSUPP;
/*
 * Map a guest iovec array into host struct iovec entries, locking each
 * buffer for the given access type.  A zero-length entry yields a NULL
 * iov_base.  Returns 0 on success, -TARGET_EFAULT if the guest iovec
 * array itself cannot be mapped.
 */
1467 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1468 * other lock functions have a return code of 0 for failure.
1470 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1471 int count, int copy)
1473 struct target_iovec *target_vec;
1477 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1479 return -TARGET_EFAULT;
1480 for(i = 0;i < count; i++) {
1481 base = tswapl(target_vec[i].iov_base);
1482 vec[i].iov_len = tswapl(target_vec[i].iov_len);
1483 if (vec[i].iov_len != 0) {
1484 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1485 /* Don't check lock_user return value. We must call writev even
1486 if a element has invalid base address. */
1488 /* zero length pointer is ignored */
1489 vec[i].iov_base = NULL;
1492 unlock_user (target_vec, target_addr, 0);
/*
 * Release the guest buffers previously mapped by lock_iovec(), writing
 * back each buffer's contents when 'copy' is set (i.e. after a read-style
 * operation).  Returns -TARGET_EFAULT if the guest iovec array cannot be
 * re-mapped.
 */
1496 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1497 int count, int copy)
1499 struct target_iovec *target_vec;
1503 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1505 return -TARGET_EFAULT;
1506 for(i = 0;i < count; i++) {
1507 if (target_vec[i].iov_base) {
1508 base = tswapl(target_vec[i].iov_base);
1509 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1512 unlock_user (target_vec, target_addr, 0);
/*
 * Emulate socket(2).  On MIPS the guest SOCK_* constants differ from the
 * host's, so they are remapped first.  PF_NETLINK sockets are refused
 * with EAFNOSUPPORT (netlink is not emulated).
 */
1517 /* do_socket() Must return target values and target errnos. */
1518 static abi_long do_socket(int domain, int type, int protocol)
1520 #if defined(TARGET_MIPS)
1522 case TARGET_SOCK_DGRAM:
1525 case TARGET_SOCK_STREAM:
1528 case TARGET_SOCK_RAW:
1531 case TARGET_SOCK_RDM:
1534 case TARGET_SOCK_SEQPACKET:
1535 type = SOCK_SEQPACKET;
1537 case TARGET_SOCK_PACKET:
1542 if (domain == PF_NETLINK)
1543 return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */
1544 return get_errno(socket(domain, type, protocol));
/*
 * Emulate bind(2): reject negative address lengths, convert the guest
 * sockaddr into a host sockaddr on the stack, then call the host bind().
 */
1547 /* do_bind() Must return target values and target errnos. */
1548 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1554 if ((int)addrlen < 0) {
1555 return -TARGET_EINVAL;
/* +1 byte of slack; presumably for AF_UNIX path NUL handling — confirm. */
1558 addr = alloca(addrlen+1);
1560 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1564 return get_errno(bind(sockfd, addr, addrlen));
/*
 * Emulate connect(2): validate addrlen, convert the guest sockaddr to
 * host form, then call the host connect().
 */
1567 /* do_connect() Must return target values and target errnos. */
1568 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1574 if ((int)addrlen < 0) {
1575 return -TARGET_EINVAL;
1578 addr = alloca(addrlen);
1580 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1584 return get_errno(connect(sockfd, addr, addrlen));
/*
 * Shared implementation of sendmsg(2)/recvmsg(2) ('send' selects which).
 * Builds a host msghdr from the guest target_msghdr: converts the name,
 * locks the iovec, and translates ancillary data in the appropriate
 * direction before/after the host call.
 */
1587 /* do_sendrecvmsg() Must return target values and target errnos. */
1588 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1589 int flags, int send)
1592 struct target_msghdr *msgp;
1596 abi_ulong target_vec;
1599 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1603 return -TARGET_EFAULT;
1604 if (msgp->msg_name) {
1605 msg.msg_namelen = tswap32(msgp->msg_namelen);
1606 msg.msg_name = alloca(msg.msg_namelen);
1607 ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1610 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1614 msg.msg_name = NULL;
1615 msg.msg_namelen = 0;
/* Host cmsgs can be larger than the guest's (alignment/layout), so
 * reserve twice the guest control length — presumably a safety margin. */
1617 msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1618 msg.msg_control = alloca(msg.msg_controllen);
1619 msg.msg_flags = tswap32(msgp->msg_flags);
1621 count = tswapl(msgp->msg_iovlen);
1622 vec = alloca(count * sizeof(struct iovec));
1623 target_vec = tswapl(msgp->msg_iov);
1624 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1625 msg.msg_iovlen = count;
1629 ret = target_to_host_cmsg(&msg, msgp);
1631 ret = get_errno(sendmsg(fd, &msg, flags));
1633 ret = get_errno(recvmsg(fd, &msg, flags));
1634 if (!is_error(ret)) {
1636 ret = host_to_target_cmsg(msgp, &msg);
1641 unlock_iovec(vec, target_vec, count, !send);
1642 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
/*
 * Emulate accept(2).  A NULL guest address means the caller doesn't want
 * the peer address, so accept directly.  Otherwise read/validate the
 * guest's addrlen (Linux returns EINVAL for a bad addrlen pointer), call
 * the host accept(), and write the peer address and its length back.
 */
1646 /* do_accept() Must return target values and target errnos. */
1647 static abi_long do_accept(int fd, abi_ulong target_addr,
1648 abi_ulong target_addrlen_addr)
1654 if (target_addr == 0)
1655 return get_errno(accept(fd, NULL, NULL));
1657 /* linux returns EINVAL if addrlen pointer is invalid */
1658 if (get_user_u32(addrlen, target_addrlen_addr))
1659 return -TARGET_EINVAL;
1661 if ((int)addrlen < 0) {
1662 return -TARGET_EINVAL;
1665 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1666 return -TARGET_EINVAL;
1668 addr = alloca(addrlen);
1670 ret = get_errno(accept(fd, addr, &addrlen));
1671 if (!is_error(ret)) {
1672 host_to_target_sockaddr(target_addr, addr, addrlen);
1673 if (put_user_u32(addrlen, target_addrlen_addr))
1674 ret = -TARGET_EFAULT;
/*
 * Emulate getpeername(2): read and validate the guest addrlen, call the
 * host getpeername(), then convert the resulting sockaddr and updated
 * length back into guest memory.
 */
1679 /* do_getpeername() Must return target values and target errnos. */
1680 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1681 abi_ulong target_addrlen_addr)
1687 if (get_user_u32(addrlen, target_addrlen_addr))
1688 return -TARGET_EFAULT;
1690 if ((int)addrlen < 0) {
1691 return -TARGET_EINVAL;
1694 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1695 return -TARGET_EFAULT;
1697 addr = alloca(addrlen);
1699 ret = get_errno(getpeername(fd, addr, &addrlen));
1700 if (!is_error(ret)) {
1701 host_to_target_sockaddr(target_addr, addr, addrlen);
1702 if (put_user_u32(addrlen, target_addrlen_addr))
1703 ret = -TARGET_EFAULT;
/*
 * Emulate getsockname(2): same pattern as do_getpeername() — validate the
 * guest addrlen, call the host syscall, write back address and length.
 */
1708 /* do_getsockname() Must return target values and target errnos. */
1709 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1710 abi_ulong target_addrlen_addr)
1716 if (get_user_u32(addrlen, target_addrlen_addr))
1717 return -TARGET_EFAULT;
1719 if ((int)addrlen < 0) {
1720 return -TARGET_EINVAL;
1723 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1724 return -TARGET_EFAULT;
1726 addr = alloca(addrlen);
1728 ret = get_errno(getsockname(fd, addr, &addrlen));
1729 if (!is_error(ret)) {
1730 host_to_target_sockaddr(target_addr, addr, addrlen);
1731 if (put_user_u32(addrlen, target_addrlen_addr))
1732 ret = -TARGET_EFAULT;
/*
 * Emulate socketpair(2): create the host pair, then store the two fds
 * as consecutive 32-bit ints at the guest address on success.
 */
1737 /* do_socketpair() Must return target values and target errnos. */
1738 static abi_long do_socketpair(int domain, int type, int protocol,
1739 abi_ulong target_tab_addr)
1744 ret = get_errno(socketpair(domain, type, protocol, tab));
1745 if (!is_error(ret)) {
1746 if (put_user_s32(tab[0], target_tab_addr)
1747 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1748 ret = -TARGET_EFAULT;
/*
 * Emulate send(2)/sendto(2): lock the guest message buffer for reading;
 * if a destination address is given, convert it to a host sockaddr and
 * use sendto(), otherwise use plain send().
 */
1753 /* do_sendto() Must return target values and target errnos. */
1754 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1755 abi_ulong target_addr, socklen_t addrlen)
1761 if ((int)addrlen < 0) {
1762 return -TARGET_EINVAL;
1765 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1767 return -TARGET_EFAULT;
1769 addr = alloca(addrlen);
1770 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1772 unlock_user(host_msg, msg, 0);
1775 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1777 ret = get_errno(send(fd, host_msg, len, flags));
1779 unlock_user(host_msg, msg, 0);
/*
 * Emulate recv(2)/recvfrom(2): lock the guest buffer for writing; when a
 * source-address buffer is supplied, read its length, call recvfrom(),
 * and on success convert the source sockaddr plus updated length back to
 * the guest.  Without an address buffer, plain recv() is used.
 */
1783 /* do_recvfrom() Must return target values and target errnos. */
1784 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1785 abi_ulong target_addr,
1786 abi_ulong target_addrlen)
1793 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
1795 return -TARGET_EFAULT;
1797 if (get_user_u32(addrlen, target_addrlen)) {
1798 ret = -TARGET_EFAULT;
1801 if ((int)addrlen < 0) {
1802 ret = -TARGET_EINVAL;
1805 addr = alloca(addrlen);
1806 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
1808 addr = NULL; /* To keep compiler quiet. */
1809 ret = get_errno(recv(fd, host_msg, len, flags));
1811 if (!is_error(ret)) {
1813 host_to_target_sockaddr(target_addr, addr, addrlen);
1814 if (put_user_u32(addrlen, target_addrlen)) {
1815 ret = -TARGET_EFAULT;
/* Success path: write the received bytes back to the guest buffer. */
1819 unlock_user(host_msg, msg, len);
/* Error path: release the buffer without copying anything back. */
1822 unlock_user(host_msg, msg, 0);
/*
 * Demultiplex the socketcall(2) syscall used on several architectures:
 * 'num' selects the socket operation and 'vptr' points at an array of
 * abi_ulong arguments in guest memory.  Each case reads its arguments
 * word-by-word (stride n = sizeof(abi_ulong)) and dispatches to the
 * matching do_* helper above.  Unknown operations log and return
 * -TARGET_ENOSYS.
 */
1827 #ifdef TARGET_NR_socketcall
1828 /* do_socketcall() Must return target values and target errnos. */
1829 static abi_long do_socketcall(int num, abi_ulong vptr)
1832 const int n = sizeof(abi_ulong);
1837 abi_ulong domain, type, protocol;
1839 if (get_user_ual(domain, vptr)
1840 || get_user_ual(type, vptr + n)
1841 || get_user_ual(protocol, vptr + 2 * n))
1842 return -TARGET_EFAULT;
1844 ret = do_socket(domain, type, protocol);
1850 abi_ulong target_addr;
1853 if (get_user_ual(sockfd, vptr)
1854 || get_user_ual(target_addr, vptr + n)
1855 || get_user_ual(addrlen, vptr + 2 * n))
1856 return -TARGET_EFAULT;
1858 ret = do_bind(sockfd, target_addr, addrlen);
1861 case SOCKOP_connect:
1864 abi_ulong target_addr;
1867 if (get_user_ual(sockfd, vptr)
1868 || get_user_ual(target_addr, vptr + n)
1869 || get_user_ual(addrlen, vptr + 2 * n))
1870 return -TARGET_EFAULT;
1872 ret = do_connect(sockfd, target_addr, addrlen);
1877 abi_ulong sockfd, backlog;
1879 if (get_user_ual(sockfd, vptr)
1880 || get_user_ual(backlog, vptr + n))
1881 return -TARGET_EFAULT;
/* listen() needs no argument conversion; call the host directly. */
1883 ret = get_errno(listen(sockfd, backlog));
1889 abi_ulong target_addr, target_addrlen;
1891 if (get_user_ual(sockfd, vptr)
1892 || get_user_ual(target_addr, vptr + n)
1893 || get_user_ual(target_addrlen, vptr + 2 * n))
1894 return -TARGET_EFAULT;
1896 ret = do_accept(sockfd, target_addr, target_addrlen);
1899 case SOCKOP_getsockname:
1902 abi_ulong target_addr, target_addrlen;
1904 if (get_user_ual(sockfd, vptr)
1905 || get_user_ual(target_addr, vptr + n)
1906 || get_user_ual(target_addrlen, vptr + 2 * n))
1907 return -TARGET_EFAULT;
1909 ret = do_getsockname(sockfd, target_addr, target_addrlen);
1912 case SOCKOP_getpeername:
1915 abi_ulong target_addr, target_addrlen;
1917 if (get_user_ual(sockfd, vptr)
1918 || get_user_ual(target_addr, vptr + n)
1919 || get_user_ual(target_addrlen, vptr + 2 * n))
1920 return -TARGET_EFAULT;
1922 ret = do_getpeername(sockfd, target_addr, target_addrlen);
1925 case SOCKOP_socketpair:
1927 abi_ulong domain, type, protocol;
1930 if (get_user_ual(domain, vptr)
1931 || get_user_ual(type, vptr + n)
1932 || get_user_ual(protocol, vptr + 2 * n)
1933 || get_user_ual(tab, vptr + 3 * n))
1934 return -TARGET_EFAULT;
1936 ret = do_socketpair(domain, type, protocol, tab);
/* send(): implemented as sendto() with no destination address. */
1946 if (get_user_ual(sockfd, vptr)
1947 || get_user_ual(msg, vptr + n)
1948 || get_user_ual(len, vptr + 2 * n)
1949 || get_user_ual(flags, vptr + 3 * n))
1950 return -TARGET_EFAULT;
1952 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
/* recv(): implemented as recvfrom() with no source-address buffer. */
1962 if (get_user_ual(sockfd, vptr)
1963 || get_user_ual(msg, vptr + n)
1964 || get_user_ual(len, vptr + 2 * n)
1965 || get_user_ual(flags, vptr + 3 * n))
1966 return -TARGET_EFAULT;
1968 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
1980 if (get_user_ual(sockfd, vptr)
1981 || get_user_ual(msg, vptr + n)
1982 || get_user_ual(len, vptr + 2 * n)
1983 || get_user_ual(flags, vptr + 3 * n)
1984 || get_user_ual(addr, vptr + 4 * n)
1985 || get_user_ual(addrlen, vptr + 5 * n))
1986 return -TARGET_EFAULT;
1988 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
1991 case SOCKOP_recvfrom:
2000 if (get_user_ual(sockfd, vptr)
2001 || get_user_ual(msg, vptr + n)
2002 || get_user_ual(len, vptr + 2 * n)
2003 || get_user_ual(flags, vptr + 3 * n)
2004 || get_user_ual(addr, vptr + 4 * n)
2005 || get_user_ual(addrlen, vptr + 5 * n))
2006 return -TARGET_EFAULT;
2008 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2011 case SOCKOP_shutdown:
2013 abi_ulong sockfd, how;
2015 if (get_user_ual(sockfd, vptr)
2016 || get_user_ual(how, vptr + n))
2017 return -TARGET_EFAULT;
2019 ret = get_errno(shutdown(sockfd, how));
2022 case SOCKOP_sendmsg:
2023 case SOCKOP_recvmsg:
2026 abi_ulong target_msg;
2029 if (get_user_ual(fd, vptr)
2030 || get_user_ual(target_msg, vptr + n)
2031 || get_user_ual(flags, vptr + 2 * n))
2032 return -TARGET_EFAULT;
2034 ret = do_sendrecvmsg(fd, target_msg, flags,
2035 (num == SOCKOP_sendmsg));
2038 case SOCKOP_setsockopt:
2046 if (get_user_ual(sockfd, vptr)
2047 || get_user_ual(level, vptr + n)
2048 || get_user_ual(optname, vptr + 2 * n)
2049 || get_user_ual(optval, vptr + 3 * n)
2050 || get_user_ual(optlen, vptr + 4 * n))
2051 return -TARGET_EFAULT;
2053 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2056 case SOCKOP_getsockopt:
2064 if (get_user_ual(sockfd, vptr)
2065 || get_user_ual(level, vptr + n)
2066 || get_user_ual(optname, vptr + 2 * n)
2067 || get_user_ual(optval, vptr + 3 * n)
2068 || get_user_ual(optlen, vptr + 4 * n))
2069 return -TARGET_EFAULT;
2071 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2075 gemu_log("Unsupported socketcall: %d\n", num);
2076 ret = -TARGET_ENOSYS;
/* Bookkeeping table for guest shared-memory attachments (shmat/shmdt);
 * a fixed-size array of region descriptors. */
2083 #define N_SHM_REGIONS 32
2085 static struct shm_region {
2088 } shm_regions[N_SHM_REGIONS];
/* Guest-ABI layout of struct ipc_perm, shared by the SysV IPC ds
 * structures below; converted by {target_to_host,host_to_target}_ipc_perm. */
2090 struct target_ipc_perm
2097 unsigned short int mode;
2098 unsigned short int __pad1;
2099 unsigned short int __seq;
2100 unsigned short int __pad2;
2101 abi_ulong __unused1;
2102 abi_ulong __unused2;
/* Guest-ABI layout of struct semid_ds (semaphore set descriptor). */
2105 struct target_semid_ds
2107 struct target_ipc_perm sem_perm;
2108 abi_ulong sem_otime;
2109 abi_ulong __unused1;
2110 abi_ulong sem_ctime;
2111 abi_ulong __unused2;
2112 abi_ulong sem_nsems;
2113 abi_ulong __unused3;
2114 abi_ulong __unused4;
/*
 * Read the ipc_perm embedded in a guest semid_ds at target_addr and fill
 * the host struct ipc_perm, byteswapping each field.  Note: the address
 * is interpreted as a target_semid_ds and only its sem_perm member is
 * used.  Returns -TARGET_EFAULT on a bad guest address.
 */
2117 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2118 abi_ulong target_addr)
2120 struct target_ipc_perm *target_ip;
2121 struct target_semid_ds *target_sd;
2123 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2124 return -TARGET_EFAULT;
2125 target_ip = &(target_sd->sem_perm);
2126 host_ip->__key = tswapl(target_ip->__key);
2127 host_ip->uid = tswapl(target_ip->uid);
2128 host_ip->gid = tswapl(target_ip->gid);
2129 host_ip->cuid = tswapl(target_ip->cuid);
2130 host_ip->cgid = tswapl(target_ip->cgid);
2131 host_ip->mode = tswapl(target_ip->mode);
2132 unlock_user_struct(target_sd, target_addr, 0);
/*
 * Inverse of target_to_host_ipc_perm(): write a host struct ipc_perm into
 * the sem_perm member of the guest semid_ds at target_addr, byteswapping
 * each field.  Returns -TARGET_EFAULT on a bad guest address.
 */
2136 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2137 struct ipc_perm *host_ip)
2139 struct target_ipc_perm *target_ip;
2140 struct target_semid_ds *target_sd;
2142 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2143 return -TARGET_EFAULT;
2144 target_ip = &(target_sd->sem_perm);
2145 target_ip->__key = tswapl(host_ip->__key);
2146 target_ip->uid = tswapl(host_ip->uid);
2147 target_ip->gid = tswapl(host_ip->gid);
2148 target_ip->cuid = tswapl(host_ip->cuid);
2149 target_ip->cgid = tswapl(host_ip->cgid);
2150 target_ip->mode = tswapl(host_ip->mode);
2151 unlock_user_struct(target_sd, target_addr, 1);
/*
 * Convert a guest struct semid_ds into a host one: the embedded ipc_perm
 * via target_to_host_ipc_perm(), then the byteswapped scalar fields.
 * Returns -TARGET_EFAULT on a bad guest address.
 */
2155 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2156 abi_ulong target_addr)
2158 struct target_semid_ds *target_sd;
2160 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2161 return -TARGET_EFAULT;
2162 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2163 return -TARGET_EFAULT;
2164 host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
2165 host_sd->sem_otime = tswapl(target_sd->sem_otime);
2166 host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
2167 unlock_user_struct(target_sd, target_addr, 0);
/*
 * Convert a host struct semid_ds back into the guest's semid_ds at
 * target_addr: the embedded ipc_perm via host_to_target_ipc_perm(), then
 * the byteswapped scalar fields.  Returns -TARGET_EFAULT on a bad guest
 * address.  (Fixed: stray double semicolon after the EFAULT return.)
 */
2171 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2172 struct semid_ds *host_sd)
2174 struct target_semid_ds *target_sd;
2176 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2177 return -TARGET_EFAULT;
2178 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2179 return -TARGET_EFAULT;
2180 target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
2181 target_sd->sem_otime = tswapl(host_sd->sem_otime);
2182 target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
2183 unlock_user_struct(target_sd, target_addr, 1);
/* Guest-ABI layout of struct seminfo (semctl IPC_INFO/SEM_INFO result). */
2187 struct target_seminfo {
/*
 * Copy a host struct seminfo field-by-field into the guest structure at
 * target_addr using __put_user (which byteswaps as needed).  Returns
 * -TARGET_EFAULT on a bad guest address.
 */
2200 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2201 struct seminfo *host_seminfo)
2203 struct target_seminfo *target_seminfo;
2204 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2205 return -TARGET_EFAULT;
2206 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2207 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2208 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2209 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2210 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2211 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2212 __put_user(host_seminfo->semume, &target_seminfo->semume);
2213 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2214 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2215 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2216 unlock_user_struct(target_seminfo, target_addr, 1);
/* Host 'union semun' members (semctl argument) and its guest-ABI twin. */
2222 struct semid_ds *buf;
2223 unsigned short *array;
2224 struct seminfo *__buf;
2227 union target_semun {
/*
 * For semctl(SETALL): allocate a host array sized to the semaphore set's
 * nsems (obtained via a host IPC_STAT), then read each unsigned short
 * from the guest array at target_addr.  On success *host_array is
 * malloc'd and owned by the caller (freed after the semctl call).
 * NOTE(review): the malloc result is not checked, and *host_array leaks
 * if lock_user fails — confirm against upstream before relying on this.
 */
2234 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2235 abi_ulong target_addr)
2238 unsigned short *array;
2240 struct semid_ds semid_ds;
2243 semun.buf = &semid_ds;
2245 ret = semctl(semid, 0, IPC_STAT, semun);
2247 return get_errno(ret);
2249 nsems = semid_ds.sem_nsems;
2251 *host_array = malloc(nsems*sizeof(unsigned short));
2252 array = lock_user(VERIFY_READ, target_addr,
2253 nsems*sizeof(unsigned short), 1);
2255 return -TARGET_EFAULT;
2257 for(i=0; i<nsems; i++) {
2258 __get_user((*host_array)[i], &array[i]);
2260 unlock_user(array, target_addr, 0);
/*
 * For semctl(GETALL): write the host semaphore-value array back into the
 * guest array at target_addr, one __put_user per element, with the count
 * taken from a fresh host IPC_STAT of the set.
 */
2265 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2266 unsigned short **host_array)
2269 unsigned short *array;
2271 struct semid_ds semid_ds;
2274 semun.buf = &semid_ds;
2276 ret = semctl(semid, 0, IPC_STAT, semun);
2278 return get_errno(ret);
2280 nsems = semid_ds.sem_nsems;
2282 array = lock_user(VERIFY_WRITE, target_addr,
2283 nsems*sizeof(unsigned short), 0);
2285 return -TARGET_EFAULT;
2287 for(i=0; i<nsems; i++) {
2288 __put_user((*host_array)[i], &array[i]);
2291 unlock_user(array, target_addr, 1);
/*
 * Emulate semctl(2): convert the target_semun argument per command
 * (GETVAL/SETVAL use an int; GETALL/SETALL a value array; IPC_STAT/SET a
 * semid_ds; IPC_INFO/SEM_INFO a seminfo), call the host semctl(), then
 * convert results back to the guest.  Commands needing no argument fall
 * through to a plain semctl with NULL.
 */
2296 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2297 union target_semun target_su)
2300 struct semid_ds dsarg;
2301 unsigned short *array = NULL;
2302 struct seminfo seminfo;
2303 abi_long ret = -TARGET_EINVAL;
2310 arg.val = tswapl(target_su.val);
2311 ret = get_errno(semctl(semid, semnum, cmd, arg));
2312 target_su.val = tswapl(arg.val);
2316 err = target_to_host_semarray(semid, &array, target_su.array);
2320 ret = get_errno(semctl(semid, semnum, cmd, arg));
2321 err = host_to_target_semarray(semid, target_su.array, &array);
2328 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2332 ret = get_errno(semctl(semid, semnum, cmd, arg));
2333 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2339 arg.__buf = &seminfo;
2340 ret = get_errno(semctl(semid, semnum, cmd, arg));
2341 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2349 ret = get_errno(semctl(semid, semnum, cmd, NULL));
/* Guest-ABI layout of struct sembuf (one semop operation). */
2356 struct target_sembuf {
2357 unsigned short sem_num;
/*
 * Copy an array of nsops guest sembuf entries into host struct sembuf,
 * byteswapping each field via __get_user.  Returns -TARGET_EFAULT if the
 * guest array cannot be mapped.
 */
2362 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2363 abi_ulong target_addr,
2366 struct target_sembuf *target_sembuf;
2369 target_sembuf = lock_user(VERIFY_READ, target_addr,
2370 nsops*sizeof(struct target_sembuf), 1);
2372 return -TARGET_EFAULT;
2374 for(i=0; i<nsops; i++) {
2375 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2376 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2377 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2380 unlock_user(target_sembuf, target_addr, 0);
/*
 * Emulate semop(2): convert the guest sembuf array into a stack VLA and
 * call the host semop().  NOTE(review): the host return is passed back
 * without get_errno() conversion here — confirm the caller handles that.
 */
2385 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2387 struct sembuf sops[nsops];
2389 if (target_to_host_sembuf(sops, ptr, nsops))
2390 return -TARGET_EFAULT;
2392 return semop(semid, sops, nsops);
/* Guest-ABI layout of struct msqid_ds (message-queue descriptor).
 * On 32-bit targets each time field is followed by a padding word. */
2395 struct target_msqid_ds
2397 struct target_ipc_perm msg_perm;
2398 abi_ulong msg_stime;
2399 #if TARGET_ABI_BITS == 32
2400 abi_ulong __unused1;
2402 abi_ulong msg_rtime;
2403 #if TARGET_ABI_BITS == 32
2404 abi_ulong __unused2;
2406 abi_ulong msg_ctime;
2407 #if TARGET_ABI_BITS == 32
2408 abi_ulong __unused3;
2410 abi_ulong __msg_cbytes;
2412 abi_ulong msg_qbytes;
2413 abi_ulong msg_lspid;
2414 abi_ulong msg_lrpid;
2415 abi_ulong __unused4;
2416 abi_ulong __unused5;
/*
 * Convert a guest struct msqid_ds into a host one: the embedded ipc_perm
 * via target_to_host_ipc_perm(), then the byteswapped scalar fields.
 * Returns -TARGET_EFAULT on a bad guest address.
 */
2419 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2420 abi_ulong target_addr)
2422 struct target_msqid_ds *target_md;
2424 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2425 return -TARGET_EFAULT;
2426 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2427 return -TARGET_EFAULT;
2428 host_md->msg_stime = tswapl(target_md->msg_stime);
2429 host_md->msg_rtime = tswapl(target_md->msg_rtime);
2430 host_md->msg_ctime = tswapl(target_md->msg_ctime);
2431 host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
2432 host_md->msg_qnum = tswapl(target_md->msg_qnum);
2433 host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
2434 host_md->msg_lspid = tswapl(target_md->msg_lspid);
2435 host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
2436 unlock_user_struct(target_md, target_addr, 0);
/*
 * Inverse of target_to_host_msqid_ds(): write a host struct msqid_ds into
 * the guest structure at target_addr, field by field with byteswapping.
 * Returns -TARGET_EFAULT on a bad guest address.
 */
2440 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2441 struct msqid_ds *host_md)
2443 struct target_msqid_ds *target_md;
2445 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2446 return -TARGET_EFAULT;
2447 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2448 return -TARGET_EFAULT;
2449 target_md->msg_stime = tswapl(host_md->msg_stime);
2450 target_md->msg_rtime = tswapl(host_md->msg_rtime);
2451 target_md->msg_ctime = tswapl(host_md->msg_ctime);
2452 target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
2453 target_md->msg_qnum = tswapl(host_md->msg_qnum);
2454 target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
2455 target_md->msg_lspid = tswapl(host_md->msg_lspid);
2456 target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
2457 unlock_user_struct(target_md, target_addr, 1);
/* Guest-ABI layout of struct msginfo (msgctl IPC_INFO/MSG_INFO result). */
2461 struct target_msginfo {
2469 unsigned short int msgseg;
/*
 * Copy a host struct msginfo into the guest structure at target_addr via
 * __put_user (byteswapping per field).  Returns -TARGET_EFAULT on a bad
 * guest address.
 */
2472 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2473 struct msginfo *host_msginfo)
2475 struct target_msginfo *target_msginfo;
2476 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2477 return -TARGET_EFAULT;
2478 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2479 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2480 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2481 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2482 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2483 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2484 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2485 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2486 unlock_user_struct(target_msginfo, target_addr, 1);
/*
 * Emulate msgctl(2): for IPC_STAT/IPC_SET convert a msqid_ds both ways
 * around the host call; for IPC_INFO/MSG_INFO pass a host msginfo and
 * convert the result; remaining commands (e.g. IPC_RMID) need no
 * argument conversion.
 */
2490 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2492 struct msqid_ds dsarg;
2493 struct msginfo msginfo;
2494 abi_long ret = -TARGET_EINVAL;
2502 if (target_to_host_msqid_ds(&dsarg,ptr))
2503 return -TARGET_EFAULT;
2504 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2505 if (host_to_target_msqid_ds(ptr,&dsarg))
2506 return -TARGET_EFAULT;
2509 ret = get_errno(msgctl(msgid, cmd, NULL));
/* msginfo is returned through the msqid_ds pointer slot; cast required. */
2513 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2514 if (host_to_target_msginfo(ptr, &msginfo))
2515 return -TARGET_EFAULT;
/* Guest-ABI layout of struct msgbuf (mtype + inline message text). */
2522 struct target_msgbuf {
/*
 * Emulate msgsnd(2): allocate a host msgbuf large enough for mtype plus
 * msgsz bytes of text, copy/byteswap from the guest buffer, and call the
 * host msgsnd().  NOTE(review): the malloc result is not checked before
 * use — confirm against upstream.
 */
2527 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2528 unsigned int msgsz, int msgflg)
2530 struct target_msgbuf *target_mb;
2531 struct msgbuf *host_mb;
2534 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2535 return -TARGET_EFAULT;
2536 host_mb = malloc(msgsz+sizeof(long));
2537 host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
2538 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2539 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2541 unlock_user_struct(target_mb, msgp, 0);
/*
 * Emulate msgrcv(2): receive into a host msgbuf, then on success copy the
 * received text into the guest buffer (mtext starts one abi_ulong past
 * msgp, after the mtype field) and write back the byteswapped mtype.
 * NOTE(review): the malloc result is not checked before use — confirm
 * against upstream.
 */
2546 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2547 unsigned int msgsz, abi_long msgtyp,
2550 struct target_msgbuf *target_mb;
2552 struct msgbuf *host_mb;
2555 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2556 return -TARGET_EFAULT;
2558 host_mb = malloc(msgsz+sizeof(long));
2559 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
2562 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2563 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2564 if (!target_mtext) {
2565 ret = -TARGET_EFAULT;
2568 memcpy(target_mb->mtext, host_mb->mtext, ret);
2569 unlock_user(target_mtext, target_mtext_addr, ret);
2572 target_mb->mtype = tswapl(host_mb->mtype);
2577 unlock_user_struct(target_mb, msgp, 1);
/* Guest-ABI layout of struct shmid_ds (shared-memory segment descriptor).
 * As with target_msqid_ds, 32-bit targets pad after each time field. */
2581 struct target_shmid_ds
2583 struct target_ipc_perm shm_perm;
2584 abi_ulong shm_segsz;
2585 abi_ulong shm_atime;
2586 #if TARGET_ABI_BITS == 32
2587 abi_ulong __unused1;
2589 abi_ulong shm_dtime;
2590 #if TARGET_ABI_BITS == 32
2591 abi_ulong __unused2;
2593 abi_ulong shm_ctime;
2594 #if TARGET_ABI_BITS == 32
2595 abi_ulong __unused3;
2599 abi_ulong shm_nattch;
2600 unsigned long int __unused4;
2601 unsigned long int __unused5;
/*
 * Convert a guest struct shmid_ds into a host one: the embedded ipc_perm
 * via target_to_host_ipc_perm(), then each scalar field via __get_user.
 * Returns -TARGET_EFAULT on a bad guest address.
 */
2604 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2605 abi_ulong target_addr)
2607 struct target_shmid_ds *target_sd;
2609 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2610 return -TARGET_EFAULT;
2611 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2612 return -TARGET_EFAULT;
2613 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2614 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2615 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2616 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2617 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2618 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2619 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2620 unlock_user_struct(target_sd, target_addr, 0);
/*
 * Inverse of target_to_host_shmid_ds(): write a host struct shmid_ds into
 * the guest structure at target_addr via __put_user per field.  Returns
 * -TARGET_EFAULT on a bad guest address.
 */
2624 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2625 struct shmid_ds *host_sd)
2627 struct target_shmid_ds *target_sd;
2629 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2630 return -TARGET_EFAULT;
2631 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2632 return -TARGET_EFAULT;
2633 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2634 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2635 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2636 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2637 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2638 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2639 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2640 unlock_user_struct(target_sd, target_addr, 1);
/* Guest-ABI layout of struct shminfo (shmctl IPC_INFO result). */
2644 struct target_shminfo {
/*
 * Copy a host struct shminfo into the guest structure at target_addr via
 * __put_user per field.  Returns -TARGET_EFAULT on a bad guest address.
 */
2652 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2653 struct shminfo *host_shminfo)
2655 struct target_shminfo *target_shminfo;
2656 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2657 return -TARGET_EFAULT;
2658 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2659 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2660 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2661 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2662 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2663 unlock_user_struct(target_shminfo, target_addr, 1);
2667 struct target_shm_info {
2672 abi_ulong swap_attempts;
2673 abi_ulong swap_successes;
/* Convert a host struct shm_info (SHM_INFO result) into the guest-format
 * struct target_shm_info at target_addr.  -TARGET_EFAULT on a bad guest
 * pointer; success return not visible in this gapped chunk. */
2676 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2677 struct shm_info *host_shm_info)
2679 struct target_shm_info *target_shm_info;
2680 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2681 return -TARGET_EFAULT;
2682 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2683 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2684 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2685 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2686 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2687 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2688 unlock_user_struct(target_shm_info, target_addr, 1);
/* Emulate shmctl(2) for the guest: translate the guest buffer to a host
 * struct, issue the host shmctl(), then translate the result back.
 * Returns a target errno.  NOTE(review): this chunk is gapped — the
 * switch(cmd) statement and its case labels (presumably IPC_STAT/IPC_SET,
 * IPC_INFO, SHM_INFO, IPC_RMID/SHM_LOCK/SHM_UNLOCK) are not visible; the
 * groupings below are inferred from the conversion helpers used. */
2692 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2694 struct shmid_ds dsarg;
2695 struct shminfo shminfo;
2696 struct shm_info shm_info;
2697 abi_long ret = -TARGET_EINVAL;
/* shmid_ds-based commands: copy in, call, copy back. */
2705 if (target_to_host_shmid_ds(&dsarg, buf))
2706 return -TARGET_EFAULT;
2707 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2708 if (host_to_target_shmid_ds(buf, &dsarg))
2709 return -TARGET_EFAULT;
/* shminfo-returning command (IPC_INFO); cast matches kernel ABI. */
2712 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2713 if (host_to_target_shminfo(buf, &shminfo))
2714 return -TARGET_EFAULT;
/* shm_info-returning command (SHM_INFO). */
2717 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2718 if (host_to_target_shm_info(buf, &shm_info))
2719 return -TARGET_EFAULT;
/* Buffer-less commands pass NULL straight through. */
2724 ret = get_errno(shmctl(shmid, cmd, NULL));
/* Emulate shmat(2): attach a SysV shared memory segment into the guest
 * address space and record it in shm_regions[] so do_shmdt() can later
 * clear the page flags.  Returns the guest attach address, or a (negative)
 * target errno on failure.  This chunk is gapped: the declarations of
 * raddr/host_raddr/i and several braces are not visible. */
2731 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2735 struct shmid_ds shm_info;
2738 /* find out the length of the shared memory segment */
2739 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2740 if (is_error(ret)) {
2741 /* can't get length, bail out */
/* Caller supplied a fixed guest address: attach at its host equivalent. */
2748 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2750 abi_ulong mmap_start;
/* No address given: let the guest mmap allocator pick a free range. */
2752 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2754 if (mmap_start == -1) {
2756 host_raddr = (void *)-1;
/* SHM_REMAP because mmap_find_vma may have reserved the range already. */
2758 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2761 if (host_raddr == (void *)-1) {
2763 return get_errno((long)host_raddr);
2765 raddr=h2g((unsigned long)host_raddr);
/* Mark the attached range valid/readable (writable unless SHM_RDONLY). */
2767 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2768 PAGE_VALID | PAGE_READ |
2769 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
/* Remember the attachment in the first free shm_regions[] slot. */
2771 for (i = 0; i < N_SHM_REGIONS; i++) {
2772 if (shm_regions[i].start == 0) {
2773 shm_regions[i].start = raddr;
2774 shm_regions[i].size = shm_info.shm_segsz;
/* Emulate shmdt(2): drop the guest page flags for the region recorded by
 * do_shmat(), free its shm_regions[] slot, then detach on the host. */
2784 static inline abi_long do_shmdt(abi_ulong shmaddr)
2788 for (i = 0; i < N_SHM_REGIONS; ++i) {
2789 if (shm_regions[i].start == shmaddr) {
2790 shm_regions[i].start = 0;
2791 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
/* Host detach; get_errno converts failure to a target errno. */
2796 return get_errno(shmdt(g2h(shmaddr)));
2799 #ifdef TARGET_NR_ipc
2800 /* ??? This only works with linear mappings. */
2801 /* do_ipc() must return target values and target errnos. */
2802 static abi_long do_ipc(unsigned int call, int first,
2803 int second, int third,
2804 abi_long ptr, abi_long fifth)
/* Demultiplexer for the legacy ipc(2) syscall: the low 16 bits of 'call'
 * select the operation (IPCOP_semop, IPCOP_shmat, ...) and the high bits
 * carry an ABI version.  The switch(call) and its case labels are not
 * visible in this gapped chunk; the dispatch targets below identify each
 * arm.  Returns target values / target errnos. */
2809 version = call >> 16;
2814 ret = do_semop(first, ptr, second);
2818 ret = get_errno(semget(first, second, third));
2822 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
2826 ret = get_errno(msgget(first, second));
2830 ret = do_msgsnd(first, ptr, second, third);
2834 ret = do_msgctl(first, second, ptr);
/* Old-style msgrcv passes {msgp, msgtyp} indirectly through a kludge
 * struct; the new style passes them as direct arguments. */
2841 struct target_ipc_kludge {
2846 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
2847 ret = -TARGET_EFAULT;
2851 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
2853 unlock_user_struct(tmp, ptr, 0);
2857 ret = do_msgrcv(first, ptr, second, fifth, third);
/* shmat returns the attach address through *third (guest pointer). */
2866 raddr = do_shmat(first, ptr, second);
2867 if (is_error(raddr))
2868 return get_errno(raddr);
2869 if (put_user_ual(raddr, third))
2870 return -TARGET_EFAULT;
2874 ret = -TARGET_EINVAL;
2879 ret = do_shmdt(ptr);
2883 /* IPC_* flag values are the same on all linux platforms */
2884 ret = get_errno(shmget(first, second, third));
2887 /* IPC_* and SHM_* command values are the same on all linux platforms */
2889 ret = do_shmctl(first, second, third);
2892 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
2893 ret = -TARGET_ENOSYS;
2900 /* kernel structure types definitions */
2903 #define STRUCT(name, ...) STRUCT_ ## name,
2904 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
2906 #include "syscall_types.h"
2909 #undef STRUCT_SPECIAL
2911 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
2912 #define STRUCT_SPECIAL(name)
2913 #include "syscall_types.h"
2915 #undef STRUCT_SPECIAL
2917 typedef struct IOCTLEntry {
2918 unsigned int target_cmd;
2919 unsigned int host_cmd;
2922 const argtype arg_type[5];
2925 #define IOC_R 0x0001
2926 #define IOC_W 0x0002
2927 #define IOC_RW (IOC_R | IOC_W)
2929 #define MAX_STRUCT_SIZE 4096
2931 static IOCTLEntry ioctl_entries[] = {
2932 #define IOCTL(cmd, access, ...) \
2933 { TARGET_ ## cmd, cmd, #cmd, access, { __VA_ARGS__ } },
2938 /* ??? Implement proper locking for ioctls. */
2939 /* do_ioctl() Must return target values and target errnos. */
2940 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
/* Emulate ioctl(2): look 'cmd' up in ioctl_entries[], then marshal the
 * argument according to the entry's thunk type description (TYPE_NULL /
 * TYPE_INT / TYPE_PTR with IOC_R / IOC_W / IOC_RW access).  Several case
 * labels, the lookup loop header and NULL-checks after lock_user() are
 * missing from this gapped chunk.  Returns a target errno. */
2942 const IOCTLEntry *ie;
2943 const argtype *arg_type;
/* Scratch buffer for the host-side copy of pointer arguments. */
2945 uint8_t buf_temp[MAX_STRUCT_SIZE];
/* Table is terminated by a zero target_cmd entry => unknown ioctl. */
2951 if (ie->target_cmd == 0) {
2952 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
2953 return -TARGET_ENOSYS;
2955 if (ie->target_cmd == cmd)
2959 arg_type = ie->arg_type;
2961 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
2963 switch(arg_type[0]) {
/* TYPE_NULL: no argument at all. */
2966 ret = get_errno(ioctl(fd, ie->host_cmd));
/* TYPE_INT: pass the integer argument straight through. */
2971 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
/* TYPE_PTR: convert the pointed-to struct via the thunk machinery. */
2975 target_size = thunk_type_size(arg_type, 0);
2976 switch(ie->access) {
/* IOC_R: kernel writes; convert host->target after the call. */
2978 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2979 if (!is_error(ret)) {
2980 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
2982 return -TARGET_EFAULT;
2983 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
2984 unlock_user(argptr, arg, target_size);
/* IOC_W: kernel reads; convert target->host before the call. */
2988 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2990 return -TARGET_EFAULT;
2991 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
2992 unlock_user(argptr, arg, 0);
2993 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
/* IOC_RW: convert in both directions around the call. */
2997 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2999 return -TARGET_EFAULT;
3000 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3001 unlock_user(argptr, arg, 0);
3002 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3003 if (!is_error(ret)) {
3004 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3006 return -TARGET_EFAULT;
3007 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3008 unlock_user(argptr, arg, target_size);
3014 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3015 (long)cmd, arg_type[0]);
3016 ret = -TARGET_ENOSYS;
/* termios c_iflag translation table: {target_mask, target_bits, host_mask,
 * host_bits} tuples consumed by target_to_host_bitmask() and its inverse.
 * The terminating all-zero sentinel entry is outside this gapped view. */
3022 static const bitmask_transtbl iflag_tbl[] = {
3023 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3024 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3025 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3026 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3027 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3028 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3029 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3030 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3031 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3032 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3033 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3034 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3035 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3036 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
/* termios c_oflag translation table.  Multi-bit delay fields (NLDLY,
 * CRDLY, TABDLY, ...) use one entry per value sharing the same mask. */
3040 static const bitmask_transtbl oflag_tbl[] = {
3041 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3042 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3043 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3044 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3045 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3046 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3047 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3048 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3049 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3050 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3051 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3052 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3053 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3054 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3055 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3056 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3057 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3058 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3059 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3060 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3061 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3062 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3063 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3064 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
/* termios c_cflag translation table.  CBAUD is a multi-bit field, so each
 * supported baud rate gets its own entry; CSIZE likewise for CS5..CS8. */
3068 static const bitmask_transtbl cflag_tbl[] = {
3069 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3070 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3071 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3072 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3073 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3074 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3075 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3076 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3077 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3078 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3079 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3080 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3081 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3082 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3083 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3084 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3085 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3086 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3087 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3088 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3089 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3090 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3091 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3092 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3093 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3094 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3095 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3096 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3097 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3098 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3099 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
/* termios c_lflag translation table (single-bit flags only). */
3103 static const bitmask_transtbl lflag_tbl[] = {
3104 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3105 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3106 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3107 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3108 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3109 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3110 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3111 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3112 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3113 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3114 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3115 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3116 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3117 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3118 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
/* Thunk conversion callback: translate a guest struct termios into the
 * host layout.  Flag words go through the bitmask tables above; control
 * characters are remapped index-by-index.  NOTE(review): the lvalue lines
 * of the four flag assignments (host->c_iflag = ..., etc.) fell into gaps
 * of this chunk — only the RHS expressions are visible below. */
3122 static void target_to_host_termios (void *dst, const void *src)
3124 struct host_termios *host = dst;
3125 const struct target_termios *target = src;
/* tswap32 undoes guest byte order before the bitmask translation. */
3128 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3130 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3132 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3134 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3135 host->c_line = target->c_line;
/* Clear first: host c_cc may have slots the target set doesn't cover. */
3137 memset(host->c_cc, 0, sizeof(host->c_cc));
3138 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3139 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3140 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3141 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3142 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3143 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3144 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3145 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3146 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3147 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3148 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3149 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3150 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3151 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3152 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3153 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3154 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/* Thunk conversion callback: inverse of target_to_host_termios — translate
 * a host struct termios into the guest layout.  As above, the lvalue lines
 * of the four flag assignments are missing from this gapped chunk. */
3157 static void host_to_target_termios (void *dst, const void *src)
3159 struct target_termios *target = dst;
3160 const struct host_termios *host = src;
/* Translate host flag bits, then tswap32 into guest byte order. */
3163 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3165 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3167 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3169 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3170 target->c_line = host->c_line;
3172 memset(target->c_cc, 0, sizeof(target->c_cc));
3173 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3174 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3175 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3176 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3177 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3178 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3179 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3180 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3181 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3182 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3183 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3184 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3185 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3186 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3187 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3188 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3189 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk descriptor registering the two termios converters above for use
 * by the generic struct-conversion machinery (convert[0] = to target,
 * convert[1] = to host; size/align per direction). */
3192 static const StructEntry struct_termios_def = {
3193 .convert = { host_to_target_termios, target_to_host_termios },
3194 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3195 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/* mmap(2) flags translation table (target <-> host), same tuple format as
 * the termios tables; sentinel entry not visible in this gapped chunk. */
3198 static bitmask_transtbl mmap_flags_tbl[] = {
3199 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3200 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3201 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3202 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3203 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3204 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3205 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3206 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3210 #if defined(TARGET_I386)
3212 /* NOTE: there is really one LDT for all the threads */
3213 static uint8_t *ldt_table;
/* modify_ldt(func=0): copy the emulated LDT table out to guest memory at
 * 'ptr', capped at 'bytecount'.  The declarations of size/p and the final
 * return are outside this gapped view. */
3215 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3222 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3223 if (size > bytecount)
3225 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3227 return -TARGET_EFAULT;
3228 /* ??? Should this by byteswapped? */
3229 memcpy(p, ldt_table, size);
3230 unlock_user(p, ptr, size);
3234 /* XXX: add locking support */
3235 static abi_long write_ldt(CPUX86State *env,
3236 abi_ulong ptr, unsigned long bytecount, int oldmode)
3238 struct target_modify_ldt_ldt_s ldt_info;
3239 struct target_modify_ldt_ldt_s *target_ldt_info;
3240 int seg_32bit, contents, read_exec_only, limit_in_pages;
3241 int seg_not_present, useable, lm;
3242 uint32_t *lp, entry_1, entry_2;
3244 if (bytecount != sizeof(ldt_info))
3245 return -TARGET_EINVAL;
3246 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3247 return -TARGET_EFAULT;
3248 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3249 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3250 ldt_info.limit = tswap32(target_ldt_info->limit);
3251 ldt_info.flags = tswap32(target_ldt_info->flags);
3252 unlock_user_struct(target_ldt_info, ptr, 0);
3254 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3255 return -TARGET_EINVAL;
3256 seg_32bit = ldt_info.flags & 1;
3257 contents = (ldt_info.flags >> 1) & 3;
3258 read_exec_only = (ldt_info.flags >> 3) & 1;
3259 limit_in_pages = (ldt_info.flags >> 4) & 1;
3260 seg_not_present = (ldt_info.flags >> 5) & 1;
3261 useable = (ldt_info.flags >> 6) & 1;
3265 lm = (ldt_info.flags >> 7) & 1;
3267 if (contents == 3) {
3269 return -TARGET_EINVAL;
3270 if (seg_not_present == 0)
3271 return -TARGET_EINVAL;
3273 /* allocate the LDT */
3275 env->ldt.base = target_mmap(0,
3276 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3277 PROT_READ|PROT_WRITE,
3278 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3279 if (env->ldt.base == -1)
3280 return -TARGET_ENOMEM;
3281 memset(g2h(env->ldt.base), 0,
3282 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3283 env->ldt.limit = 0xffff;
3284 ldt_table = g2h(env->ldt.base);
3287 /* NOTE: same code as Linux kernel */
3288 /* Allow LDTs to be cleared by the user. */
3289 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3292 read_exec_only == 1 &&
3294 limit_in_pages == 0 &&
3295 seg_not_present == 1 &&
3303 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3304 (ldt_info.limit & 0x0ffff);
3305 entry_2 = (ldt_info.base_addr & 0xff000000) |
3306 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3307 (ldt_info.limit & 0xf0000) |
3308 ((read_exec_only ^ 1) << 9) |
3310 ((seg_not_present ^ 1) << 15) |
3312 (limit_in_pages << 23) |
3316 entry_2 |= (useable << 20);
3318 /* Install the new entry ... */
3320 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3321 lp[0] = tswap32(entry_1);
3322 lp[1] = tswap32(entry_2);
3326 /* specific and weird i386 syscalls */
/* Dispatcher for the i386 modify_ldt syscall: func 0 = read, 1 = write
 * (old mode), 0x11 = write (new mode).  The switch(func) line and case
 * labels are not visible in this gapped chunk. */
3327 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3328 unsigned long bytecount)
3334 ret = read_ldt(ptr, bytecount);
3337 ret = write_ldt(env, ptr, bytecount, 1);
3340 ret = write_ldt(env, ptr, bytecount, 0);
3343 ret = -TARGET_ENOSYS;
3349 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* Emulate set_thread_area(2) for 32-bit i386 guests: install a TLS
 * descriptor into the emulated GDT.  If the guest passes entry_number ==
 * -1, pick the first free TLS slot and write the chosen index back to the
 * guest struct.  Structurally parallel to write_ldt() above; several lines
 * (oldmode-equivalent checks, some entry_2 bits, the final return) fell
 * into gaps of this chunk. */
3350 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3352 uint64_t *gdt_table = g2h(env->gdt.base);
3353 struct target_modify_ldt_ldt_s ldt_info;
3354 struct target_modify_ldt_ldt_s *target_ldt_info;
3355 int seg_32bit, contents, read_exec_only, limit_in_pages;
3356 int seg_not_present, useable, lm;
3357 uint32_t *lp, entry_1, entry_2;
3360 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3361 if (!target_ldt_info)
3362 return -TARGET_EFAULT;
3363 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3364 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3365 ldt_info.limit = tswap32(target_ldt_info->limit);
3366 ldt_info.flags = tswap32(target_ldt_info->flags);
/* entry_number == -1: allocate a free TLS GDT slot for the caller. */
3367 if (ldt_info.entry_number == -1) {
3368 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3369 if (gdt_table[i] == 0) {
3370 ldt_info.entry_number = i;
3371 target_ldt_info->entry_number = tswap32(i);
3376 unlock_user_struct(target_ldt_info, ptr, 1);
3378 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3379 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3380 return -TARGET_EINVAL;
/* Decode the packed flags word (kernel user_desc layout). */
3381 seg_32bit = ldt_info.flags & 1;
3382 contents = (ldt_info.flags >> 1) & 3;
3383 read_exec_only = (ldt_info.flags >> 3) & 1;
3384 limit_in_pages = (ldt_info.flags >> 4) & 1;
3385 seg_not_present = (ldt_info.flags >> 5) & 1;
3386 useable = (ldt_info.flags >> 6) & 1;
3390 lm = (ldt_info.flags >> 7) & 1;
3393 if (contents == 3) {
3394 if (seg_not_present == 0)
3395 return -TARGET_EINVAL;
3398 /* NOTE: same code as Linux kernel */
3399 /* Allow LDTs to be cleared by the user. */
3400 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3401 if ((contents == 0 &&
3402 read_exec_only == 1 &&
3404 limit_in_pages == 0 &&
3405 seg_not_present == 1 &&
/* Pack base/limit and attribute bits into a raw x86 descriptor. */
3413 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3414 (ldt_info.limit & 0x0ffff);
3415 entry_2 = (ldt_info.base_addr & 0xff000000) |
3416 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3417 (ldt_info.limit & 0xf0000) |
3418 ((read_exec_only ^ 1) << 9) |
3420 ((seg_not_present ^ 1) << 15) |
3422 (limit_in_pages << 23) |
3427 /* Install the new entry ... */
3429 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3430 lp[0] = tswap32(entry_1);
3431 lp[1] = tswap32(entry_2);
/* Emulate get_thread_area(2): read the raw descriptor for the requested
 * TLS GDT slot, unpack it back into the user_desc-style fields, and write
 * them to the guest struct at 'ptr'.  Inverse of do_set_thread_area(). */
3435 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3437 struct target_modify_ldt_ldt_s *target_ldt_info;
3438 uint64_t *gdt_table = g2h(env->gdt.base);
3439 uint32_t base_addr, limit, flags;
3440 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3441 int seg_not_present, useable, lm;
3442 uint32_t *lp, entry_1, entry_2;
3444 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3445 if (!target_ldt_info)
3446 return -TARGET_EFAULT;
3447 idx = tswap32(target_ldt_info->entry_number);
3448 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3449 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3450 unlock_user_struct(target_ldt_info, ptr, 1);
3451 return -TARGET_EINVAL;
/* Read the two 32-bit halves of the raw descriptor. */
3453 lp = (uint32_t *)(gdt_table + idx);
3454 entry_1 = tswap32(lp[0]);
3455 entry_2 = tswap32(lp[1]);
/* Unpack attribute bits (note read_exec_only/seg_not_present are the
 * inverted forms of the descriptor's W and P bits). */
3457 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3458 contents = (entry_2 >> 10) & 3;
3459 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3460 seg_32bit = (entry_2 >> 22) & 1;
3461 limit_in_pages = (entry_2 >> 23) & 1;
3462 useable = (entry_2 >> 20) & 1;
3466 lm = (entry_2 >> 21) & 1;
/* Re-pack into the user_desc flags word. */
3468 flags = (seg_32bit << 0) | (contents << 1) |
3469 (read_exec_only << 3) | (limit_in_pages << 4) |
3470 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3471 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3472 base_addr = (entry_1 >> 16) |
3473 (entry_2 & 0xff000000) |
3474 ((entry_2 & 0xff) << 16);
3475 target_ldt_info->base_addr = tswapl(base_addr);
3476 target_ldt_info->limit = tswap32(limit);
3477 target_ldt_info->flags = tswap32(flags);
3478 unlock_user_struct(target_ldt_info, ptr, 1);
3481 #endif /* TARGET_I386 && TARGET_ABI32 */
3483 #ifndef TARGET_ABI32
/* Emulate x86-64 arch_prctl(2): set or get the FS/GS segment base.  The
 * idx assignments for the FS cases and the surrounding switch/brace lines
 * are not visible in this gapped chunk. */
3484 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3491 case TARGET_ARCH_SET_GS:
3492 case TARGET_ARCH_SET_FS:
3493 if (code == TARGET_ARCH_SET_GS)
/* Load a null selector, then set the base directly in CPU state. */
3497 cpu_x86_load_seg(env, idx, 0);
3498 env->segs[idx].base = addr;
3500 case TARGET_ARCH_GET_GS:
3501 case TARGET_ARCH_GET_FS:
3502 if (code == TARGET_ARCH_GET_GS)
3506 val = env->segs[idx].base;
/* Store the current base to the guest pointer 'addr'. */
3507 if (put_user(val, addr, abi_ulong))
3508 return -TARGET_EFAULT;
3511 ret = -TARGET_EINVAL;
3518 #endif /* defined(TARGET_I386) */
3520 #if defined(CONFIG_USE_NPTL)
3522 #define NEW_STACK_SIZE PTHREAD_STACK_MIN
3524 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
3527 pthread_mutex_t mutex;
3528 pthread_cond_t cond;
3531 abi_ulong child_tidptr;
3532 abi_ulong parent_tidptr;
/* pthread entry point for NPTL-emulated clone(): publish the child TID,
 * unblock signals, signal readiness to the parent via info->cond, then
 * wait for the parent (holding clone_lock) to finish TLS setup before
 * entering the guest CPU loop (loop entry is outside this gapped view). */
3536 static void *clone_func(void *arg)
3538 new_thread_info *info = arg;
3544 ts = (TaskState *)thread_env->opaque;
3545 info->tid = gettid();
3546 env->host_tid = info->tid;
/* CLONE_CHILD_SETTID / CLONE_PARENT_SETTID semantics. */
3548 if (info->child_tidptr)
3549 put_user_u32(info->tid, info->child_tidptr);
3550 if (info->parent_tidptr)
3551 put_user_u32(info->tid, info->parent_tidptr);
3552 /* Enable signals. */
3553 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3554 /* Signal to the parent that we're ready. */
3555 pthread_mutex_lock(&info->mutex);
3556 pthread_cond_broadcast(&info->cond);
3557 pthread_mutex_unlock(&info->mutex);
3558 /* Wait until the parent has finshed initializing the tls state. */
3559 pthread_mutex_lock(&clone_lock);
3560 pthread_mutex_unlock(&clone_lock);
3566 /* this stack is the equivalent of the kernel stack associated with a
3568 #define NEW_STACK_SIZE 8192
/* Non-NPTL clone() child entry point (body mostly outside this gapped
 * view): receives the new CPUState and presumably enters cpu_loop. */
3570 static int clone_func(void *arg)
3572 CPUState *env = arg;
3579 /* do_fork() Must return host values and target errnos (unlike most
3580 do_*() functions). */
3581 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3582 abi_ulong parent_tidptr, target_ulong newtls,
3583 abi_ulong child_tidptr)
/* Emulate clone(2)/fork(2)/vfork(2).  CLONE_VM requests become either a
 * host pthread (NPTL build) or a raw clone() with a scratch stack; plain
 * forks fall through to a host fork().  Returns host values and target
 * errnos.  This chunk is gapped: several declarations, the fork() call
 * itself, the parent-side return path and closing braces are missing. */
3589 #if defined(CONFIG_USE_NPTL)
3590 unsigned int nptl_flags;
3594 /* Emulate vfork() with fork() */
3595 if (flags & CLONE_VFORK)
3596 flags &= ~(CLONE_VFORK | CLONE_VM);
3598 if (flags & CLONE_VM) {
3599 TaskState *parent_ts = (TaskState *)env->opaque;
3600 #if defined(CONFIG_USE_NPTL)
3601 new_thread_info info;
3602 pthread_attr_t attr;
3604 ts = qemu_mallocz(sizeof(TaskState));
3605 init_task_state(ts);
3606 /* we create a new CPU instance. */
3607 new_env = cpu_copy(env);
3608 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
3611 /* Init regs that differ from the parent. */
3612 cpu_clone_regs(new_env, newsp);
3613 new_env->opaque = ts;
3614 ts->bprm = parent_ts->bprm;
3615 ts->info = parent_ts->info;
3616 #if defined(CONFIG_USE_NPTL)
/* Strip the NPTL flags; they are emulated here, not passed to the host. */
3618 flags &= ~CLONE_NPTL_FLAGS2;
3620 if (nptl_flags & CLONE_CHILD_CLEARTID) {
3621 ts->child_tidptr = child_tidptr;
3624 if (nptl_flags & CLONE_SETTLS)
3625 cpu_set_tls (new_env, newtls);
3627 /* Grab a mutex so that thread setup appears atomic. */
3628 pthread_mutex_lock(&clone_lock);
3630 memset(&info, 0, sizeof(info));
3631 pthread_mutex_init(&info.mutex, NULL);
3632 pthread_mutex_lock(&info.mutex);
3633 pthread_cond_init(&info.cond, NULL);
3635 if (nptl_flags & CLONE_CHILD_SETTID)
3636 info.child_tidptr = child_tidptr;
3637 if (nptl_flags & CLONE_PARENT_SETTID)
3638 info.parent_tidptr = parent_tidptr;
/* NOTE(review): the three pthread_attr/create return codes overwrite
 * each other and only the last is checked (if at all) — the TODO below
 * acknowledges cleanup on failure is missing. */
3640 ret = pthread_attr_init(&attr);
3641 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
3642 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
3643 /* It is not safe to deliver signals until the child has finished
3644 initializing, so temporarily block all signals. */
3645 sigfillset(&sigmask);
3646 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
3648 ret = pthread_create(&info.thread, &attr, clone_func, &info);
3649 /* TODO: Free new CPU state if thread creation failed. */
3651 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
3652 pthread_attr_destroy(&attr);
3654 /* Wait for the child to initialize. */
3655 pthread_cond_wait(&info.cond, &info.mutex);
3657 if (flags & CLONE_PARENT_SETTID)
3658 put_user_u32(ret, parent_tidptr);
3662 pthread_mutex_unlock(&info.mutex);
3663 pthread_cond_destroy(&info.cond);
3664 pthread_mutex_destroy(&info.mutex);
3665 pthread_mutex_unlock(&clone_lock);
3667 if (flags & CLONE_NPTL_FLAGS2)
3669 /* This is probably going to die very quickly, but do it anyway. */
3670 new_stack = qemu_mallocz (NEW_STACK_SIZE);
3672 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
3674 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
3678 /* if no CLONE_VM, we consider it is a fork */
3679 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
3684 /* Child Process. */
3685 cpu_clone_regs(env, newsp);
3687 #if defined(CONFIG_USE_NPTL)
3688 /* There is a race condition here. The parent process could
3689 theoretically read the TID in the child process before the child
3690 tid is set. This would require using either ptrace
3691 (not implemented) or having *_tidptr to point at a shared memory
3692 mapping. We can't repeat the spinlock hack used above because
3693 the child process gets its own copy of the lock. */
3694 if (flags & CLONE_CHILD_SETTID)
3695 put_user_u32(gettid(), child_tidptr);
3696 if (flags & CLONE_PARENT_SETTID)
3697 put_user_u32(gettid(), parent_tidptr);
3698 ts = (TaskState *)env->opaque;
3699 if (flags & CLONE_SETTLS)
3700 cpu_set_tls (env, newtls);
3701 if (flags & CLONE_CHILD_CLEARTID)
3702 ts->child_tidptr = child_tidptr;
3711 /* warning : doesn't handle linux specific flags... */
/* Map a guest fcntl(2) command constant to the host's value, or
 * -TARGET_EINVAL for commands with no host equivalent.  The 'return
 * F_...;' lines for most cases fell into gaps of this chunk — only the
 * case labels remain visible. */
3712 static int target_to_host_fcntl_cmd(int cmd)
3715 case TARGET_F_DUPFD:
3716 case TARGET_F_GETFD:
3717 case TARGET_F_SETFD:
3718 case TARGET_F_GETFL:
3719 case TARGET_F_SETFL:
3721 case TARGET_F_GETLK:
3723 case TARGET_F_SETLK:
3725 case TARGET_F_SETLKW:
3727 case TARGET_F_GETOWN:
3729 case TARGET_F_SETOWN:
3731 case TARGET_F_GETSIG:
3733 case TARGET_F_SETSIG:
/* On 32-bit ABIs the 64-bit lock commands map to the host's plain
 * F_GETLK/F_SETLK/F_SETLKW (host flock is already 64-bit capable). */
3735 #if TARGET_ABI_BITS == 32
3736 case TARGET_F_GETLK64:
3738 case TARGET_F_SETLK64:
3740 case TARGET_F_SETLKW64:
3743 case TARGET_F_SETLEASE:
3745 case TARGET_F_GETLEASE:
3747 #ifdef F_DUPFD_CLOEXEC
3748 case TARGET_F_DUPFD_CLOEXEC:
3749 return F_DUPFD_CLOEXEC;
3751 case TARGET_F_NOTIFY:
3754 return -TARGET_EINVAL;
3756 return -TARGET_EINVAL;
/* Emulate fcntl(2): translate the command, marshal struct flock /
 * flock64 arguments between guest and host layouts, and translate
 * F_GETFL/F_SETFL flag bits through fcntl_flags_tbl.  Returns a target
 * errno.  The switch(cmd) header, some break statements and the final
 * return are missing from this gapped chunk.
 * NOTE(review): the 'tswap16(...) >> 1' on l_type in the *LK64 paths
 * looks ABI-specific — confirm against the target_flock64 definition. */
3759 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
3762 struct target_flock *target_fl;
3763 struct flock64 fl64;
3764 struct target_flock64 *target_fl64;
3766 int host_cmd = target_to_host_fcntl_cmd(cmd);
3768 if (host_cmd == -TARGET_EINVAL)
3772 case TARGET_F_GETLK:
/* Copy the guest flock in, query, then copy the result back out. */
3773 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3774 return -TARGET_EFAULT;
3775 fl.l_type = tswap16(target_fl->l_type);
3776 fl.l_whence = tswap16(target_fl->l_whence);
3777 fl.l_start = tswapl(target_fl->l_start);
3778 fl.l_len = tswapl(target_fl->l_len);
3779 fl.l_pid = tswap32(target_fl->l_pid);
3780 unlock_user_struct(target_fl, arg, 0);
3781 ret = get_errno(fcntl(fd, host_cmd, &fl));
3783 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
3784 return -TARGET_EFAULT;
3785 target_fl->l_type = tswap16(fl.l_type);
3786 target_fl->l_whence = tswap16(fl.l_whence);
3787 target_fl->l_start = tswapl(fl.l_start);
3788 target_fl->l_len = tswapl(fl.l_len);
3789 target_fl->l_pid = tswap32(fl.l_pid);
3790 unlock_user_struct(target_fl, arg, 1);
3794 case TARGET_F_SETLK:
3795 case TARGET_F_SETLKW:
/* Input-only: convert the guest flock and pass it to the host. */
3796 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
3797 return -TARGET_EFAULT;
3798 fl.l_type = tswap16(target_fl->l_type);
3799 fl.l_whence = tswap16(target_fl->l_whence);
3800 fl.l_start = tswapl(target_fl->l_start);
3801 fl.l_len = tswapl(target_fl->l_len);
3802 fl.l_pid = tswap32(target_fl->l_pid);
3803 unlock_user_struct(target_fl, arg, 0);
3804 ret = get_errno(fcntl(fd, host_cmd, &fl));
3807 case TARGET_F_GETLK64:
/* 64-bit variants use struct flock64 / target_flock64. */
3808 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3809 return -TARGET_EFAULT;
3810 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3811 fl64.l_whence = tswap16(target_fl64->l_whence);
3812 fl64.l_start = tswapl(target_fl64->l_start);
3813 fl64.l_len = tswapl(target_fl64->l_len);
3814 fl64.l_pid = tswap32(target_fl64->l_pid);
3815 unlock_user_struct(target_fl64, arg, 0);
3816 ret = get_errno(fcntl(fd, host_cmd, &fl64));
3818 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
3819 return -TARGET_EFAULT;
3820 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
3821 target_fl64->l_whence = tswap16(fl64.l_whence);
3822 target_fl64->l_start = tswapl(fl64.l_start);
3823 target_fl64->l_len = tswapl(fl64.l_len);
3824 target_fl64->l_pid = tswap32(fl64.l_pid);
3825 unlock_user_struct(target_fl64, arg, 1);
3828 case TARGET_F_SETLK64:
3829 case TARGET_F_SETLKW64:
3830 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
3831 return -TARGET_EFAULT;
3832 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
3833 fl64.l_whence = tswap16(target_fl64->l_whence);
3834 fl64.l_start = tswapl(target_fl64->l_start);
3835 fl64.l_len = tswapl(target_fl64->l_len);
3836 fl64.l_pid = tswap32(target_fl64->l_pid);
3837 unlock_user_struct(target_fl64, arg, 0);
3838 ret = get_errno(fcntl(fd, host_cmd, &fl64));
3841 case TARGET_F_GETFL:
/* Translate host O_* status flags back to guest values. */
3842 ret = get_errno(fcntl(fd, host_cmd, arg));
3844 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
3848 case TARGET_F_SETFL:
3849 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
3852 case TARGET_F_SETOWN:
3853 case TARGET_F_GETOWN:
3854 case TARGET_F_SETSIG:
3855 case TARGET_F_GETSIG:
3856 case TARGET_F_SETLEASE:
3857 case TARGET_F_GETLEASE:
/* Plain integer argument: pass straight through. */
3858 ret = get_errno(fcntl(fd, host_cmd, arg));
/* Unknown command: try the untranslated cmd as a last resort. */
3862 ret = get_errno(fcntl(fd, cmd, arg));
/* USE_UID16 helpers (bodies largely elided in this chunk): convert
 * between the target's 16-bit uid/gid ABI and the host's wider ids.
 * NOTE(review): the (int16_t)x == -1 checks below suggest the 16-bit
 * "no change"/overflow sentinel -1 is widened to a full-width -1
 * rather than 0xffff -- confirm against the elided bodies. */
3870 static inline int high2lowuid(int uid)
3878 static inline int high2lowgid(int gid)
3886 static inline int low2highuid(int uid)
/* Preserve the -1 sentinel across the 16->32 bit widening. */
3888 if ((int16_t)uid == -1)
3894 static inline int low2highgid(int gid)
3896 if ((int16_t)gid == -1)
3902 #endif /* USE_UID16 */
/* One-time initialization of the syscall emulation layer.  Visible
 * responsibilities in this (elided) body:
 *   - register every target struct layout from syscall_types.h with
 *     the thunk marshalling code via the STRUCT()/STRUCT_SPECIAL()
 *     expansions;
 *   - patch ioctl numbers whose size field is all ones, replacing it
 *     with the thunk-computed size of the pointed-to argument;
 *   - build target_to_host_errno_table[] as the inverse of
 *     host_to_target_errno_table[];
 *   - on same-arch builds, warn when a target ioctl number does not
 *     match the host one.
 * NOTE(review): source lines are non-contiguous here; loop setup and
 * closing braces are elided. */
3904 void syscall_init(void)
3907     const argtype *arg_type;
/* Registration macros consumed by the syscall_types.h include below. */
3911 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
3912 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
3913 #include "syscall_types.h"
3915 #undef STRUCT_SPECIAL
3917 /* we patch the ioctl size if necessary. We rely on the fact that
3918 no ioctl has all the bits at '1' in the size field */
3920 while (ie->target_cmd != 0) {
3921 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
3922 TARGET_IOC_SIZEMASK) {
/* All-ones size field marks an entry whose real size must be derived
   from its argument type; that type has to be a pointer. */
3923 arg_type = ie->arg_type;
3924 if (arg_type[0] != TYPE_PTR) {
3925 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
/* Splice the thunk-computed size into the ioctl number. */
3930 size = thunk_type_size(arg_type, 0);
3931 ie->target_cmd = (ie->target_cmd &
3932 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
3933 (size << TARGET_IOC_SIZESHIFT);
3936 /* Build target_to_host_errno_table[] table from
3937 * host_to_target_errno_table[]. */
3938 for (i=0; i < ERRNO_TABLE_SIZE; i++)
3939 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
3941 /* automatic consistency check if same arch */
3942 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
3943 (defined(__x86_64__) && defined(TARGET_X86_64))
3944 if (unlikely(ie->target_cmd != ie->host_cmd)) {
3945 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
3946 ie->name, ie->target_cmd, ie->host_cmd);
/* Assemble a 64-bit file offset from the two ABI words a 32-bit
 * guest splits it into; which word is the high half follows target
 * endianness.  The 64-bit-ABI variant (body elided here) receives
 * the offset whole. */
3953 #if TARGET_ABI_BITS == 32
3954 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
3956 #ifdef TARGET_WORDS_BIGENDIAN
3957 return ((uint64_t)word0 << 32) | word1;
3959 return ((uint64_t)word1 << 32) | word0;
3962 #else /* TARGET_ABI_BITS == 32 */
3963 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
3967 #endif /* TARGET_ABI_BITS != 32 */
/* TARGET_NR_truncate64 helper: rebuild the 64-bit length from the
 * two syscall argument words and call the host truncate64().
 * NOTE(review): the ARM EABI check presumably shifts the argument
 * registers because the 64-bit pair is register-aligned on EABI --
 * the shifting code itself is elided here; confirm. */
3969 #ifdef TARGET_NR_truncate64
3970 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
3976 if (((CPUARMState *)cpu_env)->eabi)
3982 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
/* TARGET_NR_ftruncate64 helper: same word-pair reassembly as
 * target_truncate64() above, but operating on an fd via the host
 * ftruncate64().  NOTE(review): the ARM EABI branch presumably
 * shifts the argument registers (code elided); confirm. */
3986 #ifdef TARGET_NR_ftruncate64
3987 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
3993 if (((CPUARMState *)cpu_env)->eabi)
3999 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
/* Copy a guest struct timespec at target_addr into *host_ts,
 * byte-swapping each field for the target's endianness.  Returns
 * -TARGET_EFAULT when the guest page cannot be locked for reading
 * (the success-path return is elided in this chunk). */
4003 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4004 abi_ulong target_addr)
4006 struct target_timespec *target_ts;
4008 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4009 return -TARGET_EFAULT;
4010 host_ts->tv_sec = tswapl(target_ts->tv_sec);
4011 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
/* Third argument 0: read-only access, nothing to copy back. */
4012 unlock_user_struct(target_ts, target_addr, 0);
/* Mirror of target_to_host_timespec(): write *host_ts out to the
 * guest struct timespec at target_addr, byte-swapping each field.
 * Returns -TARGET_EFAULT when the guest page cannot be locked for
 * writing (the success-path return is elided in this chunk). */
4016 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4017 struct timespec *host_ts)
4019 struct target_timespec *target_ts;
4021 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4022 return -TARGET_EFAULT;
4023 target_ts->tv_sec = tswapl(host_ts->tv_sec);
4024 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
/* Third argument 1: commit the modified struct back to guest memory. */
4025 unlock_user_struct(target_ts, target_addr, 1);
/* Write a host struct stat out to the guest in its stat64 layout.
 * Two layouts are handled: ARM EABI guests use target_eabi_stat64
 * (different alignment/padding), everything else uses target_stat
 * (64-bit ABIs other than Alpha) or target_stat64.  The guest buffer
 * is zeroed first so padding and unset fields read as 0.  Targets
 * with TARGET_STAT64_HAS_BROKEN_ST_INO additionally carry the inode
 * in a separate __st_ino field.  Returns -TARGET_EFAULT when the
 * guest buffer cannot be locked; other control flow (the branch
 * joins and final return) is elided in this chunk. */
4029 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4030 static inline abi_long host_to_target_stat64(void *cpu_env,
4031 abi_ulong target_addr,
4032 struct stat *host_st)
4035 if (((CPUARMState *)cpu_env)->eabi) {
4036 struct target_eabi_stat64 *target_st;
4038 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4039 return -TARGET_EFAULT;
/* Zero the whole record so padding bytes are deterministic. */
4040 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4041 __put_user(host_st->st_dev, &target_st->st_dev);
4042 __put_user(host_st->st_ino, &target_st->st_ino);
4043 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
/* Duplicate the inode into the target's legacy __st_ino slot. */
4044 __put_user(host_st->st_ino, &target_st->__st_ino);
4046 __put_user(host_st->st_mode, &target_st->st_mode);
4047 __put_user(host_st->st_nlink, &target_st->st_nlink);
4048 __put_user(host_st->st_uid, &target_st->st_uid);
4049 __put_user(host_st->st_gid, &target_st->st_gid);
4050 __put_user(host_st->st_rdev, &target_st->st_rdev);
4051 __put_user(host_st->st_size, &target_st->st_size);
4052 __put_user(host_st->st_blksize, &target_st->st_blksize);
4053 __put_user(host_st->st_blocks, &target_st->st_blocks);
4054 __put_user(host_st->st_atime, &target_st->target_st_atime);
4055 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4056 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4057 unlock_user_struct(target_st, target_addr, 1);
/* Non-EABI path: pick the layout matching the target ABI width. */
4061 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4062 struct target_stat *target_st;
4064 struct target_stat64 *target_st;
4067 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4068 return -TARGET_EFAULT;
4069 memset(target_st, 0, sizeof(*target_st));
4070 __put_user(host_st->st_dev, &target_st->st_dev);
4071 __put_user(host_st->st_ino, &target_st->st_ino);
4072 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4073 __put_user(host_st->st_ino, &target_st->__st_ino);
4075 __put_user(host_st->st_mode, &target_st->st_mode);
4076 __put_user(host_st->st_nlink, &target_st->st_nlink);
4077 __put_user(host_st->st_uid, &target_st->st_uid);
4078 __put_user(host_st->st_gid, &target_st->st_gid);
4079 __put_user(host_st->st_rdev, &target_st->st_rdev);
4080 /* XXX: better use of kernel struct */
4081 __put_user(host_st->st_size, &target_st->st_size);
4082 __put_user(host_st->st_blksize, &target_st->st_blksize);
4083 __put_user(host_st->st_blocks, &target_st->st_blocks);
4084 __put_user(host_st->st_atime, &target_st->target_st_atime);
4085 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4086 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4087 unlock_user_struct(target_st, target_addr, 1);
/* Emulate the futex(2) syscall by forwarding to the host's futex via
 * sys_futex() on the host address g2h(uaddr).  The op is dispatched
 * on base_op (op with FUTEX_CMD_MASK applied, when available) --
 * some intermediate cases/branches are elided in this chunk:
 *   - ops that take a timeout convert the guest timespec first;
 *   - the WAIT path byte-swaps val (tswap32) because the kernel
 *     compares it against guest memory;
 *   - REQUEUE/CMP_REQUEUE(/WAKE_OP) reinterpret the timeout slot as
 *     an integer val2 (see the inline comment below);
 *   - unknown ops return -TARGET_ENOSYS. */
4094 #if defined(CONFIG_USE_NPTL)
4095 /* ??? Using host futex calls even when target atomic operations
4096 are not really atomic probably breaks things. However implementing
4097 futexes locally would make futexes shared between multiple processes
4098 tricky. However they're probably useless because guest atomic
4099 operations won't work either. */
4100 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4101 target_ulong uaddr2, int val3)
4103 struct timespec ts, *pts;
4106 /* ??? We assume FUTEX_* constants are the same on both host
4108 #ifdef FUTEX_CMD_MASK
4109 base_op = op & FUTEX_CMD_MASK;
/* Guest timespec -> host timespec for timed waits. */
4117 target_to_host_timespec(pts, timeout);
4121 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4124 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4126 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4128 case FUTEX_CMP_REQUEUE:
4130 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4131 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4132 But the prototype takes a `struct timespec *'; insert casts
4133 to satisfy the compiler. We do not need to tswap TIMEOUT
4134 since it's not compared to guest memory. */
4135 pts = (struct timespec *)(uintptr_t) timeout;
4136 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4138 (base_op == FUTEX_CMP_REQUEUE
4142 return -TARGET_ENOSYS;
4147 /* Map host to target signal numbers for the wait family of syscalls.
4148 Assume all other status bits are the same. */
/* Translate only the signal-number portion of a wait status:
 * - terminated-by-signal: signal lives in the low 7 bits;
 * - stopped: signal lives in bits 8..15.
 * (The fall-through return for other statuses is elided here.) */
4149 static int host_to_target_waitstatus(int status)
4151 if (WIFSIGNALED(status)) {
4152 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4154 if (WIFSTOPPED(status)) {
4155 return (host_to_target_signal(WSTOPSIG(status)) << 8)
/* Return the emulated kernel version packed one byte per component
 * (the tmp = (tmp << 8) + n accumulation over three iterations,
 * i.e. major.minor.patch -> 0x00MMmmpp).  A user-supplied release
 * string (qemu_uname_release) takes precedence over the real
 * sys_uname() release.  The result is held in a function-static;
 * NOTE(review): the early-return cache check and the final
 * store/return are elided in this chunk -- confirm there. */
4161 int get_osversion(void)
4163 static int osversion;
4164 struct new_utsname buf;
4169 if (qemu_uname_release && *qemu_uname_release) {
4170 s = qemu_uname_release;
4172 if (sys_uname(&buf))
/* Parse up to three dot-separated decimal components. */
4177 for (i = 0; i < 3; i++) {
4179 while (*s >= '0' && *s <= '9') {
/* Shift the previous components up and append this one. */
4184 tmp = (tmp << 8) + n;
4192 /* do_syscall() should always have a single exit point at the end so
4193 that actions, such as logging of syscall results, can be performed.
4194 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4195 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
4196 abi_long arg2, abi_long arg3, abi_long arg4,
4197 abi_long arg5, abi_long arg6)
4205 gemu_log("syscall %d", num);
4208 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
4211 case TARGET_NR_exit:
4212 #ifdef CONFIG_USE_NPTL
4213 /* In old applications this may be used to implement _exit(2).
4214 However in threaded applictions it is used for thread termination,
4215 and _exit_group is used for application termination.
4216 Do thread termination if we have more then one thread. */
4217 /* FIXME: This probably breaks if a signal arrives. We should probably
4218 be disabling signals. */
4219 if (first_cpu->next_cpu) {
4227 while (p && p != (CPUState *)cpu_env) {
4228 lastp = &p->next_cpu;
4231 /* If we didn't find the CPU for this thread then something is
4235 /* Remove the CPU from the list. */
4236 *lastp = p->next_cpu;
4238 ts = ((CPUState *)cpu_env)->opaque;
4239 if (ts->child_tidptr) {
4240 put_user_u32(0, ts->child_tidptr);
4241 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
4253 gdb_exit(cpu_env, arg1);
4255 ret = 0; /* avoid warning */
4257 case TARGET_NR_read:
4261 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
4263 ret = get_errno(read(arg1, p, arg3));
4264 unlock_user(p, arg2, ret);
4267 case TARGET_NR_write:
4268 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
4270 ret = get_errno(write(arg1, p, arg3));
4271 unlock_user(p, arg2, 0);
4273 case TARGET_NR_open:
4274 if (!(p = lock_user_string(arg1)))
4276 ret = get_errno(open(path(p),
4277 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4279 unlock_user(p, arg1, 0);
4281 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4282 case TARGET_NR_openat:
4283 if (!(p = lock_user_string(arg2)))
4285 ret = get_errno(sys_openat(arg1,
4287 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4289 unlock_user(p, arg2, 0);
4292 case TARGET_NR_close:
4293 ret = get_errno(close(arg1));
4298 case TARGET_NR_fork:
4299 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4301 #ifdef TARGET_NR_waitpid
4302 case TARGET_NR_waitpid:
4305 ret = get_errno(waitpid(arg1, &status, arg3));
4306 if (!is_error(ret) && arg2
4307 && put_user_s32(host_to_target_waitstatus(status), arg2))
4312 #ifdef TARGET_NR_waitid
4313 case TARGET_NR_waitid:
4317 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4318 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4319 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4321 host_to_target_siginfo(p, &info);
4322 unlock_user(p, arg3, sizeof(target_siginfo_t));
4327 #ifdef TARGET_NR_creat /* not on alpha */
4328 case TARGET_NR_creat:
4329 if (!(p = lock_user_string(arg1)))
4331 ret = get_errno(creat(p, arg2));
4332 unlock_user(p, arg1, 0);
4335 case TARGET_NR_link:
4338 p = lock_user_string(arg1);
4339 p2 = lock_user_string(arg2);
4341 ret = -TARGET_EFAULT;
4343 ret = get_errno(link(p, p2));
4344 unlock_user(p2, arg2, 0);
4345 unlock_user(p, arg1, 0);
4348 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4349 case TARGET_NR_linkat:
4354 p = lock_user_string(arg2);
4355 p2 = lock_user_string(arg4);
4357 ret = -TARGET_EFAULT;
4359 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4360 unlock_user(p, arg2, 0);
4361 unlock_user(p2, arg4, 0);
4365 case TARGET_NR_unlink:
4366 if (!(p = lock_user_string(arg1)))
4368 ret = get_errno(unlink(p));
4369 unlock_user(p, arg1, 0);
4371 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4372 case TARGET_NR_unlinkat:
4373 if (!(p = lock_user_string(arg2)))
4375 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4376 unlock_user(p, arg2, 0);
4379 case TARGET_NR_execve:
4381 char **argp, **envp;
4384 abi_ulong guest_argp;
4385 abi_ulong guest_envp;
4391 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4392 if (get_user_ual(addr, gp))
4400 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4401 if (get_user_ual(addr, gp))
4408 argp = alloca((argc + 1) * sizeof(void *));
4409 envp = alloca((envc + 1) * sizeof(void *));
4411 for (gp = guest_argp, q = argp; gp;
4412 gp += sizeof(abi_ulong), q++) {
4413 if (get_user_ual(addr, gp))
4417 if (!(*q = lock_user_string(addr)))
4422 for (gp = guest_envp, q = envp; gp;
4423 gp += sizeof(abi_ulong), q++) {
4424 if (get_user_ual(addr, gp))
4428 if (!(*q = lock_user_string(addr)))
4433 if (!(p = lock_user_string(arg1)))
4435 ret = get_errno(execve(p, argp, envp));
4436 unlock_user(p, arg1, 0);
4441 ret = -TARGET_EFAULT;
4444 for (gp = guest_argp, q = argp; *q;
4445 gp += sizeof(abi_ulong), q++) {
4446 if (get_user_ual(addr, gp)
4449 unlock_user(*q, addr, 0);
4451 for (gp = guest_envp, q = envp; *q;
4452 gp += sizeof(abi_ulong), q++) {
4453 if (get_user_ual(addr, gp)
4456 unlock_user(*q, addr, 0);
4460 case TARGET_NR_chdir:
4461 if (!(p = lock_user_string(arg1)))
4463 ret = get_errno(chdir(p));
4464 unlock_user(p, arg1, 0);
4466 #ifdef TARGET_NR_time
4467 case TARGET_NR_time:
4470 ret = get_errno(time(&host_time));
4473 && put_user_sal(host_time, arg1))
4478 case TARGET_NR_mknod:
4479 if (!(p = lock_user_string(arg1)))
4481 ret = get_errno(mknod(p, arg2, arg3));
4482 unlock_user(p, arg1, 0);
4484 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4485 case TARGET_NR_mknodat:
4486 if (!(p = lock_user_string(arg2)))
4488 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
4489 unlock_user(p, arg2, 0);
4492 case TARGET_NR_chmod:
4493 if (!(p = lock_user_string(arg1)))
4495 ret = get_errno(chmod(p, arg2));
4496 unlock_user(p, arg1, 0);
4498 #ifdef TARGET_NR_break
4499 case TARGET_NR_break:
4502 #ifdef TARGET_NR_oldstat
4503 case TARGET_NR_oldstat:
4506 case TARGET_NR_lseek:
4507 ret = get_errno(lseek(arg1, arg2, arg3));
4509 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
4510 /* Alpha specific */
4511 case TARGET_NR_getxpid:
4512 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
4513 ret = get_errno(getpid());
4516 #ifdef TARGET_NR_getpid
4517 case TARGET_NR_getpid:
4518 ret = get_errno(getpid());
4521 case TARGET_NR_mount:
4523 /* need to look at the data field */
4525 p = lock_user_string(arg1);
4526 p2 = lock_user_string(arg2);
4527 p3 = lock_user_string(arg3);
4528 if (!p || !p2 || !p3)
4529 ret = -TARGET_EFAULT;
4531 /* FIXME - arg5 should be locked, but it isn't clear how to
4532 * do that since it's not guaranteed to be a NULL-terminated
4536 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
4538 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
4540 unlock_user(p, arg1, 0);
4541 unlock_user(p2, arg2, 0);
4542 unlock_user(p3, arg3, 0);
4545 #ifdef TARGET_NR_umount
4546 case TARGET_NR_umount:
4547 if (!(p = lock_user_string(arg1)))
4549 ret = get_errno(umount(p));
4550 unlock_user(p, arg1, 0);
4553 #ifdef TARGET_NR_stime /* not on alpha */
4554 case TARGET_NR_stime:
4557 if (get_user_sal(host_time, arg1))
4559 ret = get_errno(stime(&host_time));
4563 case TARGET_NR_ptrace:
4565 #ifdef TARGET_NR_alarm /* not on alpha */
4566 case TARGET_NR_alarm:
4570 #ifdef TARGET_NR_oldfstat
4571 case TARGET_NR_oldfstat:
4574 #ifdef TARGET_NR_pause /* not on alpha */
4575 case TARGET_NR_pause:
4576 ret = get_errno(pause());
4579 #ifdef TARGET_NR_utime
4580 case TARGET_NR_utime:
4582 struct utimbuf tbuf, *host_tbuf;
4583 struct target_utimbuf *target_tbuf;
4585 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
4587 tbuf.actime = tswapl(target_tbuf->actime);
4588 tbuf.modtime = tswapl(target_tbuf->modtime);
4589 unlock_user_struct(target_tbuf, arg2, 0);
4594 if (!(p = lock_user_string(arg1)))
4596 ret = get_errno(utime(p, host_tbuf));
4597 unlock_user(p, arg1, 0);
4601 case TARGET_NR_utimes:
4603 struct timeval *tvp, tv[2];
4605 if (copy_from_user_timeval(&tv[0], arg2)
4606 || copy_from_user_timeval(&tv[1],
4607 arg2 + sizeof(struct target_timeval)))
4613 if (!(p = lock_user_string(arg1)))
4615 ret = get_errno(utimes(p, tvp));
4616 unlock_user(p, arg1, 0);
4619 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4620 case TARGET_NR_futimesat:
4622 struct timeval *tvp, tv[2];
4624 if (copy_from_user_timeval(&tv[0], arg3)
4625 || copy_from_user_timeval(&tv[1],
4626 arg3 + sizeof(struct target_timeval)))
4632 if (!(p = lock_user_string(arg2)))
4634 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
4635 unlock_user(p, arg2, 0);
4639 #ifdef TARGET_NR_stty
4640 case TARGET_NR_stty:
4643 #ifdef TARGET_NR_gtty
4644 case TARGET_NR_gtty:
4647 case TARGET_NR_access:
4648 if (!(p = lock_user_string(arg1)))
4650 ret = get_errno(access(path(p), arg2));
4651 unlock_user(p, arg1, 0);
4653 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4654 case TARGET_NR_faccessat:
4655 if (!(p = lock_user_string(arg2)))
4657 ret = get_errno(sys_faccessat(arg1, p, arg3));
4658 unlock_user(p, arg2, 0);
4661 #ifdef TARGET_NR_nice /* not on alpha */
4662 case TARGET_NR_nice:
4663 ret = get_errno(nice(arg1));
4666 #ifdef TARGET_NR_ftime
4667 case TARGET_NR_ftime:
4670 case TARGET_NR_sync:
4674 case TARGET_NR_kill:
4675 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
4677 case TARGET_NR_rename:
4680 p = lock_user_string(arg1);
4681 p2 = lock_user_string(arg2);
4683 ret = -TARGET_EFAULT;
4685 ret = get_errno(rename(p, p2));
4686 unlock_user(p2, arg2, 0);
4687 unlock_user(p, arg1, 0);
4690 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
4691 case TARGET_NR_renameat:
4694 p = lock_user_string(arg2);
4695 p2 = lock_user_string(arg4);
4697 ret = -TARGET_EFAULT;
4699 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
4700 unlock_user(p2, arg4, 0);
4701 unlock_user(p, arg2, 0);
4705 case TARGET_NR_mkdir:
4706 if (!(p = lock_user_string(arg1)))
4708 ret = get_errno(mkdir(p, arg2));
4709 unlock_user(p, arg1, 0);
4711 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
4712 case TARGET_NR_mkdirat:
4713 if (!(p = lock_user_string(arg2)))
4715 ret = get_errno(sys_mkdirat(arg1, p, arg3));
4716 unlock_user(p, arg2, 0);
4719 case TARGET_NR_rmdir:
4720 if (!(p = lock_user_string(arg1)))
4722 ret = get_errno(rmdir(p));
4723 unlock_user(p, arg1, 0);
4726 ret = get_errno(dup(arg1));
4728 case TARGET_NR_pipe:
4729 ret = do_pipe(cpu_env, arg1, 0, 0);
4731 #ifdef TARGET_NR_pipe2
4732 case TARGET_NR_pipe2:
4733 ret = do_pipe(cpu_env, arg1, arg2, 1);
4736 case TARGET_NR_times:
4738 struct target_tms *tmsp;
4740 ret = get_errno(times(&tms));
4742 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
4745 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
4746 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
4747 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
4748 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
4751 ret = host_to_target_clock_t(ret);
4754 #ifdef TARGET_NR_prof
4755 case TARGET_NR_prof:
4758 #ifdef TARGET_NR_signal
4759 case TARGET_NR_signal:
4762 case TARGET_NR_acct:
4764 ret = get_errno(acct(NULL));
4766 if (!(p = lock_user_string(arg1)))
4768 ret = get_errno(acct(path(p)));
4769 unlock_user(p, arg1, 0);
4772 #ifdef TARGET_NR_umount2 /* not on alpha */
4773 case TARGET_NR_umount2:
4774 if (!(p = lock_user_string(arg1)))
4776 ret = get_errno(umount2(p, arg2));
4777 unlock_user(p, arg1, 0);
4780 #ifdef TARGET_NR_lock
4781 case TARGET_NR_lock:
4784 case TARGET_NR_ioctl:
4785 ret = do_ioctl(arg1, arg2, arg3);
4787 case TARGET_NR_fcntl:
4788 ret = do_fcntl(arg1, arg2, arg3);
4790 #ifdef TARGET_NR_mpx
4794 case TARGET_NR_setpgid:
4795 ret = get_errno(setpgid(arg1, arg2));
4797 #ifdef TARGET_NR_ulimit
4798 case TARGET_NR_ulimit:
4801 #ifdef TARGET_NR_oldolduname
4802 case TARGET_NR_oldolduname:
4805 case TARGET_NR_umask:
4806 ret = get_errno(umask(arg1));
4808 case TARGET_NR_chroot:
4809 if (!(p = lock_user_string(arg1)))
4811 ret = get_errno(chroot(p));
4812 unlock_user(p, arg1, 0);
4814 case TARGET_NR_ustat:
4816 case TARGET_NR_dup2:
4817 ret = get_errno(dup2(arg1, arg2));
4819 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
4820 case TARGET_NR_dup3:
4821 ret = get_errno(dup3(arg1, arg2, arg3));
4824 #ifdef TARGET_NR_getppid /* not on alpha */
4825 case TARGET_NR_getppid:
4826 ret = get_errno(getppid());
4829 case TARGET_NR_getpgrp:
4830 ret = get_errno(getpgrp());
4832 case TARGET_NR_setsid:
4833 ret = get_errno(setsid());
4835 #ifdef TARGET_NR_sigaction
4836 case TARGET_NR_sigaction:
4838 #if defined(TARGET_ALPHA)
4839 struct target_sigaction act, oact, *pact = 0;
4840 struct target_old_sigaction *old_act;
4842 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4844 act._sa_handler = old_act->_sa_handler;
4845 target_siginitset(&act.sa_mask, old_act->sa_mask);
4846 act.sa_flags = old_act->sa_flags;
4847 act.sa_restorer = 0;
4848 unlock_user_struct(old_act, arg2, 0);
4851 ret = get_errno(do_sigaction(arg1, pact, &oact));
4852 if (!is_error(ret) && arg3) {
4853 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4855 old_act->_sa_handler = oact._sa_handler;
4856 old_act->sa_mask = oact.sa_mask.sig[0];
4857 old_act->sa_flags = oact.sa_flags;
4858 unlock_user_struct(old_act, arg3, 1);
4860 #elif defined(TARGET_MIPS)
4861 struct target_sigaction act, oact, *pact, *old_act;
4864 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4866 act._sa_handler = old_act->_sa_handler;
4867 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
4868 act.sa_flags = old_act->sa_flags;
4869 unlock_user_struct(old_act, arg2, 0);
4875 ret = get_errno(do_sigaction(arg1, pact, &oact));
4877 if (!is_error(ret) && arg3) {
4878 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4880 old_act->_sa_handler = oact._sa_handler;
4881 old_act->sa_flags = oact.sa_flags;
4882 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
4883 old_act->sa_mask.sig[1] = 0;
4884 old_act->sa_mask.sig[2] = 0;
4885 old_act->sa_mask.sig[3] = 0;
4886 unlock_user_struct(old_act, arg3, 1);
4889 struct target_old_sigaction *old_act;
4890 struct target_sigaction act, oact, *pact;
4892 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
4894 act._sa_handler = old_act->_sa_handler;
4895 target_siginitset(&act.sa_mask, old_act->sa_mask);
4896 act.sa_flags = old_act->sa_flags;
4897 act.sa_restorer = old_act->sa_restorer;
4898 unlock_user_struct(old_act, arg2, 0);
4903 ret = get_errno(do_sigaction(arg1, pact, &oact));
4904 if (!is_error(ret) && arg3) {
4905 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
4907 old_act->_sa_handler = oact._sa_handler;
4908 old_act->sa_mask = oact.sa_mask.sig[0];
4909 old_act->sa_flags = oact.sa_flags;
4910 old_act->sa_restorer = oact.sa_restorer;
4911 unlock_user_struct(old_act, arg3, 1);
4917 case TARGET_NR_rt_sigaction:
4919 #if defined(TARGET_ALPHA)
4920 struct target_sigaction act, oact, *pact = 0;
4921 struct target_rt_sigaction *rt_act;
4922 /* ??? arg4 == sizeof(sigset_t). */
4924 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
4926 act._sa_handler = rt_act->_sa_handler;
4927 act.sa_mask = rt_act->sa_mask;
4928 act.sa_flags = rt_act->sa_flags;
4929 act.sa_restorer = arg5;
4930 unlock_user_struct(rt_act, arg2, 0);
4933 ret = get_errno(do_sigaction(arg1, pact, &oact));
4934 if (!is_error(ret) && arg3) {
4935 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
4937 rt_act->_sa_handler = oact._sa_handler;
4938 rt_act->sa_mask = oact.sa_mask;
4939 rt_act->sa_flags = oact.sa_flags;
4940 unlock_user_struct(rt_act, arg3, 1);
4943 struct target_sigaction *act;
4944 struct target_sigaction *oact;
4947 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
4952 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
4953 ret = -TARGET_EFAULT;
4954 goto rt_sigaction_fail;
4958 ret = get_errno(do_sigaction(arg1, act, oact));
4961 unlock_user_struct(act, arg2, 0);
4963 unlock_user_struct(oact, arg3, 1);
4967 #ifdef TARGET_NR_sgetmask /* not on alpha */
4968 case TARGET_NR_sgetmask:
4971 abi_ulong target_set;
4972 sigprocmask(0, NULL, &cur_set);
4973 host_to_target_old_sigset(&target_set, &cur_set);
4978 #ifdef TARGET_NR_ssetmask /* not on alpha */
4979 case TARGET_NR_ssetmask:
4981 sigset_t set, oset, cur_set;
4982 abi_ulong target_set = arg1;
4983 sigprocmask(0, NULL, &cur_set);
4984 target_to_host_old_sigset(&set, &target_set);
4985 sigorset(&set, &set, &cur_set);
4986 sigprocmask(SIG_SETMASK, &set, &oset);
4987 host_to_target_old_sigset(&target_set, &oset);
4992 #ifdef TARGET_NR_sigprocmask
4993 case TARGET_NR_sigprocmask:
4995 #if defined(TARGET_ALPHA)
4996 sigset_t set, oldset;
5001 case TARGET_SIG_BLOCK:
5004 case TARGET_SIG_UNBLOCK:
5007 case TARGET_SIG_SETMASK:
5011 ret = -TARGET_EINVAL;
5015 target_to_host_old_sigset(&set, &mask);
5017 ret = get_errno(sigprocmask(how, &set, &oldset));
5019 if (!is_error(ret)) {
5020 host_to_target_old_sigset(&mask, &oldset);
5022 ((CPUAlphaState *)cpu_env)->[IR_V0] = 0; /* force no error */
5025 sigset_t set, oldset, *set_ptr;
5030 case TARGET_SIG_BLOCK:
5033 case TARGET_SIG_UNBLOCK:
5036 case TARGET_SIG_SETMASK:
5040 ret = -TARGET_EINVAL;
5043 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5045 target_to_host_old_sigset(&set, p);
5046 unlock_user(p, arg2, 0);
5052 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5053 if (!is_error(ret) && arg3) {
5054 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5056 host_to_target_old_sigset(p, &oldset);
5057 unlock_user(p, arg3, sizeof(target_sigset_t));
5063 case TARGET_NR_rt_sigprocmask:
5066 sigset_t set, oldset, *set_ptr;
5070 case TARGET_SIG_BLOCK:
5073 case TARGET_SIG_UNBLOCK:
5076 case TARGET_SIG_SETMASK:
5080 ret = -TARGET_EINVAL;
5083 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5085 target_to_host_sigset(&set, p);
5086 unlock_user(p, arg2, 0);
5092 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5093 if (!is_error(ret) && arg3) {
5094 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5096 host_to_target_sigset(p, &oldset);
5097 unlock_user(p, arg3, sizeof(target_sigset_t));
5101 #ifdef TARGET_NR_sigpending
5102 case TARGET_NR_sigpending:
5105 ret = get_errno(sigpending(&set));
5106 if (!is_error(ret)) {
5107 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5109 host_to_target_old_sigset(p, &set);
5110 unlock_user(p, arg1, sizeof(target_sigset_t));
5115 case TARGET_NR_rt_sigpending:
5118 ret = get_errno(sigpending(&set));
5119 if (!is_error(ret)) {
5120 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5122 host_to_target_sigset(p, &set);
5123 unlock_user(p, arg1, sizeof(target_sigset_t));
5127 #ifdef TARGET_NR_sigsuspend
5128 case TARGET_NR_sigsuspend:
5131 #if defined(TARGET_ALPHA)
5132 abi_ulong mask = arg1;
5133 target_to_host_old_sigset(&set, &mask);
5135 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5137 target_to_host_old_sigset(&set, p);
5138 unlock_user(p, arg1, 0);
5140 ret = get_errno(sigsuspend(&set));
5144 case TARGET_NR_rt_sigsuspend:
5147 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5149 target_to_host_sigset(&set, p);
5150 unlock_user(p, arg1, 0);
5151 ret = get_errno(sigsuspend(&set));
5154 case TARGET_NR_rt_sigtimedwait:
5157 struct timespec uts, *puts;
5160 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5162 target_to_host_sigset(&set, p);
5163 unlock_user(p, arg1, 0);
5166 target_to_host_timespec(puts, arg3);
5170 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
5171 if (!is_error(ret) && arg2) {
5172 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
5174 host_to_target_siginfo(p, &uinfo);
5175 unlock_user(p, arg2, sizeof(target_siginfo_t));
5179 case TARGET_NR_rt_sigqueueinfo:
5182 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
5184 target_to_host_siginfo(&uinfo, p);
5185 unlock_user(p, arg1, 0);
5186 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
5189 #ifdef TARGET_NR_sigreturn
5190 case TARGET_NR_sigreturn:
5191 /* NOTE: ret is eax, so not transcoding must be done */
5192 ret = do_sigreturn(cpu_env);
5195 case TARGET_NR_rt_sigreturn:
5196 /* NOTE: ret is eax, so not transcoding must be done */
5197 ret = do_rt_sigreturn(cpu_env);
5199 case TARGET_NR_sethostname:
5200 if (!(p = lock_user_string(arg1)))
5202 ret = get_errno(sethostname(p, arg2));
5203 unlock_user(p, arg1, 0);
5205 case TARGET_NR_setrlimit:
5207 int resource = arg1;
5208 struct target_rlimit *target_rlim;
5210 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
5212 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
5213 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
5214 unlock_user_struct(target_rlim, arg2, 0);
5215 ret = get_errno(setrlimit(resource, &rlim));
5218 case TARGET_NR_getrlimit:
5220 int resource = arg1;
5221 struct target_rlimit *target_rlim;
5224 ret = get_errno(getrlimit(resource, &rlim));
5225 if (!is_error(ret)) {
5226 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5228 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
5229 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
5230 unlock_user_struct(target_rlim, arg2, 1);
5234 case TARGET_NR_getrusage:
5236 struct rusage rusage;
5237 ret = get_errno(getrusage(arg1, &rusage));
5238 if (!is_error(ret)) {
5239 host_to_target_rusage(arg2, &rusage);
5243 case TARGET_NR_gettimeofday:
5246 ret = get_errno(gettimeofday(&tv, NULL));
5247 if (!is_error(ret)) {
5248 if (copy_to_user_timeval(arg1, &tv))
5253 case TARGET_NR_settimeofday:
5256 if (copy_from_user_timeval(&tv, arg1))
5258 ret = get_errno(settimeofday(&tv, NULL));
5261 #ifdef TARGET_NR_select
5262 case TARGET_NR_select:
5264 struct target_sel_arg_struct *sel;
5265 abi_ulong inp, outp, exp, tvp;
5268 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
5270 nsel = tswapl(sel->n);
5271 inp = tswapl(sel->inp);
5272 outp = tswapl(sel->outp);
5273 exp = tswapl(sel->exp);
5274 tvp = tswapl(sel->tvp);
5275 unlock_user_struct(sel, arg1, 0);
5276 ret = do_select(nsel, inp, outp, exp, tvp);
5280 #ifdef TARGET_NR_pselect6
5281 case TARGET_NR_pselect6:
5282 goto unimplemented_nowarn;
5284 case TARGET_NR_symlink:
5287 p = lock_user_string(arg1);
5288 p2 = lock_user_string(arg2);
5290 ret = -TARGET_EFAULT;
5292 ret = get_errno(symlink(p, p2));
5293 unlock_user(p2, arg2, 0);
5294 unlock_user(p, arg1, 0);
5297 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5298 case TARGET_NR_symlinkat:
5301 p = lock_user_string(arg1);
5302 p2 = lock_user_string(arg3);
5304 ret = -TARGET_EFAULT;
5306 ret = get_errno(sys_symlinkat(p, arg2, p2));
5307 unlock_user(p2, arg3, 0);
5308 unlock_user(p, arg1, 0);
5312 #ifdef TARGET_NR_oldlstat
5313 case TARGET_NR_oldlstat:
5316 case TARGET_NR_readlink:
5319 p = lock_user_string(arg1);
5320 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
5322 ret = -TARGET_EFAULT;
5324 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
5325 char real[PATH_MAX];
5326 temp = realpath(exec_path,real);
5327 ret = (temp==NULL) ? get_errno(-1) : strlen(real) ;
5328 snprintf((char *)p2, arg3, "%s", real);
5331 ret = get_errno(readlink(path(p), p2, arg3));
5333 unlock_user(p2, arg2, ret);
5334 unlock_user(p, arg1, 0);
5337 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5338 case TARGET_NR_readlinkat:
5341 p = lock_user_string(arg2);
5342 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
5344 ret = -TARGET_EFAULT;
5346 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
5347 unlock_user(p2, arg3, ret);
5348 unlock_user(p, arg2, 0);
5352 #ifdef TARGET_NR_uselib
5353 case TARGET_NR_uselib:
5356 #ifdef TARGET_NR_swapon
5357 case TARGET_NR_swapon:
5358 if (!(p = lock_user_string(arg1)))
5360 ret = get_errno(swapon(p, arg2));
5361 unlock_user(p, arg1, 0);
5364 case TARGET_NR_reboot:
5366 #ifdef TARGET_NR_readdir
5367 case TARGET_NR_readdir:
5370 #ifdef TARGET_NR_mmap
5371 case TARGET_NR_mmap:
5372 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE)
5375 abi_ulong v1, v2, v3, v4, v5, v6;
5376 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
5384 unlock_user(v, arg1, 0);
5385 ret = get_errno(target_mmap(v1, v2, v3,
5386 target_to_host_bitmask(v4, mmap_flags_tbl),
5390 ret = get_errno(target_mmap(arg1, arg2, arg3,
5391 target_to_host_bitmask(arg4, mmap_flags_tbl),
5397 #ifdef TARGET_NR_mmap2
5398 case TARGET_NR_mmap2:
5400 #define MMAP_SHIFT 12
5402 ret = get_errno(target_mmap(arg1, arg2, arg3,
5403 target_to_host_bitmask(arg4, mmap_flags_tbl),
5405 arg6 << MMAP_SHIFT));
5408 case TARGET_NR_munmap:
5409 ret = get_errno(target_munmap(arg1, arg2));
5411 case TARGET_NR_mprotect:
5413 TaskState *ts = ((CPUState *)cpu_env)->opaque;
5414 /* Special hack to detect libc making the stack executable. */
5415 if ((arg3 & PROT_GROWSDOWN)
5416 && arg1 >= ts->info->stack_limit
5417 && arg1 <= ts->info->start_stack) {
5418 arg3 &= ~PROT_GROWSDOWN;
5419 arg2 = arg2 + arg1 - ts->info->stack_limit;
5420 arg1 = ts->info->stack_limit;
5423 ret = get_errno(target_mprotect(arg1, arg2, arg3));
5425 #ifdef TARGET_NR_mremap
5426 case TARGET_NR_mremap:
5427 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
5430 /* ??? msync/mlock/munlock are broken for softmmu. */
5431 #ifdef TARGET_NR_msync
5432 case TARGET_NR_msync:
5433 ret = get_errno(msync(g2h(arg1), arg2, arg3));
5436 #ifdef TARGET_NR_mlock
5437 case TARGET_NR_mlock:
5438 ret = get_errno(mlock(g2h(arg1), arg2));
5441 #ifdef TARGET_NR_munlock
5442 case TARGET_NR_munlock:
5443 ret = get_errno(munlock(g2h(arg1), arg2));
5446 #ifdef TARGET_NR_mlockall
5447 case TARGET_NR_mlockall:
5448 ret = get_errno(mlockall(arg1));
5451 #ifdef TARGET_NR_munlockall
5452 case TARGET_NR_munlockall:
5453 ret = get_errno(munlockall());
5456 case TARGET_NR_truncate:
5457 if (!(p = lock_user_string(arg1)))
5459 ret = get_errno(truncate(p, arg2));
5460 unlock_user(p, arg1, 0);
5462 case TARGET_NR_ftruncate:
5463 ret = get_errno(ftruncate(arg1, arg2));
5465 case TARGET_NR_fchmod:
5466 ret = get_errno(fchmod(arg1, arg2));
5468 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5469 case TARGET_NR_fchmodat:
5470 if (!(p = lock_user_string(arg2)))
5472 ret = get_errno(sys_fchmodat(arg1, p, arg3));
5473 unlock_user(p, arg2, 0);
5476 case TARGET_NR_getpriority:
5477 /* libc does special remapping of the return value of
5478 * sys_getpriority() so it's just easiest to call
5479 * sys_getpriority() directly rather than through libc. */
5480 ret = get_errno(sys_getpriority(arg1, arg2));
5482 case TARGET_NR_setpriority:
5483 ret = get_errno(setpriority(arg1, arg2, arg3));
5485 #ifdef TARGET_NR_profil
5486 case TARGET_NR_profil:
5489 case TARGET_NR_statfs:
5490 if (!(p = lock_user_string(arg1)))
5492 ret = get_errno(statfs(path(p), &stfs));
5493 unlock_user(p, arg1, 0);
5495 if (!is_error(ret)) {
5496 struct target_statfs *target_stfs;
5498 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
5500 __put_user(stfs.f_type, &target_stfs->f_type);
5501 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5502 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5503 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5504 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5505 __put_user(stfs.f_files, &target_stfs->f_files);
5506 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5507 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5508 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5509 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5510 unlock_user_struct(target_stfs, arg2, 1);
5513 case TARGET_NR_fstatfs:
5514 ret = get_errno(fstatfs(arg1, &stfs));
5515 goto convert_statfs;
5516 #ifdef TARGET_NR_statfs64
5517 case TARGET_NR_statfs64:
5518 if (!(p = lock_user_string(arg1)))
5520 ret = get_errno(statfs(path(p), &stfs));
5521 unlock_user(p, arg1, 0);
5523 if (!is_error(ret)) {
5524 struct target_statfs64 *target_stfs;
5526 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
5528 __put_user(stfs.f_type, &target_stfs->f_type);
5529 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
5530 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
5531 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
5532 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
5533 __put_user(stfs.f_files, &target_stfs->f_files);
5534 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
5535 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
5536 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
5537 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
5538 unlock_user_struct(target_stfs, arg3, 1);
5541 case TARGET_NR_fstatfs64:
5542 ret = get_errno(fstatfs(arg1, &stfs));
5543 goto convert_statfs64;
5545 #ifdef TARGET_NR_ioperm
5546 case TARGET_NR_ioperm:
5549 #ifdef TARGET_NR_socketcall
5550 case TARGET_NR_socketcall:
5551 ret = do_socketcall(arg1, arg2);
5554 #ifdef TARGET_NR_accept
5555 case TARGET_NR_accept:
5556 ret = do_accept(arg1, arg2, arg3);
5559 #ifdef TARGET_NR_bind
5560 case TARGET_NR_bind:
5561 ret = do_bind(arg1, arg2, arg3);
5564 #ifdef TARGET_NR_connect
5565 case TARGET_NR_connect:
5566 ret = do_connect(arg1, arg2, arg3);
5569 #ifdef TARGET_NR_getpeername
5570 case TARGET_NR_getpeername:
5571 ret = do_getpeername(arg1, arg2, arg3);
5574 #ifdef TARGET_NR_getsockname
5575 case TARGET_NR_getsockname:
5576 ret = do_getsockname(arg1, arg2, arg3);
5579 #ifdef TARGET_NR_getsockopt
5580 case TARGET_NR_getsockopt:
5581 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
5584 #ifdef TARGET_NR_listen
5585 case TARGET_NR_listen:
5586 ret = get_errno(listen(arg1, arg2));
5589 #ifdef TARGET_NR_recv
5590 case TARGET_NR_recv:
5591 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
5594 #ifdef TARGET_NR_recvfrom
5595 case TARGET_NR_recvfrom:
5596 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
5599 #ifdef TARGET_NR_recvmsg
5600 case TARGET_NR_recvmsg:
5601 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
5604 #ifdef TARGET_NR_send
5605 case TARGET_NR_send:
5606 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
5609 #ifdef TARGET_NR_sendmsg
5610 case TARGET_NR_sendmsg:
5611 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
5614 #ifdef TARGET_NR_sendto
5615 case TARGET_NR_sendto:
5616 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
5619 #ifdef TARGET_NR_shutdown
5620 case TARGET_NR_shutdown:
5621 ret = get_errno(shutdown(arg1, arg2));
5624 #ifdef TARGET_NR_socket
5625 case TARGET_NR_socket:
5626 ret = do_socket(arg1, arg2, arg3);
5629 #ifdef TARGET_NR_socketpair
5630 case TARGET_NR_socketpair:
5631 ret = do_socketpair(arg1, arg2, arg3, arg4);
5634 #ifdef TARGET_NR_setsockopt
5635 case TARGET_NR_setsockopt:
5636 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
5640 case TARGET_NR_syslog:
5641 if (!(p = lock_user_string(arg2)))
5643 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
5644 unlock_user(p, arg2, 0);
5647 case TARGET_NR_setitimer:
5649 struct itimerval value, ovalue, *pvalue;
5653 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
5654 || copy_from_user_timeval(&pvalue->it_value,
5655 arg2 + sizeof(struct target_timeval)))
5660 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
5661 if (!is_error(ret) && arg3) {
5662 if (copy_to_user_timeval(arg3,
5663 &ovalue.it_interval)
5664 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
5670 case TARGET_NR_getitimer:
5672 struct itimerval value;
5674 ret = get_errno(getitimer(arg1, &value));
5675 if (!is_error(ret) && arg2) {
5676 if (copy_to_user_timeval(arg2,
5678 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
5684 case TARGET_NR_stat:
5685 if (!(p = lock_user_string(arg1)))
5687 ret = get_errno(stat(path(p), &st));
5688 unlock_user(p, arg1, 0);
5690 case TARGET_NR_lstat:
5691 if (!(p = lock_user_string(arg1)))
5693 ret = get_errno(lstat(path(p), &st));
5694 unlock_user(p, arg1, 0);
5696 case TARGET_NR_fstat:
5698 ret = get_errno(fstat(arg1, &st));
5700 if (!is_error(ret)) {
5701 struct target_stat *target_st;
5703 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
5705 memset(target_st, 0, sizeof(*target_st));
5706 __put_user(st.st_dev, &target_st->st_dev);
5707 __put_user(st.st_ino, &target_st->st_ino);
5708 __put_user(st.st_mode, &target_st->st_mode);
5709 __put_user(st.st_uid, &target_st->st_uid);
5710 __put_user(st.st_gid, &target_st->st_gid);
5711 __put_user(st.st_nlink, &target_st->st_nlink);
5712 __put_user(st.st_rdev, &target_st->st_rdev);
5713 __put_user(st.st_size, &target_st->st_size);
5714 __put_user(st.st_blksize, &target_st->st_blksize);
5715 __put_user(st.st_blocks, &target_st->st_blocks);
5716 __put_user(st.st_atime, &target_st->target_st_atime);
5717 __put_user(st.st_mtime, &target_st->target_st_mtime);
5718 __put_user(st.st_ctime, &target_st->target_st_ctime);
5719 unlock_user_struct(target_st, arg2, 1);
5723 #ifdef TARGET_NR_olduname
5724 case TARGET_NR_olduname:
5727 #ifdef TARGET_NR_iopl
5728 case TARGET_NR_iopl:
5731 case TARGET_NR_vhangup:
5732 ret = get_errno(vhangup());
5734 #ifdef TARGET_NR_idle
5735 case TARGET_NR_idle:
5738 #ifdef TARGET_NR_syscall
5739 case TARGET_NR_syscall:
5740 ret = do_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0);
5743 case TARGET_NR_wait4:
5746 abi_long status_ptr = arg2;
5747 struct rusage rusage, *rusage_ptr;
5748 abi_ulong target_rusage = arg4;
5750 rusage_ptr = &rusage;
5753 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
5754 if (!is_error(ret)) {
5756 status = host_to_target_waitstatus(status);
5757 if (put_user_s32(status, status_ptr))
5761 host_to_target_rusage(target_rusage, &rusage);
5765 #ifdef TARGET_NR_swapoff
5766 case TARGET_NR_swapoff:
5767 if (!(p = lock_user_string(arg1)))
5769 ret = get_errno(swapoff(p));
5770 unlock_user(p, arg1, 0);
5773 case TARGET_NR_sysinfo:
5775 struct target_sysinfo *target_value;
5776 struct sysinfo value;
5777 ret = get_errno(sysinfo(&value));
5778 if (!is_error(ret) && arg1)
5780 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
5782 __put_user(value.uptime, &target_value->uptime);
5783 __put_user(value.loads[0], &target_value->loads[0]);
5784 __put_user(value.loads[1], &target_value->loads[1]);
5785 __put_user(value.loads[2], &target_value->loads[2]);
5786 __put_user(value.totalram, &target_value->totalram);
5787 __put_user(value.freeram, &target_value->freeram);
5788 __put_user(value.sharedram, &target_value->sharedram);
5789 __put_user(value.bufferram, &target_value->bufferram);
5790 __put_user(value.totalswap, &target_value->totalswap);
5791 __put_user(value.freeswap, &target_value->freeswap);
5792 __put_user(value.procs, &target_value->procs);
5793 __put_user(value.totalhigh, &target_value->totalhigh);
5794 __put_user(value.freehigh, &target_value->freehigh);
5795 __put_user(value.mem_unit, &target_value->mem_unit);
5796 unlock_user_struct(target_value, arg1, 1);
5800 #ifdef TARGET_NR_ipc
5802 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
5805 #ifdef TARGET_NR_semget
5806 case TARGET_NR_semget:
5807 ret = get_errno(semget(arg1, arg2, arg3));
5810 #ifdef TARGET_NR_semop
5811 case TARGET_NR_semop:
5812 ret = get_errno(do_semop(arg1, arg2, arg3));
5815 #ifdef TARGET_NR_semctl
5816 case TARGET_NR_semctl:
5817 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
5820 #ifdef TARGET_NR_msgctl
5821 case TARGET_NR_msgctl:
5822 ret = do_msgctl(arg1, arg2, arg3);
5825 #ifdef TARGET_NR_msgget
5826 case TARGET_NR_msgget:
5827 ret = get_errno(msgget(arg1, arg2));
5830 #ifdef TARGET_NR_msgrcv
5831 case TARGET_NR_msgrcv:
5832 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
5835 #ifdef TARGET_NR_msgsnd
5836 case TARGET_NR_msgsnd:
5837 ret = do_msgsnd(arg1, arg2, arg3, arg4);
5840 #ifdef TARGET_NR_shmget
5841 case TARGET_NR_shmget:
5842 ret = get_errno(shmget(arg1, arg2, arg3));
5845 #ifdef TARGET_NR_shmctl
5846 case TARGET_NR_shmctl:
5847 ret = do_shmctl(arg1, arg2, arg3);
5850 #ifdef TARGET_NR_shmat
5851 case TARGET_NR_shmat:
5852 ret = do_shmat(arg1, arg2, arg3);
5855 #ifdef TARGET_NR_shmdt
5856 case TARGET_NR_shmdt:
5857 ret = do_shmdt(arg1);
5860 case TARGET_NR_fsync:
5861 ret = get_errno(fsync(arg1));
5863 case TARGET_NR_clone:
5864 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
5865 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
5866 #elif defined(TARGET_CRIS)
5867 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
5869 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
5872 #ifdef __NR_exit_group
5873 /* new thread calls */
5874 case TARGET_NR_exit_group:
5878 gdb_exit(cpu_env, arg1);
5879 ret = get_errno(exit_group(arg1));
5882 case TARGET_NR_setdomainname:
5883 if (!(p = lock_user_string(arg1)))
5885 ret = get_errno(setdomainname(p, arg2));
5886 unlock_user(p, arg1, 0);
5888 case TARGET_NR_uname:
5889 /* no need to transcode because we use the linux syscall */
5891 struct new_utsname * buf;
5893 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
5895 ret = get_errno(sys_uname(buf));
5896 if (!is_error(ret)) {
5897                 /* Overwrite the native machine name with whatever is being
5899 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
5900 /* Allow the user to override the reported release. */
5901 if (qemu_uname_release && *qemu_uname_release)
5902 strcpy (buf->release, qemu_uname_release);
5904 unlock_user_struct(buf, arg1, 1);
5908 case TARGET_NR_modify_ldt:
5909 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
5911 #if !defined(TARGET_X86_64)
5912 case TARGET_NR_vm86old:
5914 case TARGET_NR_vm86:
5915 ret = do_vm86(cpu_env, arg1, arg2);
5919 case TARGET_NR_adjtimex:
5921 #ifdef TARGET_NR_create_module
5922 case TARGET_NR_create_module:
5924 case TARGET_NR_init_module:
5925 case TARGET_NR_delete_module:
5926 #ifdef TARGET_NR_get_kernel_syms
5927 case TARGET_NR_get_kernel_syms:
5930 case TARGET_NR_quotactl:
5932 case TARGET_NR_getpgid:
5933 ret = get_errno(getpgid(arg1));
5935 case TARGET_NR_fchdir:
5936 ret = get_errno(fchdir(arg1));
5938 #ifdef TARGET_NR_bdflush /* not on x86_64 */
5939 case TARGET_NR_bdflush:
5942 #ifdef TARGET_NR_sysfs
5943 case TARGET_NR_sysfs:
5946 case TARGET_NR_personality:
5947 ret = get_errno(personality(arg1));
5949 #ifdef TARGET_NR_afs_syscall
5950 case TARGET_NR_afs_syscall:
5953 #ifdef TARGET_NR__llseek /* Not on alpha */
5954 case TARGET_NR__llseek:
5956 #if !defined(__NR_llseek)
5957 ret = get_errno(lseek(arg1, ((uint64_t )arg2 << 32) | arg3, arg5));
5958 if (put_user_s64(ret, arg4))
5962 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
5963 if (put_user_s64(res, arg4))
5969 case TARGET_NR_getdents:
5970 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
5972 struct target_dirent *target_dirp;
5973 struct linux_dirent *dirp;
5974 abi_long count = arg3;
5976 dirp = malloc(count);
5978 ret = -TARGET_ENOMEM;
5982 ret = get_errno(sys_getdents(arg1, dirp, count));
5983 if (!is_error(ret)) {
5984 struct linux_dirent *de;
5985 struct target_dirent *tde;
5987 int reclen, treclen;
5988 int count1, tnamelen;
5992 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
5996 reclen = de->d_reclen;
5997 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
5998 tde->d_reclen = tswap16(treclen);
5999 tde->d_ino = tswapl(de->d_ino);
6000 tde->d_off = tswapl(de->d_off);
6001 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
6004 /* XXX: may not be correct */
6005 pstrcpy(tde->d_name, tnamelen, de->d_name);
6006 de = (struct linux_dirent *)((char *)de + reclen);
6008 tde = (struct target_dirent *)((char *)tde + treclen);
6012 unlock_user(target_dirp, arg2, ret);
6018 struct linux_dirent *dirp;
6019 abi_long count = arg3;
6021 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6023 ret = get_errno(sys_getdents(arg1, dirp, count));
6024 if (!is_error(ret)) {
6025 struct linux_dirent *de;
6030 reclen = de->d_reclen;
6033 de->d_reclen = tswap16(reclen);
6034 tswapls(&de->d_ino);
6035 tswapls(&de->d_off);
6036 de = (struct linux_dirent *)((char *)de + reclen);
6040 unlock_user(dirp, arg2, ret);
6044 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6045 case TARGET_NR_getdents64:
6047 struct linux_dirent64 *dirp;
6048 abi_long count = arg3;
6049 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6051 ret = get_errno(sys_getdents64(arg1, dirp, count));
6052 if (!is_error(ret)) {
6053 struct linux_dirent64 *de;
6058 reclen = de->d_reclen;
6061 de->d_reclen = tswap16(reclen);
6062 tswap64s((uint64_t *)&de->d_ino);
6063 tswap64s((uint64_t *)&de->d_off);
6064 de = (struct linux_dirent64 *)((char *)de + reclen);
6068 unlock_user(dirp, arg2, ret);
6071 #endif /* TARGET_NR_getdents64 */
6072 #ifdef TARGET_NR__newselect
6073 case TARGET_NR__newselect:
6074 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6077 #ifdef TARGET_NR_poll
6078 case TARGET_NR_poll:
6080 struct target_pollfd *target_pfd;
6081 unsigned int nfds = arg2;
6086 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
6089 pfd = alloca(sizeof(struct pollfd) * nfds);
6090 for(i = 0; i < nfds; i++) {
6091 pfd[i].fd = tswap32(target_pfd[i].fd);
6092 pfd[i].events = tswap16(target_pfd[i].events);
6094 ret = get_errno(poll(pfd, nfds, timeout));
6095 if (!is_error(ret)) {
6096 for(i = 0; i < nfds; i++) {
6097 target_pfd[i].revents = tswap16(pfd[i].revents);
6099 ret += nfds * (sizeof(struct target_pollfd)
6100 - sizeof(struct pollfd));
6102 unlock_user(target_pfd, arg1, ret);
6106 case TARGET_NR_flock:
6107 /* NOTE: the flock constant seems to be the same for every
6109 ret = get_errno(flock(arg1, arg2));
6111 case TARGET_NR_readv:
6116 vec = alloca(count * sizeof(struct iovec));
6117 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
6119 ret = get_errno(readv(arg1, vec, count));
6120 unlock_iovec(vec, arg2, count, 1);
6123 case TARGET_NR_writev:
6128 vec = alloca(count * sizeof(struct iovec));
6129 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
6131 ret = get_errno(writev(arg1, vec, count));
6132 unlock_iovec(vec, arg2, count, 0);
6135 case TARGET_NR_getsid:
6136 ret = get_errno(getsid(arg1));
6138 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6139 case TARGET_NR_fdatasync:
6140 ret = get_errno(fdatasync(arg1));
6143 case TARGET_NR__sysctl:
6144 /* We don't implement this, but ENOTDIR is always a safe
6146 ret = -TARGET_ENOTDIR;
6148 case TARGET_NR_sched_setparam:
6150 struct sched_param *target_schp;
6151 struct sched_param schp;
6153 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
6155 schp.sched_priority = tswap32(target_schp->sched_priority);
6156 unlock_user_struct(target_schp, arg2, 0);
6157 ret = get_errno(sched_setparam(arg1, &schp));
6160 case TARGET_NR_sched_getparam:
6162 struct sched_param *target_schp;
6163 struct sched_param schp;
6164 ret = get_errno(sched_getparam(arg1, &schp));
6165 if (!is_error(ret)) {
6166 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
6168 target_schp->sched_priority = tswap32(schp.sched_priority);
6169 unlock_user_struct(target_schp, arg2, 1);
6173 case TARGET_NR_sched_setscheduler:
6175 struct sched_param *target_schp;
6176 struct sched_param schp;
6177 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
6179 schp.sched_priority = tswap32(target_schp->sched_priority);
6180 unlock_user_struct(target_schp, arg3, 0);
6181 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
6184 case TARGET_NR_sched_getscheduler:
6185 ret = get_errno(sched_getscheduler(arg1));
6187 case TARGET_NR_sched_yield:
6188 ret = get_errno(sched_yield());
6190 case TARGET_NR_sched_get_priority_max:
6191 ret = get_errno(sched_get_priority_max(arg1));
6193 case TARGET_NR_sched_get_priority_min:
6194 ret = get_errno(sched_get_priority_min(arg1));
6196 case TARGET_NR_sched_rr_get_interval:
6199 ret = get_errno(sched_rr_get_interval(arg1, &ts));
6200 if (!is_error(ret)) {
6201 host_to_target_timespec(arg2, &ts);
6205 case TARGET_NR_nanosleep:
6207 struct timespec req, rem;
6208 target_to_host_timespec(&req, arg1);
6209 ret = get_errno(nanosleep(&req, &rem));
6210 if (is_error(ret) && arg2) {
6211 host_to_target_timespec(arg2, &rem);
6215 #ifdef TARGET_NR_query_module
6216 case TARGET_NR_query_module:
6219 #ifdef TARGET_NR_nfsservctl
6220 case TARGET_NR_nfsservctl:
6223 case TARGET_NR_prctl:
6226 case PR_GET_PDEATHSIG:
6229 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
6230 if (!is_error(ret) && arg2
6231 && put_user_ual(deathsig, arg2))
6236 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
6240 #ifdef TARGET_NR_arch_prctl
6241 case TARGET_NR_arch_prctl:
6242 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6243 ret = do_arch_prctl(cpu_env, arg1, arg2);
6249 #ifdef TARGET_NR_pread
6250 case TARGET_NR_pread:
6252 if (((CPUARMState *)cpu_env)->eabi)
6255 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6257 ret = get_errno(pread(arg1, p, arg3, arg4));
6258 unlock_user(p, arg2, ret);
6260 case TARGET_NR_pwrite:
6262 if (((CPUARMState *)cpu_env)->eabi)
6265 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6267 ret = get_errno(pwrite(arg1, p, arg3, arg4));
6268 unlock_user(p, arg2, 0);
6271 #ifdef TARGET_NR_pread64
6272 case TARGET_NR_pread64:
6273 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6275 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
6276 unlock_user(p, arg2, ret);
6278 case TARGET_NR_pwrite64:
6279 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6281 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
6282 unlock_user(p, arg2, 0);
6285 case TARGET_NR_getcwd:
6286 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
6288 ret = get_errno(sys_getcwd1(p, arg2));
6289 unlock_user(p, arg1, ret);
6291 case TARGET_NR_capget:
6293 case TARGET_NR_capset:
6295 case TARGET_NR_sigaltstack:
6296 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6297 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6298 defined(TARGET_M68K)
6299 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
6304 case TARGET_NR_sendfile:
6306 #ifdef TARGET_NR_getpmsg
6307 case TARGET_NR_getpmsg:
6310 #ifdef TARGET_NR_putpmsg
6311 case TARGET_NR_putpmsg:
6314 #ifdef TARGET_NR_vfork
6315 case TARGET_NR_vfork:
6316 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
6320 #ifdef TARGET_NR_ugetrlimit
6321 case TARGET_NR_ugetrlimit:
6324 ret = get_errno(getrlimit(arg1, &rlim));
6325 if (!is_error(ret)) {
6326 struct target_rlimit *target_rlim;
6327 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6329 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6330 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6331 unlock_user_struct(target_rlim, arg2, 1);
6336 #ifdef TARGET_NR_truncate64
6337 case TARGET_NR_truncate64:
6338 if (!(p = lock_user_string(arg1)))
6340 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
6341 unlock_user(p, arg1, 0);
6344 #ifdef TARGET_NR_ftruncate64
6345 case TARGET_NR_ftruncate64:
6346 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
6349 #ifdef TARGET_NR_stat64
6350 case TARGET_NR_stat64:
6351 if (!(p = lock_user_string(arg1)))
6353 ret = get_errno(stat(path(p), &st));
6354 unlock_user(p, arg1, 0);
6356 ret = host_to_target_stat64(cpu_env, arg2, &st);
6359 #ifdef TARGET_NR_lstat64
6360 case TARGET_NR_lstat64:
6361 if (!(p = lock_user_string(arg1)))
6363 ret = get_errno(lstat(path(p), &st));
6364 unlock_user(p, arg1, 0);
6366 ret = host_to_target_stat64(cpu_env, arg2, &st);
6369 #ifdef TARGET_NR_fstat64
6370 case TARGET_NR_fstat64:
6371 ret = get_errno(fstat(arg1, &st));
6373 ret = host_to_target_stat64(cpu_env, arg2, &st);
6376 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6377 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6378 #ifdef TARGET_NR_fstatat64
6379 case TARGET_NR_fstatat64:
6381 #ifdef TARGET_NR_newfstatat
6382 case TARGET_NR_newfstatat:
6384 if (!(p = lock_user_string(arg2)))
6386 #ifdef __NR_fstatat64
6387 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
6389 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
6392 ret = host_to_target_stat64(cpu_env, arg3, &st);
6396 case TARGET_NR_lchown:
6397 if (!(p = lock_user_string(arg1)))
6399 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
6400 unlock_user(p, arg1, 0);
6402 case TARGET_NR_getuid:
6403 ret = get_errno(high2lowuid(getuid()));
6405 case TARGET_NR_getgid:
6406 ret = get_errno(high2lowgid(getgid()));
6408 case TARGET_NR_geteuid:
6409 ret = get_errno(high2lowuid(geteuid()));
6411 case TARGET_NR_getegid:
6412 ret = get_errno(high2lowgid(getegid()));
6414 case TARGET_NR_setreuid:
6415 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
6417 case TARGET_NR_setregid:
6418 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
6420 case TARGET_NR_getgroups:
6422 int gidsetsize = arg1;
6423 uint16_t *target_grouplist;
6427 grouplist = alloca(gidsetsize * sizeof(gid_t));
6428 ret = get_errno(getgroups(gidsetsize, grouplist));
6429 if (gidsetsize == 0)
6431 if (!is_error(ret)) {
6432 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
6433 if (!target_grouplist)
6435 for(i = 0;i < ret; i++)
6436 target_grouplist[i] = tswap16(grouplist[i]);
6437 unlock_user(target_grouplist, arg2, gidsetsize * 2);
6441 case TARGET_NR_setgroups:
6443 int gidsetsize = arg1;
6444 uint16_t *target_grouplist;
6448 grouplist = alloca(gidsetsize * sizeof(gid_t));
6449 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
6450 if (!target_grouplist) {
6451 ret = -TARGET_EFAULT;
6454 for(i = 0;i < gidsetsize; i++)
6455 grouplist[i] = tswap16(target_grouplist[i]);
6456 unlock_user(target_grouplist, arg2, 0);
6457 ret = get_errno(setgroups(gidsetsize, grouplist));
6460 case TARGET_NR_fchown:
6461 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
6463 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
6464 case TARGET_NR_fchownat:
6465 if (!(p = lock_user_string(arg2)))
6467 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
6468 unlock_user(p, arg2, 0);
6471 #ifdef TARGET_NR_setresuid
6472 case TARGET_NR_setresuid:
6473 ret = get_errno(setresuid(low2highuid(arg1),
6475 low2highuid(arg3)));
6478 #ifdef TARGET_NR_getresuid
6479 case TARGET_NR_getresuid:
6481 uid_t ruid, euid, suid;
6482 ret = get_errno(getresuid(&ruid, &euid, &suid));
6483 if (!is_error(ret)) {
6484 if (put_user_u16(high2lowuid(ruid), arg1)
6485 || put_user_u16(high2lowuid(euid), arg2)
6486 || put_user_u16(high2lowuid(suid), arg3))
6492 #ifdef TARGET_NR_getresgid
6493 case TARGET_NR_setresgid:
6494 ret = get_errno(setresgid(low2highgid(arg1),
6496 low2highgid(arg3)));
6499 #ifdef TARGET_NR_getresgid
6500 case TARGET_NR_getresgid:
6502 gid_t rgid, egid, sgid;
6503 ret = get_errno(getresgid(&rgid, &egid, &sgid));
6504 if (!is_error(ret)) {
6505 if (put_user_u16(high2lowgid(rgid), arg1)
6506 || put_user_u16(high2lowgid(egid), arg2)
6507 || put_user_u16(high2lowgid(sgid), arg3))
6513 case TARGET_NR_chown:
6514 if (!(p = lock_user_string(arg1)))
6516 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
6517 unlock_user(p, arg1, 0);
6519 case TARGET_NR_setuid:
6520 ret = get_errno(setuid(low2highuid(arg1)));
6522 case TARGET_NR_setgid:
6523 ret = get_errno(setgid(low2highgid(arg1)));
6525 case TARGET_NR_setfsuid:
6526 ret = get_errno(setfsuid(arg1));
6528 case TARGET_NR_setfsgid:
6529 ret = get_errno(setfsgid(arg1));
6531 #endif /* USE_UID16 */
6533 #ifdef TARGET_NR_lchown32
6534 case TARGET_NR_lchown32:
6535 if (!(p = lock_user_string(arg1)))
6537 ret = get_errno(lchown(p, arg2, arg3));
6538 unlock_user(p, arg1, 0);
6541 #ifdef TARGET_NR_getuid32
6542 case TARGET_NR_getuid32:
6543 ret = get_errno(getuid());
6547 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
6548 /* Alpha specific */
6549 case TARGET_NR_getxuid:
6553 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
6555 ret = get_errno(getuid());
6558 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
6559 /* Alpha specific */
6560 case TARGET_NR_getxgid:
6564 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
6566 ret = get_errno(getgid());
6569 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
6570 /* Alpha specific */
6571 case TARGET_NR_osf_getsysinfo:
6572 ret = -TARGET_EOPNOTSUPP;
6574 case TARGET_GSI_IEEE_FP_CONTROL:
6576 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
6578 /* Copied from linux ieee_fpcr_to_swcr. */
6579 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
6580 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
6581 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
6582 | SWCR_TRAP_ENABLE_DZE
6583 | SWCR_TRAP_ENABLE_OVF);
6584 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
6585 | SWCR_TRAP_ENABLE_INE);
6586 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
6587 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
6589 if (put_user_u64 (swcr, arg2))
6595 /* case GSI_IEEE_STATE_AT_SIGNAL:
6596 -- Not implemented in linux kernel.
6598 -- Retrieves current unaligned access state; not much used.
6600 -- Retrieves implver information; surely not used.
6602 -- Grabs a copy of the HWRPB; surely not used.
6607 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
6608 /* Alpha specific */
6609 case TARGET_NR_osf_setsysinfo:
6610 ret = -TARGET_EOPNOTSUPP;
6612 case TARGET_SSI_IEEE_FP_CONTROL:
6613 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
6615 uint64_t swcr, fpcr, orig_fpcr;
6617 if (get_user_u64 (swcr, arg2))
6619 orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
6620 fpcr = orig_fpcr & FPCR_DYN_MASK;
6622 /* Copied from linux ieee_swcr_to_fpcr. */
6623 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
6624 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
6625 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
6626 | SWCR_TRAP_ENABLE_DZE
6627 | SWCR_TRAP_ENABLE_OVF)) << 48;
6628 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
6629 | SWCR_TRAP_ENABLE_INE)) << 57;
6630 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
6631 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
6633 cpu_alpha_store_fpcr (cpu_env, fpcr);
6636 if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
6637 /* Old exceptions are not signaled. */
6638 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
6640 /* If any exceptions set by this call, and are unmasked,
6647 /* case SSI_NVPAIRS:
6648 -- Used with SSIN_UACPROC to enable unaligned accesses.
6649 case SSI_IEEE_STATE_AT_SIGNAL:
6650 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
6651 -- Not implemented in linux kernel
6656 #ifdef TARGET_NR_osf_sigprocmask
6657 /* Alpha specific. */
6658 case TARGET_NR_osf_sigprocmask:
6662 sigset_t set, oldset;
6665 case TARGET_SIG_BLOCK:
6668 case TARGET_SIG_UNBLOCK:
6671 case TARGET_SIG_SETMASK:
6675 ret = -TARGET_EINVAL;
6679 target_to_host_old_sigset(&set, &mask);
6680 sigprocmask(arg1, &set, &oldset);
6681 host_to_target_old_sigset(&mask, &oldset);
6687 #ifdef TARGET_NR_getgid32
6688 case TARGET_NR_getgid32:
6689 ret = get_errno(getgid());
6692 #ifdef TARGET_NR_geteuid32
6693 case TARGET_NR_geteuid32:
6694 ret = get_errno(geteuid());
6697 #ifdef TARGET_NR_getegid32
6698 case TARGET_NR_getegid32:
6699 ret = get_errno(getegid());
6702 #ifdef TARGET_NR_setreuid32
6703 case TARGET_NR_setreuid32:
6704 ret = get_errno(setreuid(arg1, arg2));
6707 #ifdef TARGET_NR_setregid32
6708 case TARGET_NR_setregid32:
6709 ret = get_errno(setregid(arg1, arg2));
6712 #ifdef TARGET_NR_getgroups32
6713 case TARGET_NR_getgroups32:
6715 int gidsetsize = arg1;
6716 uint32_t *target_grouplist;
6720 grouplist = alloca(gidsetsize * sizeof(gid_t));
6721 ret = get_errno(getgroups(gidsetsize, grouplist));
6722 if (gidsetsize == 0)
6724 if (!is_error(ret)) {
6725 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
6726 if (!target_grouplist) {
6727 ret = -TARGET_EFAULT;
6730 for(i = 0;i < ret; i++)
6731 target_grouplist[i] = tswap32(grouplist[i]);
6732 unlock_user(target_grouplist, arg2, gidsetsize * 4);
6737 #ifdef TARGET_NR_setgroups32
6738 case TARGET_NR_setgroups32:
6740 int gidsetsize = arg1;
6741 uint32_t *target_grouplist;
6745 grouplist = alloca(gidsetsize * sizeof(gid_t));
6746 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
6747 if (!target_grouplist) {
6748 ret = -TARGET_EFAULT;
6751 for(i = 0;i < gidsetsize; i++)
6752 grouplist[i] = tswap32(target_grouplist[i]);
6753 unlock_user(target_grouplist, arg2, 0);
6754 ret = get_errno(setgroups(gidsetsize, grouplist));
6758 #ifdef TARGET_NR_fchown32
6759 case TARGET_NR_fchown32:
6760 ret = get_errno(fchown(arg1, arg2, arg3));
6763 #ifdef TARGET_NR_setresuid32
6764 case TARGET_NR_setresuid32:
6765 ret = get_errno(setresuid(arg1, arg2, arg3));
6768 #ifdef TARGET_NR_getresuid32
6769 case TARGET_NR_getresuid32:
6771 uid_t ruid, euid, suid;
6772 ret = get_errno(getresuid(&ruid, &euid, &suid));
6773 if (!is_error(ret)) {
6774 if (put_user_u32(ruid, arg1)
6775 || put_user_u32(euid, arg2)
6776 || put_user_u32(suid, arg3))
6782 #ifdef TARGET_NR_setresgid32
6783 case TARGET_NR_setresgid32:
6784 ret = get_errno(setresgid(arg1, arg2, arg3));
6787 #ifdef TARGET_NR_getresgid32
6788 case TARGET_NR_getresgid32:
6790 gid_t rgid, egid, sgid;
6791 ret = get_errno(getresgid(&rgid, &egid, &sgid));
6792 if (!is_error(ret)) {
6793 if (put_user_u32(rgid, arg1)
6794 || put_user_u32(egid, arg2)
6795 || put_user_u32(sgid, arg3))
6801 #ifdef TARGET_NR_chown32
6802 case TARGET_NR_chown32:
6803 if (!(p = lock_user_string(arg1)))
6805 ret = get_errno(chown(p, arg2, arg3));
6806 unlock_user(p, arg1, 0);
6809 #ifdef TARGET_NR_setuid32
6810 case TARGET_NR_setuid32:
6811 ret = get_errno(setuid(arg1));
6814 #ifdef TARGET_NR_setgid32
6815 case TARGET_NR_setgid32:
6816 ret = get_errno(setgid(arg1));
6819 #ifdef TARGET_NR_setfsuid32
6820 case TARGET_NR_setfsuid32:
6821 ret = get_errno(setfsuid(arg1));
6824 #ifdef TARGET_NR_setfsgid32
6825 case TARGET_NR_setfsgid32:
6826 ret = get_errno(setfsgid(arg1));
6830 case TARGET_NR_pivot_root:
6832 #ifdef TARGET_NR_mincore
6833 case TARGET_NR_mincore:
6836 ret = -TARGET_EFAULT;
6837 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
6839 if (!(p = lock_user_string(arg3)))
6841 ret = get_errno(mincore(a, arg2, p));
6842 unlock_user(p, arg3, ret);
6844 unlock_user(a, arg1, 0);
6848 #ifdef TARGET_NR_arm_fadvise64_64
6849 case TARGET_NR_arm_fadvise64_64:
6852 * arm_fadvise64_64 looks like fadvise64_64 but
6853 * with different argument order
6861 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
6862 #ifdef TARGET_NR_fadvise64_64
6863 case TARGET_NR_fadvise64_64:
6865 #ifdef TARGET_NR_fadvise64
6866 case TARGET_NR_fadvise64:
6870 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
6871 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
6872 case 6: arg4 = POSIX_FADV_DONTNEED; break;
6873 case 7: arg4 = POSIX_FADV_NOREUSE; break;
6877 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
6880 #ifdef TARGET_NR_madvise
6881 case TARGET_NR_madvise:
6882 /* A straight passthrough may not be safe because qemu sometimes
6883 turns private file-backed mappings into anonymous mappings.
6884 This will break MADV_DONTNEED.
6885 This is a hint, so ignoring and returning success is ok. */
6889 #if TARGET_ABI_BITS == 32
6890 case TARGET_NR_fcntl64:
6894 struct target_flock64 *target_fl;
6896 struct target_eabi_flock64 *target_efl;
6899 cmd = target_to_host_fcntl_cmd(arg2);
6900 if (cmd == -TARGET_EINVAL)
6904 case TARGET_F_GETLK64:
6906 if (((CPUARMState *)cpu_env)->eabi) {
6907 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
6909 fl.l_type = tswap16(target_efl->l_type);
6910 fl.l_whence = tswap16(target_efl->l_whence);
6911 fl.l_start = tswap64(target_efl->l_start);
6912 fl.l_len = tswap64(target_efl->l_len);
6913 fl.l_pid = tswap32(target_efl->l_pid);
6914 unlock_user_struct(target_efl, arg3, 0);
6918 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
6920 fl.l_type = tswap16(target_fl->l_type);
6921 fl.l_whence = tswap16(target_fl->l_whence);
6922 fl.l_start = tswap64(target_fl->l_start);
6923 fl.l_len = tswap64(target_fl->l_len);
6924 fl.l_pid = tswap32(target_fl->l_pid);
6925 unlock_user_struct(target_fl, arg3, 0);
6927 ret = get_errno(fcntl(arg1, cmd, &fl));
6930 if (((CPUARMState *)cpu_env)->eabi) {
6931 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
6933 target_efl->l_type = tswap16(fl.l_type);
6934 target_efl->l_whence = tswap16(fl.l_whence);
6935 target_efl->l_start = tswap64(fl.l_start);
6936 target_efl->l_len = tswap64(fl.l_len);
6937 target_efl->l_pid = tswap32(fl.l_pid);
6938 unlock_user_struct(target_efl, arg3, 1);
6942 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
6944 target_fl->l_type = tswap16(fl.l_type);
6945 target_fl->l_whence = tswap16(fl.l_whence);
6946 target_fl->l_start = tswap64(fl.l_start);
6947 target_fl->l_len = tswap64(fl.l_len);
6948 target_fl->l_pid = tswap32(fl.l_pid);
6949 unlock_user_struct(target_fl, arg3, 1);
6954 case TARGET_F_SETLK64:
6955 case TARGET_F_SETLKW64:
6957 if (((CPUARMState *)cpu_env)->eabi) {
6958 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
6960 fl.l_type = tswap16(target_efl->l_type);
6961 fl.l_whence = tswap16(target_efl->l_whence);
6962 fl.l_start = tswap64(target_efl->l_start);
6963 fl.l_len = tswap64(target_efl->l_len);
6964 fl.l_pid = tswap32(target_efl->l_pid);
6965 unlock_user_struct(target_efl, arg3, 0);
6969 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
6971 fl.l_type = tswap16(target_fl->l_type);
6972 fl.l_whence = tswap16(target_fl->l_whence);
6973 fl.l_start = tswap64(target_fl->l_start);
6974 fl.l_len = tswap64(target_fl->l_len);
6975 fl.l_pid = tswap32(target_fl->l_pid);
6976 unlock_user_struct(target_fl, arg3, 0);
6978 ret = get_errno(fcntl(arg1, cmd, &fl));
6981 ret = do_fcntl(arg1, arg2, arg3);
6987 #ifdef TARGET_NR_cacheflush
6988 case TARGET_NR_cacheflush:
6989 /* self-modifying code is handled automatically, so nothing needed */
6993 #ifdef TARGET_NR_security
6994 case TARGET_NR_security:
6997 #ifdef TARGET_NR_getpagesize
6998 case TARGET_NR_getpagesize:
6999 ret = TARGET_PAGE_SIZE;
7002 case TARGET_NR_gettid:
7003 ret = get_errno(gettid());
7005 #ifdef TARGET_NR_readahead
7006 case TARGET_NR_readahead:
7007 #if TARGET_ABI_BITS == 32
7009 if (((CPUARMState *)cpu_env)->eabi)
7016 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
7018 ret = get_errno(readahead(arg1, arg2, arg3));
7022 #ifdef TARGET_NR_setxattr
7023 case TARGET_NR_setxattr:
7024 case TARGET_NR_lsetxattr:
7025 case TARGET_NR_fsetxattr:
7026 case TARGET_NR_getxattr:
7027 case TARGET_NR_lgetxattr:
7028 case TARGET_NR_fgetxattr:
7029 case TARGET_NR_listxattr:
7030 case TARGET_NR_llistxattr:
7031 case TARGET_NR_flistxattr:
7032 case TARGET_NR_removexattr:
7033 case TARGET_NR_lremovexattr:
7034 case TARGET_NR_fremovexattr:
7035 ret = -TARGET_EOPNOTSUPP;
7038 #ifdef TARGET_NR_set_thread_area
7039 case TARGET_NR_set_thread_area:
7040 #if defined(TARGET_MIPS)
7041 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
7044 #elif defined(TARGET_CRIS)
7046 ret = -TARGET_EINVAL;
7048 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
7052 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
7053 ret = do_set_thread_area(cpu_env, arg1);
7056 goto unimplemented_nowarn;
7059 #ifdef TARGET_NR_get_thread_area
7060 case TARGET_NR_get_thread_area:
7061 #if defined(TARGET_I386) && defined(TARGET_ABI32)
7062 ret = do_get_thread_area(cpu_env, arg1);
7064 goto unimplemented_nowarn;
7067 #ifdef TARGET_NR_getdomainname
7068 case TARGET_NR_getdomainname:
7069 goto unimplemented_nowarn;
7072 #ifdef TARGET_NR_clock_gettime
7073 case TARGET_NR_clock_gettime:
7076 ret = get_errno(clock_gettime(arg1, &ts));
7077 if (!is_error(ret)) {
7078 host_to_target_timespec(arg2, &ts);
7083 #ifdef TARGET_NR_clock_getres
7084 case TARGET_NR_clock_getres:
7087 ret = get_errno(clock_getres(arg1, &ts));
7088 if (!is_error(ret)) {
7089 host_to_target_timespec(arg2, &ts);
7094 #ifdef TARGET_NR_clock_nanosleep
7095 case TARGET_NR_clock_nanosleep:
7098 target_to_host_timespec(&ts, arg3);
7099 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
7101 host_to_target_timespec(arg4, &ts);
7106 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
7107 case TARGET_NR_set_tid_address:
7108 ret = get_errno(set_tid_address((int *)g2h(arg1)));
7112 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
7113 case TARGET_NR_tkill:
7114 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
7118 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
7119 case TARGET_NR_tgkill:
7120 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
7121 target_to_host_signal(arg3)));
7125 #ifdef TARGET_NR_set_robust_list
7126 case TARGET_NR_set_robust_list:
7127 goto unimplemented_nowarn;
7130 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
7131 case TARGET_NR_utimensat:
7133 struct timespec *tsp, ts[2];
7137 target_to_host_timespec(ts, arg3);
7138 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
7142 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
7144 if (!(p = lock_user_string(arg2))) {
7145 ret = -TARGET_EFAULT;
7148 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
7149 unlock_user(p, arg2, 0);
7154 #if defined(CONFIG_USE_NPTL)
7155 case TARGET_NR_futex:
7156 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
7159 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
7160 case TARGET_NR_inotify_init:
7161 ret = get_errno(sys_inotify_init());
7164 #ifdef CONFIG_INOTIFY1
7165 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
7166 case TARGET_NR_inotify_init1:
7167 ret = get_errno(sys_inotify_init1(arg1));
7171 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
7172 case TARGET_NR_inotify_add_watch:
7173 p = lock_user_string(arg2);
7174 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
7175 unlock_user(p, arg2, 0);
7178 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
7179 case TARGET_NR_inotify_rm_watch:
7180 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
7184 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
7185 case TARGET_NR_mq_open:
7187 struct mq_attr posix_mq_attr;
7189 p = lock_user_string(arg1 - 1);
7191 copy_from_user_mq_attr (&posix_mq_attr, arg4);
7192 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
7193 unlock_user (p, arg1, 0);
7197 case TARGET_NR_mq_unlink:
7198 p = lock_user_string(arg1 - 1);
7199 ret = get_errno(mq_unlink(p));
7200 unlock_user (p, arg1, 0);
7203 case TARGET_NR_mq_timedsend:
7207 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7209 target_to_host_timespec(&ts, arg5);
7210 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
7211 host_to_target_timespec(arg5, &ts);
7214 ret = get_errno(mq_send(arg1, p, arg3, arg4));
7215 unlock_user (p, arg2, arg3);
7219 case TARGET_NR_mq_timedreceive:
7224 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7226 target_to_host_timespec(&ts, arg5);
7227 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
7228 host_to_target_timespec(arg5, &ts);
7231 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
7232 unlock_user (p, arg2, arg3);
7234 put_user_u32(prio, arg4);
7238 /* Not implemented for now... */
7239 /* case TARGET_NR_mq_notify: */
7242 case TARGET_NR_mq_getsetattr:
7244 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
7247 ret = mq_getattr(arg1, &posix_mq_attr_out);
7248 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
7251 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
7252 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
7259 #ifdef CONFIG_SPLICE
7260 #ifdef TARGET_NR_tee
7263 ret = get_errno(tee(arg1,arg2,arg3,arg4));
7267 #ifdef TARGET_NR_splice
7268 case TARGET_NR_splice:
7270 loff_t loff_in, loff_out;
7271 loff_t *ploff_in = NULL, *ploff_out = NULL;
7273 get_user_u64(loff_in, arg2);
7274 ploff_in = &loff_in;
7277 get_user_u64(loff_out, arg2);
7278 ploff_out = &loff_out;
7280 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
7284 #ifdef TARGET_NR_vmsplice
7285 case TARGET_NR_vmsplice:
7290 vec = alloca(count * sizeof(struct iovec));
7291 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
7293 ret = get_errno(vmsplice(arg1, vec, count, arg4));
7294 unlock_iovec(vec, arg2, count, 0);
7298 #endif /* CONFIG_SPLICE */
7299 #ifdef CONFIG_EVENTFD
7300 #if defined(TARGET_NR_eventfd)
7301 case TARGET_NR_eventfd:
7302 ret = get_errno(eventfd(arg1, 0));
7305 #if defined(TARGET_NR_eventfd2)
7306 case TARGET_NR_eventfd2:
7307 ret = get_errno(eventfd(arg1, arg2));
7310 #endif /* CONFIG_EVENTFD */
7311 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
7312 case TARGET_NR_fallocate:
7313 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
7318 gemu_log("qemu: Unsupported syscall: %d\n", num);
7319 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
7320 unimplemented_nowarn:
7322 ret = -TARGET_ENOSYS;
7327 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
7330 print_syscall_ret(num, ret);
7333 ret = -TARGET_EFAULT;