4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
32 #include <sys/types.h>
38 #include <sys/mount.h>
39 #include <sys/prctl.h>
40 #include <sys/resource.h>
45 #include <sys/socket.h>
48 #include <sys/times.h>
51 #include <sys/statfs.h>
53 #include <sys/sysinfo.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <qemu-common.h>
59 #define termios host_termios
60 #define winsize host_winsize
61 #define termio host_termio
62 #define sgttyb host_sgttyb /* same as target */
63 #define tchars host_tchars /* same as target */
64 #define ltchars host_ltchars /* same as target */
66 #include <linux/termios.h>
67 #include <linux/unistd.h>
68 #include <linux/utsname.h>
69 #include <linux/cdrom.h>
70 #include <linux/hdreg.h>
71 #include <linux/soundcard.h>
73 #include <linux/mtio.h>
74 #include "linux_loop.h"
77 #include "qemu-common.h"
80 #include <linux/futex.h>
81 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
82 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
84 /* XXX: Hardcode the above values. */
85 #define CLONE_NPTL_FLAGS2 0
90 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SPARC) \
91 || defined(TARGET_M68K) || defined(TARGET_SH4) || defined(TARGET_CRIS)
92 /* 16-bit uid wrapper emulation */
96 //#include <linux/msdos_fs.h>
97 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct dirent [2])
98 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct dirent [2])
109 #define _syscall0(type,name) \
110 static type name (void) \
112 return syscall(__NR_##name); \
115 #define _syscall1(type,name,type1,arg1) \
116 static type name (type1 arg1) \
118 return syscall(__NR_##name, arg1); \
121 #define _syscall2(type,name,type1,arg1,type2,arg2) \
122 static type name (type1 arg1,type2 arg2) \
124 return syscall(__NR_##name, arg1, arg2); \
127 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
128 static type name (type1 arg1,type2 arg2,type3 arg3) \
130 return syscall(__NR_##name, arg1, arg2, arg3); \
133 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
134 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
136 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
139 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
141 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
143 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
147 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
148 type5,arg5,type6,arg6) \
149 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
152 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
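#if 0
/* Illustrative expansion (sketch only): an invocation such as
 *   _syscall3(int, sys_getdents, uint, fd, struct dirent *, dirp, uint, count)
 * used further down expands to roughly the following host-side wrapper:
 */
static int sys_getdents (uint fd, struct dirent *dirp, uint count)
{
    return syscall(__NR_sys_getdents, fd, dirp, count);
}
#endif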
156 #define __NR_sys_uname __NR_uname
157 #define __NR_sys_faccessat __NR_faccessat
158 #define __NR_sys_fchmodat __NR_fchmodat
159 #define __NR_sys_fchownat __NR_fchownat
160 #define __NR_sys_fstatat64 __NR_fstatat64
161 #define __NR_sys_futimesat __NR_futimesat
162 #define __NR_sys_getcwd1 __NR_getcwd
163 #define __NR_sys_getdents __NR_getdents
164 #define __NR_sys_getdents64 __NR_getdents64
165 #define __NR_sys_getpriority __NR_getpriority
166 #define __NR_sys_linkat __NR_linkat
167 #define __NR_sys_mkdirat __NR_mkdirat
168 #define __NR_sys_mknodat __NR_mknodat
169 #define __NR_sys_openat __NR_openat
170 #define __NR_sys_readlinkat __NR_readlinkat
171 #define __NR_sys_renameat __NR_renameat
172 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
173 #define __NR_sys_symlinkat __NR_symlinkat
174 #define __NR_sys_syslog __NR_syslog
175 #define __NR_sys_tgkill __NR_tgkill
176 #define __NR_sys_tkill __NR_tkill
177 #define __NR_sys_unlinkat __NR_unlinkat
178 #define __NR_sys_utimensat __NR_utimensat
179 #define __NR_sys_futex __NR_futex
180 #define __NR_sys_inotify_init __NR_inotify_init
181 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
182 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
184 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
185 #define __NR__llseek __NR_lseek
189 _syscall0(int, gettid)
191 /* This is a replacement for the host gettid() and must return a host errno. */
193 static int gettid(void) {
197 _syscall1(int,sys_uname,struct new_utsname *,buf)
198 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
199 _syscall4(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode,int,flags)
201 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
202 _syscall4(int,sys_fchmodat,int,dirfd,const char *,pathname,
203 mode_t,mode,int,flags)
205 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
206 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
207 uid_t,owner,gid_t,group,int,flags)
209 #if defined(TARGET_NR_fstatat64) && defined(__NR_fstatat64)
210 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
211 struct stat *,buf,int,flags)
213 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
214 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
215 const struct timeval *,times)
217 _syscall2(int,sys_getcwd1,char *,buf,size_t,size)
218 #if TARGET_ABI_BITS == 32
219 _syscall3(int, sys_getdents, uint, fd, struct dirent *, dirp, uint, count);
221 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
222 _syscall3(int, sys_getdents64, uint, fd, struct dirent64 *, dirp, uint, count);
224 _syscall2(int, sys_getpriority, int, which, int, who);
225 #if !defined (__x86_64__)
226 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
227 loff_t *, res, uint, wh);
229 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
230 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
231 int,newdirfd,const char *,newpath,int,flags)
233 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
234 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
236 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
237 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
238 mode_t,mode,dev_t,dev)
240 #if defined(TARGET_NR_openat) && defined(__NR_openat)
241 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
243 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
244 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
245 char *,buf,size_t,bufsize)
247 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
248 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
249 int,newdirfd,const char *,newpath)
251 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
252 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
253 _syscall3(int,sys_symlinkat,const char *,oldpath,
254 int,newdirfd,const char *,newpath)
256 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
257 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
258 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
260 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
261 _syscall2(int,sys_tkill,int,tid,int,sig)
263 #ifdef __NR_exit_group
264 _syscall1(int,exit_group,int,error_code)
266 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
267 _syscall1(int,set_tid_address,int *,tidptr)
269 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
270 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
272 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
273 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
274 const struct timespec *,tsp,int,flags)
276 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
277 _syscall0(int,sys_inotify_init)
279 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
280 _syscall3(int,sys_inotify_add_watch,int,fd,const char *,pathname,uint32_t,mask)
282 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
283 _syscall2(int,sys_inotify_rm_watch,int,fd,uint32_t,wd)
285 #if defined(USE_NPTL)
286 #if defined(TARGET_NR_futex) && defined(__NR_futex)
287 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
288 const struct timespec *,timeout,int *,uaddr2,int,val3)
292 extern int personality(int);
293 extern int flock(int, int);
294 extern int setfsuid(int);
295 extern int setfsgid(int);
296 extern int setgroups(int, gid_t *);
298 #define ERRNO_TABLE_SIZE 1200
300 /* target_to_host_errno_table[] is initialized from
301 * host_to_target_errno_table[] in syscall_init(). */
302 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
306 * This list is the union of errno values overridden in asm-<arch>/errno.h
307 * minus the errnos that are not actually generic to all archs.
309 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
310 [EIDRM] = TARGET_EIDRM,
311 [ECHRNG] = TARGET_ECHRNG,
312 [EL2NSYNC] = TARGET_EL2NSYNC,
313 [EL3HLT] = TARGET_EL3HLT,
314 [EL3RST] = TARGET_EL3RST,
315 [ELNRNG] = TARGET_ELNRNG,
316 [EUNATCH] = TARGET_EUNATCH,
317 [ENOCSI] = TARGET_ENOCSI,
318 [EL2HLT] = TARGET_EL2HLT,
319 [EDEADLK] = TARGET_EDEADLK,
320 [ENOLCK] = TARGET_ENOLCK,
321 [EBADE] = TARGET_EBADE,
322 [EBADR] = TARGET_EBADR,
323 [EXFULL] = TARGET_EXFULL,
324 [ENOANO] = TARGET_ENOANO,
325 [EBADRQC] = TARGET_EBADRQC,
326 [EBADSLT] = TARGET_EBADSLT,
327 [EBFONT] = TARGET_EBFONT,
328 [ENOSTR] = TARGET_ENOSTR,
329 [ENODATA] = TARGET_ENODATA,
330 [ETIME] = TARGET_ETIME,
331 [ENOSR] = TARGET_ENOSR,
332 [ENONET] = TARGET_ENONET,
333 [ENOPKG] = TARGET_ENOPKG,
334 [EREMOTE] = TARGET_EREMOTE,
335 [ENOLINK] = TARGET_ENOLINK,
336 [EADV] = TARGET_EADV,
337 [ESRMNT] = TARGET_ESRMNT,
338 [ECOMM] = TARGET_ECOMM,
339 [EPROTO] = TARGET_EPROTO,
340 [EDOTDOT] = TARGET_EDOTDOT,
341 [EMULTIHOP] = TARGET_EMULTIHOP,
342 [EBADMSG] = TARGET_EBADMSG,
343 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
344 [EOVERFLOW] = TARGET_EOVERFLOW,
345 [ENOTUNIQ] = TARGET_ENOTUNIQ,
346 [EBADFD] = TARGET_EBADFD,
347 [EREMCHG] = TARGET_EREMCHG,
348 [ELIBACC] = TARGET_ELIBACC,
349 [ELIBBAD] = TARGET_ELIBBAD,
350 [ELIBSCN] = TARGET_ELIBSCN,
351 [ELIBMAX] = TARGET_ELIBMAX,
352 [ELIBEXEC] = TARGET_ELIBEXEC,
353 [EILSEQ] = TARGET_EILSEQ,
354 [ENOSYS] = TARGET_ENOSYS,
355 [ELOOP] = TARGET_ELOOP,
356 [ERESTART] = TARGET_ERESTART,
357 [ESTRPIPE] = TARGET_ESTRPIPE,
358 [ENOTEMPTY] = TARGET_ENOTEMPTY,
359 [EUSERS] = TARGET_EUSERS,
360 [ENOTSOCK] = TARGET_ENOTSOCK,
361 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
362 [EMSGSIZE] = TARGET_EMSGSIZE,
363 [EPROTOTYPE] = TARGET_EPROTOTYPE,
364 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
365 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
366 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
367 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
368 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
369 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
370 [EADDRINUSE] = TARGET_EADDRINUSE,
371 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
372 [ENETDOWN] = TARGET_ENETDOWN,
373 [ENETUNREACH] = TARGET_ENETUNREACH,
374 [ENETRESET] = TARGET_ENETRESET,
375 [ECONNABORTED] = TARGET_ECONNABORTED,
376 [ECONNRESET] = TARGET_ECONNRESET,
377 [ENOBUFS] = TARGET_ENOBUFS,
378 [EISCONN] = TARGET_EISCONN,
379 [ENOTCONN] = TARGET_ENOTCONN,
380 [EUCLEAN] = TARGET_EUCLEAN,
381 [ENOTNAM] = TARGET_ENOTNAM,
382 [ENAVAIL] = TARGET_ENAVAIL,
383 [EISNAM] = TARGET_EISNAM,
384 [EREMOTEIO] = TARGET_EREMOTEIO,
385 [ESHUTDOWN] = TARGET_ESHUTDOWN,
386 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
387 [ETIMEDOUT] = TARGET_ETIMEDOUT,
388 [ECONNREFUSED] = TARGET_ECONNREFUSED,
389 [EHOSTDOWN] = TARGET_EHOSTDOWN,
390 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
391 [EALREADY] = TARGET_EALREADY,
392 [EINPROGRESS] = TARGET_EINPROGRESS,
393 [ESTALE] = TARGET_ESTALE,
394 [ECANCELED] = TARGET_ECANCELED,
395 [ENOMEDIUM] = TARGET_ENOMEDIUM,
396 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
398 [ENOKEY] = TARGET_ENOKEY,
401 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
404 [EKEYREVOKED] = TARGET_EKEYREVOKED,
407 [EKEYREJECTED] = TARGET_EKEYREJECTED,
410 [EOWNERDEAD] = TARGET_EOWNERDEAD,
412 #ifdef ENOTRECOVERABLE
413 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
417 static inline int host_to_target_errno(int err)
419 if(host_to_target_errno_table[err])
420 return host_to_target_errno_table[err];
424 static inline int target_to_host_errno(int err)
426 if (target_to_host_errno_table[err])
427 return target_to_host_errno_table[err];
431 static inline abi_long get_errno(abi_long ret)
434 return -host_to_target_errno(errno);
439 static inline int is_error(abi_long ret)
441 return (abi_ulong)ret >= (abi_ulong)(-4096);
444 char *target_strerror(int err)
446 return strerror(target_to_host_errno(err));
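#if 0
/* Sketch of the table initialization that the comment above
 * host_to_target_errno_table[] attributes to syscall_init(): the target
 * table is simply the inverse mapping of the host table (the loop below
 * is illustrative, not the file's actual syscall_init()).
 */
{
    int i;
    for (i = 0; i < ERRNO_TABLE_SIZE; i++)
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
}
#endif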
449 static abi_ulong target_brk;
450 static abi_ulong target_original_brk;
452 void target_set_brk(abi_ulong new_brk)
454 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
457 /* do_brk() must return target values and target errnos. */
458 abi_long do_brk(abi_ulong new_brk)
461 abi_long mapped_addr;
466 if (new_brk < target_original_brk)
469 brk_page = HOST_PAGE_ALIGN(target_brk);
471 /* If the new brk is less than this, set it and we're done... */
472 if (new_brk < brk_page) {
473 target_brk = new_brk;
477 /* We need to allocate more memory after the brk... */
478 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
479 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
480 PROT_READ|PROT_WRITE,
481 MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));
483 if (!is_error(mapped_addr))
484 target_brk = new_brk;
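/* Worked example for the allocation above (hypothetical addresses, 4 KiB
 * host pages): with target_brk == 0x0804a123, brk_page is 0x0804b000.  A
 * request for new_brk == 0x0804c800 lies above brk_page, so
 * new_alloc_size == HOST_PAGE_ALIGN(0x1801) == 0x2000 and two fresh pages
 * are mapped MAP_FIXED at brk_page before target_brk is advanced. */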
489 static inline abi_long copy_from_user_fdset(fd_set *fds,
490 abi_ulong target_fds_addr,
494 abi_ulong b, *target_fds;
496 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
497 if (!(target_fds = lock_user(VERIFY_READ,
499 sizeof(abi_ulong) * nw,
501 return -TARGET_EFAULT;
505 for (i = 0; i < nw; i++) {
506 /* grab the abi_ulong */
507 __get_user(b, &target_fds[i]);
508 for (j = 0; j < TARGET_ABI_BITS; j++) {
509 /* check the bit inside the abi_ulong */
516 unlock_user(target_fds, target_fds_addr, 0);
521 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
527 abi_ulong *target_fds;
529 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
530 if (!(target_fds = lock_user(VERIFY_WRITE,
532 sizeof(abi_ulong) * nw,
534 return -TARGET_EFAULT;
537 for (i = 0; i < nw; i++) {
539 for (j = 0; j < TARGET_ABI_BITS; j++) {
540 v |= ((FD_ISSET(k, fds) != 0) << j);
543 __put_user(v, &target_fds[i]);
546 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
551 #if defined(__alpha__)
557 static inline abi_long host_to_target_clock_t(long ticks)
559 #if HOST_HZ == TARGET_HZ
562 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
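/* Worked example (hypothetical values): with HOST_HZ == 1000 and
 * TARGET_HZ == 100, 250 host ticks become (250 * 100) / 1000 == 25 target
 * ticks; the int64_t cast keeps the intermediate product from overflowing
 * a 32-bit long for large tick counts. */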
566 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
567 const struct rusage *rusage)
569 struct target_rusage *target_rusage;
571 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
572 return -TARGET_EFAULT;
573 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
574 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
575 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
576 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
577 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
578 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
579 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
580 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
581 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
582 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
583 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
584 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
585 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
586 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
587 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
588 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
589 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
590 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
591 unlock_user_struct(target_rusage, target_addr, 1);
596 static inline abi_long copy_from_user_timeval(struct timeval *tv,
597 abi_ulong target_tv_addr)
599 struct target_timeval *target_tv;
601 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
602 return -TARGET_EFAULT;
604 __get_user(tv->tv_sec, &target_tv->tv_sec);
605 __get_user(tv->tv_usec, &target_tv->tv_usec);
607 unlock_user_struct(target_tv, target_tv_addr, 0);
612 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
613 const struct timeval *tv)
615 struct target_timeval *target_tv;
617 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
618 return -TARGET_EFAULT;
620 __put_user(tv->tv_sec, &target_tv->tv_sec);
621 __put_user(tv->tv_usec, &target_tv->tv_usec);
623 unlock_user_struct(target_tv, target_tv_addr, 1);
629 /* do_select() must return target values and target errnos. */
630 static abi_long do_select(int n,
631 abi_ulong rfd_addr, abi_ulong wfd_addr,
632 abi_ulong efd_addr, abi_ulong target_tv_addr)
634 fd_set rfds, wfds, efds;
635 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
636 struct timeval tv, *tv_ptr;
640 if (copy_from_user_fdset(&rfds, rfd_addr, n))
641 return -TARGET_EFAULT;
647 if (copy_from_user_fdset(&wfds, wfd_addr, n))
648 return -TARGET_EFAULT;
654 if (copy_from_user_fdset(&efds, efd_addr, n))
655 return -TARGET_EFAULT;
661 if (target_tv_addr) {
662 if (copy_from_user_timeval(&tv, target_tv_addr))
663 return -TARGET_EFAULT;
669 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
671 if (!is_error(ret)) {
672 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
673 return -TARGET_EFAULT;
674 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
675 return -TARGET_EFAULT;
676 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
677 return -TARGET_EFAULT;
679 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
680 return -TARGET_EFAULT;
686 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
687 abi_ulong target_addr,
690 struct target_sockaddr *target_saddr;
692 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
694 return -TARGET_EFAULT;
695 memcpy(addr, target_saddr, len);
696 addr->sa_family = tswap16(target_saddr->sa_family);
697 unlock_user(target_saddr, target_addr, 0);
702 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
703 struct sockaddr *addr,
706 struct target_sockaddr *target_saddr;
708 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
710 return -TARGET_EFAULT;
711 memcpy(target_saddr, addr, len);
712 target_saddr->sa_family = tswap16(addr->sa_family);
713 unlock_user(target_saddr, target_addr, len);
718 /* ??? Should this also swap msgh->name? */
719 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
720 struct target_msghdr *target_msgh)
722 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
723 abi_long msg_controllen;
724 abi_ulong target_cmsg_addr;
725 struct target_cmsghdr *target_cmsg;
728 msg_controllen = tswapl(target_msgh->msg_controllen);
729 if (msg_controllen < sizeof (struct target_cmsghdr))
731 target_cmsg_addr = tswapl(target_msgh->msg_control);
732 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
734 return -TARGET_EFAULT;
736 while (cmsg && target_cmsg) {
737 void *data = CMSG_DATA(cmsg);
738 void *target_data = TARGET_CMSG_DATA(target_cmsg);
740 int len = tswapl(target_cmsg->cmsg_len)
741 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
743 space += CMSG_SPACE(len);
744 if (space > msgh->msg_controllen) {
745 space -= CMSG_SPACE(len);
746 gemu_log("Host cmsg overflow\n");
750 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
751 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
752 cmsg->cmsg_len = CMSG_LEN(len);
754 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
755 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
756 memcpy(data, target_data, len);
758 int *fd = (int *)data;
759 int *target_fd = (int *)target_data;
760 int i, numfds = len / sizeof(int);
762 for (i = 0; i < numfds; i++)
763 fd[i] = tswap32(target_fd[i]);
766 cmsg = CMSG_NXTHDR(msgh, cmsg);
767 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
769 unlock_user(target_cmsg, target_cmsg_addr, 0);
771 msgh->msg_controllen = space;
775 /* ??? Should this also swap msgh->name? */
776 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
779 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
780 abi_long msg_controllen;
781 abi_ulong target_cmsg_addr;
782 struct target_cmsghdr *target_cmsg;
785 msg_controllen = tswapl(target_msgh->msg_controllen);
786 if (msg_controllen < sizeof (struct target_cmsghdr))
788 target_cmsg_addr = tswapl(target_msgh->msg_control);
789 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
791 return -TARGET_EFAULT;
793 while (cmsg && target_cmsg) {
794 void *data = CMSG_DATA(cmsg);
795 void *target_data = TARGET_CMSG_DATA(target_cmsg);
797 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
799 space += TARGET_CMSG_SPACE(len);
800 if (space > msg_controllen) {
801 space -= TARGET_CMSG_SPACE(len);
802 gemu_log("Target cmsg overflow\n");
806 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
807 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
808 target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
810 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
811 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
812 memcpy(target_data, data, len);
814 int *fd = (int *)data;
815 int *target_fd = (int *)target_data;
816 int i, numfds = len / sizeof(int);
818 for (i = 0; i < numfds; i++)
819 target_fd[i] = tswap32(fd[i]);
822 cmsg = CMSG_NXTHDR(msgh, cmsg);
823 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
825 unlock_user(target_cmsg, target_cmsg_addr, space);
827 target_msgh->msg_controllen = tswapl(space);
831 /* do_setsockopt() must return target values and target errnos. */
832 static abi_long do_setsockopt(int sockfd, int level, int optname,
833 abi_ulong optval_addr, socklen_t optlen)
840 /* TCP options all take an 'int' value. */
841 if (optlen < sizeof(uint32_t))
842 return -TARGET_EINVAL;
844 if (get_user_u32(val, optval_addr))
845 return -TARGET_EFAULT;
846 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
853 case IP_ROUTER_ALERT:
857 case IP_MTU_DISCOVER:
863 case IP_MULTICAST_TTL:
864 case IP_MULTICAST_LOOP:
866 if (optlen >= sizeof(uint32_t)) {
867 if (get_user_u32(val, optval_addr))
868 return -TARGET_EFAULT;
869 } else if (optlen >= 1) {
870 if (get_user_u8(val, optval_addr))
871 return -TARGET_EFAULT;
873 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
879 case TARGET_SOL_SOCKET:
881 /* Options with 'int' argument. */
882 case TARGET_SO_DEBUG:
885 case TARGET_SO_REUSEADDR:
886 optname = SO_REUSEADDR;
891 case TARGET_SO_ERROR:
894 case TARGET_SO_DONTROUTE:
895 optname = SO_DONTROUTE;
897 case TARGET_SO_BROADCAST:
898 optname = SO_BROADCAST;
900 case TARGET_SO_SNDBUF:
903 case TARGET_SO_RCVBUF:
906 case TARGET_SO_KEEPALIVE:
907 optname = SO_KEEPALIVE;
909 case TARGET_SO_OOBINLINE:
910 optname = SO_OOBINLINE;
912 case TARGET_SO_NO_CHECK:
913 optname = SO_NO_CHECK;
915 case TARGET_SO_PRIORITY:
916 optname = SO_PRIORITY;
919 case TARGET_SO_BSDCOMPAT:
920 optname = SO_BSDCOMPAT;
923 case TARGET_SO_PASSCRED:
924 optname = SO_PASSCRED;
926 case TARGET_SO_TIMESTAMP:
927 optname = SO_TIMESTAMP;
929 case TARGET_SO_RCVLOWAT:
930 optname = SO_RCVLOWAT;
932 case TARGET_SO_RCVTIMEO:
933 optname = SO_RCVTIMEO;
935 case TARGET_SO_SNDTIMEO:
936 optname = SO_SNDTIMEO;
942 if (optlen < sizeof(uint32_t))
943 return -TARGET_EINVAL;
945 if (get_user_u32(val, optval_addr))
946 return -TARGET_EFAULT;
947 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
951 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
952 ret = -TARGET_ENOPROTOOPT;
957 /* do_getsockopt() must return target values and target errnos. */
958 static abi_long do_getsockopt(int sockfd, int level, int optname,
959 abi_ulong optval_addr, abi_ulong optlen)
966 case TARGET_SOL_SOCKET:
969 case TARGET_SO_LINGER:
970 case TARGET_SO_RCVTIMEO:
971 case TARGET_SO_SNDTIMEO:
972 case TARGET_SO_PEERCRED:
973 case TARGET_SO_PEERNAME:
974 /* These don't just return a single integer */
981 /* TCP options all take an 'int' value. */
983 if (get_user_u32(len, optlen))
984 return -TARGET_EFAULT;
986 return -TARGET_EINVAL;
988 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
995 if (put_user_u32(val, optval_addr))
996 return -TARGET_EFAULT;
998 if (put_user_u8(val, optval_addr))
999 return -TARGET_EFAULT;
1001 if (put_user_u32(len, optlen))
1002 return -TARGET_EFAULT;
1009 case IP_ROUTER_ALERT:
1013 case IP_MTU_DISCOVER:
1019 case IP_MULTICAST_TTL:
1020 case IP_MULTICAST_LOOP:
1021 if (get_user_u32(len, optlen))
1022 return -TARGET_EFAULT;
1024 return -TARGET_EINVAL;
1026 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1029 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1031 if (put_user_u32(len, optlen)
1032 || put_user_u8(val, optval_addr))
1033 return -TARGET_EFAULT;
1035 if (len > sizeof(int))
1037 if (put_user_u32(len, optlen)
1038 || put_user_u32(val, optval_addr))
1039 return -TARGET_EFAULT;
1043 ret = -TARGET_ENOPROTOOPT;
1049 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1051 ret = -TARGET_EOPNOTSUPP;
1058 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1059 * other lock functions have a return code of 0 for failure.
1061 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1062 int count, int copy)
1064 struct target_iovec *target_vec;
1068 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1070 return -TARGET_EFAULT;
1071 for(i = 0;i < count; i++) {
1072 base = tswapl(target_vec[i].iov_base);
1073 vec[i].iov_len = tswapl(target_vec[i].iov_len);
1074 if (vec[i].iov_len != 0) {
1075 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1076 if (!vec[i].iov_base && vec[i].iov_len)
1079 /* zero length pointer is ignored */
1080 vec[i].iov_base = NULL;
1083 unlock_user (target_vec, target_addr, 0);
1086 /* failure - unwind locks */
1087 for (j = 0; j < i; j++) {
1088 base = tswapl(target_vec[j].iov_base);
1089 unlock_user(vec[j].iov_base, base, 0);
1091 unlock_user (target_vec, target_addr, 0);
1092 return -TARGET_EFAULT;
1095 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1096 int count, int copy)
1098 struct target_iovec *target_vec;
1102 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1104 return -TARGET_EFAULT;
1105 for(i = 0;i < count; i++) {
1106 base = tswapl(target_vec[i].iov_base);
1107 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1109 unlock_user (target_vec, target_addr, 0);
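#if 0
/* Illustrative calling convention for lock_iovec()/unlock_iovec() (the
 * names vec, target_vec and count are placeholders): note the inverted
 * return-code check compared with lock_user()/lock_user_struct(), as the
 * comment above lock_iovec() warns.
 */
struct iovec *vec = alloca(count * sizeof(struct iovec));
if (lock_iovec(VERIFY_READ, vec, target_vec, count, 1))
    return -TARGET_EFAULT;
/* ... use vec with readv()/writev()/sendmsg() ... */
unlock_iovec(vec, target_vec, count, 0);
#endif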
1114 /* do_socket() must return target values and target errnos. */
1115 static abi_long do_socket(int domain, int type, int protocol)
1117 #if defined(TARGET_MIPS)
1119 case TARGET_SOCK_DGRAM:
1122 case TARGET_SOCK_STREAM:
1125 case TARGET_SOCK_RAW:
1128 case TARGET_SOCK_RDM:
1131 case TARGET_SOCK_SEQPACKET:
1132 type = SOCK_SEQPACKET;
1134 case TARGET_SOCK_PACKET:
1139 if (domain == PF_NETLINK)
1140 return -EAFNOSUPPORT; /* NETLINK socket connections are not possible */
1141 return get_errno(socket(domain, type, protocol));
1144 /* do_bind() must return target values and target errnos. */
1145 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1148 void *addr = alloca(addrlen);
1150 target_to_host_sockaddr(addr, target_addr, addrlen);
1151 return get_errno(bind(sockfd, addr, addrlen));
1154 /* do_connect() must return target values and target errnos. */
1155 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1158 void *addr = alloca(addrlen);
1160 target_to_host_sockaddr(addr, target_addr, addrlen);
1161 return get_errno(connect(sockfd, addr, addrlen));
1164 /* do_sendrecvmsg() must return target values and target errnos. */
1165 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1166 int flags, int send)
1169 struct target_msghdr *msgp;
1173 abi_ulong target_vec;
1176 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1180 return -TARGET_EFAULT;
1181 if (msgp->msg_name) {
1182 msg.msg_namelen = tswap32(msgp->msg_namelen);
1183 msg.msg_name = alloca(msg.msg_namelen);
1184 target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1187 msg.msg_name = NULL;
1188 msg.msg_namelen = 0;
1190 msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1191 msg.msg_control = alloca(msg.msg_controllen);
1192 msg.msg_flags = tswap32(msgp->msg_flags);
1194 count = tswapl(msgp->msg_iovlen);
1195 vec = alloca(count * sizeof(struct iovec));
1196 target_vec = tswapl(msgp->msg_iov);
1197 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1198 msg.msg_iovlen = count;
1202 ret = target_to_host_cmsg(&msg, msgp);
1204 ret = get_errno(sendmsg(fd, &msg, flags));
1206 ret = get_errno(recvmsg(fd, &msg, flags));
1208 ret = host_to_target_cmsg(msgp, &msg);
1210 unlock_iovec(vec, target_vec, count, !send);
1211 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1215 /* do_accept() must return target values and target errnos. */
1216 static abi_long do_accept(int fd, abi_ulong target_addr,
1217 abi_ulong target_addrlen_addr)
1223 if (get_user_u32(addrlen, target_addrlen_addr))
1224 return -TARGET_EFAULT;
1226 addr = alloca(addrlen);
1228 ret = get_errno(accept(fd, addr, &addrlen));
1229 if (!is_error(ret)) {
1230 host_to_target_sockaddr(target_addr, addr, addrlen);
1231 if (put_user_u32(addrlen, target_addrlen_addr))
1232 ret = -TARGET_EFAULT;
1237 /* do_getpeername() must return target values and target errnos. */
1238 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1239 abi_ulong target_addrlen_addr)
1245 if (get_user_u32(addrlen, target_addrlen_addr))
1246 return -TARGET_EFAULT;
1248 addr = alloca(addrlen);
1250 ret = get_errno(getpeername(fd, addr, &addrlen));
1251 if (!is_error(ret)) {
1252 host_to_target_sockaddr(target_addr, addr, addrlen);
1253 if (put_user_u32(addrlen, target_addrlen_addr))
1254 ret = -TARGET_EFAULT;
1259 /* do_getsockname() must return target values and target errnos. */
1260 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1261 abi_ulong target_addrlen_addr)
1267 if (get_user_u32(addrlen, target_addrlen_addr))
1268 return -TARGET_EFAULT;
1270 addr = alloca(addrlen);
1272 ret = get_errno(getsockname(fd, addr, &addrlen));
1273 if (!is_error(ret)) {
1274 host_to_target_sockaddr(target_addr, addr, addrlen);
1275 if (put_user_u32(addrlen, target_addrlen_addr))
1276 ret = -TARGET_EFAULT;
1281 /* do_socketpair() must return target values and target errnos. */
1282 static abi_long do_socketpair(int domain, int type, int protocol,
1283 abi_ulong target_tab_addr)
1288 ret = get_errno(socketpair(domain, type, protocol, tab));
1289 if (!is_error(ret)) {
1290 if (put_user_s32(tab[0], target_tab_addr)
1291 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1292 ret = -TARGET_EFAULT;
1297 /* do_sendto() must return target values and target errnos. */
1298 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1299 abi_ulong target_addr, socklen_t addrlen)
1305 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1307 return -TARGET_EFAULT;
1309 addr = alloca(addrlen);
1310 target_to_host_sockaddr(addr, target_addr, addrlen);
1311 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1313 ret = get_errno(send(fd, host_msg, len, flags));
1315 unlock_user(host_msg, msg, 0);
1319 /* do_recvfrom() must return target values and target errnos. */
1320 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1321 abi_ulong target_addr,
1322 abi_ulong target_addrlen)
1329 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
1331 return -TARGET_EFAULT;
1333 if (get_user_u32(addrlen, target_addrlen)) {
1334 ret = -TARGET_EFAULT;
1337 addr = alloca(addrlen);
1338 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
1340 addr = NULL; /* To keep compiler quiet. */
1341 ret = get_errno(recv(fd, host_msg, len, flags));
1343 if (!is_error(ret)) {
1345 host_to_target_sockaddr(target_addr, addr, addrlen);
1346 if (put_user_u32(addrlen, target_addrlen)) {
1347 ret = -TARGET_EFAULT;
1351 unlock_user(host_msg, msg, len);
1354 unlock_user(host_msg, msg, 0);
1359 #ifdef TARGET_NR_socketcall
1360 /* do_socketcall() must return target values and target errnos. */
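/* The guest passes a single pointer (vptr) to an array of abi_ulong-sized
 * argument slots; for SOCKOP_bind, for example, the layout is
 *   vptr + 0*n: sockfd,  vptr + 1*n: target_addr,  vptr + 2*n: addrlen
 * with n == sizeof(abi_ulong), which is how the get_user_*() calls below
 * pick the arguments apart. */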
1361 static abi_long do_socketcall(int num, abi_ulong vptr)
1364 const int n = sizeof(abi_ulong);
1369 int domain, type, protocol;
1371 if (get_user_s32(domain, vptr)
1372 || get_user_s32(type, vptr + n)
1373 || get_user_s32(protocol, vptr + 2 * n))
1374 return -TARGET_EFAULT;
1376 ret = do_socket(domain, type, protocol);
1382 abi_ulong target_addr;
1385 if (get_user_s32(sockfd, vptr)
1386 || get_user_ual(target_addr, vptr + n)
1387 || get_user_u32(addrlen, vptr + 2 * n))
1388 return -TARGET_EFAULT;
1390 ret = do_bind(sockfd, target_addr, addrlen);
1393 case SOCKOP_connect:
1396 abi_ulong target_addr;
1399 if (get_user_s32(sockfd, vptr)
1400 || get_user_ual(target_addr, vptr + n)
1401 || get_user_u32(addrlen, vptr + 2 * n))
1402 return -TARGET_EFAULT;
1404 ret = do_connect(sockfd, target_addr, addrlen);
1409 int sockfd, backlog;
1411 if (get_user_s32(sockfd, vptr)
1412 || get_user_s32(backlog, vptr + n))
1413 return -TARGET_EFAULT;
1415 ret = get_errno(listen(sockfd, backlog));
1421 abi_ulong target_addr, target_addrlen;
1423 if (get_user_s32(sockfd, vptr)
1424 || get_user_ual(target_addr, vptr + n)
1425 || get_user_u32(target_addrlen, vptr + 2 * n))
1426 return -TARGET_EFAULT;
1428 ret = do_accept(sockfd, target_addr, target_addrlen);
1431 case SOCKOP_getsockname:
1434 abi_ulong target_addr, target_addrlen;
1436 if (get_user_s32(sockfd, vptr)
1437 || get_user_ual(target_addr, vptr + n)
1438 || get_user_u32(target_addrlen, vptr + 2 * n))
1439 return -TARGET_EFAULT;
1441 ret = do_getsockname(sockfd, target_addr, target_addrlen);
1444 case SOCKOP_getpeername:
1447 abi_ulong target_addr, target_addrlen;
1449 if (get_user_s32(sockfd, vptr)
1450 || get_user_ual(target_addr, vptr + n)
1451 || get_user_u32(target_addrlen, vptr + 2 * n))
1452 return -TARGET_EFAULT;
1454 ret = do_getpeername(sockfd, target_addr, target_addrlen);
1457 case SOCKOP_socketpair:
1459 int domain, type, protocol;
1462 if (get_user_s32(domain, vptr)
1463 || get_user_s32(type, vptr + n)
1464 || get_user_s32(protocol, vptr + 2 * n)
1465 || get_user_ual(tab, vptr + 3 * n))
1466 return -TARGET_EFAULT;
1468 ret = do_socketpair(domain, type, protocol, tab);
1478 if (get_user_s32(sockfd, vptr)
1479 || get_user_ual(msg, vptr + n)
1480 || get_user_ual(len, vptr + 2 * n)
1481 || get_user_s32(flags, vptr + 3 * n))
1482 return -TARGET_EFAULT;
1484 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
1494 if (get_user_s32(sockfd, vptr)
1495 || get_user_ual(msg, vptr + n)
1496 || get_user_ual(len, vptr + 2 * n)
1497 || get_user_s32(flags, vptr + 3 * n))
1498 return -TARGET_EFAULT;
1500 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
1512 if (get_user_s32(sockfd, vptr)
1513 || get_user_ual(msg, vptr + n)
1514 || get_user_ual(len, vptr + 2 * n)
1515 || get_user_s32(flags, vptr + 3 * n)
1516 || get_user_ual(addr, vptr + 4 * n)
1517 || get_user_u32(addrlen, vptr + 5 * n))
1518 return -TARGET_EFAULT;
1520 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
1523 case SOCKOP_recvfrom:
1532 if (get_user_s32(sockfd, vptr)
1533 || get_user_ual(msg, vptr + n)
1534 || get_user_ual(len, vptr + 2 * n)
1535 || get_user_s32(flags, vptr + 3 * n)
1536 || get_user_ual(addr, vptr + 4 * n)
1537 || get_user_u32(addrlen, vptr + 5 * n))
1538 return -TARGET_EFAULT;
1540 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
1543 case SOCKOP_shutdown:
1547 if (get_user_s32(sockfd, vptr)
1548 || get_user_s32(how, vptr + n))
1549 return -TARGET_EFAULT;
1551 ret = get_errno(shutdown(sockfd, how));
1554 case SOCKOP_sendmsg:
1555 case SOCKOP_recvmsg:
1558 abi_ulong target_msg;
1561 if (get_user_s32(fd, vptr)
1562 || get_user_ual(target_msg, vptr + n)
1563 || get_user_s32(flags, vptr + 2 * n))
1564 return -TARGET_EFAULT;
1566 ret = do_sendrecvmsg(fd, target_msg, flags,
1567 (num == SOCKOP_sendmsg));
1570 case SOCKOP_setsockopt:
1578 if (get_user_s32(sockfd, vptr)
1579 || get_user_s32(level, vptr + n)
1580 || get_user_s32(optname, vptr + 2 * n)
1581 || get_user_ual(optval, vptr + 3 * n)
1582 || get_user_u32(optlen, vptr + 4 * n))
1583 return -TARGET_EFAULT;
1585 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
1588 case SOCKOP_getsockopt:
1596 if (get_user_s32(sockfd, vptr)
1597 || get_user_s32(level, vptr + n)
1598 || get_user_s32(optname, vptr + 2 * n)
1599 || get_user_ual(optval, vptr + 3 * n)
1600 || get_user_u32(optlen, vptr + 4 * n))
1601 return -TARGET_EFAULT;
1603 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
1607 gemu_log("Unsupported socketcall: %d\n", num);
1608 ret = -TARGET_ENOSYS;
1615 #ifdef TARGET_NR_ipc
1616 #define N_SHM_REGIONS 32
1618 static struct shm_region {
1621 } shm_regions[N_SHM_REGIONS];
1623 struct target_ipc_perm
1630 unsigned short int mode;
1631 unsigned short int __pad1;
1632 unsigned short int __seq;
1633 unsigned short int __pad2;
1634 abi_ulong __unused1;
1635 abi_ulong __unused2;
1638 struct target_semid_ds
1640 struct target_ipc_perm sem_perm;
1641 abi_ulong sem_otime;
1642 abi_ulong __unused1;
1643 abi_ulong sem_ctime;
1644 abi_ulong __unused2;
1645 abi_ulong sem_nsems;
1646 abi_ulong __unused3;
1647 abi_ulong __unused4;
1650 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
1651 abi_ulong target_addr)
1653 struct target_ipc_perm *target_ip;
1654 struct target_semid_ds *target_sd;
1656 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
1657 return -TARGET_EFAULT;
1658 target_ip=&(target_sd->sem_perm);
1659 host_ip->__key = tswapl(target_ip->__key);
1660 host_ip->uid = tswapl(target_ip->uid);
1661 host_ip->gid = tswapl(target_ip->gid);
1662 host_ip->cuid = tswapl(target_ip->cuid);
1663 host_ip->cgid = tswapl(target_ip->cgid);
1664 host_ip->mode = tswapl(target_ip->mode);
1665 unlock_user_struct(target_sd, target_addr, 0);
1669 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
1670 struct ipc_perm *host_ip)
1672 struct target_ipc_perm *target_ip;
1673 struct target_semid_ds *target_sd;
1675 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
1676 return -TARGET_EFAULT;
1677 target_ip = &(target_sd->sem_perm);
1678 target_ip->__key = tswapl(host_ip->__key);
1679 target_ip->uid = tswapl(host_ip->uid);
1680 target_ip->gid = tswapl(host_ip->gid);
1681 target_ip->cuid = tswapl(host_ip->cuid);
1682 target_ip->cgid = tswapl(host_ip->cgid);
1683 target_ip->mode = tswapl(host_ip->mode);
1684 unlock_user_struct(target_sd, target_addr, 1);
1688 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
1689 abi_ulong target_addr)
1691 struct target_semid_ds *target_sd;
1693 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
1694 return -TARGET_EFAULT;
1695 target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr);
1696 host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
1697 host_sd->sem_otime = tswapl(target_sd->sem_otime);
1698 host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
1699 unlock_user_struct(target_sd, target_addr, 0);
1703 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
1704 struct semid_ds *host_sd)
1706 struct target_semid_ds *target_sd;
1708 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
1709 return -TARGET_EFAULT;
1710 host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm));
1711 target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
1712 target_sd->sem_otime = tswapl(host_sd->sem_otime);
1713 target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
1714 unlock_user_struct(target_sd, target_addr, 1);
1720 struct semid_ds *buf;
1721 unsigned short *array;
1724 union target_semun {
1727 unsigned short int *array;
1730 static inline abi_long target_to_host_semun(int cmd,
1731 union semun *host_su,
1732 abi_ulong target_addr,
1733 struct semid_ds *ds)
1735 union target_semun *target_su;
1740 if (!lock_user_struct(VERIFY_READ, target_su, target_addr, 1))
1741 return -TARGET_EFAULT;
1742 target_to_host_semid_ds(ds,target_su->buf);
1744 unlock_user_struct(target_su, target_addr, 0);
1748 if (!lock_user_struct(VERIFY_READ, target_su, target_addr, 1))
1749 return -TARGET_EFAULT;
1750 host_su->val = tswapl(target_su->val);
1751 unlock_user_struct(target_su, target_addr, 0);
1755 if (!lock_user_struct(VERIFY_READ, target_su, target_addr, 1))
1756 return -TARGET_EFAULT;
1757 *host_su->array = tswap16(*target_su->array);
1758 unlock_user_struct(target_su, target_addr, 0);
1761 gemu_log("semun operation not fully supported: %d\n", (int)cmd);
1766 static inline abi_long host_to_target_semun(int cmd,
1767 abi_ulong target_addr,
1768 union semun *host_su,
1769 struct semid_ds *ds)
1771 union target_semun *target_su;
1776 if (!lock_user_struct(VERIFY_WRITE, target_su, target_addr, 0))
1777 return -TARGET_EFAULT;
1778 host_to_target_semid_ds(target_su->buf,ds);
1779 unlock_user_struct(target_su, target_addr, 1);
1783 if (!lock_user_struct(VERIFY_WRITE, target_su, target_addr, 0))
1784 return -TARGET_EFAULT;
1785 target_su->val = tswapl(host_su->val);
1786 unlock_user_struct(target_su, target_addr, 1);
1790 if (!lock_user_struct(VERIFY_WRITE, target_su, target_addr, 0))
1791 return -TARGET_EFAULT;
1792 *target_su->array = tswap16(*host_su->array);
1793 unlock_user_struct(target_su, target_addr, 1);
1796 gemu_log("semun operation not fully supported: %d\n", (int)cmd);
1801 static inline abi_long do_semctl(int first, int second, int third,
1805 struct semid_ds dsarg;
1806 int cmd = third&0xff;
1811 target_to_host_semun(cmd,&arg,ptr,&dsarg);
1812 ret = get_errno(semctl(first, second, cmd, arg));
1813 host_to_target_semun(cmd,ptr,&arg,&dsarg);
1816 target_to_host_semun(cmd,&arg,ptr,&dsarg);
1817 ret = get_errno(semctl(first, second, cmd, arg));
1818 host_to_target_semun(cmd,ptr,&arg,&dsarg);
1821 target_to_host_semun(cmd,&arg,ptr,&dsarg);
1822 ret = get_errno(semctl(first, second, cmd, arg));
1823 host_to_target_semun(cmd,ptr,&arg,&dsarg);
1826 target_to_host_semun(cmd,&arg,ptr,&dsarg);
1827 ret = get_errno(semctl(first, second, cmd, arg));
1828 host_to_target_semun(cmd,ptr,&arg,&dsarg);
1831 target_to_host_semun(cmd,&arg,ptr,&dsarg);
1832 ret = get_errno(semctl(first, second, cmd, arg));
1833 host_to_target_semun(cmd,ptr,&arg,&dsarg);
1836 target_to_host_semun(cmd,&arg,ptr,&dsarg);
1837 ret = get_errno(semctl(first, second, cmd, arg));
1838 host_to_target_semun(cmd,ptr,&arg,&dsarg);
1841 ret = get_errno(semctl(first, second, cmd, arg));
1847 struct target_msqid_ds
1849 struct target_ipc_perm msg_perm;
1850 abi_ulong msg_stime;
1851 abi_ulong __unused1;
1852 abi_ulong msg_rtime;
1853 abi_ulong __unused2;
1854 abi_ulong msg_ctime;
1855 abi_ulong __unused3;
1856 abi_ulong __msg_cbytes;
1858 abi_ulong msg_qbytes;
1859 abi_ulong msg_lspid;
1860 abi_ulong msg_lrpid;
1861 abi_ulong __unused4;
1862 abi_ulong __unused5;
1865 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
1866 abi_ulong target_addr)
1868 struct target_msqid_ds *target_md;
1870 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
1871 return -TARGET_EFAULT;
1872 target_to_host_ipc_perm(&(host_md->msg_perm),target_addr);
1873 host_md->msg_stime = tswapl(target_md->msg_stime);
1874 host_md->msg_rtime = tswapl(target_md->msg_rtime);
1875 host_md->msg_ctime = tswapl(target_md->msg_ctime);
1876 host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
1877 host_md->msg_qnum = tswapl(target_md->msg_qnum);
1878 host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
1879 host_md->msg_lspid = tswapl(target_md->msg_lspid);
1880 host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
1881 unlock_user_struct(target_md, target_addr, 0);
1885 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
1886 struct msqid_ds *host_md)
1888 struct target_msqid_ds *target_md;
1890 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
1891 return -TARGET_EFAULT;
1892 host_to_target_ipc_perm(target_addr,&(host_md->msg_perm));
1893 target_md->msg_stime = tswapl(host_md->msg_stime);
1894 target_md->msg_rtime = tswapl(host_md->msg_rtime);
1895 target_md->msg_ctime = tswapl(host_md->msg_ctime);
1896 target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
1897 target_md->msg_qnum = tswapl(host_md->msg_qnum);
1898 target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
1899 target_md->msg_lspid = tswapl(host_md->msg_lspid);
1900 target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
1901 unlock_user_struct(target_md, target_addr, 1);
1905 static inline abi_long do_msgctl(int first, int second, abi_long ptr)
1907 struct msqid_ds dsarg;
1908 int cmd = second&0xff;
1913 target_to_host_msqid_ds(&dsarg,ptr);
1914 ret = get_errno(msgctl(first, cmd, &dsarg));
1915 host_to_target_msqid_ds(ptr,&dsarg);
1917 ret = get_errno(msgctl(first, cmd, &dsarg));
1922 struct target_msgbuf {
1927 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
1928 unsigned int msgsz, int msgflg)
1930 struct target_msgbuf *target_mb;
1931 struct msgbuf *host_mb;
1934 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
1935 return -TARGET_EFAULT;
1936 host_mb = malloc(msgsz+sizeof(long));
1937 host_mb->mtype = tswapl(target_mb->mtype);
1938 memcpy(host_mb->mtext,target_mb->mtext,msgsz);
1939 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
1941 unlock_user_struct(target_mb, msgp, 0);
1946 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
1947 unsigned int msgsz, int msgtype,
1950 struct target_msgbuf *target_mb;
1952 struct msgbuf *host_mb;
1955 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
1956 return -TARGET_EFAULT;
1957 host_mb = malloc(msgsz+sizeof(long));
1958 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtype, msgflg));
1960 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
1961 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
1962 if (!target_mtext) {
1963 ret = -TARGET_EFAULT;
1966 memcpy(target_mb->mtext, host_mb->mtext, ret);
1967 unlock_user(target_mtext, target_mtext_addr, ret);
1969 target_mb->mtype = tswapl(host_mb->mtype);
1974 unlock_user_struct(target_mb, msgp, 1);
1978 /* ??? This only works with linear mappings. */
1979 /* do_ipc() must return target values and target errnos. */
1980 static abi_long do_ipc(unsigned int call, int first,
1981 int second, int third,
1982 abi_long ptr, abi_long fifth)
1986 struct shmid_ds shm_info;
1989 version = call >> 16;
1994 ret = get_errno(semop(first,(struct sembuf *)g2h(ptr), second));
1998 ret = get_errno(semget(first, second, third));
2002 ret = do_semctl(first, second, third, ptr);
2005 case IPCOP_semtimedop:
2006 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
2007 ret = -TARGET_ENOSYS;
2011 ret = get_errno(msgget(first, second));
2015 ret = do_msgsnd(first, ptr, second, third);
2019 ret = do_msgctl(first, second, ptr);
2024 /* XXX: this code is not correct */
2027 void *__unbounded msgp;
2031 struct ipc_kludge *foo = (struct ipc_kludge *)g2h(ptr);
2032 struct msgbuf *msgp = (struct msgbuf *) foo->msgp;
2034 ret = do_msgrcv(first, (long)msgp, second, 0, third);
2043 /* SHM_* flags are the same on all Linux platforms */
2044 host_addr = shmat(first, (void *)g2h(ptr), second);
2045 if (host_addr == (void *)-1) {
2046 ret = get_errno((long)host_addr);
2049 raddr = h2g((unsigned long)host_addr);
2050 /* find out the length of the shared memory segment */
2052 ret = get_errno(shmctl(first, IPC_STAT, &shm_info));
2053 if (is_error(ret)) {
2054 /* can't get length, bail out */
2058 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2059 PAGE_VALID | PAGE_READ |
2060 ((second & SHM_RDONLY)? 0: PAGE_WRITE));
2061 for (i = 0; i < N_SHM_REGIONS; ++i) {
2062 if (shm_regions[i].start == 0) {
2063 shm_regions[i].start = raddr;
2064 shm_regions[i].size = shm_info.shm_segsz;
2068 if (put_user_ual(raddr, third))
2069 return -TARGET_EFAULT;
2074 for (i = 0; i < N_SHM_REGIONS; ++i) {
2075 if (shm_regions[i].start == ptr) {
2076 shm_regions[i].start = 0;
2077 page_set_flags(ptr, ptr + shm_regions[i].size, 0);
2081 ret = get_errno(shmdt((void *)g2h(ptr)));
2085 /* IPC_* flag values are the same on all Linux platforms */
2086 ret = get_errno(shmget(first, second, third));
2089 /* IPC_* and SHM_* command values are the same on all Linux platforms */
2095 ret = get_errno(shmctl(first, second, NULL));
2103 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
2104 ret = -TARGET_ENOSYS;
2111 /* kernel structure type definitions */
2114 #define STRUCT(name, list...) STRUCT_ ## name,
2115 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
2117 #include "syscall_types.h"
2120 #undef STRUCT_SPECIAL
2122 #define STRUCT(name, list...) const argtype struct_ ## name ## _def[] = { list, TYPE_NULL };
2123 #define STRUCT_SPECIAL(name)
2124 #include "syscall_types.h"
2126 #undef STRUCT_SPECIAL
2128 typedef struct IOCTLEntry {
2129 unsigned int target_cmd;
2130 unsigned int host_cmd;
2133 const argtype arg_type[5];
2136 #define IOC_R 0x0001
2137 #define IOC_W 0x0002
2138 #define IOC_RW (IOC_R | IOC_W)
2140 #define MAX_STRUCT_SIZE 4096
2142 static IOCTLEntry ioctl_entries[] = {
2143 #define IOCTL(cmd, access, types...) \
2144 { TARGET_ ## cmd, cmd, #cmd, access, { types } },
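/* Each IOCTL(...) invocation expands to one table entry; a hypothetical
 * entry such as IOCTL(FIONREAD, IOC_R, MK_PTR(TYPE_INT)) would become
 *   { TARGET_FIONREAD, FIONREAD, "FIONREAD", IOC_R, { MK_PTR(TYPE_INT) } },
 * i.e. target request number, host request number, a name for logging,
 * the access flags and the thunk argument description used by do_ioctl(). */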
2149 /* ??? Implement proper locking for ioctls. */
2150 /* do_ioctl() must return target values and target errnos. */
2151 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
2153 const IOCTLEntry *ie;
2154 const argtype *arg_type;
2156 uint8_t buf_temp[MAX_STRUCT_SIZE];
2162 if (ie->target_cmd == 0) {
2163 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
2164 return -TARGET_ENOSYS;
2166 if (ie->target_cmd == cmd)
2170 arg_type = ie->arg_type;
2172 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
2174 switch(arg_type[0]) {
2177 ret = get_errno(ioctl(fd, ie->host_cmd));
2182 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
2186 target_size = thunk_type_size(arg_type, 0);
2187 switch(ie->access) {
2189 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2190 if (!is_error(ret)) {
2191 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
2193 return -TARGET_EFAULT;
2194 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
2195 unlock_user(argptr, arg, target_size);
2199 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2201 return -TARGET_EFAULT;
2202 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
2203 unlock_user(argptr, arg, 0);
2204 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2208 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
2210 return -TARGET_EFAULT;
2211 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
2212 unlock_user(argptr, arg, 0);
2213 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
2214 if (!is_error(ret)) {
2215 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
2217 return -TARGET_EFAULT;
2218 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
2219 unlock_user(argptr, arg, target_size);
2225 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
2226 (long)cmd, arg_type[0]);
2227 ret = -TARGET_ENOSYS;
2233 static const bitmask_transtbl iflag_tbl[] = {
2234 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
2235 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
2236 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
2237 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
2238 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
2239 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
2240 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
2241 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
2242 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
2243 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
2244 { TARGET_IXON, TARGET_IXON, IXON, IXON },
2245 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
2246 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
2247 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
2251 static const bitmask_transtbl oflag_tbl[] = {
2252 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
2253 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
2254 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
2255 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
2256 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
2257 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
2258 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
2259 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
2260 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
2261 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
2262 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
2263 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
2264 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
2265 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
2266 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
2267 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
2268 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
2269 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
2270 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
2271 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
2272 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
2273 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
2274 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
2275 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
2279 static const bitmask_transtbl cflag_tbl[] = {
2280 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
2281 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
2282 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
2283 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
2284 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
2285 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
2286 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
2287 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
2288 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
2289 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
2290 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
2291 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
2292 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
2293 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
2294 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
2295 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
2296 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
2297 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
2298 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
2299 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
2300 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
2301 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
2302 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
2303 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
2304 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
2305 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
2306 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
2307 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
2308 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
2309 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
2310 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
2314 static const bitmask_transtbl lflag_tbl[] = {
2315 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
2316 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
2317 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
2318 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
2319 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
2320 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
2321 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
2322 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
2323 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
2324 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
2325 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
2326 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
2327 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
2328 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
2329 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
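/* Each row above is { target_mask, target_bits, host_mask, host_bits }: the
 * bitmask translation helpers (defined elsewhere) are expected to OR
 * host_bits into the result whenever (flags & target_mask) == target_bits,
 * and conversely for the host-to-target direction.  Single flags such as
 * ICANON use the same value for mask and bits, while multi-bit fields such
 * as CBAUD or CSIZE get one row per legal field value. */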
2333 static void target_to_host_termios (void *dst, const void *src)
2335 struct host_termios *host = dst;
2336 const struct target_termios *target = src;
2339 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
2341 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
2343 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
2345 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
2346 host->c_line = target->c_line;
2348 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
2349 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
2350 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
2351 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
2352 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
2353 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
2354 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
2355 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
2356 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
2357 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
2358 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
2359 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
2360 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
2361 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
2362 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
2363 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
2364 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
2367 static void host_to_target_termios (void *dst, const void *src)
2369 struct target_termios *target = dst;
2370 const struct host_termios *host = src;
2373 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
2375 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
2377 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
2379 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
2380 target->c_line = host->c_line;
2382 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
2383 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
2384 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
2385 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
2386 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
2387 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
2388 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
2389 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
2390 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
2391 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
2392 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
2393 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
2394 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
2395 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
2396 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
2397 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
2398 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
2401 static const StructEntry struct_termios_def = {
2402 .convert = { host_to_target_termios, target_to_host_termios },
2403 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
2404 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
2407 static bitmask_transtbl mmap_flags_tbl[] = {
2408 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
2409 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
2410 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
2411 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
2412 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
2413 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
2414 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
2415 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
2419 static bitmask_transtbl fcntl_flags_tbl[] = {
2420 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
2421 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
2422 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
2423 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
2424 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
2425 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
2426 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
2427 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
2428 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
2429 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
2430 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
2431 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
2432 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
2433 #if defined(O_DIRECT)
2434 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
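/*
 * Editor's note (illustrative sketch, not part of the original file): the
 * bitmask_transtbl tables above are consumed by target_to_host_bitmask()
 * and host_to_target_bitmask(), which walk a table and, for every entry
 * whose (mask, bits) pair matches the source flag word, set the
 * corresponding bits in the destination encoding.  A minimal usage sketch,
 * using only helpers already present elsewhere in this file:
 */
#if 0
static int example_translate_open_flags(abi_ulong target_flags)
{
    /* Guest open()/fcntl() flags are rewritten into the host encoding... */
    int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);

    /* ...and host results (e.g. from fcntl(F_GETFL)) go the other way. */
    return host_to_target_bitmask(host_flags, fcntl_flags_tbl);
}
#endif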
2439 #if defined(TARGET_I386)
2441 /* NOTE: there is really only one LDT shared by all the threads */
2444 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
2451 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
2452 if (size > bytecount)
2454 p = lock_user(VERIFY_WRITE, ptr, size, 0);
2456 return -TARGET_EFAULT;
2457 /* ??? Should this be byteswapped? */
2458 memcpy(p, ldt_table, size);
2459 unlock_user(p, ptr, size);
2463 /* XXX: add locking support */
2464 static abi_long write_ldt(CPUX86State *env,
2465 abi_ulong ptr, unsigned long bytecount, int oldmode)
2467 struct target_modify_ldt_ldt_s ldt_info;
2468 struct target_modify_ldt_ldt_s *target_ldt_info;
2469 int seg_32bit, contents, read_exec_only, limit_in_pages;
2470 int seg_not_present, useable, lm;
2471 uint32_t *lp, entry_1, entry_2;
2473 if (bytecount != sizeof(ldt_info))
2474 return -TARGET_EINVAL;
2475 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
2476 return -TARGET_EFAULT;
2477 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
2478 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
2479 ldt_info.limit = tswap32(target_ldt_info->limit);
2480 ldt_info.flags = tswap32(target_ldt_info->flags);
2481 unlock_user_struct(target_ldt_info, ptr, 0);
2483 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
2484 return -TARGET_EINVAL;
2485 seg_32bit = ldt_info.flags & 1;
2486 contents = (ldt_info.flags >> 1) & 3;
2487 read_exec_only = (ldt_info.flags >> 3) & 1;
2488 limit_in_pages = (ldt_info.flags >> 4) & 1;
2489 seg_not_present = (ldt_info.flags >> 5) & 1;
2490 useable = (ldt_info.flags >> 6) & 1;
2494 lm = (ldt_info.flags >> 7) & 1;
2496 if (contents == 3) {
2498 return -TARGET_EINVAL;
2499 if (seg_not_present == 0)
2500 return -TARGET_EINVAL;
2502 /* allocate the LDT */
2504 ldt_table = malloc(TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
2506 return -TARGET_ENOMEM;
2507 memset(ldt_table, 0, TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
2508 env->ldt.base = h2g((unsigned long)ldt_table);
2509 env->ldt.limit = 0xffff;
2512 /* NOTE: same code as Linux kernel */
2513 /* Allow LDTs to be cleared by the user. */
2514 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
2517 read_exec_only == 1 &&
2519 limit_in_pages == 0 &&
2520 seg_not_present == 1 &&
2528 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
2529 (ldt_info.limit & 0x0ffff);
2530 entry_2 = (ldt_info.base_addr & 0xff000000) |
2531 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
2532 (ldt_info.limit & 0xf0000) |
2533 ((read_exec_only ^ 1) << 9) |
2535 ((seg_not_present ^ 1) << 15) |
2537 (limit_in_pages << 23) |
2541 entry_2 |= (useable << 20);
2543 /* Install the new entry ... */
2545 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
2546 lp[0] = tswap32(entry_1);
2547 lp[1] = tswap32(entry_2);
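/*
 * Editor's note (worked example, not in the original source): entry_1 and
 * entry_2 are the two 32-bit words of an x86 segment descriptor.  For
 * ldt_info.base_addr = 0x12345678 and ldt_info.limit = 0xabcde the code
 * above packs
 *
 *   entry_1 = (0x5678 << 16) | 0xbcde = 0x5678bcde   (base[15:0], limit[15:0])
 *   entry_2 =  0x12000000                            (base[31:24])
 *            | 0x00000034                            (base[23:16])
 *            | 0x000a0000                            (limit[19:16])
 *            | the access/flag bits derived from ldt_info.flags
 *
 * which is the same layout that do_get_thread_area() below decodes.
 */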
2551 /* specific and weird i386 syscalls */
2552 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
2553 unsigned long bytecount)
2559 ret = read_ldt(ptr, bytecount);
2562 ret = write_ldt(env, ptr, bytecount, 1);
2565 ret = write_ldt(env, ptr, bytecount, 0);
2568 ret = -TARGET_ENOSYS;
2574 #if defined(TARGET_I386) && defined(TARGET_ABI32)
2575 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
2577 uint64_t *gdt_table = g2h(env->gdt.base);
2578 struct target_modify_ldt_ldt_s ldt_info;
2579 struct target_modify_ldt_ldt_s *target_ldt_info;
2580 int seg_32bit, contents, read_exec_only, limit_in_pages;
2581 int seg_not_present, useable, lm;
2582 uint32_t *lp, entry_1, entry_2;
2585 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
2586 if (!target_ldt_info)
2587 return -TARGET_EFAULT;
2588 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
2589 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
2590 ldt_info.limit = tswap32(target_ldt_info->limit);
2591 ldt_info.flags = tswap32(target_ldt_info->flags);
2592 if (ldt_info.entry_number == -1) {
2593 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
2594 if (gdt_table[i] == 0) {
2595 ldt_info.entry_number = i;
2596 target_ldt_info->entry_number = tswap32(i);
2601 unlock_user_struct(target_ldt_info, ptr, 1);
2603 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
2604 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
2605 return -TARGET_EINVAL;
2606 seg_32bit = ldt_info.flags & 1;
2607 contents = (ldt_info.flags >> 1) & 3;
2608 read_exec_only = (ldt_info.flags >> 3) & 1;
2609 limit_in_pages = (ldt_info.flags >> 4) & 1;
2610 seg_not_present = (ldt_info.flags >> 5) & 1;
2611 useable = (ldt_info.flags >> 6) & 1;
2615 lm = (ldt_info.flags >> 7) & 1;
2618 if (contents == 3) {
2619 if (seg_not_present == 0)
2620 return -TARGET_EINVAL;
2623 /* NOTE: same code as Linux kernel */
2624 /* Allow LDTs to be cleared by the user. */
2625 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
2626 if ((contents == 0 &&
2627 read_exec_only == 1 &&
2629 limit_in_pages == 0 &&
2630 seg_not_present == 1 &&
2638 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
2639 (ldt_info.limit & 0x0ffff);
2640 entry_2 = (ldt_info.base_addr & 0xff000000) |
2641 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
2642 (ldt_info.limit & 0xf0000) |
2643 ((read_exec_only ^ 1) << 9) |
2645 ((seg_not_present ^ 1) << 15) |
2647 (limit_in_pages << 23) |
2652 /* Install the new entry ... */
2654 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
2655 lp[0] = tswap32(entry_1);
2656 lp[1] = tswap32(entry_2);
2660 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
2662 struct target_modify_ldt_ldt_s *target_ldt_info;
2663 uint64_t *gdt_table = g2h(env->gdt.base);
2664 uint32_t base_addr, limit, flags;
2665 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
2666 int seg_not_present, useable, lm;
2667 uint32_t *lp, entry_1, entry_2;
2669 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
2670 if (!target_ldt_info)
2671 return -TARGET_EFAULT;
2672 idx = tswap32(target_ldt_info->entry_number);
2673 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
2674 idx > TARGET_GDT_ENTRY_TLS_MAX) {
2675 unlock_user_struct(target_ldt_info, ptr, 1);
2676 return -TARGET_EINVAL;
2678 lp = (uint32_t *)(gdt_table + idx);
2679 entry_1 = tswap32(lp[0]);
2680 entry_2 = tswap32(lp[1]);
2682 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
2683 contents = (entry_2 >> 10) & 3;
2684 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
2685 seg_32bit = (entry_2 >> 22) & 1;
2686 limit_in_pages = (entry_2 >> 23) & 1;
2687 useable = (entry_2 >> 20) & 1;
2691 lm = (entry_2 >> 21) & 1;
2693 flags = (seg_32bit << 0) | (contents << 1) |
2694 (read_exec_only << 3) | (limit_in_pages << 4) |
2695 (seg_not_present << 5) | (useable << 6) | (lm << 7);
2696 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
2697 base_addr = (entry_1 >> 16) |
2698 (entry_2 & 0xff000000) |
2699 ((entry_2 & 0xff) << 16);
2700 target_ldt_info->base_addr = tswapl(base_addr);
2701 target_ldt_info->limit = tswap32(limit);
2702 target_ldt_info->flags = tswap32(flags);
2703 unlock_user_struct(target_ldt_info, ptr, 1);
2706 #endif /* TARGET_I386 && TARGET_ABI32 */
2708 #ifndef TARGET_ABI32
2709 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
2716 case TARGET_ARCH_SET_GS:
2717 case TARGET_ARCH_SET_FS:
2718 if (code == TARGET_ARCH_SET_GS)
2722 cpu_x86_load_seg(env, idx, 0);
2723 env->segs[idx].base = addr;
2725 case TARGET_ARCH_GET_GS:
2726 case TARGET_ARCH_GET_FS:
2727 if (code == TARGET_ARCH_GET_GS)
2731 val = env->segs[idx].base;
2732 if (put_user(val, addr, abi_ulong))
2733 return -TARGET_EFAULT;
2736 ret = -TARGET_EINVAL;
2743 #endif /* defined(TARGET_I386) */
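/*
 * Editor's note (hypothetical, standalone guest-side sketch): the
 * do_arch_prctl() helper above services calls like the following, which is
 * how an x86_64 guest typically installs its TLS base.  QEMU records the
 * base in the emulated segment state rather than touching the host's
 * FS/GS registers.
 */
#if 0
#include <asm/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static unsigned long tls_block[64];

int main(void)
{
    unsigned long base = 0;

    /* Point the guest's %fs base at a private TLS block... */
    syscall(SYS_arch_prctl, ARCH_SET_FS, (unsigned long)tls_block);
    /* ...and read it back through ARCH_GET_FS. */
    syscall(SYS_arch_prctl, ARCH_GET_FS, &base);
    return base == (unsigned long)tls_block ? 0 : 1;
}
#endif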
2745 #if defined(USE_NPTL)
2747 #define NEW_STACK_SIZE PTHREAD_STACK_MIN
2749 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
2752 pthread_mutex_t mutex;
2753 pthread_cond_t cond;
2756 abi_ulong child_tidptr;
2757 abi_ulong parent_tidptr;
2761 static void *clone_func(void *arg)
2763 new_thread_info *info = arg;
2768 info->tid = gettid();
2769 if (info->child_tidptr)
2770 put_user_u32(info->tid, info->child_tidptr);
2771 if (info->parent_tidptr)
2772 put_user_u32(info->tid, info->parent_tidptr);
2773 /* Enable signals. */
2774 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
2775 /* Signal to the parent that we're ready. */
2776 pthread_mutex_lock(&info->mutex);
2777 pthread_cond_broadcast(&info->cond);
2778 pthread_mutex_unlock(&info->mutex);
2779 /* Wait until the parent has finished initializing the TLS state. */
2780 pthread_mutex_lock(&clone_lock);
2781 pthread_mutex_unlock(&clone_lock);
2787 /* this stack is the equivalent of the kernel stack associated with a thread. */
2789 #define NEW_STACK_SIZE 8192
2791 static int clone_func(void *arg)
2793 CPUState *env = arg;
2800 /* do_fork() must return host values and target errnos (unlike most
2801 do_*() functions). */
2802 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
2803 abi_ulong parent_tidptr, target_ulong newtls,
2804 abi_ulong child_tidptr)
2810 #if defined(USE_NPTL)
2811 unsigned int nptl_flags;
2815 /* Emulate vfork() with fork() */
2816 if (flags & CLONE_VFORK)
2817 flags &= ~(CLONE_VFORK | CLONE_VM);
2819 if (flags & CLONE_VM) {
2820 #if defined(USE_NPTL)
2821 new_thread_info info;
2822 pthread_attr_t attr;
2824 ts = qemu_mallocz(sizeof(TaskState) + NEW_STACK_SIZE);
2825 init_task_state(ts);
2826 new_stack = ts->stack;
2827 /* we create a new CPU instance. */
2828 new_env = cpu_copy(env);
2829 /* Init regs that differ from the parent. */
2830 cpu_clone_regs(new_env, newsp);
2831 new_env->opaque = ts;
2832 #if defined(USE_NPTL)
2834 flags &= ~CLONE_NPTL_FLAGS2;
2836 /* TODO: Implement CLONE_CHILD_CLEARTID. */
2837 if (nptl_flags & CLONE_SETTLS)
2838 cpu_set_tls (new_env, newtls);
2840 /* Grab a mutex so that thread setup appears atomic. */
2841 pthread_mutex_lock(&clone_lock);
2843 memset(&info, 0, sizeof(info));
2844 pthread_mutex_init(&info.mutex, NULL);
2845 pthread_mutex_lock(&info.mutex);
2846 pthread_cond_init(&info.cond, NULL);
2848 if (nptl_flags & CLONE_CHILD_SETTID)
2849 info.child_tidptr = child_tidptr;
2850 if (nptl_flags & CLONE_PARENT_SETTID)
2851 info.parent_tidptr = parent_tidptr;
2853 ret = pthread_attr_init(&attr);
2854 ret = pthread_attr_setstack(&attr, new_stack, NEW_STACK_SIZE);
2855 /* It is not safe to deliver signals until the child has finished
2856 initializing, so temporarily block all signals. */
2857 sigfillset(&sigmask);
2858 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
2860 ret = pthread_create(&info.thread, &attr, clone_func, &info);
2862 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
2863 pthread_attr_destroy(&attr);
2865 /* Wait for the child to initialize. */
2866 pthread_cond_wait(&info.cond, &info.mutex);
2868 if (flags & CLONE_PARENT_SETTID)
2869 put_user_u32(ret, parent_tidptr);
2873 pthread_mutex_unlock(&info.mutex);
2874 pthread_cond_destroy(&info.cond);
2875 pthread_mutex_destroy(&info.mutex);
2876 pthread_mutex_unlock(&clone_lock);
2878 if (flags & CLONE_NPTL_FLAGS2)
2880 /* This is probably going to die very quickly, but do it anyway. */
2882 ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
2884 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
2888 /* if no CLONE_VM, we consider it to be a fork */
2889 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
2893 #if defined(USE_NPTL)
2894 /* There is a race condition here. The parent process could
2895 theoretically read the TID in the child process before the child
2896 tid is set. Avoiding the race would require using either ptrace
2897 (not implemented) or having *_tidptr point at a shared memory
2898 mapping. We can't repeat the spinlock hack used above because
2899 the child process gets its own copy of the lock. */
2901 cpu_clone_regs(env, newsp);
2903 /* Child Process. */
2904 if (flags & CLONE_CHILD_SETTID)
2905 put_user_u32(gettid(), child_tidptr);
2906 if (flags & CLONE_PARENT_SETTID)
2907 put_user_u32(gettid(), parent_tidptr);
2908 ts = (TaskState *)env->opaque;
2909 if (flags & CLONE_SETTLS)
2910 cpu_set_tls (env, newtls);
2911 /* TODO: Implement CLONE_CHILD_CLEARTID. */
2917 cpu_clone_regs(env, newsp);
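/*
 * Editor's note (illustrative, not part of the original file): the NPTL
 * branch of do_fork() above uses a two-stage handshake: the parent holds
 * clone_lock while it finishes setting up the child's state, the child
 * announces itself on a condition variable, and the child only starts
 * running guest code once clone_lock is released.  A self-contained sketch
 * of the same pattern (names are hypothetical):
 */
#if 0
#include <pthread.h>

static pthread_mutex_t setup_lock = PTHREAD_MUTEX_INITIALIZER;

typedef struct {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    int announced;
} handshake_t;

static void *child_func(void *arg)
{
    handshake_t *hs = arg;

    /* Publish our identity (do_fork() writes the TID to *_tidptr here). */
    pthread_mutex_lock(&hs->mutex);
    hs->announced = 1;
    pthread_cond_broadcast(&hs->cond);
    pthread_mutex_unlock(&hs->mutex);

    /* Block until the parent has finished its setup. */
    pthread_mutex_lock(&setup_lock);
    pthread_mutex_unlock(&setup_lock);
    return NULL;
}

static void spawn_with_handshake(void)
{
    handshake_t hs = { .announced = 0 };
    pthread_t th;

    pthread_mutex_init(&hs.mutex, NULL);
    pthread_cond_init(&hs.cond, NULL);

    pthread_mutex_lock(&setup_lock);            /* hold the child back */
    pthread_mutex_lock(&hs.mutex);
    pthread_create(&th, NULL, child_func, &hs);
    while (!hs.announced)                       /* wait for the announcement */
        pthread_cond_wait(&hs.cond, &hs.mutex);
    pthread_mutex_unlock(&hs.mutex);

    /* ...finish per-thread setup here, then let the child run. */
    pthread_mutex_unlock(&setup_lock);
    pthread_join(th, NULL);
}
#endif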
2924 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
2927 struct target_flock *target_fl;
2928 struct flock64 fl64;
2929 struct target_flock64 *target_fl64;
2933 case TARGET_F_GETLK:
2934 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
2935 return -TARGET_EFAULT;
2936 fl.l_type = tswap16(target_fl->l_type);
2937 fl.l_whence = tswap16(target_fl->l_whence);
2938 fl.l_start = tswapl(target_fl->l_start);
2939 fl.l_len = tswapl(target_fl->l_len);
2940 fl.l_pid = tswapl(target_fl->l_pid);
2941 unlock_user_struct(target_fl, arg, 0);
2942 ret = get_errno(fcntl(fd, cmd, &fl));
2944 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
2945 return -TARGET_EFAULT;
2946 target_fl->l_type = tswap16(fl.l_type);
2947 target_fl->l_whence = tswap16(fl.l_whence);
2948 target_fl->l_start = tswapl(fl.l_start);
2949 target_fl->l_len = tswapl(fl.l_len);
2950 target_fl->l_pid = tswapl(fl.l_pid);
2951 unlock_user_struct(target_fl, arg, 1);
2955 case TARGET_F_SETLK:
2956 case TARGET_F_SETLKW:
2957 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
2958 return -TARGET_EFAULT;
2959 fl.l_type = tswap16(target_fl->l_type);
2960 fl.l_whence = tswap16(target_fl->l_whence);
2961 fl.l_start = tswapl(target_fl->l_start);
2962 fl.l_len = tswapl(target_fl->l_len);
2963 fl.l_pid = tswapl(target_fl->l_pid);
2964 unlock_user_struct(target_fl, arg, 0);
2965 ret = get_errno(fcntl(fd, cmd, &fl));
2968 case TARGET_F_GETLK64:
2969 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
2970 return -TARGET_EFAULT;
2971 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
2972 fl64.l_whence = tswap16(target_fl64->l_whence);
2973 fl64.l_start = tswapl(target_fl64->l_start);
2974 fl64.l_len = tswapl(target_fl64->l_len);
2975 fl64.l_pid = tswap16(target_fl64->l_pid);
2976 unlock_user_struct(target_fl64, arg, 0);
2977 ret = get_errno(fcntl(fd, cmd >> 1, &fl64));
2979 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
2980 return -TARGET_EFAULT;
2981 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
2982 target_fl64->l_whence = tswap16(fl64.l_whence);
2983 target_fl64->l_start = tswapl(fl64.l_start);
2984 target_fl64->l_len = tswapl(fl64.l_len);
2985 target_fl64->l_pid = tswapl(fl64.l_pid);
2986 unlock_user_struct(target_fl64, arg, 1);
2989 case TARGET_F_SETLK64:
2990 case TARGET_F_SETLKW64:
2991 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
2992 return -TARGET_EFAULT;
2993 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
2994 fl64.l_whence = tswap16(target_fl64->l_whence);
2995 fl64.l_start = tswapl(target_fl64->l_start);
2996 fl64.l_len = tswapl(target_fl64->l_len);
2997 fl64.l_pid = tswap16(target_fl64->l_pid);
2998 unlock_user_struct(target_fl64, arg, 0);
2999 ret = get_errno(fcntl(fd, cmd >> 1, &fl64));
3003 ret = get_errno(fcntl(fd, cmd, arg));
3005 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
3010 ret = get_errno(fcntl(fd, cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
3014 ret = get_errno(fcntl(fd, cmd, arg));
3022 static inline int high2lowuid(int uid)
3030 static inline int high2lowgid(int gid)
3038 static inline int low2highuid(int uid)
3040 if ((int16_t)uid == -1)
3046 static inline int low2highgid(int gid)
3048 if ((int16_t)gid == -1)
3054 #endif /* USE_UID16 */
3056 void syscall_init(void)
3059 const argtype *arg_type;
3063 #define STRUCT(name, list...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
3064 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
3065 #include "syscall_types.h"
3067 #undef STRUCT_SPECIAL
3069 /* We patch the ioctl size if necessary. We rely on the fact that
3070 no ioctl has all bits set to '1' in the size field */
3072 while (ie->target_cmd != 0) {
3073 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
3074 TARGET_IOC_SIZEMASK) {
3075 arg_type = ie->arg_type;
3076 if (arg_type[0] != TYPE_PTR) {
3077 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
3082 size = thunk_type_size(arg_type, 0);
3083 ie->target_cmd = (ie->target_cmd &
3084 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
3085 (size << TARGET_IOC_SIZESHIFT);
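/*
 * Editor's note (illustrative): a Linux ioctl request number encodes the
 * direction, the argument size, a type character and a command number in
 * separate bit-fields.  Table entries declared with the size field
 * saturated (all ones) are placeholders; the code above replaces that
 * field with thunk_type_size() of the converted argument struct while
 * leaving the other fields untouched, so the patched target_cmd matches
 * the request number the guest's own headers generate.
 */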
3088 /* Build target_to_host_errno_table[] from
3089 * host_to_target_errno_table[]. */
3090 for (i=0; i < ERRNO_TABLE_SIZE; i++)
3091 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
3093 /* automatic consistency check if same arch */
3094 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
3095 (defined(__x86_64__) && defined(TARGET_X86_64))
3096 if (unlikely(ie->target_cmd != ie->host_cmd)) {
3097 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
3098 ie->name, ie->target_cmd, ie->host_cmd);
3105 #if TARGET_ABI_BITS == 32
3106 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
3108 #ifdef TARGET_WORDS_BIGENDIAN
3109 return ((uint64_t)word0 << 32) | word1;
3111 return ((uint64_t)word1 << 32) | word0;
3114 #else /* TARGET_ABI_BITS == 32 */
3115 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
3119 #endif /* TARGET_ABI_BITS != 32 */
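/*
 * Editor's note (worked example): on 32-bit ABIs a 64-bit file offset is
 * passed as two words, and target_offset64() above puts word0 in the high
 * half on big-endian targets and in the low half otherwise.  For the
 * offset 0x0000000112345678:
 *
 *   big-endian target:    word0 = 0x00000001, word1 = 0x12345678
 *   little-endian target: word0 = 0x12345678, word1 = 0x00000001
 *
 * both of which reassemble to 0x0000000112345678.  On 64-bit ABIs the
 * offset already fits in a single argument, so presumably word0 is
 * returned unchanged (the body is elided above).
 */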
3121 #ifdef TARGET_NR_truncate64
3122 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
3128 if (((CPUARMState *)cpu_env)->eabi)
3134 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
3138 #ifdef TARGET_NR_ftruncate64
3139 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
3145 if (((CPUARMState *)cpu_env)->eabi)
3151 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
3155 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
3156 abi_ulong target_addr)
3158 struct target_timespec *target_ts;
3160 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
3161 return -TARGET_EFAULT;
3162 host_ts->tv_sec = tswapl(target_ts->tv_sec);
3163 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
3164 unlock_user_struct(target_ts, target_addr, 0);
3168 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
3169 struct timespec *host_ts)
3171 struct target_timespec *target_ts;
3173 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
3174 return -TARGET_EFAULT;
3175 target_ts->tv_sec = tswapl(host_ts->tv_sec);
3176 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
3177 unlock_user_struct(target_ts, target_addr, 1);
3181 #ifdef TARGET_NR_stat64
3182 static inline abi_long host_to_target_stat64(void *cpu_env,
3183 abi_ulong target_addr,
3184 struct stat *host_st)
3187 if (((CPUARMState *)cpu_env)->eabi) {
3188 struct target_eabi_stat64 *target_st;
3190 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3191 return -TARGET_EFAULT;
3192 memset(target_st, 0, sizeof(struct target_eabi_stat64));
3193 __put_user(host_st->st_dev, &target_st->st_dev);
3194 __put_user(host_st->st_ino, &target_st->st_ino);
3195 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
3196 __put_user(host_st->st_ino, &target_st->__st_ino);
3198 __put_user(host_st->st_mode, &target_st->st_mode);
3199 __put_user(host_st->st_nlink, &target_st->st_nlink);
3200 __put_user(host_st->st_uid, &target_st->st_uid);
3201 __put_user(host_st->st_gid, &target_st->st_gid);
3202 __put_user(host_st->st_rdev, &target_st->st_rdev);
3203 __put_user(host_st->st_size, &target_st->st_size);
3204 __put_user(host_st->st_blksize, &target_st->st_blksize);
3205 __put_user(host_st->st_blocks, &target_st->st_blocks);
3206 __put_user(host_st->st_atime, &target_st->target_st_atime);
3207 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3208 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3209 unlock_user_struct(target_st, target_addr, 1);
3213 struct target_stat64 *target_st;
3215 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
3216 return -TARGET_EFAULT;
3217 memset(target_st, 0, sizeof(struct target_stat64));
3218 __put_user(host_st->st_dev, &target_st->st_dev);
3219 __put_user(host_st->st_ino, &target_st->st_ino);
3220 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
3221 __put_user(host_st->st_ino, &target_st->__st_ino);
3223 __put_user(host_st->st_mode, &target_st->st_mode);
3224 __put_user(host_st->st_nlink, &target_st->st_nlink);
3225 __put_user(host_st->st_uid, &target_st->st_uid);
3226 __put_user(host_st->st_gid, &target_st->st_gid);
3227 __put_user(host_st->st_rdev, &target_st->st_rdev);
3228 /* XXX: better use of kernel struct */
3229 __put_user(host_st->st_size, &target_st->st_size);
3230 __put_user(host_st->st_blksize, &target_st->st_blksize);
3231 __put_user(host_st->st_blocks, &target_st->st_blocks);
3232 __put_user(host_st->st_atime, &target_st->target_st_atime);
3233 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
3234 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
3235 unlock_user_struct(target_st, target_addr, 1);
3242 #if defined(USE_NPTL)
3243 /* ??? Using host futex calls even when target atomic operations
3244 are not really atomic probably breaks things. However, implementing
3245 futexes locally would make futexes shared between multiple processes
3246 tricky, and they would probably be useless anyway because guest atomic
3247 operations won't work either. */
3248 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
3249 target_ulong uaddr2, int val3)
3251 struct timespec ts, *pts;
3253 /* ??? We assume FUTEX_* constants are the same on both host and target. */
3259 target_to_host_timespec(pts, timeout);
3263 return get_errno(sys_futex(g2h(uaddr), FUTEX_WAIT, tswap32(val),
3266 return get_errno(sys_futex(g2h(uaddr), FUTEX_WAKE, val, NULL, NULL, 0));
3268 return get_errno(sys_futex(g2h(uaddr), FUTEX_FD, val, NULL, NULL, 0));
3270 return get_errno(sys_futex(g2h(uaddr), FUTEX_REQUEUE, val,
3271 NULL, g2h(uaddr2), 0));
3272 case FUTEX_CMP_REQUEUE:
3273 return get_errno(sys_futex(g2h(uaddr), FUTEX_CMP_REQUEUE, val,
3274 NULL, g2h(uaddr2), tswap32(val3)));
3276 return -TARGET_ENOSYS;
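/*
 * Editor's note (hypothetical, standalone guest-side sketch): do_futex()
 * above forwards guest futex operations to the host kernel, translating
 * the guest address with g2h(), byte-swapping the expected value, and
 * converting any timeout.  The kind of guest code it services looks like
 * this:
 */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static volatile int flag;

static void wait_for_flag(void)
{
    /* Sleep in the kernel while the word still holds 0. */
    while (flag == 0)
        syscall(SYS_futex, &flag, FUTEX_WAIT, 0, NULL, NULL, 0);
}

static void set_flag(void)
{
    flag = 1;
    /* Wake at most one waiter blocked on the word. */
    syscall(SYS_futex, &flag, FUTEX_WAKE, 1, NULL, NULL, 0);
}
#endif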
3281 int get_osversion(void)
3283 static int osversion;
3284 struct new_utsname buf;
3289 if (qemu_uname_release && *qemu_uname_release) {
3290 s = qemu_uname_release;
3292 if (sys_uname(&buf))
3297 for (i = 0; i < 3; i++) {
3299 while (*s >= '0' && *s <= '9') {
3304 tmp = (tmp << 8) + n;
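/*
 * Editor's note (worked example): the loop above packs the dotted release
 * string one byte per component, so a reported release of "2.6.17" yields
 * (2 << 16) | (6 << 8) | 17 = 0x020611, the same encoding the kernel's
 * KERNEL_VERSION()/LINUX_VERSION_CODE macros use for comparisons.
 */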
3312 /* do_syscall() should always have a single exit point at the end so
3313 that actions, such as logging of syscall results, can be performed.
3314 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
3315 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
3316 abi_long arg2, abi_long arg3, abi_long arg4,
3317 abi_long arg5, abi_long arg6)
3325 gemu_log("syscall %d", num);
3328 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
3331 case TARGET_NR_exit:
3335 gdb_exit(cpu_env, arg1);
3336 /* XXX: should free thread stack and CPU env */
3338 ret = 0; /* avoid warning */
3340 case TARGET_NR_read:
3341 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
3343 ret = get_errno(read(arg1, p, arg3));
3344 unlock_user(p, arg2, ret);
3346 case TARGET_NR_write:
3347 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
3349 ret = get_errno(write(arg1, p, arg3));
3350 unlock_user(p, arg2, 0);
3352 case TARGET_NR_open:
3353 if (!(p = lock_user_string(arg1)))
3355 ret = get_errno(open(path(p),
3356 target_to_host_bitmask(arg2, fcntl_flags_tbl),
3358 unlock_user(p, arg1, 0);
3360 #if defined(TARGET_NR_openat) && defined(__NR_openat)
3361 case TARGET_NR_openat:
3362 if (!(p = lock_user_string(arg2)))
3364 ret = get_errno(sys_openat(arg1,
3366 target_to_host_bitmask(arg3, fcntl_flags_tbl),
3368 unlock_user(p, arg2, 0);
3371 case TARGET_NR_close:
3372 ret = get_errno(close(arg1));
3377 case TARGET_NR_fork:
3378 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
3380 #ifdef TARGET_NR_waitpid
3381 case TARGET_NR_waitpid:
3384 ret = get_errno(waitpid(arg1, &status, arg3));
3385 if (!is_error(ret) && arg2
3386 && put_user_s32(status, arg2))
3391 #ifdef TARGET_NR_waitid
3392 case TARGET_NR_waitid:
3396 ret = get_errno(waitid(arg1, arg2, &info, arg4));
3397 if (!is_error(ret) && arg3 && info.si_pid != 0) {
3398 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
3400 host_to_target_siginfo(p, &info);
3401 unlock_user(p, arg3, sizeof(target_siginfo_t));
3406 #ifdef TARGET_NR_creat /* not on alpha */
3407 case TARGET_NR_creat:
3408 if (!(p = lock_user_string(arg1)))
3410 ret = get_errno(creat(p, arg2));
3411 unlock_user(p, arg1, 0);
3414 case TARGET_NR_link:
3417 p = lock_user_string(arg1);
3418 p2 = lock_user_string(arg2);
3420 ret = -TARGET_EFAULT;
3422 ret = get_errno(link(p, p2));
3423 unlock_user(p2, arg2, 0);
3424 unlock_user(p, arg1, 0);
3427 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
3428 case TARGET_NR_linkat:
3433 p = lock_user_string(arg2);
3434 p2 = lock_user_string(arg4);
3436 ret = -TARGET_EFAULT;
3438 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
3439 unlock_user(p, arg2, 0);
3440 unlock_user(p2, arg4, 0);
3444 case TARGET_NR_unlink:
3445 if (!(p = lock_user_string(arg1)))
3447 ret = get_errno(unlink(p));
3448 unlock_user(p, arg1, 0);
3450 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
3451 case TARGET_NR_unlinkat:
3452 if (!(p = lock_user_string(arg2)))
3454 ret = get_errno(sys_unlinkat(arg1, p, arg3));
3455 unlock_user(p, arg2, 0);
3458 case TARGET_NR_execve:
3460 char **argp, **envp;
3463 abi_ulong guest_argp;
3464 abi_ulong guest_envp;
3470 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
3471 if (get_user_ual(addr, gp))
3479 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
3480 if (get_user_ual(addr, gp))
3487 argp = alloca((argc + 1) * sizeof(void *));
3488 envp = alloca((envc + 1) * sizeof(void *));
3490 for (gp = guest_argp, q = argp; gp;
3491 gp += sizeof(abi_ulong), q++) {
3492 if (get_user_ual(addr, gp))
3496 if (!(*q = lock_user_string(addr)))
3501 for (gp = guest_envp, q = envp; gp;
3502 gp += sizeof(abi_ulong), q++) {
3503 if (get_user_ual(addr, gp))
3507 if (!(*q = lock_user_string(addr)))
3512 if (!(p = lock_user_string(arg1)))
3514 ret = get_errno(execve(p, argp, envp));
3515 unlock_user(p, arg1, 0);
3520 ret = -TARGET_EFAULT;
3523 for (gp = guest_argp, q = argp; *q;
3524 gp += sizeof(abi_ulong), q++) {
3525 if (get_user_ual(addr, gp)
3528 unlock_user(*q, addr, 0);
3530 for (gp = guest_envp, q = envp; *q;
3531 gp += sizeof(abi_ulong), q++) {
3532 if (get_user_ual(addr, gp)
3535 unlock_user(*q, addr, 0);
3539 case TARGET_NR_chdir:
3540 if (!(p = lock_user_string(arg1)))
3542 ret = get_errno(chdir(p));
3543 unlock_user(p, arg1, 0);
3545 #ifdef TARGET_NR_time
3546 case TARGET_NR_time:
3549 ret = get_errno(time(&host_time));
3552 && put_user_sal(host_time, arg1))
3557 case TARGET_NR_mknod:
3558 if (!(p = lock_user_string(arg1)))
3560 ret = get_errno(mknod(p, arg2, arg3));
3561 unlock_user(p, arg1, 0);
3563 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
3564 case TARGET_NR_mknodat:
3565 if (!(p = lock_user_string(arg2)))
3567 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
3568 unlock_user(p, arg2, 0);
3571 case TARGET_NR_chmod:
3572 if (!(p = lock_user_string(arg1)))
3574 ret = get_errno(chmod(p, arg2));
3575 unlock_user(p, arg1, 0);
3577 #ifdef TARGET_NR_break
3578 case TARGET_NR_break:
3581 #ifdef TARGET_NR_oldstat
3582 case TARGET_NR_oldstat:
3585 case TARGET_NR_lseek:
3586 ret = get_errno(lseek(arg1, arg2, arg3));
3588 #ifdef TARGET_NR_getxpid
3589 case TARGET_NR_getxpid:
3591 case TARGET_NR_getpid:
3593 ret = get_errno(getpid());
3595 case TARGET_NR_mount:
3597 /* need to look at the data field */
3599 p = lock_user_string(arg1);
3600 p2 = lock_user_string(arg2);
3601 p3 = lock_user_string(arg3);
3602 if (!p || !p2 || !p3)
3603 ret = -TARGET_EFAULT;
3605 /* FIXME - arg5 should be locked, but it isn't clear how to
3606 * do that since it's not guaranteed to be a NULL-terminated string. */
3609 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
3610 unlock_user(p, arg1, 0);
3611 unlock_user(p2, arg2, 0);
3612 unlock_user(p3, arg3, 0);
3615 #ifdef TARGET_NR_umount
3616 case TARGET_NR_umount:
3617 if (!(p = lock_user_string(arg1)))
3619 ret = get_errno(umount(p));
3620 unlock_user(p, arg1, 0);
3623 #ifdef TARGET_NR_stime /* not on alpha */
3624 case TARGET_NR_stime:
3627 if (get_user_sal(host_time, arg1))
3629 ret = get_errno(stime(&host_time));
3633 case TARGET_NR_ptrace:
3635 #ifdef TARGET_NR_alarm /* not on alpha */
3636 case TARGET_NR_alarm:
3640 #ifdef TARGET_NR_oldfstat
3641 case TARGET_NR_oldfstat:
3644 #ifdef TARGET_NR_pause /* not on alpha */
3645 case TARGET_NR_pause:
3646 ret = get_errno(pause());
3649 #ifdef TARGET_NR_utime
3650 case TARGET_NR_utime:
3652 struct utimbuf tbuf, *host_tbuf;
3653 struct target_utimbuf *target_tbuf;
3655 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
3657 tbuf.actime = tswapl(target_tbuf->actime);
3658 tbuf.modtime = tswapl(target_tbuf->modtime);
3659 unlock_user_struct(target_tbuf, arg2, 0);
3664 if (!(p = lock_user_string(arg1)))
3666 ret = get_errno(utime(p, host_tbuf));
3667 unlock_user(p, arg1, 0);
3671 case TARGET_NR_utimes:
3673 struct timeval *tvp, tv[2];
3675 if (copy_from_user_timeval(&tv[0], arg2)
3676 || copy_from_user_timeval(&tv[1],
3677 arg2 + sizeof(struct target_timeval)))
3683 if (!(p = lock_user_string(arg1)))
3685 ret = get_errno(utimes(p, tvp));
3686 unlock_user(p, arg1, 0);
3689 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
3690 case TARGET_NR_futimesat:
3692 struct timeval *tvp, tv[2];
3694 if (copy_from_user_timeval(&tv[0], arg3)
3695 || copy_from_user_timeval(&tv[1],
3696 arg3 + sizeof(struct target_timeval)))
3702 if (!(p = lock_user_string(arg2)))
3704 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
3705 unlock_user(p, arg2, 0);
3709 #ifdef TARGET_NR_stty
3710 case TARGET_NR_stty:
3713 #ifdef TARGET_NR_gtty
3714 case TARGET_NR_gtty:
3717 case TARGET_NR_access:
3718 if (!(p = lock_user_string(arg1)))
3720 ret = get_errno(access(p, arg2));
3721 unlock_user(p, arg1, 0);
3723 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
3724 case TARGET_NR_faccessat:
3725 if (!(p = lock_user_string(arg2)))
3727 ret = get_errno(sys_faccessat(arg1, p, arg3, arg4));
3728 unlock_user(p, arg2, 0);
3731 #ifdef TARGET_NR_nice /* not on alpha */
3732 case TARGET_NR_nice:
3733 ret = get_errno(nice(arg1));
3736 #ifdef TARGET_NR_ftime
3737 case TARGET_NR_ftime:
3740 case TARGET_NR_sync:
3744 case TARGET_NR_kill:
3745 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
3747 case TARGET_NR_rename:
3750 p = lock_user_string(arg1);
3751 p2 = lock_user_string(arg2);
3753 ret = -TARGET_EFAULT;
3755 ret = get_errno(rename(p, p2));
3756 unlock_user(p2, arg2, 0);
3757 unlock_user(p, arg1, 0);
3760 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
3761 case TARGET_NR_renameat:
3764 p = lock_user_string(arg2);
3765 p2 = lock_user_string(arg4);
3767 ret = -TARGET_EFAULT;
3769 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
3770 unlock_user(p2, arg4, 0);
3771 unlock_user(p, arg2, 0);
3775 case TARGET_NR_mkdir:
3776 if (!(p = lock_user_string(arg1)))
3778 ret = get_errno(mkdir(p, arg2));
3779 unlock_user(p, arg1, 0);
3781 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
3782 case TARGET_NR_mkdirat:
3783 if (!(p = lock_user_string(arg2)))
3785 ret = get_errno(sys_mkdirat(arg1, p, arg3));
3786 unlock_user(p, arg2, 0);
3789 case TARGET_NR_rmdir:
3790 if (!(p = lock_user_string(arg1)))
3792 ret = get_errno(rmdir(p));
3793 unlock_user(p, arg1, 0);
3796 ret = get_errno(dup(arg1));
3798 case TARGET_NR_pipe:
3801 ret = get_errno(pipe(host_pipe));
3802 if (!is_error(ret)) {
3803 #if defined(TARGET_MIPS)
3804 CPUMIPSState *env = (CPUMIPSState*)cpu_env;
3805 env->active_tc.gpr[3] = host_pipe[1];
3807 #elif defined(TARGET_SH4)
3808 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
3811 if (put_user_s32(host_pipe[0], arg1)
3812 || put_user_s32(host_pipe[1], arg1 + sizeof(host_pipe[0])))
3818 case TARGET_NR_times:
3820 struct target_tms *tmsp;
3822 ret = get_errno(times(&tms));
3824 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
3827 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
3828 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
3829 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
3830 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
3833 ret = host_to_target_clock_t(ret);
3836 #ifdef TARGET_NR_prof
3837 case TARGET_NR_prof:
3840 #ifdef TARGET_NR_signal
3841 case TARGET_NR_signal:
3844 case TARGET_NR_acct:
3845 if (!(p = lock_user_string(arg1)))
3847 ret = get_errno(acct(path(p)));
3848 unlock_user(p, arg1, 0);
3850 #ifdef TARGET_NR_umount2 /* not on alpha */
3851 case TARGET_NR_umount2:
3852 if (!(p = lock_user_string(arg1)))
3854 ret = get_errno(umount2(p, arg2));
3855 unlock_user(p, arg1, 0);
3858 #ifdef TARGET_NR_lock
3859 case TARGET_NR_lock:
3862 case TARGET_NR_ioctl:
3863 ret = do_ioctl(arg1, arg2, arg3);
3865 case TARGET_NR_fcntl:
3866 ret = do_fcntl(arg1, arg2, arg3);
3868 #ifdef TARGET_NR_mpx
3872 case TARGET_NR_setpgid:
3873 ret = get_errno(setpgid(arg1, arg2));
3875 #ifdef TARGET_NR_ulimit
3876 case TARGET_NR_ulimit:
3879 #ifdef TARGET_NR_oldolduname
3880 case TARGET_NR_oldolduname:
3883 case TARGET_NR_umask:
3884 ret = get_errno(umask(arg1));
3886 case TARGET_NR_chroot:
3887 if (!(p = lock_user_string(arg1)))
3889 ret = get_errno(chroot(p));
3890 unlock_user(p, arg1, 0);
3892 case TARGET_NR_ustat:
3894 case TARGET_NR_dup2:
3895 ret = get_errno(dup2(arg1, arg2));
3897 #ifdef TARGET_NR_getppid /* not on alpha */
3898 case TARGET_NR_getppid:
3899 ret = get_errno(getppid());
3902 case TARGET_NR_getpgrp:
3903 ret = get_errno(getpgrp());
3905 case TARGET_NR_setsid:
3906 ret = get_errno(setsid());
3908 #ifdef TARGET_NR_sigaction
3909 case TARGET_NR_sigaction:
3911 #if !defined(TARGET_MIPS)
3912 struct target_old_sigaction *old_act;
3913 struct target_sigaction act, oact, *pact;
3915 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
3917 act._sa_handler = old_act->_sa_handler;
3918 target_siginitset(&act.sa_mask, old_act->sa_mask);
3919 act.sa_flags = old_act->sa_flags;
3920 act.sa_restorer = old_act->sa_restorer;
3921 unlock_user_struct(old_act, arg2, 0);
3926 ret = get_errno(do_sigaction(arg1, pact, &oact));
3927 if (!is_error(ret) && arg3) {
3928 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
3930 old_act->_sa_handler = oact._sa_handler;
3931 old_act->sa_mask = oact.sa_mask.sig[0];
3932 old_act->sa_flags = oact.sa_flags;
3933 old_act->sa_restorer = oact.sa_restorer;
3934 unlock_user_struct(old_act, arg3, 1);
3937 struct target_sigaction act, oact, *pact, *old_act;
3940 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
3942 act._sa_handler = old_act->_sa_handler;
3943 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
3944 act.sa_flags = old_act->sa_flags;
3945 unlock_user_struct(old_act, arg2, 0);
3951 ret = get_errno(do_sigaction(arg1, pact, &oact));
3953 if (!is_error(ret) && arg3) {
3954 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
3956 old_act->_sa_handler = oact._sa_handler;
3957 old_act->sa_flags = oact.sa_flags;
3958 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
3959 old_act->sa_mask.sig[1] = 0;
3960 old_act->sa_mask.sig[2] = 0;
3961 old_act->sa_mask.sig[3] = 0;
3962 unlock_user_struct(old_act, arg3, 1);
3968 case TARGET_NR_rt_sigaction:
3970 struct target_sigaction *act;
3971 struct target_sigaction *oact;
3974 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
3979 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
3980 ret = -TARGET_EFAULT;
3981 goto rt_sigaction_fail;
3985 ret = get_errno(do_sigaction(arg1, act, oact));
3988 unlock_user_struct(act, arg2, 0);
3990 unlock_user_struct(oact, arg3, 1);
3993 #ifdef TARGET_NR_sgetmask /* not on alpha */
3994 case TARGET_NR_sgetmask:
3997 abi_ulong target_set;
3998 sigprocmask(0, NULL, &cur_set);
3999 host_to_target_old_sigset(&target_set, &cur_set);
4004 #ifdef TARGET_NR_ssetmask /* not on alpha */
4005 case TARGET_NR_ssetmask:
4007 sigset_t set, oset, cur_set;
4008 abi_ulong target_set = arg1;
4009 sigprocmask(0, NULL, &cur_set);
4010 target_to_host_old_sigset(&set, &target_set);
4011 sigorset(&set, &set, &cur_set);
4012 sigprocmask(SIG_SETMASK, &set, &oset);
4013 host_to_target_old_sigset(&target_set, &oset);
4018 #ifdef TARGET_NR_sigprocmask
4019 case TARGET_NR_sigprocmask:
4022 sigset_t set, oldset, *set_ptr;
4026 case TARGET_SIG_BLOCK:
4029 case TARGET_SIG_UNBLOCK:
4032 case TARGET_SIG_SETMASK:
4036 ret = -TARGET_EINVAL;
4039 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4041 target_to_host_old_sigset(&set, p);
4042 unlock_user(p, arg2, 0);
4048 ret = get_errno(sigprocmask(arg1, set_ptr, &oldset));
4049 if (!is_error(ret) && arg3) {
4050 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4052 host_to_target_old_sigset(p, &oldset);
4053 unlock_user(p, arg3, sizeof(target_sigset_t));
4058 case TARGET_NR_rt_sigprocmask:
4061 sigset_t set, oldset, *set_ptr;
4065 case TARGET_SIG_BLOCK:
4068 case TARGET_SIG_UNBLOCK:
4071 case TARGET_SIG_SETMASK:
4075 ret = -TARGET_EINVAL;
4078 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
4080 target_to_host_sigset(&set, p);
4081 unlock_user(p, arg2, 0);
4087 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
4088 if (!is_error(ret) && arg3) {
4089 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
4091 host_to_target_sigset(p, &oldset);
4092 unlock_user(p, arg3, sizeof(target_sigset_t));
4096 #ifdef TARGET_NR_sigpending
4097 case TARGET_NR_sigpending:
4100 ret = get_errno(sigpending(&set));
4101 if (!is_error(ret)) {
4102 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4104 host_to_target_old_sigset(p, &set);
4105 unlock_user(p, arg1, sizeof(target_sigset_t));
4110 case TARGET_NR_rt_sigpending:
4113 ret = get_errno(sigpending(&set));
4114 if (!is_error(ret)) {
4115 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
4117 host_to_target_sigset(p, &set);
4118 unlock_user(p, arg1, sizeof(target_sigset_t));
4122 #ifdef TARGET_NR_sigsuspend
4123 case TARGET_NR_sigsuspend:
4126 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4128 target_to_host_old_sigset(&set, p);
4129 unlock_user(p, arg1, 0);
4130 ret = get_errno(sigsuspend(&set));
4134 case TARGET_NR_rt_sigsuspend:
4137 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4139 target_to_host_sigset(&set, p);
4140 unlock_user(p, arg1, 0);
4141 ret = get_errno(sigsuspend(&set));
4144 case TARGET_NR_rt_sigtimedwait:
4147 struct timespec uts, *puts;
4150 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
4152 target_to_host_sigset(&set, p);
4153 unlock_user(p, arg1, 0);
4156 target_to_host_timespec(puts, arg3);
4160 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
4161 if (!is_error(ret) && arg2) {
4162 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
4164 host_to_target_siginfo(p, &uinfo);
4165 unlock_user(p, arg2, sizeof(target_siginfo_t));
4169 case TARGET_NR_rt_sigqueueinfo:
4172 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
4174 target_to_host_siginfo(&uinfo, p);
4175 unlock_user(p, arg3, 0);
4176 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
4179 #ifdef TARGET_NR_sigreturn
4180 case TARGET_NR_sigreturn:
4181 /* NOTE: ret is eax, so no transcoding needs to be done */
4182 ret = do_sigreturn(cpu_env);
4185 case TARGET_NR_rt_sigreturn:
4186 /* NOTE: ret is eax, so no transcoding needs to be done */
4187 ret = do_rt_sigreturn(cpu_env);
4189 case TARGET_NR_sethostname:
4190 if (!(p = lock_user_string(arg1)))
4192 ret = get_errno(sethostname(p, arg2));
4193 unlock_user(p, arg1, 0);
4195 case TARGET_NR_setrlimit:
4197 /* XXX: convert resource ? */
4198 int resource = arg1;
4199 struct target_rlimit *target_rlim;
4201 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
4203 rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
4204 rlim.rlim_max = tswapl(target_rlim->rlim_max);
4205 unlock_user_struct(target_rlim, arg2, 0);
4206 ret = get_errno(setrlimit(resource, &rlim));
4209 case TARGET_NR_getrlimit:
4211 /* XXX: convert resource ? */
4212 int resource = arg1;
4213 struct target_rlimit *target_rlim;
4216 ret = get_errno(getrlimit(resource, &rlim));
4217 if (!is_error(ret)) {
4218 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
4220 target_rlim->rlim_cur = tswapl(rlim.rlim_cur);
4221 target_rlim->rlim_max = tswapl(rlim.rlim_max);
4222 unlock_user_struct(target_rlim, arg2, 1);
4226 case TARGET_NR_getrusage:
4228 struct rusage rusage;
4229 ret = get_errno(getrusage(arg1, &rusage));
4230 if (!is_error(ret)) {
4231 host_to_target_rusage(arg2, &rusage);
4235 case TARGET_NR_gettimeofday:
4238 ret = get_errno(gettimeofday(&tv, NULL));
4239 if (!is_error(ret)) {
4240 if (copy_to_user_timeval(arg1, &tv))
4245 case TARGET_NR_settimeofday:
4248 if (copy_from_user_timeval(&tv, arg1))
4250 ret = get_errno(settimeofday(&tv, NULL));
4253 #ifdef TARGET_NR_select
4254 case TARGET_NR_select:
4256 struct target_sel_arg_struct *sel;
4257 abi_ulong inp, outp, exp, tvp;
4260 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
4262 nsel = tswapl(sel->n);
4263 inp = tswapl(sel->inp);
4264 outp = tswapl(sel->outp);
4265 exp = tswapl(sel->exp);
4266 tvp = tswapl(sel->tvp);
4267 unlock_user_struct(sel, arg1, 0);
4268 ret = do_select(nsel, inp, outp, exp, tvp);
4272 case TARGET_NR_symlink:
4275 p = lock_user_string(arg1);
4276 p2 = lock_user_string(arg2);
4278 ret = -TARGET_EFAULT;
4280 ret = get_errno(symlink(p, p2));
4281 unlock_user(p2, arg2, 0);
4282 unlock_user(p, arg1, 0);
4285 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
4286 case TARGET_NR_symlinkat:
4289 p = lock_user_string(arg1);
4290 p2 = lock_user_string(arg3);
4292 ret = -TARGET_EFAULT;
4294 ret = get_errno(sys_symlinkat(p, arg2, p2));
4295 unlock_user(p2, arg3, 0);
4296 unlock_user(p, arg1, 0);
4300 #ifdef TARGET_NR_oldlstat
4301 case TARGET_NR_oldlstat:
4304 case TARGET_NR_readlink:
4307 p = lock_user_string(arg1);
4308 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
4310 ret = -TARGET_EFAULT;
4312 ret = get_errno(readlink(path(p), p2, arg3));
4313 unlock_user(p2, arg2, ret);
4314 unlock_user(p, arg1, 0);
4317 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
4318 case TARGET_NR_readlinkat:
4321 p = lock_user_string(arg2);
4322 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
4324 ret = -TARGET_EFAULT;
4326 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
4327 unlock_user(p2, arg3, ret);
4328 unlock_user(p, arg2, 0);
4332 #ifdef TARGET_NR_uselib
4333 case TARGET_NR_uselib:
4336 #ifdef TARGET_NR_swapon
4337 case TARGET_NR_swapon:
4338 if (!(p = lock_user_string(arg1)))
4340 ret = get_errno(swapon(p, arg2));
4341 unlock_user(p, arg1, 0);
4344 case TARGET_NR_reboot:
4346 #ifdef TARGET_NR_readdir
4347 case TARGET_NR_readdir:
4350 #ifdef TARGET_NR_mmap
4351 case TARGET_NR_mmap:
4352 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS)
4355 abi_ulong v1, v2, v3, v4, v5, v6;
4356 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
4364 unlock_user(v, arg1, 0);
4365 ret = get_errno(target_mmap(v1, v2, v3,
4366 target_to_host_bitmask(v4, mmap_flags_tbl),
4370 ret = get_errno(target_mmap(arg1, arg2, arg3,
4371 target_to_host_bitmask(arg4, mmap_flags_tbl),
4377 #ifdef TARGET_NR_mmap2
4378 case TARGET_NR_mmap2:
4380 #define MMAP_SHIFT 12
4382 ret = get_errno(target_mmap(arg1, arg2, arg3,
4383 target_to_host_bitmask(arg4, mmap_flags_tbl),
4385 arg6 << MMAP_SHIFT));
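/*
 * Editor's note: mmap2 takes its file offset in 4096-byte units rather
 * than bytes, which is what the MMAP_SHIFT above accounts for; a guest
 * arg6 of 3, for example, maps the file starting at byte offset
 * 3 << 12 = 12288.
 */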
4388 case TARGET_NR_munmap:
4389 ret = get_errno(target_munmap(arg1, arg2));
4391 case TARGET_NR_mprotect:
4392 ret = get_errno(target_mprotect(arg1, arg2, arg3));
4394 #ifdef TARGET_NR_mremap
4395 case TARGET_NR_mremap:
4396 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
4399 /* ??? msync/mlock/munlock are broken for softmmu. */
4400 #ifdef TARGET_NR_msync
4401 case TARGET_NR_msync:
4402 ret = get_errno(msync(g2h(arg1), arg2, arg3));
4405 #ifdef TARGET_NR_mlock
4406 case TARGET_NR_mlock:
4407 ret = get_errno(mlock(g2h(arg1), arg2));
4410 #ifdef TARGET_NR_munlock
4411 case TARGET_NR_munlock:
4412 ret = get_errno(munlock(g2h(arg1), arg2));
4415 #ifdef TARGET_NR_mlockall
4416 case TARGET_NR_mlockall:
4417 ret = get_errno(mlockall(arg1));
4420 #ifdef TARGET_NR_munlockall
4421 case TARGET_NR_munlockall:
4422 ret = get_errno(munlockall());
4425 case TARGET_NR_truncate:
4426 if (!(p = lock_user_string(arg1)))
4428 ret = get_errno(truncate(p, arg2));
4429 unlock_user(p, arg1, 0);
4431 case TARGET_NR_ftruncate:
4432 ret = get_errno(ftruncate(arg1, arg2));
4434 case TARGET_NR_fchmod:
4435 ret = get_errno(fchmod(arg1, arg2));
4437 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
4438 case TARGET_NR_fchmodat:
4439 if (!(p = lock_user_string(arg2)))
4441 ret = get_errno(sys_fchmodat(arg1, p, arg3, arg4));
4442 unlock_user(p, arg2, 0);
4445 case TARGET_NR_getpriority:
4446 /* libc does special remapping of the return value of
4447 * sys_getpriority() so it's just easiest to call
4448 * sys_getpriority() directly rather than through libc. */
4449 ret = sys_getpriority(arg1, arg2);
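/*
 * Editor's note (hedged): on Linux the raw getpriority() syscall returns
 * the nice value biased into a non-negative range so that it cannot be
 * mistaken for an error, and it is the C library that converts it back to
 * the usual -20..19 range.  Calling the host libc wrapper here would apply
 * that conversion once on the host and then again in the guest's libc, so
 * the raw syscall result is passed through instead.  For example, a
 * process at nice -5 is reported by the raw syscall as 25 (20 - (-5)),
 * which the guest's libc then turns back into -5.
 */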
4451 case TARGET_NR_setpriority:
4452 ret = get_errno(setpriority(arg1, arg2, arg3));
4454 #ifdef TARGET_NR_profil
4455 case TARGET_NR_profil:
4458 case TARGET_NR_statfs:
4459 if (!(p = lock_user_string(arg1)))
4461 ret = get_errno(statfs(path(p), &stfs));
4462 unlock_user(p, arg1, 0);
4464 if (!is_error(ret)) {
4465 struct target_statfs *target_stfs;
4467 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
4469 __put_user(stfs.f_type, &target_stfs->f_type);
4470 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
4471 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
4472 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
4473 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
4474 __put_user(stfs.f_files, &target_stfs->f_files);
4475 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
4476 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
4477 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
4478 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
4479 unlock_user_struct(target_stfs, arg2, 1);
4482 case TARGET_NR_fstatfs:
4483 ret = get_errno(fstatfs(arg1, &stfs));
4484 goto convert_statfs;
4485 #ifdef TARGET_NR_statfs64
4486 case TARGET_NR_statfs64:
4487 if (!(p = lock_user_string(arg1)))
4489 ret = get_errno(statfs(path(p), &stfs));
4490 unlock_user(p, arg1, 0);
4492 if (!is_error(ret)) {
4493 struct target_statfs64 *target_stfs;
4495 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
4497 __put_user(stfs.f_type, &target_stfs->f_type);
4498 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
4499 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
4500 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
4501 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
4502 __put_user(stfs.f_files, &target_stfs->f_files);
4503 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
4504 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
4505 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
4506 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
4507 unlock_user_struct(target_stfs, arg3, 1);
4510 case TARGET_NR_fstatfs64:
4511 ret = get_errno(fstatfs(arg1, &stfs));
4512 goto convert_statfs64;
4514 #ifdef TARGET_NR_ioperm
4515 case TARGET_NR_ioperm:
4518 #ifdef TARGET_NR_socketcall
4519 case TARGET_NR_socketcall:
4520 ret = do_socketcall(arg1, arg2);
4523 #ifdef TARGET_NR_accept
4524 case TARGET_NR_accept:
4525 ret = do_accept(arg1, arg2, arg3);
4528 #ifdef TARGET_NR_bind
4529 case TARGET_NR_bind:
4530 ret = do_bind(arg1, arg2, arg3);
4533 #ifdef TARGET_NR_connect
4534 case TARGET_NR_connect:
4535 ret = do_connect(arg1, arg2, arg3);
4538 #ifdef TARGET_NR_getpeername
4539 case TARGET_NR_getpeername:
4540 ret = do_getpeername(arg1, arg2, arg3);
4543 #ifdef TARGET_NR_getsockname
4544 case TARGET_NR_getsockname:
4545 ret = do_getsockname(arg1, arg2, arg3);
4548 #ifdef TARGET_NR_getsockopt
4549 case TARGET_NR_getsockopt:
4550 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
4553 #ifdef TARGET_NR_listen
4554 case TARGET_NR_listen:
4555 ret = get_errno(listen(arg1, arg2));
4558 #ifdef TARGET_NR_recv
4559 case TARGET_NR_recv:
4560 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
4563 #ifdef TARGET_NR_recvfrom
4564 case TARGET_NR_recvfrom:
4565 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
4568 #ifdef TARGET_NR_recvmsg
4569 case TARGET_NR_recvmsg:
4570 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
4573 #ifdef TARGET_NR_send
4574 case TARGET_NR_send:
4575 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
4578 #ifdef TARGET_NR_sendmsg
4579 case TARGET_NR_sendmsg:
4580 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
4583 #ifdef TARGET_NR_sendto
4584 case TARGET_NR_sendto:
4585 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
4588 #ifdef TARGET_NR_shutdown
4589 case TARGET_NR_shutdown:
4590 ret = get_errno(shutdown(arg1, arg2));
4593 #ifdef TARGET_NR_socket
4594 case TARGET_NR_socket:
4595 ret = do_socket(arg1, arg2, arg3);
4598 #ifdef TARGET_NR_socketpair
4599 case TARGET_NR_socketpair:
4600 ret = do_socketpair(arg1, arg2, arg3, arg4);
4603 #ifdef TARGET_NR_setsockopt
4604 case TARGET_NR_setsockopt:
4605 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
4609 case TARGET_NR_syslog:
4610 if (!(p = lock_user_string(arg2)))
4612 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
4613 unlock_user(p, arg2, 0);
4616 case TARGET_NR_setitimer:
4618 struct itimerval value, ovalue, *pvalue;
4622 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
4623 || copy_from_user_timeval(&pvalue->it_value,
4624 arg2 + sizeof(struct target_timeval)))
4629 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
4630 if (!is_error(ret) && arg3) {
4631 if (copy_to_user_timeval(arg3,
4632 &ovalue.it_interval)
4633 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
4639 case TARGET_NR_getitimer:
4641 struct itimerval value;
4643 ret = get_errno(getitimer(arg1, &value));
4644 if (!is_error(ret) && arg2) {
4645 if (copy_to_user_timeval(arg2,
4647 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
4653 case TARGET_NR_stat:
4654 if (!(p = lock_user_string(arg1)))
4656 ret = get_errno(stat(path(p), &st));
4657 unlock_user(p, arg1, 0);
4659 case TARGET_NR_lstat:
4660 if (!(p = lock_user_string(arg1)))
4662 ret = get_errno(lstat(path(p), &st));
4663 unlock_user(p, arg1, 0);
4665 case TARGET_NR_fstat:
4667 ret = get_errno(fstat(arg1, &st));
4669 if (!is_error(ret)) {
4670 struct target_stat *target_st;
4672 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
4674 __put_user(st.st_dev, &target_st->st_dev);
4675 __put_user(st.st_ino, &target_st->st_ino);
4676 __put_user(st.st_mode, &target_st->st_mode);
4677 __put_user(st.st_uid, &target_st->st_uid);
4678 __put_user(st.st_gid, &target_st->st_gid);
4679 __put_user(st.st_nlink, &target_st->st_nlink);
4680 __put_user(st.st_rdev, &target_st->st_rdev);
4681 __put_user(st.st_size, &target_st->st_size);
4682 __put_user(st.st_blksize, &target_st->st_blksize);
4683 __put_user(st.st_blocks, &target_st->st_blocks);
4684 __put_user(st.st_atime, &target_st->target_st_atime);
4685 __put_user(st.st_mtime, &target_st->target_st_mtime);
4686 __put_user(st.st_ctime, &target_st->target_st_ctime);
4687 unlock_user_struct(target_st, arg2, 1);
4691 #ifdef TARGET_NR_olduname
4692 case TARGET_NR_olduname:
4695 #ifdef TARGET_NR_iopl
4696 case TARGET_NR_iopl:
4699 case TARGET_NR_vhangup:
4700 ret = get_errno(vhangup());
4702 #ifdef TARGET_NR_idle
4703 case TARGET_NR_idle:
4706 #ifdef TARGET_NR_syscall
4707 case TARGET_NR_syscall:
4708 ret = do_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0);
4711 case TARGET_NR_wait4:
4714 abi_long status_ptr = arg2;
4715 struct rusage rusage, *rusage_ptr;
4716 abi_ulong target_rusage = arg4;
4718 rusage_ptr = &rusage;
4721 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
4722 if (!is_error(ret)) {
4724 if (put_user_s32(status, status_ptr))
4728 host_to_target_rusage(target_rusage, &rusage);
4732 #ifdef TARGET_NR_swapoff
4733 case TARGET_NR_swapoff:
4734 if (!(p = lock_user_string(arg1)))
4736 ret = get_errno(swapoff(p));
4737 unlock_user(p, arg1, 0);
4740 case TARGET_NR_sysinfo:
4742 struct target_sysinfo *target_value;
4743 struct sysinfo value;
4744 ret = get_errno(sysinfo(&value));
4745 if (!is_error(ret) && arg1)
4747 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
4749 __put_user(value.uptime, &target_value->uptime);
4750 __put_user(value.loads[0], &target_value->loads[0]);
4751 __put_user(value.loads[1], &target_value->loads[1]);
4752 __put_user(value.loads[2], &target_value->loads[2]);
4753 __put_user(value.totalram, &target_value->totalram);
4754 __put_user(value.freeram, &target_value->freeram);
4755 __put_user(value.sharedram, &target_value->sharedram);
4756 __put_user(value.bufferram, &target_value->bufferram);
4757 __put_user(value.totalswap, &target_value->totalswap);
4758 __put_user(value.freeswap, &target_value->freeswap);
4759 __put_user(value.procs, &target_value->procs);
4760 __put_user(value.totalhigh, &target_value->totalhigh);
4761 __put_user(value.freehigh, &target_value->freehigh);
4762 __put_user(value.mem_unit, &target_value->mem_unit);
4763 unlock_user_struct(target_value, arg1, 1);
4767 #ifdef TARGET_NR_ipc
4769 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
4772 case TARGET_NR_fsync:
4773 ret = get_errno(fsync(arg1));
4775 case TARGET_NR_clone:
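/* SH4 passes the last two clone() arguments in the opposite order
   from the other targets, so arg4 and arg5 are swapped before calling
   do_fork. */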
4776 #if defined(TARGET_SH4)
4777 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
4779 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
4782 #ifdef __NR_exit_group
4783 /* new thread calls */
4784 case TARGET_NR_exit_group:
4785 gdb_exit(cpu_env, arg1);
4786 ret = get_errno(exit_group(arg1));
4789 case TARGET_NR_setdomainname:
4790 if (!(p = lock_user_string(arg1)))
4792 ret = get_errno(setdomainname(p, arg2));
4793 unlock_user(p, arg1, 0);
4795 case TARGET_NR_uname:
4796 /* no need to transcode because we use the linux syscall */
4798 struct new_utsname * buf;
4800 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
4802 ret = get_errno(sys_uname(buf));
4803 if (!is_error(ret)) {
4804 /* Overwrite the native machine name with whatever is being emulated. */
4806 strcpy (buf->machine, UNAME_MACHINE);
4807 /* Allow the user to override the reported release. */
4808 if (qemu_uname_release && *qemu_uname_release)
4809 strcpy (buf->release, qemu_uname_release);
4811 unlock_user_struct(buf, arg1, 1);
4815 case TARGET_NR_modify_ldt:
4816 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
4818 #if !defined(TARGET_X86_64)
4819 case TARGET_NR_vm86old:
4821 case TARGET_NR_vm86:
4822 ret = do_vm86(cpu_env, arg1, arg2);
4826 case TARGET_NR_adjtimex:
4828 #ifdef TARGET_NR_create_module
4829 case TARGET_NR_create_module:
4831 case TARGET_NR_init_module:
4832 case TARGET_NR_delete_module:
4833 #ifdef TARGET_NR_get_kernel_syms
4834 case TARGET_NR_get_kernel_syms:
4837 case TARGET_NR_quotactl:
4839 case TARGET_NR_getpgid:
4840 ret = get_errno(getpgid(arg1));
4842 case TARGET_NR_fchdir:
4843 ret = get_errno(fchdir(arg1));
4845 #ifdef TARGET_NR_bdflush /* not on x86_64 */
4846 case TARGET_NR_bdflush:
4849 #ifdef TARGET_NR_sysfs
4850 case TARGET_NR_sysfs:
4853 case TARGET_NR_personality:
4854 ret = get_errno(personality(arg1));
4856 #ifdef TARGET_NR_afs_syscall
4857 case TARGET_NR_afs_syscall:
4860 #ifdef TARGET_NR__llseek /* Not on alpha */
4861 case TARGET_NR__llseek:
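/* The guest passes the 64-bit offset split into two 32-bit halves
   (arg2 = high word, arg3 = low word) plus a result pointer in arg4.
   On an x86-64 host the halves are recombined as
   ((uint64_t)arg2 << 32) | arg3 and plain lseek() is used; otherwise
   the host _llseek syscall fills res, which is copied back with
   put_user_s64. */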
4863 #if defined (__x86_64__)
4864 ret = get_errno(lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5));
4865 if (put_user_s64(ret, arg4))
4869 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
4870 if (put_user_s64(res, arg4))
4876 case TARGET_NR_getdents:
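/* When the target ABI is 32-bit but host longs are 64-bit, the host
   dirent records are repacked into the guest's smaller target_dirent
   layout: each record shrinks by 2 * (sizeof(long) - sizeof(abi_long))
   bytes, and d_ino, d_off and d_reclen are stored in target byte
   order.  When the layouts already match, the records are only
   byte-swapped in place. */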
4877 #if TARGET_ABI_BITS != 32
4879 #elif TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
4881 struct target_dirent *target_dirp;
4882 struct dirent *dirp;
4883 abi_long count = arg3;
4885 dirp = malloc(count);
4887 ret = -TARGET_ENOMEM;
4891 ret = get_errno(sys_getdents(arg1, dirp, count));
4892 if (!is_error(ret)) {
4894 struct target_dirent *tde;
4896 int reclen, treclen;
4897 int count1, tnamelen;
4901 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
4905 reclen = de->d_reclen;
4906 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
4907 tde->d_reclen = tswap16(treclen);
4908 tde->d_ino = tswapl(de->d_ino);
4909 tde->d_off = tswapl(de->d_off);
4910 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
4913 /* XXX: may not be correct */
4914 strncpy(tde->d_name, de->d_name, tnamelen);
4915 de = (struct dirent *)((char *)de + reclen);
4917 tde = (struct target_dirent *)((char *)tde + treclen);
4921 unlock_user(target_dirp, arg2, ret);
4927 struct dirent *dirp;
4928 abi_long count = arg3;
4930 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
4932 ret = get_errno(sys_getdents(arg1, dirp, count));
4933 if (!is_error(ret)) {
4939 reclen = de->d_reclen;
4942 de->d_reclen = tswap16(reclen);
4943 tswapls(&de->d_ino);
4944 tswapls(&de->d_off);
4945 de = (struct dirent *)((char *)de + reclen);
4949 unlock_user(dirp, arg2, ret);
4953 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
4954 case TARGET_NR_getdents64:
4956 struct dirent64 *dirp;
4957 abi_long count = arg3;
4958 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
4960 ret = get_errno(sys_getdents64(arg1, dirp, count));
4961 if (!is_error(ret)) {
4962 struct dirent64 *de;
4967 reclen = de->d_reclen;
4970 de->d_reclen = tswap16(reclen);
4971 tswap64s((uint64_t *)&de->d_ino);
4972 tswap64s((uint64_t *)&de->d_off);
4973 de = (struct dirent64 *)((char *)de + reclen);
4977 unlock_user(dirp, arg2, ret);
4980 #endif /* TARGET_NR_getdents64 */
4981 #ifdef TARGET_NR__newselect
4982 case TARGET_NR__newselect:
4983 ret = do_select(arg1, arg2, arg3, arg4, arg5);
4986 #ifdef TARGET_NR_poll
4987 case TARGET_NR_poll:
4989 struct target_pollfd *target_pfd;
4990 unsigned int nfds = arg2;
4995 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
4998 pfd = alloca(sizeof(struct pollfd) * nfds);
4999 for(i = 0; i < nfds; i++) {
5000 pfd[i].fd = tswap32(target_pfd[i].fd);
5001 pfd[i].events = tswap16(target_pfd[i].events);
5003 ret = get_errno(poll(pfd, nfds, timeout));
5004 if (!is_error(ret)) {
5005 for(i = 0; i < nfds; i++) {
5006 target_pfd[i].revents = tswap16(pfd[i].revents);
5008 ret += nfds * (sizeof(struct target_pollfd)
5009 - sizeof(struct pollfd));
5011 unlock_user(target_pfd, arg1, ret);
5015 case TARGET_NR_flock:
5016 /* NOTE: the flock constant seems to be the same for every Linux platform, so arg2 is passed through unchanged. */
5018 ret = get_errno(flock(arg1, arg2));
5020 case TARGET_NR_readv:
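/* readv and writev build a host iovec array from the guest's iovec
   list: lock_iovec translates each guest base address and length, and
   unlock_iovec copies the data back to guest memory in the read
   case. */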
5025 vec = alloca(count * sizeof(struct iovec));
5026 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
5028 ret = get_errno(readv(arg1, vec, count));
5029 unlock_iovec(vec, arg2, count, 1);
5032 case TARGET_NR_writev:
5037 vec = alloca(count * sizeof(struct iovec));
5038 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
5040 ret = get_errno(writev(arg1, vec, count));
5041 unlock_iovec(vec, arg2, count, 0);
5044 case TARGET_NR_getsid:
5045 ret = get_errno(getsid(arg1));
5047 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
5048 case TARGET_NR_fdatasync:
5049 ret = get_errno(fdatasync(arg1));
5052 case TARGET_NR__sysctl:
5053 /* We don't implement this, but ENOTDIR is always a safe return value. */
5055 ret = -TARGET_ENOTDIR;
5057 case TARGET_NR_sched_setparam:
5059 struct sched_param *target_schp;
5060 struct sched_param schp;
5062 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
5064 schp.sched_priority = tswap32(target_schp->sched_priority);
5065 unlock_user_struct(target_schp, arg2, 0);
5066 ret = get_errno(sched_setparam(arg1, &schp));
5069 case TARGET_NR_sched_getparam:
5071 struct sched_param *target_schp;
5072 struct sched_param schp;
5073 ret = get_errno(sched_getparam(arg1, &schp));
5074 if (!is_error(ret)) {
5075 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
5077 target_schp->sched_priority = tswap32(schp.sched_priority);
5078 unlock_user_struct(target_schp, arg2, 1);
5082 case TARGET_NR_sched_setscheduler:
5084 struct sched_param *target_schp;
5085 struct sched_param schp;
5086 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
5088 schp.sched_priority = tswap32(target_schp->sched_priority);
5089 unlock_user_struct(target_schp, arg3, 0);
5090 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
5093 case TARGET_NR_sched_getscheduler:
5094 ret = get_errno(sched_getscheduler(arg1));
5096 case TARGET_NR_sched_yield:
5097 ret = get_errno(sched_yield());
5099 case TARGET_NR_sched_get_priority_max:
5100 ret = get_errno(sched_get_priority_max(arg1));
5102 case TARGET_NR_sched_get_priority_min:
5103 ret = get_errno(sched_get_priority_min(arg1));
5105 case TARGET_NR_sched_rr_get_interval:
5108 ret = get_errno(sched_rr_get_interval(arg1, &ts));
5109 if (!is_error(ret)) {
5110 host_to_target_timespec(arg2, &ts);
5114 case TARGET_NR_nanosleep:
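/* The request is converted from the target timespec layout; if the
   sleep is interrupted and the guest supplied arg2, the remaining
   time is converted back and stored there. */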
5116 struct timespec req, rem;
5117 target_to_host_timespec(&req, arg1);
5118 ret = get_errno(nanosleep(&req, &rem));
5119 if (is_error(ret) && arg2) {
5120 host_to_target_timespec(arg2, &rem);
5124 #ifdef TARGET_NR_query_module
5125 case TARGET_NR_query_module:
5128 #ifdef TARGET_NR_nfsservctl
5129 case TARGET_NR_nfsservctl:
5132 case TARGET_NR_prctl:
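/* PR_GET_PDEATHSIG needs a host int to receive the signal number,
   which is then stored to the guest pointer in arg2; other options
   fall through to a plain host prctl call with the arguments passed
   unchanged. */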
5135 case PR_GET_PDEATHSIG:
5138 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
5139 if (!is_error(ret) && arg2
5140 && put_user_ual(deathsig, arg2))
5145 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
5149 #ifdef TARGET_NR_arch_prctl
5150 case TARGET_NR_arch_prctl:
5151 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
5152 ret = do_arch_prctl(cpu_env, arg1, arg2);
5158 #ifdef TARGET_NR_pread
5159 case TARGET_NR_pread:
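/* On ARM, the EABI aligns 64-bit arguments to even register pairs,
   which shifts the offset argument one slot later than in the old
   ABI; the eabi checks below account for that before the offset is
   used. */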
5161 if (((CPUARMState *)cpu_env)->eabi)
5164 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5166 ret = get_errno(pread(arg1, p, arg3, arg4));
5167 unlock_user(p, arg2, ret);
5169 case TARGET_NR_pwrite:
5171 if (((CPUARMState *)cpu_env)->eabi)
5174 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5176 ret = get_errno(pwrite(arg1, p, arg3, arg4));
5177 unlock_user(p, arg2, 0);
5180 #ifdef TARGET_NR_pread64
5181 case TARGET_NR_pread64:
5182 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5184 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
5185 unlock_user(p, arg2, ret);
5187 case TARGET_NR_pwrite64:
5188 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5190 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
5191 unlock_user(p, arg2, 0);
5194 case TARGET_NR_getcwd:
5195 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
5197 ret = get_errno(sys_getcwd1(p, arg2));
5198 unlock_user(p, arg1, ret);
5200 case TARGET_NR_capget:
5202 case TARGET_NR_capset:
5204 case TARGET_NR_sigaltstack:
5205 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
5206 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA)
5207 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
5212 case TARGET_NR_sendfile:
5214 #ifdef TARGET_NR_getpmsg
5215 case TARGET_NR_getpmsg:
5218 #ifdef TARGET_NR_putpmsg
5219 case TARGET_NR_putpmsg:
5222 #ifdef TARGET_NR_vfork
5223 case TARGET_NR_vfork:
5224 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
5228 #ifdef TARGET_NR_ugetrlimit
5229 case TARGET_NR_ugetrlimit:
5232 ret = get_errno(getrlimit(arg1, &rlim));
5233 if (!is_error(ret)) {
5234 struct target_rlimit *target_rlim;
5235 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5237 target_rlim->rlim_cur = tswapl(rlim.rlim_cur);
5238 target_rlim->rlim_max = tswapl(rlim.rlim_max);
5239 unlock_user_struct(target_rlim, arg2, 1);
5244 #ifdef TARGET_NR_truncate64
5245 case TARGET_NR_truncate64:
5246 if (!(p = lock_user_string(arg1)))
5248 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
5249 unlock_user(p, arg1, 0);
5252 #ifdef TARGET_NR_ftruncate64
5253 case TARGET_NR_ftruncate64:
5254 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
5257 #ifdef TARGET_NR_stat64
5258 case TARGET_NR_stat64:
5259 if (!(p = lock_user_string(arg1)))
5261 ret = get_errno(stat(path(p), &st));
5262 unlock_user(p, arg1, 0);
5264 ret = host_to_target_stat64(cpu_env, arg2, &st);
5267 #ifdef TARGET_NR_lstat64
5268 case TARGET_NR_lstat64:
5269 if (!(p = lock_user_string(arg1)))
5271 ret = get_errno(lstat(path(p), &st));
5272 unlock_user(p, arg1, 0);
5274 ret = host_to_target_stat64(cpu_env, arg2, &st);
5277 #ifdef TARGET_NR_fstat64
5278 case TARGET_NR_fstat64:
5279 ret = get_errno(fstat(arg1, &st));
5281 ret = host_to_target_stat64(cpu_env, arg2, &st);
5284 #if defined(TARGET_NR_fstatat64) && defined(__NR_fstatat64)
5285 case TARGET_NR_fstatat64:
5286 if (!(p = lock_user_string(arg2)))
5288 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
5290 ret = host_to_target_stat64(cpu_env, arg3, &st);
5294 case TARGET_NR_lchown:
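/* The cases from here down to the USE_UID16 endif are built only for
   targets whose uid_t/gid_t is 16 bits wide: guest ids are widened
   with low2highuid/low2highgid before the host call and results are
   narrowed with high2lowuid/high2lowgid.  The *32 syscall variants
   further down pass the ids through unchanged. */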
5295 if (!(p = lock_user_string(arg1)))
5297 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
5298 unlock_user(p, arg1, 0);
5300 case TARGET_NR_getuid:
5301 ret = get_errno(high2lowuid(getuid()));
5303 case TARGET_NR_getgid:
5304 ret = get_errno(high2lowgid(getgid()));
5306 case TARGET_NR_geteuid:
5307 ret = get_errno(high2lowuid(geteuid()));
5309 case TARGET_NR_getegid:
5310 ret = get_errno(high2lowgid(getegid()));
5312 case TARGET_NR_setreuid:
5313 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
5315 case TARGET_NR_setregid:
5316 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
5318 case TARGET_NR_getgroups:
5320 int gidsetsize = arg1;
5321 uint16_t *target_grouplist;
5325 grouplist = alloca(gidsetsize * sizeof(gid_t));
5326 ret = get_errno(getgroups(gidsetsize, grouplist));
5327 if (gidsetsize == 0)
5329 if (!is_error(ret)) {
5330 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
5331 if (!target_grouplist)
5333 for(i = 0;i < ret; i++)
5334 target_grouplist[i] = tswap16(grouplist[i]);
5335 unlock_user(target_grouplist, arg2, gidsetsize * 2);
5339 case TARGET_NR_setgroups:
5341 int gidsetsize = arg1;
5342 uint16_t *target_grouplist;
5346 grouplist = alloca(gidsetsize * sizeof(gid_t));
5347 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
5348 if (!target_grouplist) {
5349 ret = -TARGET_EFAULT;
5352 for(i = 0;i < gidsetsize; i++)
5353 grouplist[i] = tswap16(target_grouplist[i]);
5354 unlock_user(target_grouplist, arg2, 0);
5355 ret = get_errno(setgroups(gidsetsize, grouplist));
5358 case TARGET_NR_fchown:
5359 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
5361 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
5362 case TARGET_NR_fchownat:
5363 if (!(p = lock_user_string(arg2)))
5365 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
5366 unlock_user(p, arg2, 0);
5369 #ifdef TARGET_NR_setresuid
5370 case TARGET_NR_setresuid:
5371 ret = get_errno(setresuid(low2highuid(arg1),
5373 low2highuid(arg3)));
5376 #ifdef TARGET_NR_getresuid
5377 case TARGET_NR_getresuid:
5379 uid_t ruid, euid, suid;
5380 ret = get_errno(getresuid(&ruid, &euid, &suid));
5381 if (!is_error(ret)) {
5382 if (put_user_u16(high2lowuid(ruid), arg1)
5383 || put_user_u16(high2lowuid(euid), arg2)
5384 || put_user_u16(high2lowuid(suid), arg3))
5390 #ifdef TARGET_NR_setresgid
5391 case TARGET_NR_setresgid:
5392 ret = get_errno(setresgid(low2highgid(arg1),
5394 low2highgid(arg3)));
5397 #ifdef TARGET_NR_getresgid
5398 case TARGET_NR_getresgid:
5400 gid_t rgid, egid, sgid;
5401 ret = get_errno(getresgid(&rgid, &egid, &sgid));
5402 if (!is_error(ret)) {
5403 if (put_user_u16(high2lowgid(rgid), arg1)
5404 || put_user_u16(high2lowgid(egid), arg2)
5405 || put_user_u16(high2lowgid(sgid), arg3))
5411 case TARGET_NR_chown:
5412 if (!(p = lock_user_string(arg1)))
5414 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
5415 unlock_user(p, arg1, 0);
5417 case TARGET_NR_setuid:
5418 ret = get_errno(setuid(low2highuid(arg1)));
5420 case TARGET_NR_setgid:
5421 ret = get_errno(setgid(low2highgid(arg1)));
5423 case TARGET_NR_setfsuid:
5424 ret = get_errno(setfsuid(arg1));
5426 case TARGET_NR_setfsgid:
5427 ret = get_errno(setfsgid(arg1));
5429 #endif /* USE_UID16 */
5431 #ifdef TARGET_NR_lchown32
5432 case TARGET_NR_lchown32:
5433 if (!(p = lock_user_string(arg1)))
5435 ret = get_errno(lchown(p, arg2, arg3));
5436 unlock_user(p, arg1, 0);
5439 #ifdef TARGET_NR_getuid32
5440 case TARGET_NR_getuid32:
5441 ret = get_errno(getuid());
5444 #ifdef TARGET_NR_getgid32
5445 case TARGET_NR_getgid32:
5446 ret = get_errno(getgid());
5449 #ifdef TARGET_NR_geteuid32
5450 case TARGET_NR_geteuid32:
5451 ret = get_errno(geteuid());
5454 #ifdef TARGET_NR_getegid32
5455 case TARGET_NR_getegid32:
5456 ret = get_errno(getegid());
5459 #ifdef TARGET_NR_setreuid32
5460 case TARGET_NR_setreuid32:
5461 ret = get_errno(setreuid(arg1, arg2));
5464 #ifdef TARGET_NR_setregid32
5465 case TARGET_NR_setregid32:
5466 ret = get_errno(setregid(arg1, arg2));
5469 #ifdef TARGET_NR_getgroups32
5470 case TARGET_NR_getgroups32:
5472 int gidsetsize = arg1;
5473 uint32_t *target_grouplist;
5477 grouplist = alloca(gidsetsize * sizeof(gid_t));
5478 ret = get_errno(getgroups(gidsetsize, grouplist));
5479 if (gidsetsize == 0)
5481 if (!is_error(ret)) {
5482 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
5483 if (!target_grouplist) {
5484 ret = -TARGET_EFAULT;
5487 for(i = 0;i < ret; i++)
5488 target_grouplist[i] = tswap32(grouplist[i]);
5489 unlock_user(target_grouplist, arg2, gidsetsize * 4);
5494 #ifdef TARGET_NR_setgroups32
5495 case TARGET_NR_setgroups32:
5497 int gidsetsize = arg1;
5498 uint32_t *target_grouplist;
5502 grouplist = alloca(gidsetsize * sizeof(gid_t));
5503 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
5504 if (!target_grouplist) {
5505 ret = -TARGET_EFAULT;
5508 for(i = 0;i < gidsetsize; i++)
5509 grouplist[i] = tswap32(target_grouplist[i]);
5510 unlock_user(target_grouplist, arg2, 0);
5511 ret = get_errno(setgroups(gidsetsize, grouplist));
5515 #ifdef TARGET_NR_fchown32
5516 case TARGET_NR_fchown32:
5517 ret = get_errno(fchown(arg1, arg2, arg3));
5520 #ifdef TARGET_NR_setresuid32
5521 case TARGET_NR_setresuid32:
5522 ret = get_errno(setresuid(arg1, arg2, arg3));
5525 #ifdef TARGET_NR_getresuid32
5526 case TARGET_NR_getresuid32:
5528 uid_t ruid, euid, suid;
5529 ret = get_errno(getresuid(&ruid, &euid, &suid));
5530 if (!is_error(ret)) {
5531 if (put_user_u32(ruid, arg1)
5532 || put_user_u32(euid, arg2)
5533 || put_user_u32(suid, arg3))
5539 #ifdef TARGET_NR_setresgid32
5540 case TARGET_NR_setresgid32:
5541 ret = get_errno(setresgid(arg1, arg2, arg3));
5544 #ifdef TARGET_NR_getresgid32
5545 case TARGET_NR_getresgid32:
5547 gid_t rgid, egid, sgid;
5548 ret = get_errno(getresgid(&rgid, &egid, &sgid));
5549 if (!is_error(ret)) {
5550 if (put_user_u32(rgid, arg1)
5551 || put_user_u32(egid, arg2)
5552 || put_user_u32(sgid, arg3))
5558 #ifdef TARGET_NR_chown32
5559 case TARGET_NR_chown32:
5560 if (!(p = lock_user_string(arg1)))
5562 ret = get_errno(chown(p, arg2, arg3));
5563 unlock_user(p, arg1, 0);
5566 #ifdef TARGET_NR_setuid32
5567 case TARGET_NR_setuid32:
5568 ret = get_errno(setuid(arg1));
5571 #ifdef TARGET_NR_setgid32
5572 case TARGET_NR_setgid32:
5573 ret = get_errno(setgid(arg1));
5576 #ifdef TARGET_NR_setfsuid32
5577 case TARGET_NR_setfsuid32:
5578 ret = get_errno(setfsuid(arg1));
5581 #ifdef TARGET_NR_setfsgid32
5582 case TARGET_NR_setfsgid32:
5583 ret = get_errno(setfsgid(arg1));
5587 case TARGET_NR_pivot_root:
5589 #ifdef TARGET_NR_mincore
5590 case TARGET_NR_mincore:
5593 ret = -TARGET_EFAULT;
5594 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
5596 if (!(p = lock_user_string(arg3)))
5598 ret = get_errno(mincore(a, arg2, p));
5599 unlock_user(p, arg3, ret);
5601 unlock_user(a, arg1, 0);
5605 #ifdef TARGET_NR_arm_fadvise64_64
5606 case TARGET_NR_arm_fadvise64_64:
5609 * arm_fadvise64_64 looks like fadvise64_64 but
5610 * with different argument order
5618 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64)
5619 #ifdef TARGET_NR_fadvise64_64
5620 case TARGET_NR_fadvise64_64:
5622 /* This is a hint, so ignoring and returning success is ok. */
5626 #ifdef TARGET_NR_madvise
5627 case TARGET_NR_madvise:
5628 /* A straight passthrough may not be safe because qemu sometimes
5629 turns private file-backed mappings into anonymous mappings.
5630 This will break MADV_DONTNEED.
5631 This is a hint, so ignoring and returning success is ok. */
5635 #if TARGET_ABI_BITS == 32
5636 case TARGET_NR_fcntl64:
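/* On 32-bit ABIs the F_GETLK64/F_SETLK64/F_SETLKW64 commands carry a
   struct flock64 whose layout differs between the generic target ABI
   and ARM EABI (the EABI variant aligns its 64-bit fields
   differently), so both layouts are converted to and from the host
   struct flock here; every other command is forwarded to do_fcntl. */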
5640 struct target_flock64 *target_fl;
5642 struct target_eabi_flock64 *target_efl;
5646 case TARGET_F_GETLK64:
5649 case TARGET_F_SETLK64:
5652 case TARGET_F_SETLKW64:
5661 case TARGET_F_GETLK64:
5663 if (((CPUARMState *)cpu_env)->eabi) {
5664 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
5666 fl.l_type = tswap16(target_efl->l_type);
5667 fl.l_whence = tswap16(target_efl->l_whence);
5668 fl.l_start = tswap64(target_efl->l_start);
5669 fl.l_len = tswap64(target_efl->l_len);
5670 fl.l_pid = tswapl(target_efl->l_pid);
5671 unlock_user_struct(target_efl, arg3, 0);
5675 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
5677 fl.l_type = tswap16(target_fl->l_type);
5678 fl.l_whence = tswap16(target_fl->l_whence);
5679 fl.l_start = tswap64(target_fl->l_start);
5680 fl.l_len = tswap64(target_fl->l_len);
5681 fl.l_pid = tswapl(target_fl->l_pid);
5682 unlock_user_struct(target_fl, arg3, 0);
5684 ret = get_errno(fcntl(arg1, cmd, &fl));
5687 if (((CPUARMState *)cpu_env)->eabi) {
5688 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
5690 target_efl->l_type = tswap16(fl.l_type);
5691 target_efl->l_whence = tswap16(fl.l_whence);
5692 target_efl->l_start = tswap64(fl.l_start);
5693 target_efl->l_len = tswap64(fl.l_len);
5694 target_efl->l_pid = tswapl(fl.l_pid);
5695 unlock_user_struct(target_efl, arg3, 1);
5699 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
5701 target_fl->l_type = tswap16(fl.l_type);
5702 target_fl->l_whence = tswap16(fl.l_whence);
5703 target_fl->l_start = tswap64(fl.l_start);
5704 target_fl->l_len = tswap64(fl.l_len);
5705 target_fl->l_pid = tswapl(fl.l_pid);
5706 unlock_user_struct(target_fl, arg3, 1);
5711 case TARGET_F_SETLK64:
5712 case TARGET_F_SETLKW64:
5714 if (((CPUARMState *)cpu_env)->eabi) {
5715 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
5717 fl.l_type = tswap16(target_efl->l_type);
5718 fl.l_whence = tswap16(target_efl->l_whence);
5719 fl.l_start = tswap64(target_efl->l_start);
5720 fl.l_len = tswap64(target_efl->l_len);
5721 fl.l_pid = tswapl(target_efl->l_pid);
5722 unlock_user_struct(target_efl, arg3, 0);
5726 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
5728 fl.l_type = tswap16(target_fl->l_type);
5729 fl.l_whence = tswap16(target_fl->l_whence);
5730 fl.l_start = tswap64(target_fl->l_start);
5731 fl.l_len = tswap64(target_fl->l_len);
5732 fl.l_pid = tswapl(target_fl->l_pid);
5733 unlock_user_struct(target_fl, arg3, 0);
5735 ret = get_errno(fcntl(arg1, cmd, &fl));
5738 ret = do_fcntl(arg1, cmd, arg3);
5744 #ifdef TARGET_NR_cacheflush
5745 case TARGET_NR_cacheflush:
5746 /* self-modifying code is handled automatically, so nothing needed */
5750 #ifdef TARGET_NR_security
5751 case TARGET_NR_security:
5754 #ifdef TARGET_NR_getpagesize
5755 case TARGET_NR_getpagesize:
5756 ret = TARGET_PAGE_SIZE;
5759 case TARGET_NR_gettid:
5760 ret = get_errno(gettid());
5762 #ifdef TARGET_NR_readahead
5763 case TARGET_NR_readahead:
5766 #ifdef TARGET_NR_setxattr
5767 case TARGET_NR_setxattr:
5768 case TARGET_NR_lsetxattr:
5769 case TARGET_NR_fsetxattr:
5770 case TARGET_NR_getxattr:
5771 case TARGET_NR_lgetxattr:
5772 case TARGET_NR_fgetxattr:
5773 case TARGET_NR_listxattr:
5774 case TARGET_NR_llistxattr:
5775 case TARGET_NR_flistxattr:
5776 case TARGET_NR_removexattr:
5777 case TARGET_NR_lremovexattr:
5778 case TARGET_NR_fremovexattr:
5779 goto unimplemented_nowarn;
5781 #ifdef TARGET_NR_set_thread_area
5782 case TARGET_NR_set_thread_area:
5783 #if defined(TARGET_MIPS)
5784 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
5787 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
5788 ret = do_set_thread_area(cpu_env, arg1);
5791 goto unimplemented_nowarn;
5794 #ifdef TARGET_NR_get_thread_area
5795 case TARGET_NR_get_thread_area:
5796 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5797 ret = do_get_thread_area(cpu_env, arg1);
5799 goto unimplemented_nowarn;
5802 #ifdef TARGET_NR_getdomainname
5803 case TARGET_NR_getdomainname:
5804 goto unimplemented_nowarn;
5807 #ifdef TARGET_NR_clock_gettime
5808 case TARGET_NR_clock_gettime:
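/* clock_gettime (and clock_getres below) call the host directly and
   only convert the resulting struct timespec to the target layout
   with host_to_target_timespec before storing it at arg2. */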
5811 ret = get_errno(clock_gettime(arg1, &ts));
5812 if (!is_error(ret)) {
5813 host_to_target_timespec(arg2, &ts);
5818 #ifdef TARGET_NR_clock_getres
5819 case TARGET_NR_clock_getres:
5822 ret = get_errno(clock_getres(arg1, &ts));
5823 if (!is_error(ret)) {
5824 host_to_target_timespec(arg2, &ts);
5829 #ifdef TARGET_NR_clock_nanosleep
5830 case TARGET_NR_clock_nanosleep:
5833 target_to_host_timespec(&ts, arg3);
5834 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
5836 host_to_target_timespec(arg4, &ts);
5841 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
5842 case TARGET_NR_set_tid_address:
5843 ret = get_errno(set_tid_address((int *)g2h(arg1)));
5847 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
5848 case TARGET_NR_tkill:
5849 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
5853 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
5854 case TARGET_NR_tgkill:
5855 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
5856 target_to_host_signal(arg3)));
5860 #ifdef TARGET_NR_set_robust_list
5861 case TARGET_NR_set_robust_list:
5862 goto unimplemented_nowarn;
5865 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
5866 case TARGET_NR_utimensat:
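/* The two guest timespecs at arg3 are converted into a host
   struct timespec[2]; a NULL pathname in arg2 is passed through as
   NULL so the times apply to the file referred to by the descriptor
   in arg1. */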
5868 struct timespec ts[2];
5869 target_to_host_timespec(ts, arg3);
5870 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
5872 ret = get_errno(sys_utimensat(arg1, NULL, ts, arg4));
5874 if (!(p = lock_user_string(arg2))) {
5875 ret = -TARGET_EFAULT;
5878 ret = get_errno(sys_utimensat(arg1, path(p), ts, arg4));
5879 unlock_user(p, arg2, 0);
5884 #if defined(USE_NPTL)
5885 case TARGET_NR_futex:
5886 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
5889 #ifdef TARGET_NR_inotify_init
5890 case TARGET_NR_inotify_init:
5891 ret = get_errno(sys_inotify_init());
5894 #ifdef TARGET_NR_inotify_add_watch
5895 case TARGET_NR_inotify_add_watch:
5896 p = lock_user_string(arg2);
5897 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
5898 unlock_user(p, arg2, 0);
5901 #ifdef TARGET_NR_inotify_rm_watch
5902 case TARGET_NR_inotify_rm_watch:
5903 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
5909 gemu_log("qemu: Unsupported syscall: %d\n", num);
5910 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
5911 unimplemented_nowarn:
5913 ret = -TARGET_ENOSYS;
5918 gemu_log(" = %ld\n", ret);
5921 print_syscall_ret(num, ret);
5924 ret = -TARGET_EFAULT;