//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
-#include <qemu-common.h>
+#include <linux/wireless.h>
+#include "qemu-common.h"
#ifdef TARGET_GPROF
#include <sys/gmon.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
+#ifdef CONFIG_EPOLL
+#include <sys/epoll.h>
+#endif
+#ifdef CONFIG_ATTR
+#include <attr/xattr.h>
+#endif
#define termios host_termios
#define winsize host_winsize
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
+#if defined(CONFIG_FIEMAP)
+#include <linux/fiemap.h>
+#endif
#include <linux/fb.h>
#include <linux/vt.h>
#include "linux_loop.h"
#include "cpu-uname.h"
#include "qemu.h"
-#include "qemu-common.h"
#if defined(CONFIG_USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
-#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
+#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
+ defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
_syscall2(int, sys_getpriority, int, which, int, who);
-#if defined(TARGET_NR__llseek) && !defined (__x86_64__)
+#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
loff_t *, res, uint, wh);
#endif
const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
+#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
+_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
+ unsigned long *, user_mask_ptr);
+#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
+_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
+ unsigned long *, user_mask_ptr);
static bitmask_transtbl fcntl_flags_tbl[] = {
{ TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
* struct linux kernel uses).
*/
- bzero(buf, sizeof (*buf));
+ memset(buf, 0, sizeof(*buf));
COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
return (fchmodat(dirfd, pathname, mode, 0));
}
#endif
-#if defined(TARGET_NR_fchownat) && defined(USE_UID16)
+#if defined(TARGET_NR_fchownat)
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
gid_t group, int flags)
{
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
_syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
#endif
-#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
+#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
_syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
uid_t,owner,gid_t,group,int,flags)
#endif
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
+#if defined(TARGET_NR_ppoll)
+#ifndef __NR_ppoll
+# define __NR_ppoll -1
+#endif
+#define __NR_sys_ppoll __NR_ppoll
+_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
+ struct timespec *, timeout, const __sigset_t *, sigmask,
+ size_t, sigsetsize)
+#endif
+
+#if defined(TARGET_NR_pselect6)
+#ifndef __NR_pselect6
+# define __NR_pselect6 -1
+#endif
+#define __NR_sys_pselect6 __NR_pselect6
+_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
+ fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
+#endif
+
+#if defined(TARGET_NR_prlimit64)
+#ifndef __NR_prlimit64
+# define __NR_prlimit64 -1
+#endif
+#define __NR_sys_prlimit64 __NR_prlimit64
+/* The glibc rlimit structure may not be the one used by the underlying syscall */
+struct host_rlimit64 {
+ uint64_t rlim_cur;
+ uint64_t rlim_max;
+};
+_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
+ const struct host_rlimit64 *, new_limit,
+ struct host_rlimit64 *, old_limit)
+#endif
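+/* Background note: the raw prlimit64 syscall always exchanges two 64-bit
+ * fields ({ rlim_cur, rlim_max } as uint64_t), whatever width the host
+ * libc's rlim_t happens to be, hence the hand-written host_rlimit64 above. */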
extern int personality(int);
extern int flock(int, int);
extern int setfsgid(int);
extern int setgroups(int, gid_t *);
+/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
+#ifdef TARGET_ARM
+static inline int regpairs_aligned(void *cpu_env) {
+ return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
+}
+#elif defined(TARGET_MIPS)
+static inline int regpairs_aligned(void *cpu_env) { return 1; }
+#else
+static inline int regpairs_aligned(void *cpu_env) { return 0; }
+#endif
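+
+/* Illustrative sketch of how callers use regpairs_aligned() (it mirrors the
+ * truncate64/ftruncate64 changes below): when a 64-bit value arrives in a
+ * 32-bit register pair, the odd slot is skipped first, e.g.
+ *
+ *     if (regpairs_aligned(cpu_env)) {
+ *         arg2 = arg3;
+ *         arg3 = arg4;
+ *     }
+ *     off = target_offset64(arg2, arg3);
+ */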
+
#define ERRNO_TABLE_SIZE 1200
/* target_to_host_errno_table[] is initialized from
static abi_ulong target_brk;
static abi_ulong target_original_brk;
+static abi_ulong brk_page;
void target_set_brk(abi_ulong new_brk)
{
target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
+ brk_page = HOST_PAGE_ALIGN(target_brk);
}
+//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
+#define DEBUGF_BRK(message, args...)
+
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
- abi_ulong brk_page;
abi_long mapped_addr;
int new_alloc_size;
- if (!new_brk)
+ DEBUGF_BRK("do_brk(%#010x) -> ", new_brk);
+
+ if (!new_brk) {
+ DEBUGF_BRK("%#010x (!new_brk)\n", target_brk);
return target_brk;
- if (new_brk < target_original_brk)
+ }
+ if (new_brk < target_original_brk) {
+ DEBUGF_BRK("%#010x (new_brk < target_original_brk)\n", target_brk);
return target_brk;
+ }
- brk_page = HOST_PAGE_ALIGN(target_brk);
-
- /* If the new brk is less than this, set it and we're done... */
- if (new_brk < brk_page) {
+    /* If the new brk is less than the highest page reserved for the
+ * target heap allocation, set it and we're almost done... */
+ if (new_brk <= brk_page) {
+ /* Heap contents are initialized to zero, as for anonymous
+ * mapped pages. */
+ if (new_brk > target_brk) {
+ memset(g2h(target_brk), 0, new_brk - target_brk);
+ }
target_brk = new_brk;
+ DEBUGF_BRK("%#010x (new_brk <= brk_page)\n", target_brk);
return target_brk;
}
- /* We need to allocate more memory after the brk... */
- new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
+ /* We need to allocate more memory after the brk... Note that
+ * we don't use MAP_FIXED because that will map over the top of
+ * any existing mapping (like the one with the host libc or qemu
+ * itself); instead we treat "mapped but at wrong address" as
+ * a failure and unmap again.
+ */
+ new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
PROT_READ|PROT_WRITE,
- MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));
+ MAP_ANON|MAP_PRIVATE, 0, 0));
+
+ if (mapped_addr == brk_page) {
+ /* Heap contents are initialized to zero, as for anonymous
+ * mapped pages. Technically the new pages are already
+ * initialized to zero since they *are* anonymous mapped
+ * pages, however we have to take care with the contents that
+ * come from the remaining part of the previous page: it may
+         * contain garbage data due to a previous heap usage (grown
+         * then shrunk). */
+ memset(g2h(target_brk), 0, brk_page - target_brk);
+
+ target_brk = new_brk;
+ brk_page = HOST_PAGE_ALIGN(target_brk);
+ DEBUGF_BRK("%#010x (mapped_addr == brk_page)\n", target_brk);
+ return target_brk;
+ } else if (mapped_addr != -1) {
+ /* Mapped but at wrong address, meaning there wasn't actually
+ * enough space for this brk.
+ */
+ target_munmap(mapped_addr, new_alloc_size);
+ mapped_addr = -1;
+ DEBUGF_BRK("%#010x (mapped_addr != -1)\n", target_brk);
+ }
+ else {
+ DEBUGF_BRK("%#010x (otherwise)\n", target_brk);
+ }
#if defined(TARGET_ALPHA)
/* We (partially) emulate OSF/1 on Alpha, which requires we
return a proper errno, not an unchanged brk value. */
- if (is_error(mapped_addr)) {
- return -TARGET_ENOMEM;
- }
+ return -TARGET_ENOMEM;
#endif
-
- if (!is_error(mapped_addr)) {
- target_brk = new_brk;
- }
+ /* For everything else, return the previous break. */
return target_brk;
}
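+
+/* Worked example for do_brk() above (illustrative numbers, 4 KiB host
+ * pages): with target_brk = 0x10000500 and brk_page = 0x10001000, a
+ * request for new_brk = 0x10000800 only zeroes [0x10000500, 0x10000800)
+ * and moves target_brk; a request for new_brk = 0x10003000 instead maps
+ * 0x2000 bytes of anonymous memory at brk_page and, on success, advances
+ * brk_page to 0x10003000. */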
return 0;
}
+static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
+ abi_ulong target_fds_addr,
+ int n)
+{
+ if (target_fds_addr) {
+ if (copy_from_user_fdset(fds, target_fds_addr, n))
+ return -TARGET_EFAULT;
+ *fds_ptr = fds;
+ } else {
+ *fds_ptr = NULL;
+ }
+ return 0;
+}
+
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
const fd_set *fds,
int n)
if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
return -TARGET_EFAULT;
- target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
- target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
- target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
- target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
- target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
- target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
- target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
- target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
- target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
- target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
- target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
- target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
- target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
- target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
- target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
- target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
- target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
- target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
+ target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
+ target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
+ target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
+ target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
+ target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
+ target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
+ target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
+ target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
+ target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
+ target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
+ target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
+ target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
+ target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
+ target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
+ target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
+ target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
+ target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
+ target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
unlock_user_struct(target_rusage, target_addr, 1);
return 0;
}
-static inline rlim_t target_to_host_rlim(target_ulong target_rlim)
+static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
- if (target_rlim == TARGET_RLIM_INFINITY)
+ abi_ulong target_rlim_swap;
+ rlim_t result;
+
+ target_rlim_swap = tswapal(target_rlim);
+ if (target_rlim_swap == TARGET_RLIM_INFINITY)
return RLIM_INFINITY;
- else
- return tswapl(target_rlim);
+
+ result = target_rlim_swap;
+ if (target_rlim_swap != (rlim_t)result)
+ return RLIM_INFINITY;
+
+ return result;
}
-static inline target_ulong host_to_target_rlim(rlim_t rlim)
+static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
- if (rlim == RLIM_INFINITY || rlim != (target_long)rlim)
- return TARGET_RLIM_INFINITY;
+ abi_ulong target_rlim_swap;
+ abi_ulong result;
+
+ if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
+ target_rlim_swap = TARGET_RLIM_INFINITY;
else
- return tswapl(rlim);
+ target_rlim_swap = rlim;
+ result = tswapal(target_rlim_swap);
+
+ return result;
+}
+
+static inline int target_to_host_resource(int code)
+{
+ switch (code) {
+ case TARGET_RLIMIT_AS:
+ return RLIMIT_AS;
+ case TARGET_RLIMIT_CORE:
+ return RLIMIT_CORE;
+ case TARGET_RLIMIT_CPU:
+ return RLIMIT_CPU;
+ case TARGET_RLIMIT_DATA:
+ return RLIMIT_DATA;
+ case TARGET_RLIMIT_FSIZE:
+ return RLIMIT_FSIZE;
+ case TARGET_RLIMIT_LOCKS:
+ return RLIMIT_LOCKS;
+ case TARGET_RLIMIT_MEMLOCK:
+ return RLIMIT_MEMLOCK;
+ case TARGET_RLIMIT_MSGQUEUE:
+ return RLIMIT_MSGQUEUE;
+ case TARGET_RLIMIT_NICE:
+ return RLIMIT_NICE;
+ case TARGET_RLIMIT_NOFILE:
+ return RLIMIT_NOFILE;
+ case TARGET_RLIMIT_NPROC:
+ return RLIMIT_NPROC;
+ case TARGET_RLIMIT_RSS:
+ return RLIMIT_RSS;
+ case TARGET_RLIMIT_RTPRIO:
+ return RLIMIT_RTPRIO;
+ case TARGET_RLIMIT_SIGPENDING:
+ return RLIMIT_SIGPENDING;
+ case TARGET_RLIMIT_STACK:
+ return RLIMIT_STACK;
+ default:
+ return code;
+ }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
}
#endif
+#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
abi_ulong rfd_addr, abi_ulong wfd_addr,
struct timeval tv, *tv_ptr;
abi_long ret;
- if (rfd_addr) {
- if (copy_from_user_fdset(&rfds, rfd_addr, n))
- return -TARGET_EFAULT;
- rfds_ptr = &rfds;
- } else {
- rfds_ptr = NULL;
+ ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
+ if (ret) {
+ return ret;
}
- if (wfd_addr) {
- if (copy_from_user_fdset(&wfds, wfd_addr, n))
- return -TARGET_EFAULT;
- wfds_ptr = &wfds;
- } else {
- wfds_ptr = NULL;
+ ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
+ if (ret) {
+ return ret;
}
- if (efd_addr) {
- if (copy_from_user_fdset(&efds, efd_addr, n))
- return -TARGET_EFAULT;
- efds_ptr = &efds;
- } else {
- efds_ptr = NULL;
+ ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
+ if (ret) {
+ return ret;
}
if (target_tv_addr) {
return ret;
}
+#endif
static abi_long do_pipe2(int host_pipe[], int flags)
{
mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
if (len == sizeof(struct target_ip_mreqn))
- mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
+ mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
unlock_user(target_smreqn, target_addr, 0);
return 0;
struct target_cmsghdr *target_cmsg;
socklen_t space = 0;
- msg_controllen = tswapl(target_msgh->msg_controllen);
+ msg_controllen = tswapal(target_msgh->msg_controllen);
if (msg_controllen < sizeof (struct target_cmsghdr))
goto the_end;
- target_cmsg_addr = tswapl(target_msgh->msg_control);
+ target_cmsg_addr = tswapal(target_msgh->msg_control);
target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
if (!target_cmsg)
return -TARGET_EFAULT;
void *data = CMSG_DATA(cmsg);
void *target_data = TARGET_CMSG_DATA(target_cmsg);
- int len = tswapl(target_cmsg->cmsg_len)
+ int len = tswapal(target_cmsg->cmsg_len)
- TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
space += CMSG_SPACE(len);
struct target_cmsghdr *target_cmsg;
socklen_t space = 0;
- msg_controllen = tswapl(target_msgh->msg_controllen);
+ msg_controllen = tswapal(target_msgh->msg_controllen);
if (msg_controllen < sizeof (struct target_cmsghdr))
goto the_end;
- target_cmsg_addr = tswapl(target_msgh->msg_control);
+ target_cmsg_addr = tswapal(target_msgh->msg_control);
target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
if (!target_cmsg)
return -TARGET_EFAULT;
target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
- target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
+ target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
}
unlock_user(target_cmsg, target_cmsg_addr, space);
the_end:
- target_msgh->msg_controllen = tswapl(space);
+ target_msgh->msg_controllen = tswapal(space);
return 0;
}
break;
default:
unimplemented:
- gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
+ gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
ret = -TARGET_ENOPROTOOPT;
}
return ret;
switch(level) {
case TARGET_SOL_SOCKET:
- level = SOL_SOCKET;
- switch (optname) {
- case TARGET_SO_LINGER:
- case TARGET_SO_RCVTIMEO:
- case TARGET_SO_SNDTIMEO:
- case TARGET_SO_PEERCRED:
- case TARGET_SO_PEERNAME:
- /* These don't just return a single integer */
- goto unimplemented;
+ level = SOL_SOCKET;
+ switch (optname) {
+ /* These don't just return a single integer */
+ case TARGET_SO_LINGER:
+ case TARGET_SO_RCVTIMEO:
+ case TARGET_SO_SNDTIMEO:
+ case TARGET_SO_PEERCRED:
+ case TARGET_SO_PEERNAME:
+ goto unimplemented;
+ /* Options with 'int' argument. */
+ case TARGET_SO_DEBUG:
+ optname = SO_DEBUG;
+ goto int_case;
+ case TARGET_SO_REUSEADDR:
+ optname = SO_REUSEADDR;
+ goto int_case;
+ case TARGET_SO_TYPE:
+ optname = SO_TYPE;
+ goto int_case;
+ case TARGET_SO_ERROR:
+ optname = SO_ERROR;
+ goto int_case;
+ case TARGET_SO_DONTROUTE:
+ optname = SO_DONTROUTE;
+ goto int_case;
+ case TARGET_SO_BROADCAST:
+ optname = SO_BROADCAST;
+ goto int_case;
+ case TARGET_SO_SNDBUF:
+ optname = SO_SNDBUF;
+ goto int_case;
+ case TARGET_SO_RCVBUF:
+ optname = SO_RCVBUF;
+ goto int_case;
+ case TARGET_SO_KEEPALIVE:
+ optname = SO_KEEPALIVE;
+ goto int_case;
+ case TARGET_SO_OOBINLINE:
+ optname = SO_OOBINLINE;
+ goto int_case;
+ case TARGET_SO_NO_CHECK:
+ optname = SO_NO_CHECK;
+ goto int_case;
+ case TARGET_SO_PRIORITY:
+ optname = SO_PRIORITY;
+ goto int_case;
+#ifdef SO_BSDCOMPAT
+ case TARGET_SO_BSDCOMPAT:
+ optname = SO_BSDCOMPAT;
+ goto int_case;
+#endif
+ case TARGET_SO_PASSCRED:
+ optname = SO_PASSCRED;
+ goto int_case;
+ case TARGET_SO_TIMESTAMP:
+ optname = SO_TIMESTAMP;
+ goto int_case;
+ case TARGET_SO_RCVLOWAT:
+ optname = SO_RCVLOWAT;
+ goto int_case;
default:
goto int_case;
}
return -TARGET_EFAULT;
if (len < 0)
return -TARGET_EINVAL;
- lv = sizeof(int);
+ lv = sizeof(lv);
ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
if (ret < 0)
return ret;
} else {
if (put_user_u8(val, optval_addr))
return -TARGET_EFAULT;
- }
+ }
if (put_user_u32(len, optlen))
return -TARGET_EFAULT;
break;
return -TARGET_EFAULT;
if (len < 0)
return -TARGET_EINVAL;
- lv = sizeof(int);
+ lv = sizeof(lv);
ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
if (ret < 0)
return ret;
if (!target_vec)
return -TARGET_EFAULT;
for(i = 0;i < count; i++) {
- base = tswapl(target_vec[i].iov_base);
- vec[i].iov_len = tswapl(target_vec[i].iov_len);
+ base = tswapal(target_vec[i].iov_base);
+ vec[i].iov_len = tswapal(target_vec[i].iov_len);
if (vec[i].iov_len != 0) {
vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
/* Don't check lock_user return value. We must call writev even
return -TARGET_EFAULT;
for(i = 0;i < count; i++) {
if (target_vec[i].iov_base) {
- base = tswapl(target_vec[i].iov_base);
+ base = tswapal(target_vec[i].iov_base);
unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
}
}
void *addr;
abi_long ret;
- if (addrlen < 0)
+ if ((int)addrlen < 0) {
return -TARGET_EINVAL;
+ }
addr = alloca(addrlen+1);
void *addr;
abi_long ret;
- if (addrlen < 0)
+ if ((int)addrlen < 0) {
return -TARGET_EINVAL;
+ }
addr = alloca(addrlen);
if (msgp->msg_name) {
msg.msg_namelen = tswap32(msgp->msg_namelen);
msg.msg_name = alloca(msg.msg_namelen);
- ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
+ ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
msg.msg_namelen);
if (ret) {
unlock_user_struct(msgp, target_msg, send ? 0 : 1);
msg.msg_name = NULL;
msg.msg_namelen = 0;
}
- msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
+ msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
msg.msg_control = alloca(msg.msg_controllen);
msg.msg_flags = tswap32(msgp->msg_flags);
- count = tswapl(msgp->msg_iovlen);
+ count = tswapal(msgp->msg_iovlen);
vec = alloca(count * sizeof(struct iovec));
- target_vec = tswapl(msgp->msg_iov);
+ target_vec = tswapal(msgp->msg_iov);
lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
msg.msg_iovlen = count;
msg.msg_iov = vec;
if (get_user_u32(addrlen, target_addrlen_addr))
return -TARGET_EINVAL;
- if (addrlen < 0)
+ if ((int)addrlen < 0) {
return -TARGET_EINVAL;
+ }
if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
return -TARGET_EINVAL;
if (get_user_u32(addrlen, target_addrlen_addr))
return -TARGET_EFAULT;
- if (addrlen < 0)
+ if ((int)addrlen < 0) {
return -TARGET_EINVAL;
+ }
if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
return -TARGET_EFAULT;
if (get_user_u32(addrlen, target_addrlen_addr))
return -TARGET_EFAULT;
- if (addrlen < 0)
+ if ((int)addrlen < 0) {
return -TARGET_EINVAL;
+ }
if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
return -TARGET_EFAULT;
void *host_msg;
abi_long ret;
- if (addrlen < 0)
+ if ((int)addrlen < 0) {
return -TARGET_EINVAL;
+ }
host_msg = lock_user(VERIFY_READ, msg, len, 1);
if (!host_msg)
ret = -TARGET_EFAULT;
goto fail;
}
- if (addrlen < 0) {
+ if ((int)addrlen < 0) {
ret = -TARGET_EINVAL;
goto fail;
}
ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
} else {
addr = NULL; /* To keep compiler quiet. */
- ret = get_errno(recv(fd, host_msg, len, flags));
+ ret = get_errno(qemu_recv(fd, host_msg, len, flags));
}
if (!is_error(ret)) {
if (target_addr) {
if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
return -TARGET_EFAULT;
target_ip = &(target_sd->sem_perm);
- host_ip->__key = tswapl(target_ip->__key);
- host_ip->uid = tswapl(target_ip->uid);
- host_ip->gid = tswapl(target_ip->gid);
- host_ip->cuid = tswapl(target_ip->cuid);
- host_ip->cgid = tswapl(target_ip->cgid);
- host_ip->mode = tswapl(target_ip->mode);
+ host_ip->__key = tswapal(target_ip->__key);
+ host_ip->uid = tswapal(target_ip->uid);
+ host_ip->gid = tswapal(target_ip->gid);
+ host_ip->cuid = tswapal(target_ip->cuid);
+ host_ip->cgid = tswapal(target_ip->cgid);
+ host_ip->mode = tswap16(target_ip->mode);
unlock_user_struct(target_sd, target_addr, 0);
return 0;
}
if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
return -TARGET_EFAULT;
target_ip = &(target_sd->sem_perm);
- target_ip->__key = tswapl(host_ip->__key);
- target_ip->uid = tswapl(host_ip->uid);
- target_ip->gid = tswapl(host_ip->gid);
- target_ip->cuid = tswapl(host_ip->cuid);
- target_ip->cgid = tswapl(host_ip->cgid);
- target_ip->mode = tswapl(host_ip->mode);
+ target_ip->__key = tswapal(host_ip->__key);
+ target_ip->uid = tswapal(host_ip->uid);
+ target_ip->gid = tswapal(host_ip->gid);
+ target_ip->cuid = tswapal(host_ip->cuid);
+ target_ip->cgid = tswapal(host_ip->cgid);
+ target_ip->mode = tswap16(host_ip->mode);
unlock_user_struct(target_sd, target_addr, 1);
return 0;
}
return -TARGET_EFAULT;
if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
return -TARGET_EFAULT;
- host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
- host_sd->sem_otime = tswapl(target_sd->sem_otime);
- host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
+ host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
+ host_sd->sem_otime = tswapal(target_sd->sem_otime);
+ host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
unlock_user_struct(target_sd, target_addr, 0);
return 0;
}
return -TARGET_EFAULT;
if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
return -TARGET_EFAULT;;
- target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
- target_sd->sem_otime = tswapl(host_sd->sem_otime);
- target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
+ target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
+ target_sd->sem_otime = tswapal(host_sd->sem_otime);
+ target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
unlock_user_struct(target_sd, target_addr, 1);
return 0;
}
switch( cmd ) {
case GETVAL:
case SETVAL:
- arg.val = tswapl(target_su.val);
+ arg.val = tswap32(target_su.val);
ret = get_errno(semctl(semid, semnum, cmd, arg));
- target_su.val = tswapl(arg.val);
+ target_su.val = tswap32(arg.val);
break;
case GETALL:
case SETALL:
return -TARGET_EFAULT;
if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
return -TARGET_EFAULT;
- host_md->msg_stime = tswapl(target_md->msg_stime);
- host_md->msg_rtime = tswapl(target_md->msg_rtime);
- host_md->msg_ctime = tswapl(target_md->msg_ctime);
- host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
- host_md->msg_qnum = tswapl(target_md->msg_qnum);
- host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
- host_md->msg_lspid = tswapl(target_md->msg_lspid);
- host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
+ host_md->msg_stime = tswapal(target_md->msg_stime);
+ host_md->msg_rtime = tswapal(target_md->msg_rtime);
+ host_md->msg_ctime = tswapal(target_md->msg_ctime);
+ host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
+ host_md->msg_qnum = tswapal(target_md->msg_qnum);
+ host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
+ host_md->msg_lspid = tswapal(target_md->msg_lspid);
+ host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
unlock_user_struct(target_md, target_addr, 0);
return 0;
}
return -TARGET_EFAULT;
if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
return -TARGET_EFAULT;
- target_md->msg_stime = tswapl(host_md->msg_stime);
- target_md->msg_rtime = tswapl(host_md->msg_rtime);
- target_md->msg_ctime = tswapl(host_md->msg_ctime);
- target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
- target_md->msg_qnum = tswapl(host_md->msg_qnum);
- target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
- target_md->msg_lspid = tswapl(host_md->msg_lspid);
- target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
+ target_md->msg_stime = tswapal(host_md->msg_stime);
+ target_md->msg_rtime = tswapal(host_md->msg_rtime);
+ target_md->msg_ctime = tswapal(host_md->msg_ctime);
+ target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
+ target_md->msg_qnum = tswapal(host_md->msg_qnum);
+ target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
+ target_md->msg_lspid = tswapal(host_md->msg_lspid);
+ target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
unlock_user_struct(target_md, target_addr, 1);
return 0;
}
if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
return -TARGET_EFAULT;
host_mb = malloc(msgsz+sizeof(long));
- host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
+ host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
memcpy(host_mb->mtext, target_mb->mtext, msgsz);
ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
free(host_mb);
return -TARGET_EFAULT;
host_mb = malloc(msgsz+sizeof(long));
- ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
+ ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg));
if (ret > 0) {
abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
unlock_user(target_mtext, target_mtext_addr, ret);
}
- target_mb->mtype = tswapl(host_mb->mtype);
+ target_mb->mtype = tswapal(host_mb->mtype);
free(host_mb);
end:
#endif
/* kernel structure types definitions */
-#define IFNAMSIZ 16
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
#undef STRUCT
#undef STRUCT_SPECIAL
-typedef struct IOCTLEntry {
+typedef struct IOCTLEntry IOCTLEntry;
+
+typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
+ int fd, abi_long cmd, abi_long arg);
+
+struct IOCTLEntry {
unsigned int target_cmd;
unsigned int host_cmd;
const char *name;
int access;
+ do_ioctl_fn *do_ioctl;
const argtype arg_type[5];
-} IOCTLEntry;
+};
#define IOC_R 0x0001
#define IOC_W 0x0002
#define MAX_STRUCT_SIZE 4096
+#ifdef CONFIG_FIEMAP
+/* So fiemap access checks don't overflow on 32 bit systems.
+ * This is very slightly smaller than the limit imposed by
+ * the underlying kernel.
+ */
+#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
+ / sizeof(struct fiemap_extent))
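+/* Rough numbers, assuming the usual linux/fiemap.h layouts (32-byte
+ * struct fiemap, 56-byte struct fiemap_extent): on a 32-bit host this
+ * caps fm_extent_count at about (2^32 - 32) / 56, i.e. roughly 76
+ * million extents. */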
+
+static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
+ int fd, abi_long cmd, abi_long arg)
+{
+ /* The parameter for this ioctl is a struct fiemap followed
+ * by an array of struct fiemap_extent whose size is set
+ * in fiemap->fm_extent_count. The array is filled in by the
+ * ioctl.
+ */
+ int target_size_in, target_size_out;
+ struct fiemap *fm;
+ const argtype *arg_type = ie->arg_type;
+ const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
+ void *argptr, *p;
+ abi_long ret;
+ int i, extent_size = thunk_type_size(extent_arg_type, 0);
+ uint32_t outbufsz;
+ int free_fm = 0;
+
+ assert(arg_type[0] == TYPE_PTR);
+ assert(ie->access == IOC_RW);
+ arg_type++;
+ target_size_in = thunk_type_size(arg_type, 0);
+ argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
+ if (!argptr) {
+ return -TARGET_EFAULT;
+ }
+ thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
+ unlock_user(argptr, arg, 0);
+ fm = (struct fiemap *)buf_temp;
+ if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
+ return -TARGET_EINVAL;
+ }
+
+ outbufsz = sizeof (*fm) +
+ (sizeof(struct fiemap_extent) * fm->fm_extent_count);
+
+ if (outbufsz > MAX_STRUCT_SIZE) {
+ /* We can't fit all the extents into the fixed size buffer.
+ * Allocate one that is large enough and use it instead.
+ */
+ fm = malloc(outbufsz);
+ if (!fm) {
+ return -TARGET_ENOMEM;
+ }
+ memcpy(fm, buf_temp, sizeof(struct fiemap));
+ free_fm = 1;
+ }
+ ret = get_errno(ioctl(fd, ie->host_cmd, fm));
+ if (!is_error(ret)) {
+ target_size_out = target_size_in;
+ /* An extent_count of 0 means we were only counting the extents
+ * so there are no structs to copy
+ */
+ if (fm->fm_extent_count != 0) {
+ target_size_out += fm->fm_mapped_extents * extent_size;
+ }
+ argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
+ if (!argptr) {
+ ret = -TARGET_EFAULT;
+ } else {
+ /* Convert the struct fiemap */
+ thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
+ if (fm->fm_extent_count != 0) {
+ p = argptr + target_size_in;
+ /* ...and then all the struct fiemap_extents */
+ for (i = 0; i < fm->fm_mapped_extents; i++) {
+ thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
+ THUNK_TARGET);
+ p += extent_size;
+ }
+ }
+ unlock_user(argptr, arg, target_size_out);
+ }
+ }
+ if (free_fm) {
+ free(fm);
+ }
+ return ret;
+}
+#endif
+
+static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
+ int fd, abi_long cmd, abi_long arg)
+{
+ const argtype *arg_type = ie->arg_type;
+ int target_size;
+ void *argptr;
+ int ret;
+ struct ifconf *host_ifconf;
+ uint32_t outbufsz;
+ const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
+ int target_ifreq_size;
+ int nb_ifreq;
+ int free_buf = 0;
+ int i;
+ int target_ifc_len;
+ abi_long target_ifc_buf;
+ int host_ifc_len;
+ char *host_ifc_buf;
+
+ assert(arg_type[0] == TYPE_PTR);
+ assert(ie->access == IOC_RW);
+
+ arg_type++;
+ target_size = thunk_type_size(arg_type, 0);
+
+ argptr = lock_user(VERIFY_READ, arg, target_size, 1);
+ if (!argptr)
+ return -TARGET_EFAULT;
+ thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
+ unlock_user(argptr, arg, 0);
+
+ host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
+ target_ifc_len = host_ifconf->ifc_len;
+ target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
+
+ target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
+ nb_ifreq = target_ifc_len / target_ifreq_size;
+ host_ifc_len = nb_ifreq * sizeof(struct ifreq);
+
+ outbufsz = sizeof(*host_ifconf) + host_ifc_len;
+ if (outbufsz > MAX_STRUCT_SIZE) {
+        /* We can't fit all the ifreq entries into the fixed size buffer.
+ * Allocate one that is large enough and use it instead.
+ */
+ host_ifconf = malloc(outbufsz);
+ if (!host_ifconf) {
+ return -TARGET_ENOMEM;
+ }
+ memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
+ free_buf = 1;
+ }
+ host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
+
+ host_ifconf->ifc_len = host_ifc_len;
+ host_ifconf->ifc_buf = host_ifc_buf;
+
+ ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
+ if (!is_error(ret)) {
+ /* convert host ifc_len to target ifc_len */
+
+ nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
+ target_ifc_len = nb_ifreq * target_ifreq_size;
+ host_ifconf->ifc_len = target_ifc_len;
+
+ /* restore target ifc_buf */
+
+ host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
+
+ /* copy struct ifconf to target user */
+
+ argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
+ if (!argptr)
+ return -TARGET_EFAULT;
+ thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
+ unlock_user(argptr, arg, target_size);
+
+ /* copy ifreq[] to target user */
+
+ argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
+ for (i = 0; i < nb_ifreq ; i++) {
+ thunk_convert(argptr + i * target_ifreq_size,
+ host_ifc_buf + i * sizeof(struct ifreq),
+ ifreq_arg_type, THUNK_TARGET);
+ }
+ unlock_user(argptr, target_ifc_buf, target_ifc_len);
+ }
+
+ if (free_buf) {
+ free(host_ifconf);
+ }
+
+ return ret;
+}
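+
+/* Worked example for the ifc_len conversion above (sizes are indicative:
+ * a 32-byte target ifreq for a 32-bit guest, a 40-byte host struct ifreq
+ * on a 64-bit host): a target ifc_len of 96 describes 3 entries, so
+ * host_ifc_len becomes 120; on return, a host ifc_len of 120 is converted
+ * back to a target ifc_len of 96. */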
+
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
- { TARGET_ ## cmd, cmd, #cmd, access, { __VA_ARGS__ } },
+ { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
+#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
+ { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#include "ioctls.h"
{ 0, 0, },
};
#if defined(DEBUG)
gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
+ if (ie->do_ioctl) {
+ return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
+ }
+
switch(arg_type[0]) {
case TYPE_NULL:
/* no argument */
if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
return -TARGET_EFAULT;
ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
- ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
+ ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
ldt_info.limit = tswap32(target_ldt_info->limit);
ldt_info.flags = tswap32(target_ldt_info->flags);
unlock_user_struct(target_ldt_info, ptr, 0);
if (!target_ldt_info)
return -TARGET_EFAULT;
ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
- ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
+ ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
ldt_info.limit = tswap32(target_ldt_info->limit);
ldt_info.flags = tswap32(target_ldt_info->flags);
if (ldt_info.entry_number == -1) {
base_addr = (entry_1 >> 16) |
(entry_2 & 0xff000000) |
((entry_2 & 0xff) << 16);
- target_ldt_info->base_addr = tswapl(base_addr);
+ target_ldt_info->base_addr = tswapal(base_addr);
target_ldt_info->limit = tswap32(limit);
target_ldt_info->flags = tswap32(flags);
unlock_user_struct(target_ldt_info, ptr, 1);
#ifndef TARGET_ABI32
static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
- abi_long ret;
+ abi_long ret = 0;
abi_ulong val;
int idx;
-
+
switch(code) {
case TARGET_ARCH_SET_GS:
case TARGET_ARCH_SET_FS:
idx = R_FS;
val = env->segs[idx].base;
if (put_user(val, addr, abi_ulong))
- return -TARGET_EFAULT;
+ ret = -TARGET_EFAULT;
break;
default:
ret = -TARGET_EINVAL;
break;
}
- return 0;
+ return ret;
}
#endif
#endif /* defined(TARGET_I386) */
-#if defined(CONFIG_USE_NPTL)
+#define NEW_STACK_SIZE 0x40000
-#define NEW_STACK_SIZE PTHREAD_STACK_MIN
+#if defined(CONFIG_USE_NPTL)
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
typedef struct {
return NULL;
}
#else
-/* this stack is the equivalent of the kernel stack associated with a
- thread/process */
-#define NEW_STACK_SIZE 8192
static int clone_func(void *arg)
{
{
int ret;
TaskState *ts;
- uint8_t *new_stack;
CPUState *new_env;
#if defined(CONFIG_USE_NPTL)
unsigned int nptl_flags;
sigset_t sigmask;
+#else
+ uint8_t *new_stack;
#endif
/* Emulate vfork() with fork() */
new_thread_info info;
pthread_attr_t attr;
#endif
- ts = qemu_mallocz(sizeof(TaskState) + NEW_STACK_SIZE);
+ ts = g_malloc0(sizeof(TaskState));
init_task_state(ts);
- new_stack = ts->stack;
/* we create a new CPU instance. */
new_env = cpu_copy(env);
#if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
info.parent_tidptr = parent_tidptr;
ret = pthread_attr_init(&attr);
- ret = pthread_attr_setstack(&attr, new_stack, NEW_STACK_SIZE);
+ ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
+ ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
/* It is not safe to deliver signals until the child has finished
initializing, so temporarily block all signals. */
sigfillset(&sigmask);
if (flags & CLONE_NPTL_FLAGS2)
return -EINVAL;
/* This is probably going to die very quickly, but do it anyway. */
+ new_stack = g_malloc0 (NEW_STACK_SIZE);
#ifdef __ia64__
ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
#else
return -TARGET_EFAULT;
fl.l_type = tswap16(target_fl->l_type);
fl.l_whence = tswap16(target_fl->l_whence);
- fl.l_start = tswapl(target_fl->l_start);
- fl.l_len = tswapl(target_fl->l_len);
+ fl.l_start = tswapal(target_fl->l_start);
+ fl.l_len = tswapal(target_fl->l_len);
fl.l_pid = tswap32(target_fl->l_pid);
unlock_user_struct(target_fl, arg, 0);
ret = get_errno(fcntl(fd, host_cmd, &fl));
return -TARGET_EFAULT;
target_fl->l_type = tswap16(fl.l_type);
target_fl->l_whence = tswap16(fl.l_whence);
- target_fl->l_start = tswapl(fl.l_start);
- target_fl->l_len = tswapl(fl.l_len);
+ target_fl->l_start = tswapal(fl.l_start);
+ target_fl->l_len = tswapal(fl.l_len);
target_fl->l_pid = tswap32(fl.l_pid);
unlock_user_struct(target_fl, arg, 1);
}
return -TARGET_EFAULT;
fl.l_type = tswap16(target_fl->l_type);
fl.l_whence = tswap16(target_fl->l_whence);
- fl.l_start = tswapl(target_fl->l_start);
- fl.l_len = tswapl(target_fl->l_len);
+ fl.l_start = tswapal(target_fl->l_start);
+ fl.l_len = tswapal(target_fl->l_len);
fl.l_pid = tswap32(target_fl->l_pid);
unlock_user_struct(target_fl, arg, 0);
ret = get_errno(fcntl(fd, host_cmd, &fl));
return -TARGET_EFAULT;
fl64.l_type = tswap16(target_fl64->l_type) >> 1;
fl64.l_whence = tswap16(target_fl64->l_whence);
- fl64.l_start = tswapl(target_fl64->l_start);
- fl64.l_len = tswapl(target_fl64->l_len);
+ fl64.l_start = tswap64(target_fl64->l_start);
+ fl64.l_len = tswap64(target_fl64->l_len);
fl64.l_pid = tswap32(target_fl64->l_pid);
unlock_user_struct(target_fl64, arg, 0);
ret = get_errno(fcntl(fd, host_cmd, &fl64));
return -TARGET_EFAULT;
target_fl64->l_type = tswap16(fl64.l_type) >> 1;
target_fl64->l_whence = tswap16(fl64.l_whence);
- target_fl64->l_start = tswapl(fl64.l_start);
- target_fl64->l_len = tswapl(fl64.l_len);
+ target_fl64->l_start = tswap64(fl64.l_start);
+ target_fl64->l_len = tswap64(fl64.l_len);
target_fl64->l_pid = tswap32(fl64.l_pid);
unlock_user_struct(target_fl64, arg, 1);
}
return -TARGET_EFAULT;
fl64.l_type = tswap16(target_fl64->l_type) >> 1;
fl64.l_whence = tswap16(target_fl64->l_whence);
- fl64.l_start = tswapl(target_fl64->l_start);
- fl64.l_len = tswapl(target_fl64->l_len);
+ fl64.l_start = tswap64(target_fl64->l_start);
+ fl64.l_len = tswap64(target_fl64->l_len);
fl64.l_pid = tswap32(target_fl64->l_pid);
unlock_user_struct(target_fl64, arg, 0);
ret = get_errno(fcntl(fd, host_cmd, &fl64));
else
return gid;
}
-
+static inline int tswapid(int id)
+{
+ return tswap16(id);
+}
+#else /* !USE_UID16 */
+static inline int high2lowuid(int uid)
+{
+ return uid;
+}
+static inline int high2lowgid(int gid)
+{
+ return gid;
+}
+static inline int low2highuid(int uid)
+{
+ return uid;
+}
+static inline int low2highgid(int gid)
+{
+ return gid;
+}
+static inline int tswapid(int id)
+{
+ return tswap32(id);
+}
#endif /* USE_UID16 */
void syscall_init(void)
abi_long arg3,
abi_long arg4)
{
-#ifdef TARGET_ARM
- if (((CPUARMState *)cpu_env)->eabi)
- {
+ if (regpairs_aligned(cpu_env)) {
arg2 = arg3;
arg3 = arg4;
- }
-#endif
+ }
return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
abi_long arg3,
abi_long arg4)
{
-#ifdef TARGET_ARM
- if (((CPUARMState *)cpu_env)->eabi)
- {
+ if (regpairs_aligned(cpu_env)) {
arg2 = arg3;
arg3 = arg4;
- }
-#endif
+ }
return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
return -TARGET_EFAULT;
- host_ts->tv_sec = tswapl(target_ts->tv_sec);
- host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
+ host_ts->tv_sec = tswapal(target_ts->tv_sec);
+ host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
unlock_user_struct(target_ts, target_addr, 0);
return 0;
}
if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
return -TARGET_EFAULT;
- target_ts->tv_sec = tswapl(host_ts->tv_sec);
- target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
+ target_ts->tv_sec = tswapal(host_ts->tv_sec);
+ target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
unlock_user_struct(target_ts, target_addr, 1);
return 0;
}
All errnos that do_syscall() returns must be -TARGET_<errcode>. */
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
abi_long arg2, abi_long arg3, abi_long arg4,
- abi_long arg5, abi_long arg6)
+ abi_long arg5, abi_long arg6, abi_long arg7,
+ abi_long arg8)
{
abi_long ret;
struct stat st;
sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
NULL, NULL, 0);
}
- /* TODO: Free CPU state. */
+ thread_env = NULL;
+ g_free(cpu_env);
+ g_free(ts);
pthread_exit(NULL);
}
#endif
if (arg2) {
if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
goto efault;
- tbuf.actime = tswapl(target_tbuf->actime);
- tbuf.modtime = tswapl(target_tbuf->modtime);
+ tbuf.actime = tswapal(target_tbuf->actime);
+ tbuf.modtime = tswapal(target_tbuf->modtime);
unlock_user_struct(target_tbuf, arg2, 0);
host_tbuf = &tbuf;
} else {
tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
if (!tmsp)
goto efault;
- tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
- tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
- tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
- tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
+ tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
+ tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
+ tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
+ tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
}
if (!is_error(ret))
ret = host_to_target_clock_t(ret);
break;
case TARGET_NR_setrlimit:
{
- int resource = arg1;
+ int resource = target_to_host_resource(arg1);
struct target_rlimit *target_rlim;
struct rlimit rlim;
if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
break;
case TARGET_NR_getrlimit:
{
- int resource = arg1;
+ int resource = target_to_host_resource(arg1);
struct target_rlimit *target_rlim;
struct rlimit rlim;
ret = get_errno(settimeofday(&tv, NULL));
}
break;
-#ifdef TARGET_NR_select
+#if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
case TARGET_NR_select:
{
struct target_sel_arg_struct *sel;
if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
goto efault;
- nsel = tswapl(sel->n);
- inp = tswapl(sel->inp);
- outp = tswapl(sel->outp);
- exp = tswapl(sel->exp);
- tvp = tswapl(sel->tvp);
+ nsel = tswapal(sel->n);
+ inp = tswapal(sel->inp);
+ outp = tswapal(sel->outp);
+ exp = tswapal(sel->exp);
+ tvp = tswapal(sel->tvp);
unlock_user_struct(sel, arg1, 0);
ret = do_select(nsel, inp, outp, exp, tvp);
}
#endif
#ifdef TARGET_NR_pselect6
case TARGET_NR_pselect6:
- goto unimplemented_nowarn;
+ {
+ abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
+ fd_set rfds, wfds, efds;
+ fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
+ struct timespec ts, *ts_ptr;
+
+ /*
+ * The 6th arg is actually two args smashed together,
+ * so we cannot use the C library.
+ */
+ sigset_t set;
+ struct {
+ sigset_t *set;
+ size_t size;
+ } sig, *sig_ptr;
+
+ abi_ulong arg_sigset, arg_sigsize, *arg7;
+ target_sigset_t *target_sigset;
+
+ n = arg1;
+ rfd_addr = arg2;
+ wfd_addr = arg3;
+ efd_addr = arg4;
+ ts_addr = arg5;
+
+ ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
+ if (ret) {
+ goto fail;
+ }
+ ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
+ if (ret) {
+ goto fail;
+ }
+ ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
+ if (ret) {
+ goto fail;
+ }
+
+ /*
+ * This takes a timespec, and not a timeval, so we cannot
+ * use the do_select() helper ...
+ */
+ if (ts_addr) {
+ if (target_to_host_timespec(&ts, ts_addr)) {
+ goto efault;
+ }
+ ts_ptr = &ts;
+ } else {
+ ts_ptr = NULL;
+ }
+
+ /* Extract the two packed args for the sigset */
+ if (arg6) {
+ sig_ptr = &sig;
+ sig.size = _NSIG / 8;
+
+ arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
+ if (!arg7) {
+ goto efault;
+ }
+ arg_sigset = tswapal(arg7[0]);
+ arg_sigsize = tswapal(arg7[1]);
+ unlock_user(arg7, arg6, 0);
+
+ if (arg_sigset) {
+ sig.set = &set;
+ if (arg_sigsize != sizeof(*target_sigset)) {
+ /* Like the kernel, we enforce correct size sigsets */
+ ret = -TARGET_EINVAL;
+ goto fail;
+ }
+ target_sigset = lock_user(VERIFY_READ, arg_sigset,
+ sizeof(*target_sigset), 1);
+ if (!target_sigset) {
+ goto efault;
+ }
+ target_to_host_sigset(&set, target_sigset);
+ unlock_user(target_sigset, arg_sigset, 0);
+ } else {
+ sig.set = NULL;
+ }
+ } else {
+ sig_ptr = NULL;
+ }
+
+ ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
+ ts_ptr, sig_ptr));
+
+ if (!is_error(ret)) {
+ if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
+ goto efault;
+ if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
+ goto efault;
+ if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
+ goto efault;
+
+ if (ts_addr && host_to_target_timespec(ts_addr, &ts))
+ goto efault;
+ }
+ }
+ break;
#endif
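+    /* For reference, the sixth pselect6 argument handled above points at
+     * a packed pair of abi_ulongs on the target side, conceptually
+     * { sigset_addr, sigset_size }, which is why it is read as two
+     * abi_ulongs via lock_user() rather than through a libc prototype. */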
case TARGET_NR_symlink:
{
#endif
#ifdef TARGET_NR_mmap
case TARGET_NR_mmap:
-#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE)
+#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
+ defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
+ || defined(TARGET_S390X)
{
abi_ulong *v;
abi_ulong v1, v2, v3, v4, v5, v6;
if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
goto efault;
- v1 = tswapl(v[0]);
- v2 = tswapl(v[1]);
- v3 = tswapl(v[2]);
- v4 = tswapl(v[3]);
- v5 = tswapl(v[4]);
- v6 = tswapl(v[5]);
+ v1 = tswapal(v[0]);
+ v2 = tswapal(v[1]);
+ v3 = tswapal(v[2]);
+ v4 = tswapal(v[3]);
+ v5 = tswapal(v[4]);
+ v6 = tswapal(v[5]);
unlock_user(v, arg1, 0);
ret = get_errno(target_mmap(v1, v2, v3,
target_to_host_bitmask(v4, mmap_flags_tbl),
ret = get_errno(target_munmap(arg1, arg2));
break;
case TARGET_NR_mprotect:
+ {
+ TaskState *ts = ((CPUState *)cpu_env)->opaque;
+ /* Special hack to detect libc making the stack executable. */
+ if ((arg3 & PROT_GROWSDOWN)
+ && arg1 >= ts->info->stack_limit
+ && arg1 <= ts->info->start_stack) {
+ arg3 &= ~PROT_GROWSDOWN;
+ arg2 = arg2 + arg1 - ts->info->stack_limit;
+ arg1 = ts->info->stack_limit;
+ }
+ }
ret = get_errno(target_mprotect(arg1, arg2, arg3));
break;
#ifdef TARGET_NR_mremap
#endif
#ifdef TARGET_NR_syscall
case TARGET_NR_syscall:
- ret = do_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0);
- break;
+ ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, 0);
+ break;
#endif
case TARGET_NR_wait4:
{
ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#elif defined(TARGET_CRIS)
ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
+#elif defined(TARGET_S390X)
+ ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR__llseek /* Not on alpha */
case TARGET_NR__llseek:
{
-#if defined (__x86_64__)
- ret = get_errno(lseek(arg1, ((uint64_t )arg2 << 32) | arg3, arg5));
- if (put_user_s64(ret, arg4))
- goto efault;
-#else
int64_t res;
+#if !defined(__NR_llseek)
+ res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
+ if (res == -1) {
+ ret = get_errno(res);
+ } else {
+ ret = 0;
+ }
+#else
ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
- if (put_user_s64(res, arg4))
- goto efault;
#endif
+ if ((ret == 0) && put_user_s64(res, arg4)) {
+ goto efault;
+ }
}
break;
#endif
reclen = de->d_reclen;
treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
tde->d_reclen = tswap16(treclen);
- tde->d_ino = tswapl(de->d_ino);
- tde->d_off = tswapl(de->d_off);
+ tde->d_ino = tswapal(de->d_ino);
+ tde->d_off = tswapal(de->d_off);
tnamelen = treclen - (2 * sizeof(abi_long) + 2);
if (tnamelen > 256)
tnamelen = 256;
}
break;
#endif /* TARGET_NR_getdents64 */
-#ifdef TARGET_NR__newselect
+#if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
+#ifdef TARGET_S390X
+ case TARGET_NR_select:
+#else
case TARGET_NR__newselect:
+#endif
ret = do_select(arg1, arg2, arg3, arg4, arg5);
break;
#endif
-#ifdef TARGET_NR_poll
+#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
+# ifdef TARGET_NR_poll
case TARGET_NR_poll:
+# endif
+# ifdef TARGET_NR_ppoll
+ case TARGET_NR_ppoll:
+# endif
{
struct target_pollfd *target_pfd;
unsigned int nfds = arg2;
target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
if (!target_pfd)
goto efault;
+
pfd = alloca(sizeof(struct pollfd) * nfds);
for(i = 0; i < nfds; i++) {
pfd[i].fd = tswap32(target_pfd[i].fd);
pfd[i].events = tswap16(target_pfd[i].events);
}
- ret = get_errno(poll(pfd, nfds, timeout));
+
+# ifdef TARGET_NR_ppoll
+ if (num == TARGET_NR_ppoll) {
+ struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
+ target_sigset_t *target_set;
+ sigset_t _set, *set = &_set;
+
+ if (arg3) {
+ if (target_to_host_timespec(timeout_ts, arg3)) {
+ unlock_user(target_pfd, arg1, 0);
+ goto efault;
+ }
+ } else {
+ timeout_ts = NULL;
+ }
+
+ if (arg4) {
+ target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
+ if (!target_set) {
+ unlock_user(target_pfd, arg1, 0);
+ goto efault;
+ }
+ target_to_host_sigset(set, target_set);
+ } else {
+ set = NULL;
+ }
+
+ ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
+
+ if (!is_error(ret) && arg3) {
+ host_to_target_timespec(arg3, timeout_ts);
+ }
+ if (arg4) {
+ unlock_user(target_set, arg4, 0);
+ }
+ } else
+# endif
+ ret = get_errno(poll(pfd, nfds, timeout));
+
if (!is_error(ret)) {
for(i = 0; i < nfds; i++) {
target_pfd[i].revents = tswap16(pfd[i].revents);
}
- ret += nfds * (sizeof(struct target_pollfd)
- - sizeof(struct pollfd));
}
- unlock_user(target_pfd, arg1, ret);
+ unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
}
break;
#endif
return value. */
ret = -TARGET_ENOTDIR;
break;
+ case TARGET_NR_sched_getaffinity:
+ {
+ unsigned int mask_size;
+ unsigned long *mask;
+
+ /*
+         * sched_getaffinity needs multiples of ulong, so we need to take
+ * care of mismatches between target ulong and host ulong sizes.
+ */
+ if (arg2 & (sizeof(abi_ulong) - 1)) {
+ ret = -TARGET_EINVAL;
+ break;
+ }
+ mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
+
+ mask = alloca(mask_size);
+ ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
+
+ if (!is_error(ret)) {
+ if (copy_to_user(arg3, mask, ret)) {
+ goto efault;
+ }
+ }
+ }
+ break;
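+    /* Example of the rounding above (illustrative): a 32-bit target
+     * (sizeof(abi_ulong) == 4) passing arg2 == 4 on a 64-bit host
+     * (sizeof(unsigned long) == 8) passes the alignment check and gets
+     * mask_size rounded up to 8, while arg2 == 6 fails with
+     * -TARGET_EINVAL. The same rounding applies to sched_setaffinity
+     * below. */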
+ case TARGET_NR_sched_setaffinity:
+ {
+ unsigned int mask_size;
+ unsigned long *mask;
+
+ /*
+         * sched_setaffinity needs multiples of ulong, so we need to take
+ * care of mismatches between target ulong and host ulong sizes.
+ */
+ if (arg2 & (sizeof(abi_ulong) - 1)) {
+ ret = -TARGET_EINVAL;
+ break;
+ }
+ mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
+
+ mask = alloca(mask_size);
+        p = lock_user(VERIFY_READ, arg3, arg2, 1);
+        if (!p) {
+            goto efault;
+        }
+        memcpy(mask, p, arg2);
+        unlock_user(p, arg3, 0);
+
+ ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
+ }
+ break;
case TARGET_NR_sched_setparam:
{
struct sched_param *target_schp;
#endif
#ifdef TARGET_NR_pread
case TARGET_NR_pread:
-#ifdef TARGET_ARM
- if (((CPUARMState *)cpu_env)->eabi)
+ if (regpairs_aligned(cpu_env))
arg4 = arg5;
-#endif
if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
goto efault;
ret = get_errno(pread(arg1, p, arg3, arg4));
unlock_user(p, arg2, ret);
break;
case TARGET_NR_pwrite:
-#ifdef TARGET_ARM
- if (((CPUARMState *)cpu_env)->eabi)
+ if (regpairs_aligned(cpu_env))
arg4 = arg5;
-#endif
if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
goto efault;
ret = get_errno(pwrite(arg1, p, arg3, arg4));
case TARGET_NR_sigaltstack:
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
- defined(TARGET_M68K)
+ defined(TARGET_M68K) || defined(TARGET_S390X)
ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
break;
#else
case TARGET_NR_ugetrlimit:
{
struct rlimit rlim;
- ret = get_errno(getrlimit(arg1, &rlim));
+ int resource = target_to_host_resource(arg1);
+ ret = get_errno(getrlimit(resource, &rlim));
if (!is_error(ret)) {
struct target_rlimit *target_rlim;
if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
ret = host_to_target_stat64(cpu_env, arg3, &st);
break;
#endif
-#ifdef USE_UID16
case TARGET_NR_lchown:
if (!(p = lock_user_string(arg1)))
goto efault;
ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
unlock_user(p, arg1, 0);
break;
+#ifdef TARGET_NR_getuid
case TARGET_NR_getuid:
ret = get_errno(high2lowuid(getuid()));
break;
+#endif
+#ifdef TARGET_NR_getgid
case TARGET_NR_getgid:
ret = get_errno(high2lowgid(getgid()));
break;
+#endif
+#ifdef TARGET_NR_geteuid
case TARGET_NR_geteuid:
ret = get_errno(high2lowuid(geteuid()));
break;
+#endif
+#ifdef TARGET_NR_getegid
case TARGET_NR_getegid:
ret = get_errno(high2lowgid(getegid()));
break;
+#endif
case TARGET_NR_setreuid:
ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
break;
case TARGET_NR_getgroups:
{
int gidsetsize = arg1;
- uint16_t *target_grouplist;
+ target_id *target_grouplist;
gid_t *grouplist;
int i;
if (!target_grouplist)
goto efault;
for(i = 0;i < ret; i++)
- target_grouplist[i] = tswap16(grouplist[i]);
+ target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
unlock_user(target_grouplist, arg2, gidsetsize * 2);
}
}
case TARGET_NR_setgroups:
{
int gidsetsize = arg1;
- uint16_t *target_grouplist;
+ target_id *target_grouplist;
gid_t *grouplist;
int i;
goto fail;
}
for(i = 0;i < gidsetsize; i++)
- grouplist[i] = tswap16(target_grouplist[i]);
+ grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
unlock_user(target_grouplist, arg2, 0);
ret = get_errno(setgroups(gidsetsize, grouplist));
}
case TARGET_NR_setfsgid:
ret = get_errno(setfsgid(arg1));
break;
-#endif /* USE_UID16 */
#ifdef TARGET_NR_lchown32
case TARGET_NR_lchown32:
case TARGET_NR_osf_sigprocmask:
{
abi_ulong mask;
- int how = arg1;
+ int how;
sigset_t set, oldset;
switch(arg1) {
}
mask = arg2;
target_to_host_old_sigset(&set, &mask);
- sigprocmask(arg1, &set, &oldset);
+ sigprocmask(how, &set, &oldset);
host_to_target_old_sigset(&mask, &oldset);
ret = mask;
}
#ifdef TARGET_NR_readahead
case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
-#ifdef TARGET_ARM
- if (((CPUARMState *)cpu_env)->eabi)
- {
+ if (regpairs_aligned(cpu_env)) {
arg2 = arg3;
arg3 = arg4;
arg4 = arg5;
}
-#endif
ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
#else
ret = get_errno(readahead(arg1, arg2, arg3));
#endif
break;
#endif
+#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
- case TARGET_NR_setxattr:
case TARGET_NR_lsetxattr:
case TARGET_NR_fsetxattr:
- case TARGET_NR_getxattr:
case TARGET_NR_lgetxattr:
case TARGET_NR_fgetxattr:
case TARGET_NR_listxattr:
case TARGET_NR_llistxattr:
case TARGET_NR_flistxattr:
- case TARGET_NR_removexattr:
case TARGET_NR_lremovexattr:
case TARGET_NR_fremovexattr:
ret = -TARGET_EOPNOTSUPP;
break;
+ case TARGET_NR_setxattr:
+ {
+ void *p, *n, *v;
+ p = lock_user_string(arg1);
+ n = lock_user_string(arg2);
+ v = lock_user(VERIFY_READ, arg3, arg4, 1);
+ if (p && n && v) {
+ ret = get_errno(setxattr(p, n, v, arg4, arg5));
+ } else {
+ ret = -TARGET_EFAULT;
+ }
+ unlock_user(p, arg1, 0);
+ unlock_user(n, arg2, 0);
+ unlock_user(v, arg3, 0);
+ }
+ break;
+ case TARGET_NR_getxattr:
+ {
+ void *p, *n, *v;
+ p = lock_user_string(arg1);
+ n = lock_user_string(arg2);
+ v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
+ if (p && n && v) {
+ ret = get_errno(getxattr(p, n, v, arg4));
+ } else {
+ ret = -TARGET_EFAULT;
+ }
+ unlock_user(p, arg1, 0);
+ unlock_user(n, arg2, 0);
+ unlock_user(v, arg3, arg4);
+ }
+ break;
+ case TARGET_NR_removexattr:
+ {
+ void *p, *n;
+ p = lock_user_string(arg1);
+ n = lock_user_string(arg2);
+ if (p && n) {
+ ret = get_errno(removexattr(p, n));
+ } else {
+ ret = -TARGET_EFAULT;
+ }
+ unlock_user(p, arg1, 0);
+ unlock_user(n, arg2, 0);
+ }
+ break;
#endif
+#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
case TARGET_NR_fallocate:
ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
break;
+#endif
+#if defined(CONFIG_SYNC_FILE_RANGE)
+#if defined(TARGET_NR_sync_file_range)
+ case TARGET_NR_sync_file_range:
+#if TARGET_ABI_BITS == 32
+#if defined(TARGET_MIPS)
+ ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
+ target_offset64(arg5, arg6), arg7));
+#else
+ ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
+ target_offset64(arg4, arg5), arg6));
+#endif /* !TARGET_MIPS */
+#else
+ ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
+#endif
+ break;
+#endif
+#if defined(TARGET_NR_sync_file_range2)
+ case TARGET_NR_sync_file_range2:
+ /* This is like sync_file_range but the arguments are reordered */
+#if TARGET_ABI_BITS == 32
+ ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
+ target_offset64(arg5, arg6), arg2));
+#else
+ ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
+#endif
+ break;
+#endif
+#endif
+#if defined(CONFIG_EPOLL)
+#if defined(TARGET_NR_epoll_create)
+ case TARGET_NR_epoll_create:
+ ret = get_errno(epoll_create(arg1));
+ break;
+#endif
+#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
+ case TARGET_NR_epoll_create1:
+ ret = get_errno(epoll_create1(arg1));
+ break;
+#endif
+#if defined(TARGET_NR_epoll_ctl)
+ case TARGET_NR_epoll_ctl:
+ {
+ struct epoll_event ep;
+ struct epoll_event *epp = 0;
+ if (arg4) {
+ struct target_epoll_event *target_ep;
+ if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
+ goto efault;
+ }
+ ep.events = tswap32(target_ep->events);
+ /* The epoll_data_t union is just opaque data to the kernel,
+ * so we transfer all 64 bits across and need not worry what
+ * actual data type it is.
+ */
+ ep.data.u64 = tswap64(target_ep->data.u64);
+ unlock_user_struct(target_ep, arg4, 0);
+ epp = &ep;
+ }
+ ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
+ break;
+ }
+#endif
+
+#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
+#define IMPLEMENT_EPOLL_PWAIT
+#endif
+#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
+#if defined(TARGET_NR_epoll_wait)
+ case TARGET_NR_epoll_wait:
+#endif
+#if defined(IMPLEMENT_EPOLL_PWAIT)
+ case TARGET_NR_epoll_pwait:
+#endif
+ {
+ struct target_epoll_event *target_ep;
+ struct epoll_event *ep;
+ int epfd = arg1;
+ int maxevents = arg3;
+ int timeout = arg4;
+
+ target_ep = lock_user(VERIFY_WRITE, arg2,
+ maxevents * sizeof(struct target_epoll_event), 1);
+ if (!target_ep) {
+ goto efault;
+ }
+
+ ep = alloca(maxevents * sizeof(struct epoll_event));
+
+ switch (num) {
+#if defined(IMPLEMENT_EPOLL_PWAIT)
+ case TARGET_NR_epoll_pwait:
+ {
+ target_sigset_t *target_set;
+ sigset_t _set, *set = &_set;
+
+ if (arg5) {
+ target_set = lock_user(VERIFY_READ, arg5,
+ sizeof(target_sigset_t), 1);
+ if (!target_set) {
+ unlock_user(target_ep, arg2, 0);
+ goto efault;
+ }
+ target_to_host_sigset(set, target_set);
+ unlock_user(target_set, arg5, 0);
+ } else {
+ set = NULL;
+ }
+
+ ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
+ break;
+ }
+#endif
+#if defined(TARGET_NR_epoll_wait)
+ case TARGET_NR_epoll_wait:
+ ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
+ break;
+#endif
+ default:
+ ret = -TARGET_ENOSYS;
+ }
+ if (!is_error(ret)) {
+ int i;
+ for (i = 0; i < ret; i++) {
+ target_ep[i].events = tswap32(ep[i].events);
+ target_ep[i].data.u64 = tswap64(ep[i].data.u64);
+ }
+ }
+ unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
+ break;
+ }
+#endif
+#endif
+#ifdef TARGET_NR_prlimit64
+ case TARGET_NR_prlimit64:
+ {
+ /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
+ struct target_rlimit64 *target_rnew, *target_rold;
+ struct host_rlimit64 rnew, rold, *rnewp = 0;
+ if (arg3) {
+ if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
+ goto efault;
+ }
+ rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
+ rnew.rlim_max = tswap64(target_rnew->rlim_max);
+ unlock_user_struct(target_rnew, arg3, 0);
+ rnewp = &rnew;
+ }
+
+ ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
+ if (!is_error(ret) && arg4) {
+ if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
+ goto efault;
+ }
+ target_rold->rlim_cur = tswap64(rold.rlim_cur);
+ target_rold->rlim_max = tswap64(rold.rlim_max);
+ unlock_user_struct(target_rold, arg4, 1);
+ }
+ break;
+ }
#endif
default:
unimplemented: