#include <sys/swap.h>
#include <signal.h>
#include <sched.h>
+#ifdef __ia64__
+int __clone2(int (*fn)(void *), void *child_stack_base,
+ size_t stack_size, int flags, void *arg, ...);
+#endif
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
+#ifdef CONFIG_EPOLL
+#include <sys/epoll.h>
+#endif
#define termios host_termios
#define winsize host_winsize
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
+#if defined(CONFIG_FIEMAP)
+#include <linux/fiemap.h>
+#endif
#include <linux/fb.h>
#include <linux/vt.h>
#include "linux_loop.h"
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
_syscall2(int, sys_getpriority, int, which, int, who);
-#if defined(TARGET_NR__llseek) && !defined (__x86_64__)
+#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
loff_t *, res, uint, wh);
#endif
const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
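+/* Use the raw syscalls here: glibc's sched_{get,set}affinity wrappers
+   work in terms of cpu_set_t and may adjust the length argument, which
+   would get in the way of passing the guest's buffer size through. */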
+#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
+_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
+ unsigned long *, user_mask_ptr);
+#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
+_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
+ unsigned long *, user_mask_ptr);
static bitmask_transtbl fcntl_flags_tbl[] = {
{ TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
return (inotify_rm_watch(fd, wd));
}
#endif
+#ifdef CONFIG_INOTIFY1
+#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
+static int sys_inotify_init1(int flags)
+{
+ return (inotify_init1(flags));
+}
+#endif
+#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
+#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
+#if defined(TARGET_NR_ppoll)
+#ifndef __NR_ppoll
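+/* Stub value so the _syscall5 wrapper still compiles on hosts without
+   ppoll; syscall(-1) simply fails with ENOSYS at run time. */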
+# define __NR_ppoll -1
+#endif
+#define __NR_sys_ppoll __NR_ppoll
+_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
+ struct timespec *, timeout, const __sigset_t *, sigmask,
+ size_t, sigsetsize)
+#endif
extern int personality(int);
extern int flock(int, int);
PROT_READ|PROT_WRITE,
MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));
- if (!is_error(mapped_addr))
+#if defined(TARGET_ALPHA)
+ /* We (partially) emulate OSF/1 on Alpha, which requires we
+ return a proper errno, not an unchanged brk value. */
+ if (is_error(mapped_addr)) {
+ return -TARGET_ENOMEM;
+ }
+#endif
+
+ if (!is_error(mapped_addr)) {
target_brk = new_brk;
-
+ }
return target_brk;
}
return 0;
}
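+/* RLIM_INFINITY is not the same bit pattern on every target ABI, so
+   translate the sentinel explicitly; host values too wide for the
+   target saturate to TARGET_RLIM_INFINITY rather than truncating. */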
+static inline rlim_t target_to_host_rlim(target_ulong target_rlim)
+{
+ if (target_rlim == TARGET_RLIM_INFINITY)
+ return RLIM_INFINITY;
+ else
+ return tswapl(target_rlim);
+}
+
+static inline target_ulong host_to_target_rlim(rlim_t rlim)
+{
+ if (rlim == RLIM_INFINITY || rlim != (target_long)rlim)
+ return TARGET_RLIM_INFINITY;
+ else
+ return tswapl(rlim);
+}
+
static inline abi_long copy_from_user_timeval(struct timeval *tv,
abi_ulong target_tv_addr)
{
#endif
}
-static abi_long do_pipe(void *cpu_env, abi_ulong pipedes, int flags)
+static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
+ int flags, int is_pipe2)
{
int host_pipe[2];
abi_long ret;
if (is_error(ret))
return get_errno(ret);
-#if defined(TARGET_MIPS)
- ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
- ret = host_pipe[0];
+
+ /* Several targets have special calling conventions for the original
+ pipe syscall, but didn't replicate this into the pipe2 syscall. */
+ if (!is_pipe2) {
+#if defined(TARGET_ALPHA)
+ ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
+ return host_pipe[0];
+#elif defined(TARGET_MIPS)
+ ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
+ return host_pipe[0];
#elif defined(TARGET_SH4)
- ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
- ret = host_pipe[0];
-#else
+ ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
+ return host_pipe[0];
+#endif
+ }
+
if (put_user_s32(host_pipe[0], pipedes)
|| put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
return -TARGET_EFAULT;
-#endif
return get_errno(ret);
}
switch(level) {
case TARGET_SOL_SOCKET:
- level = SOL_SOCKET;
- switch (optname) {
- case TARGET_SO_LINGER:
- case TARGET_SO_RCVTIMEO:
- case TARGET_SO_SNDTIMEO:
- case TARGET_SO_PEERCRED:
- case TARGET_SO_PEERNAME:
- /* These don't just return a single integer */
- goto unimplemented;
+ level = SOL_SOCKET;
+ switch (optname) {
+ /* These don't just return a single integer */
+ case TARGET_SO_LINGER:
+ case TARGET_SO_RCVTIMEO:
+ case TARGET_SO_SNDTIMEO:
+ case TARGET_SO_PEERCRED:
+ case TARGET_SO_PEERNAME:
+ goto unimplemented;
+ /* Options with 'int' argument. */
+ case TARGET_SO_DEBUG:
+ optname = SO_DEBUG;
+ goto int_case;
+ case TARGET_SO_REUSEADDR:
+ optname = SO_REUSEADDR;
+ goto int_case;
+ case TARGET_SO_TYPE:
+ optname = SO_TYPE;
+ goto int_case;
+ case TARGET_SO_ERROR:
+ optname = SO_ERROR;
+ goto int_case;
+ case TARGET_SO_DONTROUTE:
+ optname = SO_DONTROUTE;
+ goto int_case;
+ case TARGET_SO_BROADCAST:
+ optname = SO_BROADCAST;
+ goto int_case;
+ case TARGET_SO_SNDBUF:
+ optname = SO_SNDBUF;
+ goto int_case;
+ case TARGET_SO_RCVBUF:
+ optname = SO_RCVBUF;
+ goto int_case;
+ case TARGET_SO_KEEPALIVE:
+ optname = SO_KEEPALIVE;
+ goto int_case;
+ case TARGET_SO_OOBINLINE:
+ optname = SO_OOBINLINE;
+ goto int_case;
+ case TARGET_SO_NO_CHECK:
+ optname = SO_NO_CHECK;
+ goto int_case;
+ case TARGET_SO_PRIORITY:
+ optname = SO_PRIORITY;
+ goto int_case;
+#ifdef SO_BSDCOMPAT
+ case TARGET_SO_BSDCOMPAT:
+ optname = SO_BSDCOMPAT;
+ goto int_case;
+#endif
+ case TARGET_SO_PASSCRED:
+ optname = SO_PASSCRED;
+ goto int_case;
+ case TARGET_SO_TIMESTAMP:
+ optname = SO_TIMESTAMP;
+ goto int_case;
+ case TARGET_SO_RCVLOWAT:
+ optname = SO_RCVLOWAT;
+ goto int_case;
default:
goto int_case;
}
return -TARGET_EFAULT;
if (len < 0)
return -TARGET_EINVAL;
- lv = sizeof(int);
+ lv = sizeof(lv);
ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
if (ret < 0)
return ret;
} else {
if (put_user_u8(val, optval_addr))
return -TARGET_EFAULT;
- }
+ }
if (put_user_u32(len, optlen))
return -TARGET_EFAULT;
break;
return -TARGET_EFAULT;
if (len < 0)
return -TARGET_EINVAL;
- lv = sizeof(int);
+ lv = sizeof(lv);
ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
if (ret < 0)
return ret;
void *addr;
abi_long ret;
- if (addrlen < 0)
+ if ((int)addrlen < 0) {
return -TARGET_EINVAL;
+ }
addr = alloca(addrlen+1);
void *addr;
abi_long ret;
- if (addrlen < 0)
+ if ((int)addrlen < 0) {
return -TARGET_EINVAL;
+ }
addr = alloca(addrlen);
if (get_user_u32(addrlen, target_addrlen_addr))
return -TARGET_EINVAL;
- if (addrlen < 0)
+ if ((int)addrlen < 0) {
return -TARGET_EINVAL;
+ }
if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
return -TARGET_EINVAL;
if (get_user_u32(addrlen, target_addrlen_addr))
return -TARGET_EFAULT;
- if (addrlen < 0)
+ if ((int)addrlen < 0) {
return -TARGET_EINVAL;
+ }
if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
return -TARGET_EFAULT;
if (get_user_u32(addrlen, target_addrlen_addr))
return -TARGET_EFAULT;
- if (addrlen < 0)
+ if ((int)addrlen < 0) {
return -TARGET_EINVAL;
+ }
if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
return -TARGET_EFAULT;
void *host_msg;
abi_long ret;
- if (addrlen < 0)
+ if ((int)addrlen < 0) {
return -TARGET_EINVAL;
+ }
host_msg = lock_user(VERIFY_READ, msg, len, 1);
if (!host_msg)
ret = -TARGET_EFAULT;
goto fail;
}
- if (addrlen < 0) {
+ if ((int)addrlen < 0) {
ret = -TARGET_EINVAL;
goto fail;
}
for (i = 0; i < N_SHM_REGIONS; ++i) {
if (shm_regions[i].start == shmaddr) {
shm_regions[i].start = 0;
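+            /* page_set_flags() takes a start..end address range, not a length. */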
- page_set_flags(shmaddr, shm_regions[i].size, 0);
+ page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
break;
}
}
#undef STRUCT
#undef STRUCT_SPECIAL
-typedef struct IOCTLEntry {
+typedef struct IOCTLEntry IOCTLEntry;
+
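+/* An entry may supply its own handler for ioctls whose arguments are
+   too irregular for the generic thunk-based conversion below. */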
+typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
+ int fd, abi_long cmd, abi_long arg);
+
+struct IOCTLEntry {
unsigned int target_cmd;
unsigned int host_cmd;
const char *name;
int access;
+ do_ioctl_fn *do_ioctl;
const argtype arg_type[5];
-} IOCTLEntry;
+};
#define IOC_R 0x0001
#define IOC_W 0x0002
#define MAX_STRUCT_SIZE 4096
+#ifdef CONFIG_FIEMAP
+/* So fiemap access checks don't overflow on 32 bit systems.
+ * This is very slightly smaller than the limit imposed by
+ * the underlying kernel.
+ */
+#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
+ / sizeof(struct fiemap_extent))
+
+static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
+ int fd, abi_long cmd, abi_long arg)
+{
+ /* The parameter for this ioctl is a struct fiemap followed
+ * by an array of struct fiemap_extent whose size is set
+ * in fiemap->fm_extent_count. The array is filled in by the
+ * ioctl.
+ */
+ int target_size_in, target_size_out;
+ struct fiemap *fm;
+ const argtype *arg_type = ie->arg_type;
+ const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
+ void *argptr, *p;
+ abi_long ret;
+ int i, extent_size = thunk_type_size(extent_arg_type, 0);
+ uint32_t outbufsz;
+ int free_fm = 0;
+
+ assert(arg_type[0] == TYPE_PTR);
+ assert(ie->access == IOC_RW);
+ arg_type++;
+ target_size_in = thunk_type_size(arg_type, 0);
+ argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
+ if (!argptr) {
+ return -TARGET_EFAULT;
+ }
+ thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
+ unlock_user(argptr, arg, 0);
+ fm = (struct fiemap *)buf_temp;
+ if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
+ return -TARGET_EINVAL;
+ }
+
+ outbufsz = sizeof (*fm) +
+ (sizeof(struct fiemap_extent) * fm->fm_extent_count);
+
+ if (outbufsz > MAX_STRUCT_SIZE) {
+ /* We can't fit all the extents into the fixed size buffer.
+ * Allocate one that is large enough and use it instead.
+ */
+ fm = malloc(outbufsz);
+ if (!fm) {
+ return -TARGET_ENOMEM;
+ }
+ memcpy(fm, buf_temp, sizeof(struct fiemap));
+ free_fm = 1;
+ }
+ ret = get_errno(ioctl(fd, ie->host_cmd, fm));
+ if (!is_error(ret)) {
+ target_size_out = target_size_in;
+ /* An extent_count of 0 means we were only counting the extents
+ * so there are no structs to copy
+ */
+ if (fm->fm_extent_count != 0) {
+ target_size_out += fm->fm_mapped_extents * extent_size;
+ }
+ argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
+ if (!argptr) {
+ ret = -TARGET_EFAULT;
+ } else {
+ /* Convert the struct fiemap */
+ thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
+ if (fm->fm_extent_count != 0) {
+ p = argptr + target_size_in;
+ /* ...and then all the struct fiemap_extents */
+ for (i = 0; i < fm->fm_mapped_extents; i++) {
+ thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
+ THUNK_TARGET);
+ p += extent_size;
+ }
+ }
+ unlock_user(argptr, arg, target_size_out);
+ }
+ }
+ if (free_fm) {
+ free(fm);
+ }
+ return ret;
+}
+#endif
+
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
- { TARGET_ ## cmd, cmd, #cmd, access, { __VA_ARGS__ } },
+ { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
+#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
+ { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#include "ioctls.h"
{ 0, 0, },
};
#if defined(DEBUG)
gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
+ if (ie->do_ioctl) {
+ return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
+ }
+
switch(arg_type[0]) {
case TYPE_NULL:
/* no argument */
{
int ret;
TaskState *ts;
- uint8_t *new_stack;
CPUState *new_env;
#if defined(CONFIG_USE_NPTL)
unsigned int nptl_flags;
sigset_t sigmask;
+#else
+ uint8_t *new_stack;
#endif
/* Emulate vfork() with fork() */
new_thread_info info;
pthread_attr_t attr;
#endif
- ts = qemu_mallocz(sizeof(TaskState) + NEW_STACK_SIZE);
+ ts = qemu_mallocz(sizeof(TaskState));
init_task_state(ts);
- new_stack = ts->stack;
/* we create a new CPU instance. */
new_env = cpu_copy(env);
#if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
info.parent_tidptr = parent_tidptr;
ret = pthread_attr_init(&attr);
- ret = pthread_attr_setstack(&attr, new_stack, NEW_STACK_SIZE);
+ ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
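+ /* Nothing ever joins the child thread, so create it detached. */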
+ ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
/* It is not safe to deliver signals until the child has finished
initializing, so temporarily block all signals. */
sigfillset(&sigmask);
if (flags & CLONE_NPTL_FLAGS2)
return -EINVAL;
/* This is probably going to die very quickly, but do it anyway. */
+ new_stack = qemu_mallocz(NEW_STACK_SIZE);
#ifdef __ia64__
- ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
+ ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
#else
ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
#endif
} else
#endif
{
-#if (TARGET_LONG_BITS == 64) && (!defined(TARGET_ALPHA))
+#if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
struct target_stat *target_st;
#else
struct target_stat64 *target_st;
sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
NULL, NULL, 0);
}
- /* TODO: Free CPU state. */
+ thread_env = NULL;
+ qemu_free(cpu_env);
+ qemu_free(ts);
pthread_exit(NULL);
}
#endif
case TARGET_NR_lseek:
ret = get_errno(lseek(arg1, arg2, arg3));
break;
-#ifdef TARGET_NR_getxpid
+#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
+ /* Alpha specific */
case TARGET_NR_getxpid:
-#else
- case TARGET_NR_getpid:
+ ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
+ ret = get_errno(getpid());
+ break;
#endif
+#ifdef TARGET_NR_getpid
+ case TARGET_NR_getpid:
ret = get_errno(getpid());
break;
+#endif
case TARGET_NR_mount:
{
/* need to look at the data field */
ret = get_errno(dup(arg1));
break;
case TARGET_NR_pipe:
- ret = do_pipe(cpu_env, arg1, 0);
+ ret = do_pipe(cpu_env, arg1, 0, 0);
break;
#ifdef TARGET_NR_pipe2
case TARGET_NR_pipe2:
- ret = do_pipe(cpu_env, arg1, arg2);
+ ret = do_pipe(cpu_env, arg1, arg2, 1);
break;
#endif
case TARGET_NR_times:
#ifdef TARGET_NR_sigaction
case TARGET_NR_sigaction:
{
-#if !defined(TARGET_MIPS)
+#if defined(TARGET_ALPHA)
+ struct target_sigaction act, oact, *pact = 0;
struct target_old_sigaction *old_act;
- struct target_sigaction act, oact, *pact;
if (arg2) {
if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
goto efault;
act._sa_handler = old_act->_sa_handler;
target_siginitset(&act.sa_mask, old_act->sa_mask);
act.sa_flags = old_act->sa_flags;
- act.sa_restorer = old_act->sa_restorer;
+ act.sa_restorer = 0;
unlock_user_struct(old_act, arg2, 0);
pact = &act;
- } else {
- pact = NULL;
}
ret = get_errno(do_sigaction(arg1, pact, &oact));
if (!is_error(ret) && arg3) {
old_act->_sa_handler = oact._sa_handler;
old_act->sa_mask = oact.sa_mask.sig[0];
old_act->sa_flags = oact.sa_flags;
- old_act->sa_restorer = oact.sa_restorer;
unlock_user_struct(old_act, arg3, 1);
}
-#else
+#elif defined(TARGET_MIPS)
struct target_sigaction act, oact, *pact, *old_act;
if (arg2) {
old_act->sa_mask.sig[3] = 0;
unlock_user_struct(old_act, arg3, 1);
}
+#else
+ struct target_old_sigaction *old_act;
+ struct target_sigaction act, oact, *pact;
+ if (arg2) {
+ if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
+ goto efault;
+ act._sa_handler = old_act->_sa_handler;
+ target_siginitset(&act.sa_mask, old_act->sa_mask);
+ act.sa_flags = old_act->sa_flags;
+ act.sa_restorer = old_act->sa_restorer;
+ unlock_user_struct(old_act, arg2, 0);
+ pact = &act;
+ } else {
+ pact = NULL;
+ }
+ ret = get_errno(do_sigaction(arg1, pact, &oact));
+ if (!is_error(ret) && arg3) {
+ if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
+ goto efault;
+ old_act->_sa_handler = oact._sa_handler;
+ old_act->sa_mask = oact.sa_mask.sig[0];
+ old_act->sa_flags = oact.sa_flags;
+ old_act->sa_restorer = oact.sa_restorer;
+ unlock_user_struct(old_act, arg3, 1);
+ }
#endif
}
break;
#endif
case TARGET_NR_rt_sigaction:
{
+#if defined(TARGET_ALPHA)
+ struct target_sigaction act, oact, *pact = 0;
+ struct target_rt_sigaction *rt_act;
+ /* ??? arg4 == sizeof(sigset_t). */
+ if (arg2) {
+ if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
+ goto efault;
+ act._sa_handler = rt_act->_sa_handler;
+ act.sa_mask = rt_act->sa_mask;
+ act.sa_flags = rt_act->sa_flags;
+ act.sa_restorer = arg5;
+ unlock_user_struct(rt_act, arg2, 0);
+ pact = &act;
+ }
+ ret = get_errno(do_sigaction(arg1, pact, &oact));
+ if (!is_error(ret) && arg3) {
+ if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
+ goto efault;
+ rt_act->_sa_handler = oact._sa_handler;
+ rt_act->sa_mask = oact.sa_mask;
+ rt_act->sa_flags = oact.sa_flags;
+ unlock_user_struct(rt_act, arg3, 1);
+ }
+#else
struct target_sigaction *act;
struct target_sigaction *oact;
unlock_user_struct(act, arg2, 0);
if (oact)
unlock_user_struct(oact, arg3, 1);
+#endif
}
break;
#ifdef TARGET_NR_sgetmask /* not on alpha */
#ifdef TARGET_NR_sigprocmask
case TARGET_NR_sigprocmask:
{
- int how = arg1;
+#if defined(TARGET_ALPHA)
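+ /* The Alpha syscall takes the new mask by value in a register and
+ returns the old mask in v0 instead of going through pointers. */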
+ sigset_t set, oldset;
+ abi_ulong mask;
+ int how;
+
+ switch (arg1) {
+ case TARGET_SIG_BLOCK:
+ how = SIG_BLOCK;
+ break;
+ case TARGET_SIG_UNBLOCK:
+ how = SIG_UNBLOCK;
+ break;
+ case TARGET_SIG_SETMASK:
+ how = SIG_SETMASK;
+ break;
+ default:
+ ret = -TARGET_EINVAL;
+ goto fail;
+ }
+ mask = arg2;
+ target_to_host_old_sigset(&set, &mask);
+
+ ret = get_errno(sigprocmask(how, &set, &oldset));
+
+ if (!is_error(ret)) {
+ host_to_target_old_sigset(&mask, &oldset);
+ ret = mask;
+ ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
+ }
+#else
sigset_t set, oldset, *set_ptr;
+ int how;
if (arg2) {
- switch(how) {
+ switch (arg1) {
case TARGET_SIG_BLOCK:
how = SIG_BLOCK;
break;
how = 0;
set_ptr = NULL;
}
- ret = get_errno(sigprocmask(arg1, set_ptr, &oldset));
+ ret = get_errno(sigprocmask(how, set_ptr, &oldset));
if (!is_error(ret) && arg3) {
if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
goto efault;
host_to_target_old_sigset(p, &oldset);
unlock_user(p, arg3, sizeof(target_sigset_t));
}
+#endif
}
break;
#endif
case TARGET_NR_sigsuspend:
{
sigset_t set;
+#if defined(TARGET_ALPHA)
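+ /* Alpha passes the signal mask by value rather than by pointer. */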
+ abi_ulong mask = arg1;
+ target_to_host_old_sigset(&set, &mask);
+#else
if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
goto efault;
target_to_host_old_sigset(&set, p);
unlock_user(p, arg1, 0);
+#endif
ret = get_errno(sigsuspend(&set));
}
break;
break;
case TARGET_NR_setrlimit:
{
- /* XXX: convert resource ? */
int resource = arg1;
struct target_rlimit *target_rlim;
struct rlimit rlim;
if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
goto efault;
- rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
- rlim.rlim_max = tswapl(target_rlim->rlim_max);
+ rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
+ rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
unlock_user_struct(target_rlim, arg2, 0);
ret = get_errno(setrlimit(resource, &rlim));
}
break;
case TARGET_NR_getrlimit:
{
- /* XXX: convert resource ? */
int resource = arg1;
struct target_rlimit *target_rlim;
struct rlimit rlim;
if (!is_error(ret)) {
if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
goto efault;
- target_rlim->rlim_cur = tswapl(rlim.rlim_cur);
- target_rlim->rlim_max = tswapl(rlim.rlim_max);
+ target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
+ target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
unlock_user_struct(target_rlim, arg2, 1);
}
}
ret = do_select(nsel, inp, outp, exp, tvp);
}
break;
+#endif
+#ifdef TARGET_NR_pselect6
+ case TARGET_NR_pselect6:
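+ /* Fail quietly; glibc can emulate pselect with sigprocmask
+ and select when the syscall reports ENOSYS. */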
+ goto unimplemented_nowarn;
#endif
case TARGET_NR_symlink:
{
ret = get_errno(target_munmap(arg1, arg2));
break;
case TARGET_NR_mprotect:
+ {
+ TaskState *ts = ((CPUState *)cpu_env)->opaque;
+ /* Special hack to detect libc making the stack executable. */
+ if ((arg3 & PROT_GROWSDOWN)
+ && arg1 >= ts->info->stack_limit
+ && arg1 <= ts->info->start_stack) {
+ arg3 &= ~PROT_GROWSDOWN;
+ arg2 = arg2 + arg1 - ts->info->stack_limit;
+ arg1 = ts->info->stack_limit;
+ }
+ }
ret = get_errno(target_mprotect(arg1, arg2, arg3));
break;
#ifdef TARGET_NR_mremap
ret = get_errno(fsync(arg1));
break;
case TARGET_NR_clone:
-#if defined(TARGET_SH4)
+#if defined(TARGET_SH4) || defined(TARGET_ALPHA)
ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#elif defined(TARGET_CRIS)
ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
#ifdef TARGET_NR__llseek /* Not on alpha */
case TARGET_NR__llseek:
{
-#if defined (__x86_64__)
- ret = get_errno(lseek(arg1, ((uint64_t )arg2 << 32) | arg3, arg5));
- if (put_user_s64(ret, arg4))
- goto efault;
-#else
int64_t res;
+#if !defined(__NR_llseek)
+ res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
+ if (res == -1) {
+ ret = get_errno(res);
+ } else {
+ ret = 0;
+ }
+#else
ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
- if (put_user_s64(res, arg4))
- goto efault;
#endif
+ if ((ret == 0) && put_user_s64(res, arg4)) {
+ goto efault;
+ }
}
break;
#endif
ret = do_select(arg1, arg2, arg3, arg4, arg5);
break;
#endif
-#ifdef TARGET_NR_poll
+#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
+# ifdef TARGET_NR_poll
case TARGET_NR_poll:
+# endif
+# ifdef TARGET_NR_ppoll
+ case TARGET_NR_ppoll:
+# endif
{
struct target_pollfd *target_pfd;
unsigned int nfds = arg2;
target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
if (!target_pfd)
goto efault;
+
pfd = alloca(sizeof(struct pollfd) * nfds);
for(i = 0; i < nfds; i++) {
pfd[i].fd = tswap32(target_pfd[i].fd);
pfd[i].events = tswap16(target_pfd[i].events);
}
- ret = get_errno(poll(pfd, nfds, timeout));
+
+# ifdef TARGET_NR_ppoll
+ if (num == TARGET_NR_ppoll) {
+ struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
+ target_sigset_t *target_set;
+ sigset_t _set, *set = &_set;
+
+ if (arg3) {
+ if (target_to_host_timespec(timeout_ts, arg3)) {
+ unlock_user(target_pfd, arg1, 0);
+ goto efault;
+ }
+ } else {
+ timeout_ts = NULL;
+ }
+
+ if (arg4) {
+ target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
+ if (!target_set) {
+ unlock_user(target_pfd, arg1, 0);
+ goto efault;
+ }
+ target_to_host_sigset(set, target_set);
+ } else {
+ set = NULL;
+ }
+
+ ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
+
+ if (!is_error(ret) && arg3) {
+ host_to_target_timespec(arg3, timeout_ts);
+ }
+ if (arg4) {
+ unlock_user(target_set, arg4, 0);
+ }
+ } else
+# endif
+ ret = get_errno(poll(pfd, nfds, timeout));
+
if (!is_error(ret)) {
for(i = 0; i < nfds; i++) {
target_pfd[i].revents = tswap16(pfd[i].revents);
}
- ret += nfds * (sizeof(struct target_pollfd)
- - sizeof(struct pollfd));
}
- unlock_user(target_pfd, arg1, ret);
+ unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
}
break;
#endif
return value. */
ret = -TARGET_ENOTDIR;
break;
+ case TARGET_NR_sched_getaffinity:
+ {
+ unsigned int mask_size;
+ unsigned long *mask;
+
+ /*
+ * sched_getaffinity needs multiples of ulong, so we need to take
+ * care of mismatches between target ulong and host ulong sizes.
+ */
+ if (arg2 & (sizeof(abi_ulong) - 1)) {
+ ret = -TARGET_EINVAL;
+ break;
+ }
+ mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
+
+ mask = alloca(mask_size);
+ ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
+
+ if (!is_error(ret)) {
+ if (arg2 > ret) {
+ /* Zero out any extra space the kernel didn't fill */
+ unsigned long zero = arg2 - ret;
+ p = alloca(zero);
+ memset(p, 0, zero);
+ if (copy_to_user(arg3 + ret, p, zero)) {
+ goto efault;
+ }
+ arg2 = ret;
+ }
+ if (copy_to_user(arg3, mask, arg2)) {
+ goto efault;
+ }
+ ret = arg2;
+ }
+ }
+ break;
+ case TARGET_NR_sched_setaffinity:
+ {
+ unsigned int mask_size;
+ unsigned long *mask;
+
+ /*
+ * sched_setaffinity needs multiples of ulong, so we need to take
+ * care of mismatches between target ulong and host ulong sizes.
+ */
+ if (arg2 & (sizeof(abi_ulong) - 1)) {
+ ret = -TARGET_EINVAL;
+ break;
+ }
+ mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
+
+ mask = alloca(mask_size);
+ /* arg2 may be shorter than the rounded-up mask_size; don't pass
+ stack garbage to the kernel in the tail. */
+ memset(mask, 0, mask_size);
+ p = lock_user(VERIFY_READ, arg3, arg2, 1);
+ if (!p) {
+ goto efault;
+ }
+ memcpy(mask, p, arg2);
+ unlock_user(p, arg3, 0);
+
+ ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
+ }
+ break;
case TARGET_NR_sched_setparam:
{
struct sched_param *target_schp;
struct target_rlimit *target_rlim;
if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
goto efault;
- target_rlim->rlim_cur = tswapl(rlim.rlim_cur);
- target_rlim->rlim_max = tswapl(rlim.rlim_max);
+ target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
+ target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
unlock_user_struct(target_rlim, arg2, 1);
}
break;
ret = get_errno(sys_inotify_init());
break;
#endif
+#ifdef CONFIG_INOTIFY1
+#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
+ case TARGET_NR_inotify_init1:
+ ret = get_errno(sys_inotify_init1(arg1));
+ break;
+#endif
+#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
case TARGET_NR_inotify_add_watch:
p = lock_user_string(arg2);
case TARGET_NR_fallocate:
ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
break;
+#endif
+#if defined(CONFIG_SYNC_FILE_RANGE)
+#if defined(TARGET_NR_sync_file_range)
+ case TARGET_NR_sync_file_range:
+#if TARGET_ABI_BITS == 32
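+ /* 32-bit ABIs pass each 64-bit offset in a pair of registers. */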
+ ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
+ target_offset64(arg4, arg5), arg6));
+#else
+ ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
+#endif
+ break;
+#endif
+#if defined(TARGET_NR_sync_file_range2)
+ case TARGET_NR_sync_file_range2:
+ /* This is like sync_file_range but the arguments are reordered */
+#if TARGET_ABI_BITS == 32
+ ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
+ target_offset64(arg5, arg6), arg2));
+#else
+ ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
+#endif
+ break;
+#endif
+#endif
+#if defined(CONFIG_EPOLL)
+#if defined(TARGET_NR_epoll_create)
+ case TARGET_NR_epoll_create:
+ ret = get_errno(epoll_create(arg1));
+ break;
+#endif
+#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
+ case TARGET_NR_epoll_create1:
+ ret = get_errno(epoll_create1(arg1));
+ break;
+#endif
+#if defined(TARGET_NR_epoll_ctl)
+ case TARGET_NR_epoll_ctl:
+ {
+ struct epoll_event ep;
+ struct epoll_event *epp = 0;
+ if (arg4) {
+ struct target_epoll_event *target_ep;
+ if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
+ goto efault;
+ }
+ ep.events = tswap32(target_ep->events);
+ /* The epoll_data_t union is just opaque data to the kernel,
+ * so we transfer all 64 bits across and need not worry what
+ * actual data type it is.
+ */
+ ep.data.u64 = tswap64(target_ep->data.u64);
+ unlock_user_struct(target_ep, arg4, 0);
+ epp = &ep;
+ }
+ ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
+ break;
+ }
+#endif
+
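+/* epoll_pwait needs both a target syscall number and host support, so
+   fold the two conditions into one convenience define. */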
+#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
+#define IMPLEMENT_EPOLL_PWAIT
+#endif
+#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
+#if defined(TARGET_NR_epoll_wait)
+ case TARGET_NR_epoll_wait:
+#endif
+#if defined(IMPLEMENT_EPOLL_PWAIT)
+ case TARGET_NR_epoll_pwait:
+#endif
+ {
+ struct target_epoll_event *target_ep;
+ struct epoll_event *ep;
+ int epfd = arg1;
+ int maxevents = arg3;
+ int timeout = arg4;
+
+ target_ep = lock_user(VERIFY_WRITE, arg2,
+ maxevents * sizeof(struct target_epoll_event), 1);
+ if (!target_ep) {
+ goto efault;
+ }
+
+ ep = alloca(maxevents * sizeof(struct epoll_event));
+
+ switch (num) {
+#if defined(IMPLEMENT_EPOLL_PWAIT)
+ case TARGET_NR_epoll_pwait:
+ {
+ target_sigset_t *target_set;
+ sigset_t _set, *set = &_set;
+
+ if (arg5) {
+ target_set = lock_user(VERIFY_READ, arg5,
+ sizeof(target_sigset_t), 1);
+ if (!target_set) {
+ unlock_user(target_ep, arg2, 0);
+ goto efault;
+ }
+ target_to_host_sigset(set, target_set);
+ unlock_user(target_set, arg5, 0);
+ } else {
+ set = NULL;
+ }
+
+ ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
+ break;
+ }
+#endif
+#if defined(TARGET_NR_epoll_wait)
+ case TARGET_NR_epoll_wait:
+ ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
+ break;
+#endif
+ default:
+ ret = -TARGET_ENOSYS;
+ }
+ if (!is_error(ret)) {
+ int i;
+ for (i = 0; i < ret; i++) {
+ target_ep[i].events = tswap32(ep[i].events);
+ target_ep[i].data.u64 = tswap64(ep[i].data.u64);
+ }
+ }
+ unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
+ break;
+ }
+#endif
#endif
default:
unimplemented: