#include <fcntl.h>
#include <time.h>
#include <limits.h>
+#include <grp.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <sys/mount.h>
+#include <sys/file.h>
+#include <sys/fsuid.h>
+#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
-#include "qemu-xattr.h"
+#include "qemu/xattr.h"
+#endif
+#ifdef CONFIG_SENDFILE
+#include <sys/sendfile.h>
#endif
#define termios host_termios
#include <linux/fb.h>
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
+#include <linux/reboot.h>
#include "linux_loop.h"
#include "cpu-uname.h"
struct host_rlimit64 *, old_limit)
#endif
-extern int personality(int);
-extern int flock(int, int);
-extern int setfsuid(int);
-extern int setfsgid(int);
-extern int setgroups(int, gid_t *);
-
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
-#ifdef TARGET_ARM
+#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
+#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
+/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
+ * of registers, which works out the same as ARM/MIPS because we start with
+ * r3 as arg1 */
+static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
for (i = 0; i < nw; i++) {
v = 0;
for (j = 0; j < TARGET_ABI_BITS; j++) {
- v |= ((FD_ISSET(k, fds) != 0) << j);
+ v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
k++;
}
__put_user(v, &target_fds[i]);
break;
case TARGET_SOL_SOCKET:
switch (optname) {
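+    /* SO_RCVTIMEO and SO_SNDTIMEO take a struct timeval, so the target
+     * timeval must be converted to the host layout before calling
+     * setsockopt(); SO_SNDTIMEO shares the code via the set_timeout label. */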
+ case TARGET_SO_RCVTIMEO:
+ {
+ struct timeval tv;
+
+ optname = SO_RCVTIMEO;
+
+set_timeout:
+ if (optlen != sizeof(struct target_timeval)) {
+ return -TARGET_EINVAL;
+ }
+
+ if (copy_from_user_timeval(&tv, optval_addr)) {
+ return -TARGET_EFAULT;
+ }
+
+ ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
+ &tv, sizeof(tv)));
+ return ret;
+ }
+ case TARGET_SO_SNDTIMEO:
+ optname = SO_SNDTIMEO;
+ goto set_timeout;
/* Options with 'int' argument. */
case TARGET_SO_DEBUG:
optname = SO_DEBUG;
case TARGET_SO_RCVLOWAT:
optname = SO_RCVLOWAT;
break;
- case TARGET_SO_RCVTIMEO:
- optname = SO_RCVTIMEO;
- break;
- case TARGET_SO_SNDTIMEO:
- optname = SO_SNDTIMEO;
- break;
break;
default:
goto unimplemented;
return ret;
}
-/* FIXME
- * lock_iovec()/unlock_iovec() have a return code of 0 for success where
- * other lock functions have a return code of 0 for failure.
- */
-static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
- int count, int copy)
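+/* Copy the guest iovec array at target_addr into a freshly allocated host
+ * iovec array, locking each referenced buffer into host memory.  Returns
+ * NULL with errno set on failure; the result must be released with
+ * unlock_iovec(), which also frees the array. */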
+static struct iovec *lock_iovec(int type, abi_ulong target_addr,
+ int count, int copy)
{
struct target_iovec *target_vec;
- abi_ulong base;
+ struct iovec *vec;
+ abi_ulong total_len, max_len;
int i;
- target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
- if (!target_vec)
- return -TARGET_EFAULT;
- for(i = 0;i < count; i++) {
- base = tswapal(target_vec[i].iov_base);
- vec[i].iov_len = tswapal(target_vec[i].iov_len);
- if (vec[i].iov_len != 0) {
- vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
- /* Don't check lock_user return value. We must call writev even
- if a element has invalid base address. */
+ if (count == 0) {
+ errno = 0;
+ return NULL;
+ }
+ if (count < 0 || count > IOV_MAX) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ vec = calloc(count, sizeof(struct iovec));
+ if (vec == NULL) {
+ errno = ENOMEM;
+ return NULL;
+ }
+
+ target_vec = lock_user(VERIFY_READ, target_addr,
+ count * sizeof(struct target_iovec), 1);
+ if (target_vec == NULL) {
+ errno = EFAULT;
+ goto fail2;
+ }
+
+ /* ??? If host page size > target page size, this will result in a
+ value larger than what we can actually support. */
+ max_len = 0x7fffffff & TARGET_PAGE_MASK;
+ total_len = 0;
+
+ for (i = 0; i < count; i++) {
+ abi_ulong base = tswapal(target_vec[i].iov_base);
+ abi_long len = tswapal(target_vec[i].iov_len);
+
+ if (len < 0) {
+ errno = EINVAL;
+ goto fail;
+ } else if (len == 0) {
+ /* Zero length pointer is ignored. */
+ vec[i].iov_base = 0;
} else {
- /* zero length pointer is ignored */
- vec[i].iov_base = NULL;
+ vec[i].iov_base = lock_user(type, base, len, copy);
+ if (!vec[i].iov_base) {
+ errno = EFAULT;
+ goto fail;
+ }
+ if (len > max_len - total_len) {
+ len = max_len - total_len;
+ }
}
+ vec[i].iov_len = len;
+ total_len += len;
}
- unlock_user (target_vec, target_addr, 0);
- return 0;
+
+ unlock_user(target_vec, target_addr, 0);
+ return vec;
+
+ fail:
+ free(vec);
+ fail2:
+ unlock_user(target_vec, target_addr, 0);
+ return NULL;
}
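+/* Undo lock_iovec(): unlock every guest buffer (copying it back to guest
+ * memory when 'copy' is set) and free the host iovec array. */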
-static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
- int count, int copy)
+static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
+ int count, int copy)
{
struct target_iovec *target_vec;
- abi_ulong base;
int i;
- target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
- if (!target_vec)
- return -TARGET_EFAULT;
- for(i = 0;i < count; i++) {
- if (target_vec[i].iov_base) {
- base = tswapal(target_vec[i].iov_base);
+ target_vec = lock_user(VERIFY_READ, target_addr,
+ count * sizeof(struct target_iovec), 1);
+ if (target_vec) {
+ for (i = 0; i < count; i++) {
+ abi_ulong base = tswapal(target_vec[i].iov_base);
+            abi_long len = tswapal(target_vec[i].iov_len);
+ if (len < 0) {
+ break;
+ }
unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
}
+ unlock_user(target_vec, target_addr, 0);
}
- unlock_user (target_vec, target_addr, 0);
- return 0;
+ free(vec);
}
/* do_socket() Must return target values and target errnos. */
ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
msg.msg_namelen);
if (ret) {
- unlock_user_struct(msgp, target_msg, send ? 0 : 1);
- return ret;
+ goto out2;
}
} else {
msg.msg_name = NULL;
msg.msg_flags = tswap32(msgp->msg_flags);
count = tswapal(msgp->msg_iovlen);
- vec = alloca(count * sizeof(struct iovec));
target_vec = tswapal(msgp->msg_iov);
- lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
+ vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
+ target_vec, count, send);
+ if (vec == NULL) {
+ ret = -host_to_target_errno(errno);
+ goto out2;
+ }
msg.msg_iovlen = count;
msg.msg_iov = vec;
out:
unlock_iovec(vec, target_vec, count, !send);
+out2:
unlock_user_struct(msgp, target_msg, send ? 0 : 1);
return ret;
}
-/* do_accept() Must return target values and target errnos. */
-static abi_long do_accept(int fd, abi_ulong target_addr,
- abi_ulong target_addrlen_addr)
+/* If we don't have a system accept4() then just call accept.
+ * The callsites to do_accept4() will ensure that they don't
+ * pass a non-zero flags argument in this config.
+ */
+#ifndef CONFIG_ACCEPT4
+static inline int accept4(int sockfd, struct sockaddr *addr,
+ socklen_t *addrlen, int flags)
+{
+ assert(flags == 0);
+ return accept(sockfd, addr, addrlen);
+}
+#endif
+
+/* do_accept4() Must return target values and target errnos. */
+static abi_long do_accept4(int fd, abi_ulong target_addr,
+ abi_ulong target_addrlen_addr, int flags)
{
socklen_t addrlen;
void *addr;
abi_long ret;
- if (target_addr == 0)
- return get_errno(accept(fd, NULL, NULL));
+ if (target_addr == 0) {
+ return get_errno(accept4(fd, NULL, NULL, flags));
+ }
/* linux returns EINVAL if addrlen pointer is invalid */
if (get_user_u32(addrlen, target_addrlen_addr))
addr = alloca(addrlen);
- ret = get_errno(accept(fd, addr, &addrlen));
+ ret = get_errno(accept4(fd, addr, &addrlen, flags));
if (!is_error(ret)) {
host_to_target_sockaddr(target_addr, addr, addrlen);
if (put_user_u32(addrlen, target_addrlen_addr))
|| get_user_ual(target_addrlen, vptr + 2 * n))
return -TARGET_EFAULT;
- ret = do_accept(sockfd, target_addr, target_addrlen);
+ ret = do_accept4(sockfd, target_addr, target_addrlen, 0);
}
break;
case SOCKOP_getsockname:
if (target_to_host_sembuf(sops, ptr, nsops))
return -TARGET_EFAULT;
- return semop(semid, sops, nsops);
+ return get_errno(semop(semid, sops, nsops));
}
struct target_msqid_ds
return -TARGET_EFAULT;
host_mb = g_malloc(msgsz+sizeof(long));
- ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg));
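+    /* msgtyp arrives here in host byte order; the ipc() dispatcher now does
+     * the tswapal() on the value it reads from guest memory. */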
+ ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
if (ret > 0) {
abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
break;
}
- ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
+ ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
unlock_user_struct(tmp, ptr, 0);
break;
{
new_thread_info *info = arg;
CPUArchState *env;
+ CPUState *cpu;
TaskState *ts;
env = info->env;
+ cpu = ENV_GET_CPU(env);
thread_env = env;
ts = (TaskState *)thread_env->opaque;
info->tid = gettid();
- env->host_tid = info->tid;
+ cpu->host_tid = info->tid;
task_settid(ts);
if (info->child_tidptr)
put_user_u32(info->tid, info->child_tidptr);
return -TARGET_EINVAL;
}
+#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
+static const bitmask_transtbl flock_tbl[] = {
+ TRANSTBL_CONVERT(F_RDLCK),
+ TRANSTBL_CONVERT(F_WRLCK),
+ TRANSTBL_CONVERT(F_UNLCK),
+ TRANSTBL_CONVERT(F_EXLCK),
+ TRANSTBL_CONVERT(F_SHLCK),
+ { 0, 0, 0, 0 }
+};
+
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
struct flock fl;
case TARGET_F_GETLK:
if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
return -TARGET_EFAULT;
- fl.l_type = tswap16(target_fl->l_type);
+ fl.l_type =
+ target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
fl.l_whence = tswap16(target_fl->l_whence);
fl.l_start = tswapal(target_fl->l_start);
fl.l_len = tswapal(target_fl->l_len);
if (ret == 0) {
if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
return -TARGET_EFAULT;
- target_fl->l_type = tswap16(fl.l_type);
+ target_fl->l_type =
+ host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
target_fl->l_whence = tswap16(fl.l_whence);
target_fl->l_start = tswapal(fl.l_start);
target_fl->l_len = tswapal(fl.l_len);
case TARGET_F_SETLKW:
if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
return -TARGET_EFAULT;
- fl.l_type = tswap16(target_fl->l_type);
+ fl.l_type =
+ target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
fl.l_whence = tswap16(target_fl->l_whence);
fl.l_start = tswapal(target_fl->l_start);
fl.l_len = tswapal(target_fl->l_len);
case TARGET_F_GETLK64:
if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
return -TARGET_EFAULT;
- fl64.l_type = tswap16(target_fl64->l_type) >> 1;
+ fl64.l_type =
+ target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
fl64.l_whence = tswap16(target_fl64->l_whence);
fl64.l_start = tswap64(target_fl64->l_start);
fl64.l_len = tswap64(target_fl64->l_len);
if (ret == 0) {
if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
return -TARGET_EFAULT;
- target_fl64->l_type = tswap16(fl64.l_type) >> 1;
+ target_fl64->l_type =
+ host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
target_fl64->l_whence = tswap16(fl64.l_whence);
target_fl64->l_start = tswap64(fl64.l_start);
target_fl64->l_len = tswap64(fl64.l_len);
case TARGET_F_SETLKW64:
if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
return -TARGET_EFAULT;
- fl64.l_type = tswap16(target_fl64->l_type) >> 1;
+ fl64.l_type =
+ target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
fl64.l_whence = tswap16(target_fl64->l_whence);
fl64.l_start = tswap64(target_fl64->l_start);
fl64.l_len = tswap64(target_fl64->l_len);
#endif
switch (base_op) {
case FUTEX_WAIT:
+ case FUTEX_WAIT_BITSET:
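+        /* The kernel ignores val3 for FUTEX_WAIT and uses it as the wakeup
+         * bitset for FUTEX_WAIT_BITSET, so it is safe to pass it through
+         * for both operations. */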
if (timeout) {
pts = &ts;
target_to_host_timespec(pts, timeout);
pts = NULL;
}
return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
- pts, NULL, 0));
+ pts, NULL, val3));
case FUTEX_WAKE:
return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
case FUTEX_FD:
/* Map host to target signal numbers for the wait family of syscalls.
Assume all other status bits are the same. */
-static int host_to_target_waitstatus(int status)
+int host_to_target_waitstatus(int status)
{
if (WIFSIGNALED(status)) {
return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
}
if (h2g_valid(min) && h2g_valid(max)) {
dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
- " %c%c%c%c %08" PRIx64 " %02x:%02x %d%s%s\n",
+ " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
h2g(min), h2g(max), flag_r, flag_w,
flag_x, flag_p, offset, dev_maj, dev_min, inode,
- path[0] ? " " : "", path);
+ path[0] ? " " : "", path);
}
}
NULL, NULL, 0);
}
thread_env = NULL;
- object_delete(OBJECT(ENV_GET_CPU(cpu_env)));
+ object_unref(OBJECT(ENV_GET_CPU(cpu_env)));
g_free(ts);
pthread_exit(NULL);
}
ret = get_errno(settimeofday(&tv, NULL));
}
break;
-#if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
+#if defined(TARGET_NR_select)
case TARGET_NR_select:
+#if defined(TARGET_S390X) || defined(TARGET_ALPHA)
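+    /* These targets pass the five select arguments directly rather than
+     * through a sel_arg_struct. */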
+ ret = do_select(arg1, arg2, arg3, arg4, arg5);
+#else
{
struct target_sel_arg_struct *sel;
abi_ulong inp, outp, exp, tvp;
unlock_user_struct(sel, arg1, 0);
ret = do_select(nsel, inp, outp, exp, tvp);
}
+#endif
break;
#endif
#ifdef TARGET_NR_pselect6
break;
#endif
case TARGET_NR_reboot:
- if (!(p = lock_user_string(arg4)))
- goto efault;
- ret = reboot(arg1, arg2, arg3, p);
- unlock_user(p, arg4, 0);
+ if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
+        /* arg4 (the command string) is only used with
+           LINUX_REBOOT_CMD_RESTART2; it must be ignored for all other
+           reboot commands */
+ p = lock_user_string(arg4);
+ if (!p) {
+ goto efault;
+ }
+ ret = get_errno(reboot(arg1, arg2, arg3, p));
+ unlock_user(p, arg4, 0);
+ } else {
+ ret = get_errno(reboot(arg1, arg2, arg3, NULL));
+ }
break;
#ifdef TARGET_NR_readdir
case TARGET_NR_readdir:
#endif
#ifdef TARGET_NR_accept
case TARGET_NR_accept:
- ret = do_accept(arg1, arg2, arg3);
+ ret = do_accept4(arg1, arg2, arg3, 0);
+ break;
+#endif
+#ifdef TARGET_NR_accept4
+ case TARGET_NR_accept4:
+#ifdef CONFIG_ACCEPT4
+ ret = do_accept4(arg1, arg2, arg3, arg4);
+#else
+ goto unimplemented;
+#endif
break;
#endif
#ifdef TARGET_NR_bind
#endif
#ifdef TARGET_NR_semop
case TARGET_NR_semop:
- ret = get_errno(do_semop(arg1, arg2, arg3));
+ ret = do_semop(arg1, arg2, arg3);
break;
#endif
#ifdef TARGET_NR_semctl
ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#elif defined(TARGET_CRIS)
ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
+#elif defined(TARGET_MICROBLAZE)
+ ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_S390X)
ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
}
break;
#endif /* TARGET_NR_getdents64 */
-#if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
-#ifdef TARGET_S390X
- case TARGET_NR_select:
-#else
+#if defined(TARGET_NR__newselect)
case TARGET_NR__newselect:
-#endif
ret = do_select(arg1, arg2, arg3, arg4, arg5);
break;
#endif
break;
case TARGET_NR_readv:
{
- int count = arg3;
- struct iovec *vec;
-
- vec = alloca(count * sizeof(struct iovec));
- if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
- goto efault;
- ret = get_errno(readv(arg1, vec, count));
- unlock_iovec(vec, arg2, count, 1);
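+            /* lock_iovec() reports failure via errno; there is no host
+             * syscall return value to convert, so build the target errno
+             * by hand. */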
+ struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
+ if (vec != NULL) {
+ ret = get_errno(readv(arg1, vec, arg3));
+ unlock_iovec(vec, arg2, arg3, 1);
+ } else {
+ ret = -host_to_target_errno(errno);
+ }
}
break;
case TARGET_NR_writev:
{
- int count = arg3;
- struct iovec *vec;
-
- vec = alloca(count * sizeof(struct iovec));
- if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
- goto efault;
- ret = get_errno(writev(arg1, vec, count));
- unlock_iovec(vec, arg2, count, 0);
+ struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
+ if (vec != NULL) {
+ ret = get_errno(writev(arg1, vec, arg3));
+ unlock_iovec(vec, arg2, arg3, 0);
+ } else {
+ ret = -host_to_target_errno(errno);
+ }
}
break;
case TARGET_NR_getsid:
goto unimplemented;
#endif
#endif
-#ifdef TARGET_NR_pread
- case TARGET_NR_pread:
- if (regpairs_aligned(cpu_env))
- arg4 = arg5;
- if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
- goto efault;
- ret = get_errno(pread(arg1, p, arg3, arg4));
- unlock_user(p, arg2, ret);
- break;
- case TARGET_NR_pwrite:
- if (regpairs_aligned(cpu_env))
- arg4 = arg5;
- if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
- goto efault;
- ret = get_errno(pwrite(arg1, p, arg3, arg4));
- unlock_user(p, arg2, 0);
- break;
-#endif
#ifdef TARGET_NR_pread64
case TARGET_NR_pread64:
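+        /* Targets that align 64-bit arguments to register pairs insert a
+         * padding argument before the offset, so its two halves arrive in
+         * arg5/arg6 rather than arg4/arg5. */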
+ if (regpairs_aligned(cpu_env)) {
+ arg4 = arg5;
+ arg5 = arg6;
+ }
if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
goto efault;
ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
unlock_user(p, arg2, ret);
break;
case TARGET_NR_pwrite64:
+ if (regpairs_aligned(cpu_env)) {
+ arg4 = arg5;
+ arg5 = arg6;
+ }
if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
goto efault;
ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
#else
goto unimplemented;
#endif
+
+#ifdef CONFIG_SENDFILE
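+    /* The host sendfile() updates the offset in place, so copy it in from
+     * guest memory before the call and back out afterwards; sendfile takes
+     * an abi_long offset while sendfile64 takes a full 64-bit one. */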
case TARGET_NR_sendfile:
+ {
+ off_t *offp = NULL;
+ off_t off;
+ if (arg3) {
+ ret = get_user_sal(off, arg3);
+ if (is_error(ret)) {
+ break;
+ }
+ offp = &off;
+ }
+ ret = get_errno(sendfile(arg1, arg2, offp, arg4));
+ if (!is_error(ret) && arg3) {
+ abi_long ret2 = put_user_sal(off, arg3);
+ if (is_error(ret2)) {
+ ret = ret2;
+ }
+ }
+ break;
+ }
+#ifdef TARGET_NR_sendfile64
+ case TARGET_NR_sendfile64:
+ {
+ off_t *offp = NULL;
+ off_t off;
+ if (arg3) {
+ ret = get_user_s64(off, arg3);
+ if (is_error(ret)) {
+ break;
+ }
+ offp = &off;
+ }
+ ret = get_errno(sendfile(arg1, arg2, offp, arg4));
+ if (!is_error(ret) && arg3) {
+ abi_long ret2 = put_user_s64(off, arg3);
+ if (is_error(ret2)) {
+ ret = ret2;
+ }
+ }
+ break;
+ }
+#endif
+#else
+ case TARGET_NR_sendfile:
+#ifdef TARGET_NR_sendfile64
+ case TARGET_NR_sendfile64:
+#endif
goto unimplemented;
+#endif
+
#ifdef TARGET_NR_getpmsg
case TARGET_NR_getpmsg:
goto unimplemented;
if (gidsetsize == 0)
break;
if (!is_error(ret)) {
- target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
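+            /* Size the guest buffer from sizeof(target_id) rather than
+             * assuming two-byte IDs, since targets without the legacy
+             * 16-bit uid/gid ABI use 32-bit IDs here. */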
+ target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
if (!target_grouplist)
goto efault;
for(i = 0;i < ret; i++)
target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
- unlock_user(target_grouplist, arg2, gidsetsize * 2);
+ unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
}
}
break;
{
int gidsetsize = arg1;
target_id *target_grouplist;
- gid_t *grouplist;
+ gid_t *grouplist = NULL;
int i;
-
- grouplist = alloca(gidsetsize * sizeof(gid_t));
- target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
- if (!target_grouplist) {
- ret = -TARGET_EFAULT;
- goto fail;
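+        /* setgroups(0, NULL) is valid and simply clears the supplementary
+         * group list, so only touch guest memory when there is something
+         * to copy. */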
+ if (gidsetsize) {
+ grouplist = alloca(gidsetsize * sizeof(gid_t));
+ target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
+ if (!target_grouplist) {
+ ret = -TARGET_EFAULT;
+ goto fail;
+ }
+ for (i = 0; i < gidsetsize; i++) {
+ grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
+ }
+ unlock_user(target_grouplist, arg2, 0);
}
- for(i = 0;i < gidsetsize; i++)
- grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
- unlock_user(target_grouplist, arg2, 0);
ret = get_errno(setgroups(gidsetsize, grouplist));
}
break;
#ifdef TARGET_NR_madvise
case TARGET_NR_madvise:
/* A straight passthrough may not be safe because qemu sometimes
- turns private flie-backed mappings into anonymous mappings.
+ turns private file-backed mappings into anonymous mappings.
This will break MADV_DONTNEED.
This is a hint, so ignoring and returning success is ok. */
ret = get_errno(0);
#ifdef TARGET_NR_set_robust_list
case TARGET_NR_set_robust_list:
- goto unimplemented_nowarn;
+ case TARGET_NR_get_robust_list:
+ /* The ABI for supporting robust futexes has userspace pass
+ * the kernel a pointer to a linked list which is updated by
+ * userspace after the syscall; the list is walked by the kernel
+ * when the thread exits. Since the linked list in QEMU guest
+ * memory isn't a valid linked list for the host and we have
+ * no way to reliably intercept the thread-death event, we can't
+ * support these. Silently return ENOSYS so that guest userspace
+ * falls back to a non-robust futex implementation (which should
+ * be OK except in the corner case of the guest crashing while
+ * holding a mutex that is shared with another process via
+ * shared memory).
+ */
+ goto unimplemented_nowarn;
#endif
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
#ifdef TARGET_NR_vmsplice
case TARGET_NR_vmsplice:
{
- int count = arg3;
- struct iovec *vec;
-
- vec = alloca(count * sizeof(struct iovec));
- if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
- goto efault;
- ret = get_errno(vmsplice(arg1, vec, count, arg4));
- unlock_iovec(vec, arg2, count, 0);
+ struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
+ if (vec != NULL) {
+ ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
+ unlock_iovec(vec, arg2, arg3, 0);
+ } else {
+ ret = -host_to_target_errno(errno);
+ }
}
break;
#endif
#endif
#if defined(TARGET_NR_eventfd2)
case TARGET_NR_eventfd2:
- ret = get_errno(eventfd(arg1, arg2));
+ {
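+        /* The guest's EFD_NONBLOCK/EFD_CLOEXEC flag values match its
+         * O_NONBLOCK/O_CLOEXEC, which need not match the host's, so
+         * translate those bits explicitly. */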
+ int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
+ if (arg2 & TARGET_O_NONBLOCK) {
+ host_flags |= O_NONBLOCK;
+ }
+ if (arg2 & TARGET_O_CLOEXEC) {
+ host_flags |= O_CLOEXEC;
+ }
+ ret = get_errno(eventfd(arg1, host_flags));
break;
+ }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
}
break;
}
+#endif
+#ifdef TARGET_NR_gethostname
+ case TARGET_NR_gethostname:
+ {
+ char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
+ if (name) {
+ ret = get_errno(gethostname(name, arg2));
+ unlock_user(name, arg1, arg2);
+ } else {
+ ret = -TARGET_EFAULT;
+ }
+ break;
+ }
#endif
default:
unimplemented: