#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
+#include <sys/timex.h>
#ifdef __ia64__
int __clone2(int (*fn)(void *), void *child_stack_base,
size_t stack_size, int flags, void *arg, ...);
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>
-#include <sys/poll.h>
+#include <poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
+#include <time.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/errqueue.h>
#include "qemu-common.h"
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#include "qemu.h"
-#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
- CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
+#ifndef CLONE_IO
+#define CLONE_IO 0x80000000 /* Clone io context */
+#endif
+
+/* We can't directly call the host clone syscall, because this will
+ * badly confuse libc (breaking mutexes, for example). So we must
+ * divide clone flags into:
+ * * flag combinations that look like pthread_create()
+ * * flag combinations that look like fork()
+ * * flags we can implement within QEMU itself
+ * * flags we can't support and will return an error for
+ */
+/* For thread creation, all these flags must be present; for
+ * fork, none must be present.
+ */
+#define CLONE_THREAD_FLAGS \
+ (CLONE_VM | CLONE_FS | CLONE_FILES | \
+ CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
+
+/* These flags are ignored:
+ * CLONE_DETACHED is now ignored by the kernel;
+ * CLONE_IO is just an optimisation hint to the I/O scheduler
+ */
+#define CLONE_IGNORED_FLAGS \
+ (CLONE_DETACHED | CLONE_IO)
+
+/* Flags for fork which we can implement within QEMU itself */
+#define CLONE_OPTIONAL_FORK_FLAGS \
+ (CLONE_SETTLS | CLONE_PARENT_SETTID | \
+ CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
+
+/* Flags for thread creation which we can implement within QEMU itself */
+#define CLONE_OPTIONAL_THREAD_FLAGS \
+ (CLONE_SETTLS | CLONE_PARENT_SETTID | \
+ CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
+
+#define CLONE_INVALID_FORK_FLAGS \
+ (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
+
+#define CLONE_INVALID_THREAD_FLAGS \
+ (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
+ CLONE_IGNORED_FLAGS))
+
+/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
+ * have almost all been allocated. We cannot support any of
+ * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
+ * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
+ * The checks against the invalid thread masks above will catch these.
+ * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
+ */
//#define DEBUG
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
+#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
loff_t *, res, uint, wh);
#endif
-_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
+_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
+_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
+ siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif
+#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
+_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
+ unsigned long, idx1, unsigned long, idx2)
+#endif
+
static bitmask_transtbl fcntl_flags_tbl[] = {
{ TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
{ TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
{ 0, 0, 0, 0 }
};
+/* QEMU-local mirror of the kernel's IFLA_BR_* bridge netlink attribute
+ * numbering, used below when byte-swapping bridge attributes, so the
+ * code does not depend on which IFLA_BR_* values the host kernel
+ * headers happen to define.  Values are positional: do not reorder.
+ */
+enum {
+    QEMU_IFLA_BR_UNSPEC,
+    QEMU_IFLA_BR_FORWARD_DELAY,
+    QEMU_IFLA_BR_HELLO_TIME,
+    QEMU_IFLA_BR_MAX_AGE,
+    QEMU_IFLA_BR_AGEING_TIME,
+    QEMU_IFLA_BR_STP_STATE,
+    QEMU_IFLA_BR_PRIORITY,
+    QEMU_IFLA_BR_VLAN_FILTERING,
+    QEMU_IFLA_BR_VLAN_PROTOCOL,
+    QEMU_IFLA_BR_GROUP_FWD_MASK,
+    QEMU_IFLA_BR_ROOT_ID,
+    QEMU_IFLA_BR_BRIDGE_ID,
+    QEMU_IFLA_BR_ROOT_PORT,
+    QEMU_IFLA_BR_ROOT_PATH_COST,
+    QEMU_IFLA_BR_TOPOLOGY_CHANGE,
+    QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
+    QEMU_IFLA_BR_HELLO_TIMER,
+    QEMU_IFLA_BR_TCN_TIMER,
+    QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
+    QEMU_IFLA_BR_GC_TIMER,
+    QEMU_IFLA_BR_GROUP_ADDR,
+    QEMU_IFLA_BR_FDB_FLUSH,
+    QEMU_IFLA_BR_MCAST_ROUTER,
+    QEMU_IFLA_BR_MCAST_SNOOPING,
+    QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
+    QEMU_IFLA_BR_MCAST_QUERIER,
+    QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
+    QEMU_IFLA_BR_MCAST_HASH_MAX,
+    QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
+    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
+    QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
+    QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
+    QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
+    QEMU_IFLA_BR_MCAST_QUERY_INTVL,
+    QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
+    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
+    QEMU_IFLA_BR_NF_CALL_IPTABLES,
+    QEMU_IFLA_BR_NF_CALL_IP6TABLES,
+    QEMU_IFLA_BR_NF_CALL_ARPTABLES,
+    QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
+    QEMU_IFLA_BR_PAD,
+    QEMU_IFLA_BR_VLAN_STATS_ENABLED,
+    QEMU_IFLA_BR_MCAST_STATS_ENABLED,
+    QEMU___IFLA_BR_MAX,
+};
+
+/* QEMU-local mirror of the kernel's top-level IFLA_* link attribute
+ * numbering (see linux/if_link.h), referenced by the rtattr swapping
+ * code below.  Values are positional: do not reorder; append new
+ * attributes before QEMU___IFLA_MAX in kernel order.
+ */
+enum {
+    QEMU_IFLA_UNSPEC,
+    QEMU_IFLA_ADDRESS,
+    QEMU_IFLA_BROADCAST,
+    QEMU_IFLA_IFNAME,
+    QEMU_IFLA_MTU,
+    QEMU_IFLA_LINK,
+    QEMU_IFLA_QDISC,
+    QEMU_IFLA_STATS,
+    QEMU_IFLA_COST,
+    QEMU_IFLA_PRIORITY,
+    QEMU_IFLA_MASTER,
+    QEMU_IFLA_WIRELESS,
+    QEMU_IFLA_PROTINFO,
+    QEMU_IFLA_TXQLEN,
+    QEMU_IFLA_MAP,
+    QEMU_IFLA_WEIGHT,
+    QEMU_IFLA_OPERSTATE,
+    QEMU_IFLA_LINKMODE,
+    QEMU_IFLA_LINKINFO,
+    QEMU_IFLA_NET_NS_PID,
+    QEMU_IFLA_IFALIAS,
+    QEMU_IFLA_NUM_VF,
+    QEMU_IFLA_VFINFO_LIST,
+    QEMU_IFLA_STATS64,
+    QEMU_IFLA_VF_PORTS,
+    QEMU_IFLA_PORT_SELF,
+    QEMU_IFLA_AF_SPEC,
+    QEMU_IFLA_GROUP,
+    QEMU_IFLA_NET_NS_FD,
+    QEMU_IFLA_EXT_MASK,
+    QEMU_IFLA_PROMISCUITY,
+    QEMU_IFLA_NUM_TX_QUEUES,
+    QEMU_IFLA_NUM_RX_QUEUES,
+    QEMU_IFLA_CARRIER,
+    QEMU_IFLA_PHYS_PORT_ID,
+    QEMU_IFLA_CARRIER_CHANGES,
+    QEMU_IFLA_PHYS_SWITCH_ID,
+    QEMU_IFLA_LINK_NETNSID,
+    QEMU_IFLA_PHYS_PORT_NAME,
+    QEMU_IFLA_PROTO_DOWN,
+    QEMU_IFLA_GSO_MAX_SEGS,
+    QEMU_IFLA_GSO_MAX_SIZE,
+    QEMU_IFLA_PAD,
+    QEMU_IFLA_XDP,
+    QEMU___IFLA_MAX
+};
+
+/* QEMU-local mirror of the kernel's IFLA_BRPORT_* bridge-port netlink
+ * attribute numbering, used by the bridge slave-data swapping code.
+ * Values are positional: do not reorder.
+ */
+enum {
+    QEMU_IFLA_BRPORT_UNSPEC,
+    QEMU_IFLA_BRPORT_STATE,
+    QEMU_IFLA_BRPORT_PRIORITY,
+    QEMU_IFLA_BRPORT_COST,
+    QEMU_IFLA_BRPORT_MODE,
+    QEMU_IFLA_BRPORT_GUARD,
+    QEMU_IFLA_BRPORT_PROTECT,
+    QEMU_IFLA_BRPORT_FAST_LEAVE,
+    QEMU_IFLA_BRPORT_LEARNING,
+    QEMU_IFLA_BRPORT_UNICAST_FLOOD,
+    QEMU_IFLA_BRPORT_PROXYARP,
+    QEMU_IFLA_BRPORT_LEARNING_SYNC,
+    QEMU_IFLA_BRPORT_PROXYARP_WIFI,
+    QEMU_IFLA_BRPORT_ROOT_ID,
+    QEMU_IFLA_BRPORT_BRIDGE_ID,
+    QEMU_IFLA_BRPORT_DESIGNATED_PORT,
+    QEMU_IFLA_BRPORT_DESIGNATED_COST,
+    QEMU_IFLA_BRPORT_ID,
+    QEMU_IFLA_BRPORT_NO,
+    QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
+    QEMU_IFLA_BRPORT_CONFIG_PENDING,
+    QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
+    QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
+    QEMU_IFLA_BRPORT_HOLD_TIMER,
+    QEMU_IFLA_BRPORT_FLUSH,
+    QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
+    QEMU_IFLA_BRPORT_PAD,
+    QEMU___IFLA_BRPORT_MAX
+};
+
+/* QEMU-local mirror of the kernel's IFLA_INFO_* (nested IFLA_LINKINFO)
+ * attribute numbering.  Values are positional: do not reorder.
+ */
+enum {
+    QEMU_IFLA_INFO_UNSPEC,
+    QEMU_IFLA_INFO_KIND,
+    QEMU_IFLA_INFO_DATA,
+    QEMU_IFLA_INFO_XSTATS,
+    QEMU_IFLA_INFO_SLAVE_KIND,
+    QEMU_IFLA_INFO_SLAVE_DATA,
+    QEMU___IFLA_INFO_MAX,
+};
+
+/* QEMU-local mirror of the kernel's IFLA_INET_* (AF_INET slice of
+ * IFLA_AF_SPEC) attribute numbering.  Values are positional.
+ */
+enum {
+    QEMU_IFLA_INET_UNSPEC,
+    QEMU_IFLA_INET_CONF,
+    QEMU___IFLA_INET_MAX,
+};
+
+/* QEMU-local mirror of the kernel's IFLA_INET6_* (AF_INET6 slice of
+ * IFLA_AF_SPEC) attribute numbering.  Values are positional: do not
+ * reorder.
+ */
+enum {
+    QEMU_IFLA_INET6_UNSPEC,
+    QEMU_IFLA_INET6_FLAGS,
+    QEMU_IFLA_INET6_CONF,
+    QEMU_IFLA_INET6_STATS,
+    QEMU_IFLA_INET6_MCAST,
+    QEMU_IFLA_INET6_CACHEINFO,
+    QEMU_IFLA_INET6_ICMP6STATS,
+    QEMU_IFLA_INET6_TOKEN,
+    QEMU_IFLA_INET6_ADDR_GEN_MODE,
+    QEMU___IFLA_INET6_MAX
+};
+
typedef abi_long (*TargetFdDataFunc)(void *, size_t);
typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
typedef struct TargetFdTrans {
}
#ifdef TARGET_NR_utimensat
-#ifdef CONFIG_UTIMENSAT
-static int sys_utimensat(int dirfd, const char *pathname,
- const struct timespec times[2], int flags)
-{
- if (pathname == NULL)
- return futimens(dirfd, times);
- else
- return utimensat(dirfd, pathname, times, flags);
-}
-#elif defined(__NR_utimensat)
+#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
const struct timespec *,tsp,int,flags)
static inline int regpairs_aligned(void *cpu_env) {
return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
-#elif defined(TARGET_MIPS)
+#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
[ENAVAIL] = TARGET_ENAVAIL,
[EISNAM] = TARGET_EISNAM,
[EREMOTEIO] = TARGET_EREMOTEIO,
+ [EDQUOT] = TARGET_EDQUOT,
[ESHUTDOWN] = TARGET_ESHUTDOWN,
[ETOOMANYREFS] = TARGET_ETOOMANYREFS,
[ETIMEDOUT] = TARGET_ETIMEDOUT,
#ifdef ENOTRECOVERABLE
[ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
+#ifdef ENOMSG
+ [ENOMSG] = TARGET_ENOMSG,
+#endif
+#ifdef ERFKILL
+ [ERFKILL] = TARGET_ERFKILL,
+#endif
+#ifdef EHWPOISON
+ [EHWPOISON] = TARGET_EHWPOISON,
+#endif
};
static inline int host_to_target_errno(int err)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
+safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
+ unsigned long, pos_l, unsigned long, pos_h)
+safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
+ unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
return ret;
}
+
+#if defined(TARGET_WANT_OLD_SYS_SELECT)
+/* Handle the old-style select(2) calling convention, where the target
+ * passes a single guest pointer to a block holding all five arguments
+ * (n, inp, outp, exp, tvp) instead of passing them individually.
+ * Returns -TARGET_EFAULT if the argument block cannot be read;
+ * otherwise returns whatever do_select() produces.
+ */
+static abi_long do_old_select(abi_ulong arg1)
+{
+    struct target_sel_arg_struct *sel;
+    abi_ulong inp, outp, exp, tvp;
+    long nsel;
+
+    /* Map the guest argument block read-only into host memory. */
+    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
+        return -TARGET_EFAULT;
+    }
+
+    /* Swap each field from target to host byte order before use. */
+    nsel = tswapal(sel->n);
+    inp = tswapal(sel->inp);
+    outp = tswapal(sel->outp);
+    exp = tswapal(sel->exp);
+    tvp = tswapal(sel->tvp);
+
+    /* Nothing was written back, so release without copying (0). */
+    unlock_user_struct(sel, arg1, 0);
+
+    /* Delegate to the common select implementation. */
+    return do_select(nsel, inp, outp, exp, tvp);
+}
+#endif
#endif
static abi_long do_pipe2(int host_pipe[], int flags)
struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
+ } else if (addr->sa_family == AF_INET6 &&
+ len >= sizeof(struct target_sockaddr_in6)) {
+ struct target_sockaddr_in6 *target_in6 =
+ (struct target_sockaddr_in6 *)target_saddr;
+        target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
}
unlock_user(target_saddr, target_addr, len);
}
break;
+ case SOL_IP:
+ switch (cmsg->cmsg_type) {
+ case IP_TTL:
+ {
+ uint32_t *v = (uint32_t *)data;
+ uint32_t *t_int = (uint32_t *)target_data;
+
+ __put_user(*v, t_int);
+ break;
+ }
+ case IP_RECVERR:
+ {
+ struct errhdr_t {
+ struct sock_extended_err ee;
+ struct sockaddr_in offender;
+ };
+ struct errhdr_t *errh = (struct errhdr_t *)data;
+ struct errhdr_t *target_errh =
+ (struct errhdr_t *)target_data;
+
+ __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
+ __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
+ __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
+ __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
+ __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
+ __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
+ __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
+ host_to_target_sockaddr((unsigned long) &target_errh->offender,
+ (void *) &errh->offender, sizeof(errh->offender));
+ break;
+ }
+ default:
+ goto unimplemented;
+ }
+ break;
+
+ case SOL_IPV6:
+ switch (cmsg->cmsg_type) {
+ case IPV6_HOPLIMIT:
+ {
+ uint32_t *v = (uint32_t *)data;
+ uint32_t *t_int = (uint32_t *)target_data;
+
+ __put_user(*v, t_int);
+ break;
+ }
+ case IPV6_RECVERR:
+ {
+ struct errhdr6_t {
+ struct sock_extended_err ee;
+ struct sockaddr_in6 offender;
+ };
+ struct errhdr6_t *errh = (struct errhdr6_t *)data;
+ struct errhdr6_t *target_errh =
+ (struct errhdr6_t *)target_data;
+
+ __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
+ __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
+ __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
+ __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
+ __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
+ __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
+ __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
+ host_to_target_sockaddr((unsigned long) &target_errh->offender,
+ (void *) &errh->offender, sizeof(errh->offender));
+ break;
+ }
+ default:
+ goto unimplemented;
+ }
+ break;
+
default:
unimplemented:
gemu_log("Unsupported ancillary data: %d/%d\n",
uint64_t *u64;
switch (nlattr->nla_type) {
-#ifdef IFLA_BR_FDB_FLUSH
/* no data */
- case IFLA_BR_FDB_FLUSH:
+ case QEMU_IFLA_BR_FDB_FLUSH:
break;
-#endif
-#ifdef IFLA_BR_GROUP_ADDR
/* binary */
- case IFLA_BR_GROUP_ADDR:
+ case QEMU_IFLA_BR_GROUP_ADDR:
break;
-#endif
/* uint8_t */
- case IFLA_BR_VLAN_FILTERING:
-#ifdef IFLA_BR_TOPOLOGY_CHANGE
- case IFLA_BR_TOPOLOGY_CHANGE:
-#endif
-#ifdef IFLA_BR_TOPOLOGY_CHANGE_DETECTED
- case IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
-#endif
-#ifdef IFLA_BR_MCAST_ROUTER
- case IFLA_BR_MCAST_ROUTER:
-#endif
-#ifdef IFLA_BR_MCAST_SNOOPING
- case IFLA_BR_MCAST_SNOOPING:
-#endif
-#ifdef IFLA_BR_MCAST_QUERY_USE_IFADDR
- case IFLA_BR_MCAST_QUERY_USE_IFADDR:
-#endif
-#ifdef IFLA_BR_MCAST_QUERIER
- case IFLA_BR_MCAST_QUERIER:
-#endif
-#ifdef IFLA_BR_NF_CALL_IPTABLES
- case IFLA_BR_NF_CALL_IPTABLES:
-#endif
-#ifdef IFLA_BR_NF_CALL_IP6TABLES
- case IFLA_BR_NF_CALL_IP6TABLES:
-#endif
-#ifdef IFLA_BR_NF_CALL_ARPTABLES
- case IFLA_BR_NF_CALL_ARPTABLES:
-#endif
+ case QEMU_IFLA_BR_VLAN_FILTERING:
+ case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
+ case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
+ case QEMU_IFLA_BR_MCAST_ROUTER:
+ case QEMU_IFLA_BR_MCAST_SNOOPING:
+ case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
+ case QEMU_IFLA_BR_MCAST_QUERIER:
+ case QEMU_IFLA_BR_NF_CALL_IPTABLES:
+ case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
+ case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
break;
/* uint16_t */
- case IFLA_BR_PRIORITY:
- case IFLA_BR_VLAN_PROTOCOL:
-#ifdef IFLA_BR_GROUP_FWD_MASK
- case IFLA_BR_GROUP_FWD_MASK:
-#endif
-#ifdef IFLA_BR_ROOT_PORT
- case IFLA_BR_ROOT_PORT:
-#endif
-#ifdef IFLA_BR_VLAN_DEFAULT_PVID
- case IFLA_BR_VLAN_DEFAULT_PVID:
-#endif
+ case QEMU_IFLA_BR_PRIORITY:
+ case QEMU_IFLA_BR_VLAN_PROTOCOL:
+ case QEMU_IFLA_BR_GROUP_FWD_MASK:
+ case QEMU_IFLA_BR_ROOT_PORT:
+ case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
u16 = NLA_DATA(nlattr);
*u16 = tswap16(*u16);
break;
/* uint32_t */
- case IFLA_BR_FORWARD_DELAY:
- case IFLA_BR_HELLO_TIME:
- case IFLA_BR_MAX_AGE:
- case IFLA_BR_AGEING_TIME:
- case IFLA_BR_STP_STATE:
-#ifdef IFLA_BR_ROOT_PATH_COST
- case IFLA_BR_ROOT_PATH_COST:
-#endif
-#ifdef IFLA_BR_MCAST_HASH_ELASTICITY
- case IFLA_BR_MCAST_HASH_ELASTICITY:
-#endif
-#ifdef IFLA_BR_MCAST_HASH_MAX
- case IFLA_BR_MCAST_HASH_MAX:
-#endif
-#ifdef IFLA_BR_MCAST_LAST_MEMBER_CNT
- case IFLA_BR_MCAST_LAST_MEMBER_CNT:
-#endif
-#ifdef IFLA_BR_MCAST_STARTUP_QUERY_CNT
- case IFLA_BR_MCAST_STARTUP_QUERY_CNT:
-#endif
+ case QEMU_IFLA_BR_FORWARD_DELAY:
+ case QEMU_IFLA_BR_HELLO_TIME:
+ case QEMU_IFLA_BR_MAX_AGE:
+ case QEMU_IFLA_BR_AGEING_TIME:
+ case QEMU_IFLA_BR_STP_STATE:
+ case QEMU_IFLA_BR_ROOT_PATH_COST:
+ case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
+ case QEMU_IFLA_BR_MCAST_HASH_MAX:
+ case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
+ case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
u32 = NLA_DATA(nlattr);
*u32 = tswap32(*u32);
break;
/* uint64_t */
-#ifdef IFLA_BR_HELLO_TIMER
- case IFLA_BR_HELLO_TIMER:
-#endif
-#ifdef IFLA_BR_TCN_TIMER
- case IFLA_BR_TCN_TIMER:
-#endif
-#ifdef IFLA_BR_GC_TIMER
- case IFLA_BR_GC_TIMER:
-#endif
-#ifdef IFLA_BR_TOPOLOGY_CHANGE_TIMER
- case IFLA_BR_TOPOLOGY_CHANGE_TIMER:
-#endif
-#ifdef IFLA_BR_MCAST_LAST_MEMBER_INTVL
- case IFLA_BR_MCAST_LAST_MEMBER_INTVL:
-#endif
-#ifdef IFLA_BR_MCAST_MEMBERSHIP_INTVL
- case IFLA_BR_MCAST_MEMBERSHIP_INTVL:
-#endif
-#ifdef IFLA_BR_MCAST_QUERIER_INTVL
- case IFLA_BR_MCAST_QUERIER_INTVL:
-#endif
-#ifdef IFLA_BR_MCAST_QUERY_INTVL
- case IFLA_BR_MCAST_QUERY_INTVL:
-#endif
-#ifdef IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
- case IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
-#endif
-#ifdef IFLA_BR_MCAST_STARTUP_QUERY_INTVL
- case IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
-#endif
+ case QEMU_IFLA_BR_HELLO_TIMER:
+ case QEMU_IFLA_BR_TCN_TIMER:
+ case QEMU_IFLA_BR_GC_TIMER:
+ case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
+ case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
+ case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
+ case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
+ case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
+ case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
+ case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
u64 = NLA_DATA(nlattr);
*u64 = tswap64(*u64);
break;
/* ifla_bridge_id: uin8_t[] */
-#ifdef IFLA_BR_ROOT_ID
- case IFLA_BR_ROOT_ID:
-#endif
-#ifdef IFLA_BR_BRIDGE_ID
- case IFLA_BR_BRIDGE_ID:
-#endif
+ case QEMU_IFLA_BR_ROOT_ID:
+ case QEMU_IFLA_BR_BRIDGE_ID:
break;
default:
- gemu_log("Unknown IFLA_BR type %d\n", nlattr->nla_type);
+ gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
break;
}
return 0;
switch (nlattr->nla_type) {
/* uint8_t */
- case IFLA_BRPORT_STATE:
- case IFLA_BRPORT_MODE:
- case IFLA_BRPORT_GUARD:
- case IFLA_BRPORT_PROTECT:
- case IFLA_BRPORT_FAST_LEAVE:
- case IFLA_BRPORT_LEARNING:
- case IFLA_BRPORT_UNICAST_FLOOD:
- case IFLA_BRPORT_PROXYARP:
- case IFLA_BRPORT_LEARNING_SYNC:
- case IFLA_BRPORT_PROXYARP_WIFI:
-#ifdef IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
- case IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
-#endif
-#ifdef IFLA_BRPORT_CONFIG_PENDING
- case IFLA_BRPORT_CONFIG_PENDING:
-#endif
-#ifdef IFLA_BRPORT_MULTICAST_ROUTER
- case IFLA_BRPORT_MULTICAST_ROUTER:
-#endif
+ case QEMU_IFLA_BRPORT_STATE:
+ case QEMU_IFLA_BRPORT_MODE:
+ case QEMU_IFLA_BRPORT_GUARD:
+ case QEMU_IFLA_BRPORT_PROTECT:
+ case QEMU_IFLA_BRPORT_FAST_LEAVE:
+ case QEMU_IFLA_BRPORT_LEARNING:
+ case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
+ case QEMU_IFLA_BRPORT_PROXYARP:
+ case QEMU_IFLA_BRPORT_LEARNING_SYNC:
+ case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
+ case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
+ case QEMU_IFLA_BRPORT_CONFIG_PENDING:
+ case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
break;
/* uint16_t */
- case IFLA_BRPORT_PRIORITY:
-#ifdef IFLA_BRPORT_DESIGNATED_PORT
- case IFLA_BRPORT_DESIGNATED_PORT:
-#endif
-#ifdef IFLA_BRPORT_DESIGNATED_COST
- case IFLA_BRPORT_DESIGNATED_COST:
-#endif
-#ifdef IFLA_BRPORT_ID
- case IFLA_BRPORT_ID:
-#endif
-#ifdef IFLA_BRPORT_NO
- case IFLA_BRPORT_NO:
-#endif
+ case QEMU_IFLA_BRPORT_PRIORITY:
+ case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
+ case QEMU_IFLA_BRPORT_DESIGNATED_COST:
+ case QEMU_IFLA_BRPORT_ID:
+ case QEMU_IFLA_BRPORT_NO:
u16 = NLA_DATA(nlattr);
*u16 = tswap16(*u16);
break;
/* uin32_t */
- case IFLA_BRPORT_COST:
+ case QEMU_IFLA_BRPORT_COST:
u32 = NLA_DATA(nlattr);
*u32 = tswap32(*u32);
break;
/* uint64_t */
-#ifdef IFLA_BRPORT_MESSAGE_AGE_TIMER
- case IFLA_BRPORT_MESSAGE_AGE_TIMER:
-#endif
-#ifdef IFLA_BRPORT_FORWARD_DELAY_TIMER
- case IFLA_BRPORT_FORWARD_DELAY_TIMER:
-#endif
-#ifdef IFLA_BRPORT_HOLD_TIMER
- case IFLA_BRPORT_HOLD_TIMER:
-#endif
+ case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
+ case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
+ case QEMU_IFLA_BRPORT_HOLD_TIMER:
u64 = NLA_DATA(nlattr);
*u64 = tswap64(*u64);
break;
/* ifla_bridge_id: uint8_t[] */
-#ifdef IFLA_BRPORT_ROOT_ID
- case IFLA_BRPORT_ROOT_ID:
-#endif
-#ifdef IFLA_BRPORT_BRIDGE_ID
- case IFLA_BRPORT_BRIDGE_ID:
-#endif
+ case QEMU_IFLA_BRPORT_ROOT_ID:
+ case QEMU_IFLA_BRPORT_BRIDGE_ID:
break;
default:
- gemu_log("Unknown IFLA_BRPORT type %d\n", nlattr->nla_type);
+ gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
break;
}
return 0;
switch (nlattr->nla_type) {
/* string */
- case IFLA_INFO_KIND:
+ case QEMU_IFLA_INFO_KIND:
li_context->name = NLA_DATA(nlattr);
li_context->len = nlattr->nla_len - NLA_HDRLEN;
break;
- case IFLA_INFO_SLAVE_KIND:
+ case QEMU_IFLA_INFO_SLAVE_KIND:
li_context->slave_name = NLA_DATA(nlattr);
li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
break;
/* stats */
- case IFLA_INFO_XSTATS:
+ case QEMU_IFLA_INFO_XSTATS:
/* FIXME: only used by CAN */
break;
/* nested */
- case IFLA_INFO_DATA:
+ case QEMU_IFLA_INFO_DATA:
if (strncmp(li_context->name, "bridge",
li_context->len) == 0) {
return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
NULL,
host_to_target_data_bridge_nlattr);
} else {
- gemu_log("Unknown IFLA_INFO_KIND %s\n", li_context->name);
+ gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
}
break;
- case IFLA_INFO_SLAVE_DATA:
+ case QEMU_IFLA_INFO_SLAVE_DATA:
if (strncmp(li_context->slave_name, "bridge",
li_context->slave_len) == 0) {
return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
NULL,
host_to_target_slave_data_bridge_nlattr);
} else {
- gemu_log("Unknown IFLA_INFO_SLAVE_KIND %s\n",
+ gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
li_context->slave_name);
}
break;
default:
- gemu_log("Unknown host IFLA_INFO type: %d\n", nlattr->nla_type);
+ gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
break;
}
int i;
switch (nlattr->nla_type) {
- case IFLA_INET_CONF:
+ case QEMU_IFLA_INET_CONF:
u32 = NLA_DATA(nlattr);
for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
i++) {
switch (nlattr->nla_type) {
/* binaries */
- case IFLA_INET6_TOKEN:
+ case QEMU_IFLA_INET6_TOKEN:
break;
/* uint8_t */
- case IFLA_INET6_ADDR_GEN_MODE:
+ case QEMU_IFLA_INET6_ADDR_GEN_MODE:
break;
/* uint32_t */
- case IFLA_INET6_FLAGS:
+ case QEMU_IFLA_INET6_FLAGS:
u32 = NLA_DATA(nlattr);
*u32 = tswap32(*u32);
break;
/* uint32_t[] */
- case IFLA_INET6_CONF:
+ case QEMU_IFLA_INET6_CONF:
u32 = NLA_DATA(nlattr);
for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
i++) {
}
break;
/* ifla_cacheinfo */
- case IFLA_INET6_CACHEINFO:
+ case QEMU_IFLA_INET6_CACHEINFO:
ci = NLA_DATA(nlattr);
ci->max_reasm_len = tswap32(ci->max_reasm_len);
ci->tstamp = tswap32(ci->tstamp);
ci->retrans_time = tswap32(ci->retrans_time);
break;
/* uint64_t[] */
- case IFLA_INET6_STATS:
- case IFLA_INET6_ICMP6STATS:
+ case QEMU_IFLA_INET6_STATS:
+ case QEMU_IFLA_INET6_ICMP6STATS:
u64 = NLA_DATA(nlattr);
for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
i++) {
switch (rtattr->rta_type) {
/* binary stream */
- case IFLA_ADDRESS:
- case IFLA_BROADCAST:
+ case QEMU_IFLA_ADDRESS:
+ case QEMU_IFLA_BROADCAST:
/* string */
- case IFLA_IFNAME:
- case IFLA_QDISC:
+ case QEMU_IFLA_IFNAME:
+ case QEMU_IFLA_QDISC:
break;
/* uin8_t */
- case IFLA_OPERSTATE:
- case IFLA_LINKMODE:
- case IFLA_CARRIER:
- case IFLA_PROTO_DOWN:
+ case QEMU_IFLA_OPERSTATE:
+ case QEMU_IFLA_LINKMODE:
+ case QEMU_IFLA_CARRIER:
+ case QEMU_IFLA_PROTO_DOWN:
break;
/* uint32_t */
- case IFLA_MTU:
- case IFLA_LINK:
- case IFLA_WEIGHT:
- case IFLA_TXQLEN:
- case IFLA_CARRIER_CHANGES:
- case IFLA_NUM_RX_QUEUES:
- case IFLA_NUM_TX_QUEUES:
- case IFLA_PROMISCUITY:
- case IFLA_EXT_MASK:
- case IFLA_LINK_NETNSID:
- case IFLA_GROUP:
- case IFLA_MASTER:
- case IFLA_NUM_VF:
+ case QEMU_IFLA_MTU:
+ case QEMU_IFLA_LINK:
+ case QEMU_IFLA_WEIGHT:
+ case QEMU_IFLA_TXQLEN:
+ case QEMU_IFLA_CARRIER_CHANGES:
+ case QEMU_IFLA_NUM_RX_QUEUES:
+ case QEMU_IFLA_NUM_TX_QUEUES:
+ case QEMU_IFLA_PROMISCUITY:
+ case QEMU_IFLA_EXT_MASK:
+ case QEMU_IFLA_LINK_NETNSID:
+ case QEMU_IFLA_GROUP:
+ case QEMU_IFLA_MASTER:
+ case QEMU_IFLA_NUM_VF:
+ case QEMU_IFLA_GSO_MAX_SEGS:
+ case QEMU_IFLA_GSO_MAX_SIZE:
u32 = RTA_DATA(rtattr);
*u32 = tswap32(*u32);
break;
/* struct rtnl_link_stats */
- case IFLA_STATS:
+ case QEMU_IFLA_STATS:
st = RTA_DATA(rtattr);
st->rx_packets = tswap32(st->rx_packets);
st->tx_packets = tswap32(st->tx_packets);
st->tx_compressed = tswap32(st->tx_compressed);
break;
/* struct rtnl_link_stats64 */
- case IFLA_STATS64:
+ case QEMU_IFLA_STATS64:
st64 = RTA_DATA(rtattr);
st64->rx_packets = tswap64(st64->rx_packets);
st64->tx_packets = tswap64(st64->tx_packets);
st64->tx_compressed = tswap64(st64->tx_compressed);
break;
/* struct rtnl_link_ifmap */
- case IFLA_MAP:
+ case QEMU_IFLA_MAP:
map = RTA_DATA(rtattr);
map->mem_start = tswap64(map->mem_start);
map->mem_end = tswap64(map->mem_end);
map->irq = tswap16(map->irq);
break;
/* nested */
- case IFLA_LINKINFO:
+ case QEMU_IFLA_LINKINFO:
memset(&li_context, 0, sizeof(li_context));
return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
&li_context,
host_to_target_data_linkinfo_nlattr);
- case IFLA_AF_SPEC:
+ case QEMU_IFLA_AF_SPEC:
return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
NULL,
host_to_target_data_spec_nlattr);
default:
- gemu_log("Unknown host IFLA type: %d\n", rtattr->rta_type);
+ gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
break;
}
return 0;
{
switch (rtattr->rta_type) {
default:
- gemu_log("Unknown target IFLA type: %d\n", rtattr->rta_type);
+ gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
break;
}
return 0;
case RTA_GATEWAY:
break;
/* u32 */
+ case RTA_PRIORITY:
case RTA_OIF:
u32 = RTA_DATA(rtattr);
*u32 = tswap32(*u32);
case IP_PKTINFO:
case IP_MTU_DISCOVER:
case IP_RECVERR:
+ case IP_RECVTTL:
case IP_RECVTOS:
#ifdef IP_FREEBIND
case IP_FREEBIND:
case IPV6_MTU:
case IPV6_V6ONLY:
case IPV6_RECVPKTINFO:
+ case IPV6_UNICAST_HOPS:
+ case IPV6_RECVERR:
+ case IPV6_RECVHOPLIMIT:
+ case IPV6_2292HOPLIMIT:
+ case IPV6_CHECKSUM:
val = 0;
if (optlen < sizeof(uint32_t)) {
return -TARGET_EINVAL;
ret = get_errno(setsockopt(sockfd, level, optname,
&val, sizeof(val)));
break;
+ case IPV6_PKTINFO:
+ {
+ struct in6_pktinfo pki;
+
+ if (optlen < sizeof(pki)) {
+ return -TARGET_EINVAL;
+ }
+
+ if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
+ return -TARGET_EFAULT;
+ }
+
+ pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
+
+ ret = get_errno(setsockopt(sockfd, level, optname,
+ &pki, sizeof(pki)));
+ break;
+ }
+ default:
+ goto unimplemented;
+ }
+ break;
+ case SOL_ICMPV6:
+ switch (optname) {
+ case ICMPV6_FILTER:
+ {
+ struct icmp6_filter icmp6f;
+
+ if (optlen > sizeof(icmp6f)) {
+ optlen = sizeof(icmp6f);
+ }
+
+ if (copy_from_user(&icmp6f, optval_addr, optlen)) {
+ return -TARGET_EFAULT;
+ }
+
+ for (val = 0; val < 8; val++) {
+ icmp6f.data[val] = tswap32(icmp6f.data[val]);
+ }
+
+ ret = get_errno(setsockopt(sockfd, level, optname,
+ &icmp6f, optlen));
+ break;
+ }
default:
goto unimplemented;
}
case SOL_RAW:
switch (optname) {
case ICMP_FILTER:
- /* struct icmp_filter takes an u32 value */
+ case IPV6_CHECKSUM:
+ /* those take an u32 value */
if (optlen < sizeof(uint32_t)) {
return -TARGET_EINVAL;
}
}
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
- int count, int copy)
+ abi_ulong count, int copy)
{
struct target_iovec *target_vec;
struct iovec *vec;
errno = 0;
return NULL;
}
- if (count < 0 || count > IOV_MAX) {
+ if (count > IOV_MAX) {
errno = EINVAL;
return NULL;
}
}
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
- int count, int copy)
+ abi_ulong count, int copy)
{
struct target_iovec *target_vec;
int i;
{
abi_long ret, len;
struct msghdr msg;
- int count;
+ abi_ulong count;
struct iovec *vec;
abi_ulong target_vec;
ret = target_to_host_sockaddr(fd, msg.msg_name,
tswapal(msgp->msg_name),
msg.msg_namelen);
- if (ret) {
+ if (ret == -TARGET_EFAULT) {
+ /* For connected sockets msg_name and msg_namelen must
+ * be ignored, so returning EFAULT immediately is wrong.
+ * Instead, pass a bad msg_name to the host kernel, and
+ * let it decide whether to return EFAULT or not.
+ */
+ msg.msg_name = (void *)-1;
+ } else if (ret) {
goto out2;
}
} else {
count = tswapal(msgp->msg_iovlen);
target_vec = tswapal(msgp->msg_iov);
+
+ if (count > IOV_MAX) {
+ /* sendrcvmsg returns a different errno for this condition than
+ * readv/writev, so we must catch it here before lock_iovec() does.
+ */
+ ret = -TARGET_EMSGSIZE;
+ goto out2;
+ }
+
vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
target_vec, count, send);
if (vec == NULL) {
}
if (!is_error(ret)) {
msgp->msg_namelen = tswap32(msg.msg_namelen);
- if (msg.msg_name != NULL) {
+ if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
msg.msg_name, msg.msg_namelen);
if (ret) {
}
#ifdef TARGET_NR_socketcall
-/* do_socketcall() Must return target values and target errnos. */
+/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
- static const unsigned ac[] = { /* number of arguments per call */
- [SOCKOP_socket] = 3, /* domain, type, protocol */
- [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
- [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
- [SOCKOP_listen] = 2, /* sockfd, backlog */
- [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
- [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
- [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
- [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
- [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
- [SOCKOP_send] = 4, /* sockfd, msg, len, flags */
- [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
- [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
- [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
- [SOCKOP_shutdown] = 2, /* sockfd, how */
- [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
- [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
- [SOCKOP_sendmmsg] = 4, /* sockfd, msgvec, vlen, flags */
- [SOCKOP_recvmmsg] = 4, /* sockfd, msgvec, vlen, flags */
- [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
- [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
+ static const unsigned nargs[] = { /* number of arguments per operation */
+ [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
+ [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
+ [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
+ [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
+ [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
+ [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
+ [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
+ [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
+ [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
+ [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
+ [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
+ [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
+ [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
+ [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
+ [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
+ [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
+ [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
+ [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
+ [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
+ [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
};
abi_long a[6]; /* max 6 args */
+ unsigned i;
- /* first, collect the arguments in a[] according to ac[] */
- if (num >= 0 && num < ARRAY_SIZE(ac)) {
- unsigned i;
- assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
- for (i = 0; i < ac[num]; ++i) {
- if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
- return -TARGET_EFAULT;
- }
+ /* check the range of the first argument num */
+ /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
+ if (num < 1 || num > TARGET_SYS_SENDMMSG) {
+ return -TARGET_EINVAL;
+ }
+ /* ensure we have space for args */
+ if (nargs[num] > ARRAY_SIZE(a)) {
+ return -TARGET_EINVAL;
+ }
+ /* collect the arguments in a[] according to nargs[] */
+ for (i = 0; i < nargs[num]; ++i) {
+ if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
+ return -TARGET_EFAULT;
}
}
-
- /* now when we have the args, actually handle the call */
+ /* now that we have the args, invoke the appropriate underlying function */
switch (num) {
- case SOCKOP_socket: /* domain, type, protocol */
+ case TARGET_SYS_SOCKET: /* domain, type, protocol */
return do_socket(a[0], a[1], a[2]);
- case SOCKOP_bind: /* sockfd, addr, addrlen */
+ case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
return do_bind(a[0], a[1], a[2]);
- case SOCKOP_connect: /* sockfd, addr, addrlen */
+ case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
return do_connect(a[0], a[1], a[2]);
- case SOCKOP_listen: /* sockfd, backlog */
+ case TARGET_SYS_LISTEN: /* sockfd, backlog */
return get_errno(listen(a[0], a[1]));
- case SOCKOP_accept: /* sockfd, addr, addrlen */
+ case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
return do_accept4(a[0], a[1], a[2], 0);
- case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
- return do_accept4(a[0], a[1], a[2], a[3]);
- case SOCKOP_getsockname: /* sockfd, addr, addrlen */
+ case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
return do_getsockname(a[0], a[1], a[2]);
- case SOCKOP_getpeername: /* sockfd, addr, addrlen */
+ case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
return do_getpeername(a[0], a[1], a[2]);
- case SOCKOP_socketpair: /* domain, type, protocol, tab */
+ case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
return do_socketpair(a[0], a[1], a[2], a[3]);
- case SOCKOP_send: /* sockfd, msg, len, flags */
+ case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
- case SOCKOP_recv: /* sockfd, msg, len, flags */
+ case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
- case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
+ case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
- case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
+ case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
- case SOCKOP_shutdown: /* sockfd, how */
+ case TARGET_SYS_SHUTDOWN: /* sockfd, how */
return get_errno(shutdown(a[0], a[1]));
- case SOCKOP_sendmsg: /* sockfd, msg, flags */
+ case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
+ return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
+ case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
+ return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
+ case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
return do_sendrecvmsg(a[0], a[1], a[2], 1);
- case SOCKOP_recvmsg: /* sockfd, msg, flags */
+ case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
return do_sendrecvmsg(a[0], a[1], a[2], 0);
- case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
- return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
- case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
+ case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
+ return do_accept4(a[0], a[1], a[2], a[3]);
+ case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
- case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
- return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
- case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
- return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
+ case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
+ return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
default:
gemu_log("Unsupported socketcall: %d\n", num);
- return -TARGET_ENOSYS;
+ return -TARGET_EINVAL;
}
}
#endif
return ret;
}
-static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
+#ifndef TARGET_FORCE_SHMLBA
+/* For most architectures, SHMLBA is the same as the page size;
+ * some architectures have larger values, in which case they should
+ * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
+ * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
+ * and defining its own value for SHMLBA.
+ *
+ * The kernel also permits SHMLBA to be set by the architecture to a
+ * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
+ * this means that addresses are rounded to the large size if
+ * SHM_RND is set but addresses not aligned to that size are not rejected
+ * as long as they are at least page-aligned. Since the only architecture
+ * which uses this is ia64, this code doesn't provide for that oddity.
+ */
+static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
+{
+ return TARGET_PAGE_SIZE;
+}
+#endif
+
+static inline abi_ulong do_shmat(CPUArchState *cpu_env,
+ int shmid, abi_ulong shmaddr, int shmflg)
{
abi_long raddr;
void *host_raddr;
struct shmid_ds shm_info;
int i,ret;
+ abi_ulong shmlba;
/* find out the length of the shared memory segment */
ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
return ret;
}
+ shmlba = target_shmlba(cpu_env);
+
+ if (shmaddr & (shmlba - 1)) {
+ if (shmflg & SHM_RND) {
+ shmaddr &= ~(shmlba - 1);
+ } else {
+ return -TARGET_EINVAL;
+ }
+ }
+
mmap_lock();
if (shmaddr)
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
-static abi_long do_ipc(unsigned int call, abi_long first,
+static abi_long do_ipc(CPUArchState *cpu_env,
+ unsigned int call, abi_long first,
abi_long second, abi_long third,
abi_long ptr, abi_long fifth)
{
default:
{
abi_ulong raddr;
- raddr = do_shmat(first, ptr, second);
+ raddr = do_shmat(cpu_env, first, ptr, second);
if (is_error(raddr))
return get_errno(raddr);
if (put_user_ual(raddr, third))
guest_data = arg + host_dm->data_start;
if ((guest_data - arg) < 0) {
- ret = -EINVAL;
+ ret = -TARGET_EINVAL;
goto out;
}
guest_data_size = host_dm->data_size - host_dm->data_start;
host_data = (char*)host_dm + host_dm->data_start;
argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
+ if (!argptr) {
+ ret = -TARGET_EFAULT;
+ goto out;
+ }
+
switch (ie->host_cmd) {
case DM_REMOVE_ALL:
case DM_LIST_DEVICES:
{ TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
{ TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
+#define IOCTL_IGNORE(cmd) \
+ { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
{ 0, 0, },
};
#endif
if (ie->do_ioctl) {
return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
+ } else if (!ie->host_cmd) {
+ /* Some architectures define BSD ioctls in their headers
+ that are not implemented in Linux. */
+ return -TARGET_ENOSYS;
}
switch(arg_type[0]) {
TaskState *ts;
CPUState *new_cpu;
CPUArchState *new_env;
- unsigned int nptl_flags;
sigset_t sigmask;
+ flags &= ~CLONE_IGNORED_FLAGS;
+
/* Emulate vfork() with fork() */
if (flags & CLONE_VFORK)
flags &= ~(CLONE_VFORK | CLONE_VM);
new_thread_info info;
pthread_attr_t attr;
+ if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
+ (flags & CLONE_INVALID_THREAD_FLAGS)) {
+ return -TARGET_EINVAL;
+ }
+
ts = g_new0(TaskState, 1);
init_task_state(ts);
/* we create a new CPU instance. */
ts->bprm = parent_ts->bprm;
ts->info = parent_ts->info;
ts->signal_mask = parent_ts->signal_mask;
- nptl_flags = flags;
- flags &= ~CLONE_NPTL_FLAGS2;
- if (nptl_flags & CLONE_CHILD_CLEARTID) {
+ if (flags & CLONE_CHILD_CLEARTID) {
ts->child_tidptr = child_tidptr;
}
- if (nptl_flags & CLONE_SETTLS)
+ if (flags & CLONE_SETTLS) {
cpu_set_tls (new_env, newtls);
+ }
/* Grab a mutex so that thread setup appears atomic. */
pthread_mutex_lock(&clone_lock);
pthread_mutex_lock(&info.mutex);
pthread_cond_init(&info.cond, NULL);
info.env = new_env;
- if (nptl_flags & CLONE_CHILD_SETTID)
+ if (flags & CLONE_CHILD_SETTID) {
info.child_tidptr = child_tidptr;
- if (nptl_flags & CLONE_PARENT_SETTID)
+ }
+ if (flags & CLONE_PARENT_SETTID) {
info.parent_tidptr = parent_tidptr;
+ }
ret = pthread_attr_init(&attr);
ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
sigfillset(&sigmask);
sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
+ /* If this is our first additional thread, we need to ensure we
+ * generate code for parallel execution and flush old translations.
+ */
+ if (!parallel_cpus) {
+ parallel_cpus = true;
+ tb_flush(cpu);
+ }
+
ret = pthread_create(&info.thread, &attr, clone_func, &info);
/* TODO: Free new CPU state if thread creation failed. */
/* Wait for the child to initialize. */
pthread_cond_wait(&info.cond, &info.mutex);
ret = info.tid;
- if (flags & CLONE_PARENT_SETTID)
- put_user_u32(ret, parent_tidptr);
} else {
ret = -1;
}
pthread_mutex_unlock(&clone_lock);
} else {
/* if no CLONE_VM, we consider it is a fork */
- if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
+ if (flags & CLONE_INVALID_FORK_FLAGS) {
+ return -TARGET_EINVAL;
+ }
+
+ /* We can't support custom termination signals */
+ if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
return -TARGET_EINVAL;
}
return 0;
}
+static inline abi_long target_to_host_timex(struct timex *host_tx,
+ abi_long target_addr)
+{
+ struct target_timex *target_tx;
+
+ if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
+ return -TARGET_EFAULT;
+ }
+
+ __get_user(host_tx->modes, &target_tx->modes);
+ __get_user(host_tx->offset, &target_tx->offset);
+ __get_user(host_tx->freq, &target_tx->freq);
+ __get_user(host_tx->maxerror, &target_tx->maxerror);
+ __get_user(host_tx->esterror, &target_tx->esterror);
+ __get_user(host_tx->status, &target_tx->status);
+ __get_user(host_tx->constant, &target_tx->constant);
+ __get_user(host_tx->precision, &target_tx->precision);
+ __get_user(host_tx->tolerance, &target_tx->tolerance);
+ __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
+ __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
+ __get_user(host_tx->tick, &target_tx->tick);
+ __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
+ __get_user(host_tx->jitter, &target_tx->jitter);
+ __get_user(host_tx->shift, &target_tx->shift);
+ __get_user(host_tx->stabil, &target_tx->stabil);
+ __get_user(host_tx->jitcnt, &target_tx->jitcnt);
+ __get_user(host_tx->calcnt, &target_tx->calcnt);
+ __get_user(host_tx->errcnt, &target_tx->errcnt);
+ __get_user(host_tx->stbcnt, &target_tx->stbcnt);
+ __get_user(host_tx->tai, &target_tx->tai);
+
+ unlock_user_struct(target_tx, target_addr, 0);
+ return 0;
+}
+
+static inline abi_long host_to_target_timex(abi_long target_addr,
+ struct timex *host_tx)
+{
+ struct target_timex *target_tx;
+
+ if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
+ return -TARGET_EFAULT;
+ }
+
+ __put_user(host_tx->modes, &target_tx->modes);
+ __put_user(host_tx->offset, &target_tx->offset);
+ __put_user(host_tx->freq, &target_tx->freq);
+ __put_user(host_tx->maxerror, &target_tx->maxerror);
+ __put_user(host_tx->esterror, &target_tx->esterror);
+ __put_user(host_tx->status, &target_tx->status);
+ __put_user(host_tx->constant, &target_tx->constant);
+ __put_user(host_tx->precision, &target_tx->precision);
+ __put_user(host_tx->tolerance, &target_tx->tolerance);
+ __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
+ __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
+ __put_user(host_tx->tick, &target_tx->tick);
+ __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
+ __put_user(host_tx->jitter, &target_tx->jitter);
+ __put_user(host_tx->shift, &target_tx->shift);
+ __put_user(host_tx->stabil, &target_tx->stabil);
+ __put_user(host_tx->jitcnt, &target_tx->jitcnt);
+ __put_user(host_tx->calcnt, &target_tx->calcnt);
+ __put_user(host_tx->errcnt, &target_tx->errcnt);
+ __put_user(host_tx->stbcnt, &target_tx->stbcnt);
+ __put_user(host_tx->tai, &target_tx->tai);
+
+ unlock_user_struct(target_tx, target_addr, 1);
+ return 0;
+}
+
+
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
abi_ulong target_addr)
{
static int open_self_cmdline(void *cpu_env, int fd)
{
- int fd_orig = -1;
- bool word_skipped = false;
-
- fd_orig = open("/proc/self/cmdline", O_RDONLY);
- if (fd_orig < 0) {
- return fd_orig;
- }
+ CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
+ struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
+ int i;
- while (true) {
- ssize_t nb_read;
- char buf[128];
- char *cp_buf = buf;
+ for (i = 0; i < bprm->argc; i++) {
+ size_t len = strlen(bprm->argv[i]) + 1;
- nb_read = read(fd_orig, buf, sizeof(buf));
- if (nb_read < 0) {
- int e = errno;
- fd_orig = close(fd_orig);
- errno = e;
+ if (write(fd, bprm->argv[i], len) != len) {
return -1;
- } else if (nb_read == 0) {
- break;
- }
-
- if (!word_skipped) {
- /* Skip the first string, which is the path to qemu-*-static
- instead of the actual command. */
- cp_buf = memchr(buf, 0, nb_read);
- if (cp_buf) {
- /* Null byte found, skip one string */
- cp_buf++;
- nb_read -= cp_buf - buf;
- word_skipped = true;
- }
- }
-
- if (word_skipped) {
- if (write(fd, cp_buf, nb_read) != nb_read) {
- int e = errno;
- close(fd_orig);
- errno = e;
- return -1;
- }
}
}
- return close(fd_orig);
+ return 0;
}
static int open_self_maps(void *cpu_env, int fd)
return timerid;
}
+static abi_long swap_data_eventfd(void *buf, size_t len)
+{
+ uint64_t *counter = buf;
+ int i;
+
+ if (len < sizeof(uint64_t)) {
+ return -EINVAL;
+ }
+
+ for (i = 0; i < len; i += sizeof(uint64_t)) {
+ *counter = tswap64(*counter);
+ counter++;
+ }
+
+ return len;
+}
+
+static TargetFdTrans target_eventfd_trans = {
+ .host_to_target_data = swap_data_eventfd,
+ .target_to_host_data = swap_data_eventfd,
+};
+
+#if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
+ (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
+ defined(__NR_inotify_init1))
+static abi_long host_to_target_data_inotify(void *buf, size_t len)
+{
+ struct inotify_event *ev;
+ int i;
+ uint32_t name_len;
+
+ for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
+ ev = (struct inotify_event *)((char *)buf + i);
+ name_len = ev->len;
+
+ ev->wd = tswap32(ev->wd);
+ ev->mask = tswap32(ev->mask);
+ ev->cookie = tswap32(ev->cookie);
+ ev->len = tswap32(name_len);
+ }
+
+ return len;
+}
+
+static TargetFdTrans target_inotify_trans = {
+ .host_to_target_data = host_to_target_data_inotify,
+};
+#endif
+
/* do_syscall() should always have a single exit point at the end so
that actions, such as logging of syscall results, can be performed.
All errnos that do_syscall() returns must be -TARGET_<errcode>. */
break;
}
+ cpu_list_lock();
+
if (CPU_NEXT(first_cpu)) {
TaskState *ts;
- cpu_list_lock();
/* Remove the CPU from the list. */
QTAILQ_REMOVE(&cpus, cpu, node);
+
cpu_list_unlock();
+
ts = cpu->opaque;
if (ts->child_tidptr) {
put_user_u32(0, ts->child_tidptr);
rcu_unregister_thread();
pthread_exit(NULL);
}
+
+ cpu_list_unlock();
#ifdef TARGET_GPROF
_mcleanup();
#endif
case TARGET_NR_write:
if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
goto efault;
- ret = get_errno(safe_write(arg1, p, arg3));
+ if (fd_trans_target_to_host_data(arg1)) {
+ void *copy = g_malloc(arg3);
+ memcpy(copy, p, arg3);
+ ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
+ if (ret >= 0) {
+ ret = get_errno(safe_write(arg1, copy, ret));
+ }
+ g_free(copy);
+ } else {
+ ret = get_errno(safe_write(arg1, p, arg3));
+ }
unlock_user(p, arg2, 0);
break;
#ifdef TARGET_NR_open
break;
#ifdef TARGET_NR_fork
case TARGET_NR_fork:
- ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
+ ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
break;
#endif
#ifdef TARGET_NR_waitpid
envc++;
}
- argp = alloca((argc + 1) * sizeof(void *));
- envp = alloca((envc + 1) * sizeof(void *));
+ argp = g_new0(char *, argc + 1);
+ envp = g_new0(char *, envc + 1);
for (gp = guest_argp, q = argp; gp;
gp += sizeof(abi_ulong), q++) {
break;
unlock_user(*q, addr, 0);
}
+
+ g_free(argp);
+ g_free(envp);
}
break;
case TARGET_NR_chdir:
sync();
ret = 0;
break;
+#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
+ case TARGET_NR_syncfs:
+ ret = get_errno(syncfs(arg1));
+ break;
+#endif
case TARGET_NR_kill:
ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
break;
#ifdef TARGET_NR_ssetmask /* not on alpha */
case TARGET_NR_ssetmask:
{
- sigset_t set, oset, cur_set;
+ sigset_t set, oset;
abi_ulong target_set = arg1;
- /* We only have one word of the new mask so we must read
- * the rest of it with do_sigprocmask() and OR in this word.
- * We are guaranteed that a do_sigprocmask() that only queries
- * the signal mask will not fail.
- */
- ret = do_sigprocmask(0, NULL, &cur_set);
- assert(!ret);
target_to_host_old_sigset(&set, &target_set);
- sigorset(&set, &set, &cur_set);
ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
if (!ret) {
host_to_target_old_sigset(&target_set, &oset);
goto efault;
}
target_to_host_siginfo(&uinfo, p);
- unlock_user(p, arg1, 0);
+ unlock_user(p, arg3, 0);
ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
}
break;
+ case TARGET_NR_rt_tgsigqueueinfo:
+ {
+ siginfo_t uinfo;
+
+ p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
+ if (!p) {
+ goto efault;
+ }
+ target_to_host_siginfo(&uinfo, p);
+ unlock_user(p, arg4, 0);
+ ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
+ }
+ break;
#ifdef TARGET_NR_sigreturn
case TARGET_NR_sigreturn:
if (block_signals()) {
break;
#if defined(TARGET_NR_select)
case TARGET_NR_select:
-#if defined(TARGET_S390X) || defined(TARGET_ALPHA)
- ret = do_select(arg1, arg2, arg3, arg4, arg5);
+#if defined(TARGET_WANT_NI_OLD_SELECT)
+ /* Some architectures used to have an old_select implementation here,
+ * but now return ENOSYS for it.
+ */
+ ret = -TARGET_ENOSYS;
+#elif defined(TARGET_WANT_OLD_SYS_SELECT)
+ ret = do_old_select(arg1);
#else
- {
- struct target_sel_arg_struct *sel;
- abi_ulong inp, outp, exp, tvp;
- long nsel;
-
- if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
- goto efault;
- nsel = tswapal(sel->n);
- inp = tswapal(sel->inp);
- outp = tswapal(sel->outp);
- exp = tswapal(sel->exp);
- tvp = tswapal(sel->tvp);
- unlock_user_struct(sel, arg1, 0);
- ret = do_select(nsel, inp, outp, exp, tvp);
- }
+ ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
break;
#endif
#ifdef TARGET_NR_socket
case TARGET_NR_socket:
ret = do_socket(arg1, arg2, arg3);
- fd_trans_unregister(ret);
break;
#endif
#ifdef TARGET_NR_socketpair
ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
break;
#endif
-
+#if defined(TARGET_NR_syslog)
case TARGET_NR_syslog:
- if (!(p = lock_user_string(arg2)))
- goto efault;
- ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
- unlock_user(p, arg2, 0);
- break;
+ {
+ int len = arg2;
+ switch (arg1) {
+ case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
+ case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
+ case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
+ case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
+ case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
+ case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
+ case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
+ case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
+ {
+ ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
+ }
+ break;
+ case TARGET_SYSLOG_ACTION_READ: /* Read from log */
+ case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
+ case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
+ {
+ ret = -TARGET_EINVAL;
+ if (len < 0) {
+ goto fail;
+ }
+ ret = 0;
+ if (len == 0) {
+ break;
+ }
+ p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
+ if (!p) {
+ ret = -TARGET_EFAULT;
+ goto fail;
+ }
+ ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
+ unlock_user(p, arg2, arg3);
+ }
+ break;
+ default:
+ ret = -TARGET_EINVAL;
+ break;
+ }
+ }
+ break;
+#endif
case TARGET_NR_setitimer:
{
struct itimerval value, ovalue, *pvalue;
break;
#ifdef TARGET_NR_ipc
case TARGET_NR_ipc:
- ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
- break;
+ ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
+ break;
#endif
#ifdef TARGET_NR_semget
case TARGET_NR_semget:
#endif
#ifdef TARGET_NR_shmat
case TARGET_NR_shmat:
- ret = do_shmat(arg1, arg2, arg3);
+ ret = do_shmat(cpu_env, arg1, arg2, arg3);
break;
#endif
#ifdef TARGET_NR_shmdt
#endif
#endif
case TARGET_NR_adjtimex:
- goto unimplemented;
+ {
+ struct timex host_buf;
+
+ if (target_to_host_timex(&host_buf, arg1) != 0) {
+ goto efault;
+ }
+ ret = get_errno(adjtimex(&host_buf));
+ if (!is_error(ret)) {
+ if (host_to_target_timex(arg1, &host_buf) != 0) {
+ goto efault;
+ }
+ }
+ }
+ break;
+#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
+ case TARGET_NR_clock_adjtime:
+ {
+ struct timex htx, *phtx = &htx;
+
+ if (target_to_host_timex(phtx, arg2) != 0) {
+ goto efault;
+ }
+ ret = get_errno(clock_adjtime(arg1, phtx));
+ if (!is_error(ret) && phtx) {
+ if (host_to_target_timex(arg2, phtx) != 0) {
+ goto efault;
+ }
+ }
+ }
+ break;
+#endif
#ifdef TARGET_NR_create_module
case TARGET_NR_create_module:
#endif
pfd = NULL;
target_pfd = NULL;
if (nfds) {
+ if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
+ ret = -TARGET_EINVAL;
+ break;
+ }
+
target_pfd = lock_user(VERIFY_WRITE, arg1,
sizeof(struct target_pollfd) * nfds, 1);
if (!target_pfd) {
}
}
break;
+#if defined(TARGET_NR_preadv)
+ case TARGET_NR_preadv:
+ {
+ struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
+ if (vec != NULL) {
+ ret = get_errno(safe_preadv(arg1, vec, arg3, arg4, arg5));
+ unlock_iovec(vec, arg2, arg3, 1);
+ } else {
+ ret = -host_to_target_errno(errno);
+ }
+ }
+ break;
+#endif
+#if defined(TARGET_NR_pwritev)
+ case TARGET_NR_pwritev:
+ {
+ struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
+ if (vec != NULL) {
+ ret = get_errno(safe_pwritev(arg1, vec, arg3, arg4, arg5));
+ unlock_iovec(vec, arg2, arg3, 0);
+ } else {
+ ret = -host_to_target_errno(errno);
+ }
+ }
+ break;
+#endif
case TARGET_NR_getsid:
ret = get_errno(getsid(arg1));
break;
#endif
#ifdef TARGET_NR_vfork
case TARGET_NR_vfork:
- ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
+ ret = get_errno(do_fork(cpu_env,
+ CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
0, 0, 0, 0));
break;
#endif
info.si_code = si_code;
info._sifields._sigfault._addr
= ((CPUArchState *)cpu_env)->pc;
- queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
+ queue_signal((CPUArchState *)cpu_env, info.si_signo,
+ QEMU_SI_FAULT, &info);
}
}
break;
case TARGET_NR_mincore:
{
void *a;
+ ret = -TARGET_ENOMEM;
+ a = lock_user(VERIFY_READ, arg1, arg2, 0);
+ if (!a) {
+ goto fail;
+ }
ret = -TARGET_EFAULT;
- if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
- goto efault;
- if (!(p = lock_user_string(arg3)))
+ p = lock_user_string(arg3);
+ if (!p) {
goto mincore_fail;
+ }
ret = get_errno(mincore(a, arg2, p));
unlock_user(p, arg3, ret);
mincore_fail:
#ifdef TARGET_NR_fadvise64_64
case TARGET_NR_fadvise64_64:
+#if defined(TARGET_PPC)
+ /* 6 args: fd, advice, offset (high, low), len (high, low) */
+ ret = arg2;
+ arg2 = arg3;
+ arg3 = arg4;
+ arg4 = arg5;
+ arg5 = arg6;
+ arg6 = ret;
+#else
/* 6 args: fd, offset (high, low), len (high, low), advice */
if (regpairs_aligned(cpu_env)) {
/* offset is in (3,4), len in (5,6) and advice in 7 */
arg5 = arg6;
arg6 = arg7;
}
+#endif
ret = -host_to_target_errno(posix_fadvise(arg1,
target_offset64(arg2, arg3),
target_offset64(arg4, arg5),
arg3 = arg4;
arg4 = arg5;
}
- ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
+ ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
#else
ret = get_errno(readahead(arg1, arg2, arg3));
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
case TARGET_NR_inotify_init:
ret = get_errno(sys_inotify_init());
+ fd_trans_register(ret, &target_inotify_trans);
break;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
case TARGET_NR_inotify_init1:
- ret = get_errno(sys_inotify_init1(arg1));
+ ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
+ fcntl_flags_tbl)));
+ fd_trans_register(ret, &target_inotify_trans);
break;
#endif
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
case TARGET_NR_mq_open:
{
- struct mq_attr posix_mq_attr, *attrp;
+ struct mq_attr posix_mq_attr;
+ struct mq_attr *pposix_mq_attr;
+ int host_flags;
+ host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
+ pposix_mq_attr = NULL;
+ if (arg4) {
+ if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
+ goto efault;
+ }
+ pposix_mq_attr = &posix_mq_attr;
+ }
p = lock_user_string(arg1 - 1);
- if (arg4 != 0) {
- copy_from_user_mq_attr (&posix_mq_attr, arg4);
- attrp = &posix_mq_attr;
- } else {
- attrp = 0;
+ if (!p) {
+ goto efault;
}
- ret = get_errno(mq_open(p, arg2, arg3, attrp));
+ ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
unlock_user (p, arg1, 0);
}
break;
case TARGET_NR_mq_unlink:
p = lock_user_string(arg1 - 1);
+ if (!p) {
+ ret = -TARGET_EFAULT;
+ break;
+ }
ret = get_errno(mq_unlink(p));
unlock_user (p, arg1, 0);
break;
#if defined(TARGET_NR_eventfd)
case TARGET_NR_eventfd:
ret = get_errno(eventfd(arg1, 0));
- fd_trans_unregister(ret);
+ fd_trans_register(ret, &target_eventfd_trans);
break;
#endif
#if defined(TARGET_NR_eventfd2)
host_flags |= O_CLOEXEC;
}
ret = get_errno(eventfd(arg1, host_flags));
- fd_trans_unregister(ret);
+ fd_trans_register(ret, &target_eventfd_trans);
break;
}
#endif
int maxevents = arg3;
int timeout = arg4;
+ if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
+ ret = -TARGET_EINVAL;
+ break;
+ }
+
target_ep = lock_user(VERIFY_WRITE, arg2,
maxevents * sizeof(struct target_epoll_event), 1);
if (!target_ep) {
goto efault;
}
- ep = alloca(maxevents * sizeof(struct epoll_event));
+ ep = g_try_new(struct epoll_event, maxevents);
+ if (!ep) {
+ unlock_user(target_ep, arg2, 0);
+ ret = -TARGET_ENOMEM;
+ break;
+ }
switch (num) {
#if defined(TARGET_NR_epoll_pwait)
target_set = lock_user(VERIFY_READ, arg5,
sizeof(target_sigset_t), 1);
if (!target_set) {
- unlock_user(target_ep, arg2, 0);
- goto efault;
+ ret = -TARGET_EFAULT;
+ break;
}
target_to_host_sigset(set, target_set);
unlock_user(target_set, arg5, 0);
target_ep[i].events = tswap32(ep[i].events);
target_ep[i].data.u64 = tswap64(ep[i].data.u64);
}
+ unlock_user(target_ep, arg2,
+ ret * sizeof(struct target_epoll_event));
+ } else {
+ unlock_user(target_ep, arg2, 0);
}
- unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
+ g_free(ep);
break;
}
#endif
info.si_errno = 0;
info.si_code = TARGET_SEGV_MAPERR;
info._sifields._sigfault._addr = arg6;
- queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
+ queue_signal((CPUArchState *)cpu_env, info.si_signo,
+ QEMU_SI_FAULT, &info);
ret = 0xdeadbeef;
}
timer_t htimer = g_posix_timers[timerid];
struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
- target_to_host_itimerspec(&hspec_new, arg3);
+ if (target_to_host_itimerspec(&hspec_new, arg3)) {
+ goto efault;
+ }
ret = get_errno(
timer_settime(htimer, arg2, &hspec_new, &hspec_old));
- host_to_target_itimerspec(arg2, &hspec_old);
+ if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
+ goto efault;
+ }
}
break;
}
ret = get_errno(unshare(arg1));
break;
#endif
+#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
+ case TARGET_NR_kcmp:
+ ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
+ break;
+#endif
default:
unimplemented: