*
*/
+#include "qemu/osdep.h"
#include "qemu/iov.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#define MAC_TABLE_ENTRIES 64
#define MAX_VLAN (1 << 12) /* Per 802.1Q definition */
+/* previously fixed value */
+#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
+/* for now, only allow larger queues; with virtio-1, guest can downsize */
+#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
+
/*
* Calculate the number of bytes up to and including the given 'field' of
* 'container'.
memcpy(&netcfg, config, n->config_size);
- if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
- !virtio_has_feature(vdev, VIRTIO_F_VERSION_1) &&
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
+ !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
memcpy(n->mac, netcfg.mac, ETH_ALEN);
qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
if (!n->vhost_started) {
int r, i;
+ if (n->needs_vnet_hdr_swap) {
+ error_report("backend does not support %s vnet headers; "
+ "falling back on userspace virtio",
+ virtio_is_big_endian(vdev) ? "BE" : "LE");
+ return;
+ }
+
/* Any packets outstanding? Purge them to avoid touching rings
* when vhost is running.
*/
}
}
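+/* Program one backend peer to parse vnet headers in the device's
+ * endianness; a negative return value means the peer cannot do it,
+ * which the callers below use to fall back to swapping in software.
+ */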
+static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
+ NetClientState *peer,
+ bool enable)
+{
+ if (virtio_is_big_endian(vdev)) {
+ return qemu_set_vnet_be(peer, enable);
+ } else {
+ return qemu_set_vnet_le(peer, enable);
+ }
+}
+
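+/* Switch every queue's peer; returns true if at least one peer could
+ * not be configured, i.e. the device itself must byteswap vnet headers.
+ * On partial failure the peers already switched are rolled back so all
+ * queues stay consistent.
+ */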
+static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
+ int queues, bool enable)
+{
+ int i;
+
+ for (i = 0; i < queues; i++) {
+ if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
+ enable) {
+ while (--i >= 0) {
+ virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
+ }
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(n);
+ int queues = n->multiqueue ? n->max_queues : 1;
+
+ if (virtio_net_started(n, status)) {
+ /* Before using the device, we tell the network backend about the
+ * endianness to use when parsing vnet headers. If the backend
+         * can't do it, we fall back on fixing the headers in the core
+ * virtio-net code.
+ */
+ n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
+ queues, true);
+ } else if (virtio_net_started(n, vdev->status)) {
+ /* After using the device, we need to reset the network backend to
+ * the default (guest native endianness), otherwise the guest may
+ * lose network connectivity if it is rebooted into a different
+ * endianness.
+ */
+ virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false);
+ }
+}
+
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
VirtIONet *n = VIRTIO_NET(vdev);
int i;
uint8_t queue_status;
+ virtio_net_vnet_endian_status(n, status);
virtio_net_vhost_status(n, status);
for (i = 0; i < n->max_queues; i++) {
+ NetClientState *ncs = qemu_get_subqueue(n->nic, i);
+ bool queue_started;
q = &n->vqs[i];
if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
} else {
queue_status = status;
}
+ queue_started =
+ virtio_net_started(n, queue_status) && !n->vhost_started;
+
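+        /* Deliver any packets the peer queued while this queue was
+         * stopped; without a flush they would stay stuck in the
+         * backend's queue.
+         */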
+ if (queue_started) {
+ qemu_flush_queued_packets(ncs);
+ }
if (!q->tx_waiting) {
continue;
}
- if (virtio_net_started(n, queue_status) && !n->vhost_started) {
+ if (queue_started) {
if (q->tx_timer) {
timer_mod(q->tx_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
info->multicast_table = str_list;
info->vlan_table = get_vlan_table(n);
- if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
info->vlan = RX_STATE_ALL;
} else if (!info->vlan_table) {
info->vlan = RX_STATE_NONE;
return 0;
}
- if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
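+    /* Only vhost-user needs an explicit ring enable; the request is
+     * forwarded to the backend (presumably as VHOST_USER_SET_VRING_ENABLE).
+     */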
+ if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
+ vhost_set_vring_enable(nc->peer, 1);
+ }
+
+ if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
return 0;
}
return 0;
}
- if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
+ if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
+ vhost_set_vring_enable(nc->peer, 0);
+ }
+
+ if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
return 0;
}
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);
-static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features)
+static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
+ Error **errp)
{
VirtIONet *n = VIRTIO_NET(vdev);
NetClientState *nc = qemu_get_queue(n->nic);
}
if (!get_vhost_net(nc->peer)) {
- virtio_add_feature(&features, VIRTIO_F_VERSION_1);
return features;
}
return vhost_net_get_features(get_vhost_net(nc->peer), features);
int i;
virtio_net_set_multiqueue(n,
- __virtio_has_feature(features, VIRTIO_NET_F_MQ));
+ virtio_has_feature(features, VIRTIO_NET_F_MQ));
virtio_net_set_mrg_rx_bufs(n,
- __virtio_has_feature(features,
- VIRTIO_NET_F_MRG_RXBUF),
- __virtio_has_feature(features,
- VIRTIO_F_VERSION_1));
+ virtio_has_feature(features,
+ VIRTIO_NET_F_MRG_RXBUF),
+ virtio_has_feature(features,
+ VIRTIO_F_VERSION_1));
if (n->has_vnet_hdr) {
n->curr_guest_offloads =
vhost_net_ack_features(get_vhost_net(nc->peer), features);
}
- if (__virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
+ if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
memset(n->vlans, 0, MAX_VLAN >> 3);
} else {
memset(n->vlans, 0xff, MAX_VLAN >> 3);
uint64_t offloads;
size_t s;
- if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
return VIRTIO_NET_ERR;
}
return VIRTIO_NET_OK;
}
+
static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIONet *n = VIRTIO_NET(vdev);
struct virtio_net_ctrl_hdr ctrl;
virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
- VirtQueueElement elem;
+ VirtQueueElement *elem;
size_t s;
struct iovec *iov, *iov2;
unsigned int iov_cnt;
- while (virtqueue_pop(vq, &elem)) {
- if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
- iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) {
- error_report("virtio-net ctrl missing headers");
- exit(1);
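+    /* virtqueue_pop() now returns a heap-allocated element which we must
+     * g_free(); a malformed request marks the device broken with
+     * virtio_error() instead of exiting QEMU.
+     */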
+ for (;;) {
+ elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+ if (!elem) {
+ break;
+ }
+ if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
+ iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
+ virtio_error(vdev, "virtio-net ctrl missing headers");
+ virtqueue_detach_element(vq, elem, 0);
+ g_free(elem);
+ break;
}
- iov_cnt = elem.out_num;
- iov2 = iov = g_memdup(elem.out_sg, sizeof(struct iovec) * elem.out_num);
+ iov_cnt = elem->out_num;
+ iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num);
s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
if (s != sizeof(ctrl)) {
status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
}
- s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
+ s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
assert(s == sizeof(status));
- virtqueue_push(vq, &elem, sizeof(status));
+ virtqueue_push(vq, elem, sizeof(status));
virtio_notify(vdev, vq);
g_free(iov2);
+ g_free(elem);
}
}
void *wbuf = (void *)buf;
work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
size - n->host_hdr_len);
- virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
+
+ if (n->needs_vnet_hdr_swap) {
+ virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
+ }
iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
} else {
struct virtio_net_hdr hdr = {
ptr += n->host_hdr_len;
if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
- int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff;
+ int vid = lduw_be_p(ptr + 14) & 0xfff;
if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
return 0;
}
offset = i = 0;
while (offset < size) {
- VirtQueueElement elem;
+ VirtQueueElement *elem;
int len, total;
- const struct iovec *sg = elem.in_sg;
+ const struct iovec *sg;
total = 0;
- if (virtqueue_pop(q->rx_vq, &elem) == 0) {
- if (i == 0)
- return -1;
- error_report("virtio-net unexpected empty queue: "
- "i %zd mergeable %d offset %zd, size %zd, "
- "guest hdr len %zd, host hdr len %zd "
- "guest features 0x%" PRIx64,
- i, n->mergeable_rx_bufs, offset, size,
- n->guest_hdr_len, n->host_hdr_len,
- vdev->guest_features);
- exit(1);
+ elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
+ if (!elem) {
+ if (i) {
+ virtio_error(vdev, "virtio-net unexpected empty queue: "
+ "i %zd mergeable %d offset %zd, size %zd, "
+ "guest hdr len %zd, host hdr len %zd "
+ "guest features 0x%" PRIx64,
+ i, n->mergeable_rx_bufs, offset, size,
+ n->guest_hdr_len, n->host_hdr_len,
+ vdev->guest_features);
+ }
+ return -1;
}
- if (elem.in_num < 1) {
- error_report("virtio-net receive queue contains no in buffers");
- exit(1);
+ if (elem->in_num < 1) {
+ virtio_error(vdev,
+ "virtio-net receive queue contains no in buffers");
+ virtqueue_detach_element(q->rx_vq, elem, 0);
+ g_free(elem);
+ return -1;
}
+ sg = elem->in_sg;
if (i == 0) {
assert(offset == 0);
if (n->mergeable_rx_bufs) {
mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
- sg, elem.in_num,
+ sg, elem->in_num,
offsetof(typeof(mhdr), num_buffers),
sizeof(mhdr.num_buffers));
}
- receive_header(n, sg, elem.in_num, buf, size);
+ receive_header(n, sg, elem->in_num, buf, size);
offset = n->host_hdr_len;
total += n->guest_hdr_len;
guest_offset = n->guest_hdr_len;
}
/* copy in packet. ugh */
- len = iov_from_buf(sg, elem.in_num, guest_offset,
+ len = iov_from_buf(sg, elem->in_num, guest_offset,
buf + offset, size - offset);
total += len;
offset += len;
* must have consumed the complete packet.
* Otherwise, drop it. */
if (!n->mergeable_rx_bufs && offset < size) {
-#if 0
- error_report("virtio-net truncated non-mergeable packet: "
- "i %zd mergeable %d offset %zd, size %zd, "
- "guest hdr len %zd, host hdr len %zd",
- i, n->mergeable_rx_bufs,
- offset, size, n->guest_hdr_len, n->host_hdr_len);
-#endif
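+            /* Un-pop the element so its buffers can be reused, and drop
+             * the truncated packet instead of killing QEMU.
+             */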
+ virtqueue_discard(q->rx_vq, elem, total);
+ g_free(elem);
return size;
}
/* signal other side */
- virtqueue_fill(q->rx_vq, &elem, total, i++);
+ virtqueue_fill(q->rx_vq, elem, total, i++);
+ g_free(elem);
}
if (mhdr_cnt) {
VirtIONetQueue *q = virtio_net_get_subqueue(nc);
VirtIODevice *vdev = VIRTIO_DEVICE(n);
- virtqueue_push(q->tx_vq, &q->async_tx.elem, 0);
+ virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
virtio_notify(vdev, q->tx_vq);
- q->async_tx.elem.out_num = q->async_tx.len = 0;
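+    /* The in-flight element is heap-allocated now; completion owns it
+     * and must free it.
+     */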
+ g_free(q->async_tx.elem);
+ q->async_tx.elem = NULL;
virtio_queue_set_notification(q->tx_vq, 1);
virtio_net_flush_tx(q);
{
VirtIONet *n = q->n;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
- VirtQueueElement elem;
+ VirtQueueElement *elem;
int32_t num_packets = 0;
int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
return num_packets;
}
- if (q->async_tx.elem.out_num) {
+ if (q->async_tx.elem) {
virtio_queue_set_notification(q->tx_vq, 0);
return num_packets;
}
- while (virtqueue_pop(q->tx_vq, &elem)) {
- ssize_t ret, len;
- unsigned int out_num = elem.out_num;
- struct iovec *out_sg = &elem.out_sg[0];
- struct iovec sg[VIRTQUEUE_MAX_SIZE];
+ for (;;) {
+ ssize_t ret;
+ unsigned int out_num;
+ struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
+ struct virtio_net_hdr_mrg_rxbuf mhdr;
+
+ elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
+ if (!elem) {
+ break;
+ }
+ out_num = elem->out_num;
+ out_sg = elem->out_sg;
if (out_num < 1) {
- error_report("virtio-net header not in first element");
- exit(1);
+ virtio_error(vdev, "virtio-net header not in first element");
+ virtqueue_detach_element(q->tx_vq, elem, 0);
+ g_free(elem);
+ return -EINVAL;
}
if (n->has_vnet_hdr) {
- if (out_sg[0].iov_len < n->guest_hdr_len) {
- error_report("virtio-net header incorrect");
- exit(1);
+ if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
+ n->guest_hdr_len) {
+ virtio_error(vdev, "virtio-net header incorrect");
+ virtqueue_detach_element(q->tx_vq, elem, 0);
+ g_free(elem);
+ return -EINVAL;
}
- virtio_net_hdr_swap(vdev, (void *) out_sg[0].iov_base);
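+        /* Byteswap a local copy of the header and splice it in front of
+         * the remaining guest buffers, so the guest-provided buffer is
+         * never modified in place.
+         */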
+ if (n->needs_vnet_hdr_swap) {
+ virtio_net_hdr_swap(vdev, (void *) &mhdr);
+ sg2[0].iov_base = &mhdr;
+ sg2[0].iov_len = n->guest_hdr_len;
+ out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
+ out_sg, out_num,
+ n->guest_hdr_len, -1);
+ if (out_num == VIRTQUEUE_MAX_SIZE) {
+ goto drop;
+ }
+ out_num += 1;
+ out_sg = sg2;
+ }
}
-
/*
* If host wants to see the guest header as is, we can
* pass it on unchanged. Otherwise, copy just the parts
out_sg = sg;
}
- len = n->guest_hdr_len;
-
ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
out_sg, out_num, virtio_net_tx_complete);
if (ret == 0) {
virtio_queue_set_notification(q->tx_vq, 0);
q->async_tx.elem = elem;
- q->async_tx.len = len;
return -EBUSY;
}
- len += ret;
-
- virtqueue_push(q->tx_vq, &elem, 0);
+drop:
+ virtqueue_push(q->tx_vq, elem, 0);
virtio_notify(vdev, q->tx_vq);
+ g_free(elem);
if (++num_packets >= n->tx_burst) {
break;
virtio_queue_set_notification(vq, 1);
timer_del(q->tx_timer);
q->tx_waiting = 0;
- virtio_net_flush_tx(q);
+ if (virtio_net_flush_tx(q) == -EINVAL) {
+ return;
+ }
} else {
timer_mod(q->tx_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
}
ret = virtio_net_flush_tx(q);
- if (ret == -EBUSY) {
- return; /* Notification re-enable handled by tx_complete */
+ if (ret == -EBUSY || ret == -EINVAL) {
+ return; /* Notification re-enable handled by tx_complete or device
+ * broken */
}
    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
virtio_queue_set_notification(q->tx_vq, 1);
- if (virtio_net_flush_tx(q) > 0) {
+ ret = virtio_net_flush_tx(q);
+ if (ret == -EINVAL) {
+ return;
+ } else if (ret > 0) {
virtio_queue_set_notification(q->tx_vq, 0);
qemu_bh_schedule(q->tx_bh);
q->tx_waiting = 1;
}
}
-static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
+static void virtio_net_add_queue(VirtIONet *n, int index)
{
- n->multiqueue = multiqueue;
+ VirtIODevice *vdev = VIRTIO_DEVICE(n);
- virtio_net_set_queues(n);
+ n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
+ virtio_net_handle_rx);
+ if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
+ n->vqs[index].tx_vq =
+ virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
+ n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ virtio_net_tx_timer,
+ &n->vqs[index]);
+ } else {
+ n->vqs[index].tx_vq =
+ virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
+ n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
+ }
+
+ n->vqs[index].tx_waiting = 0;
+ n->vqs[index].n = n;
}
-static void virtio_net_save(QEMUFile *f, void *opaque)
+static void virtio_net_del_queue(VirtIONet *n, int index)
{
- VirtIONet *n = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
+ VirtIONetQueue *q = &n->vqs[index];
+ NetClientState *nc = qemu_get_subqueue(n->nic, index);
- /* At this point, backend must be stopped, otherwise
- * it might keep writing to memory. */
- assert(!n->vhost_started);
- virtio_save(vdev, f);
+ qemu_purge_queued_packets(nc);
+
+ virtio_del_queue(vdev, index * 2);
+ if (q->tx_timer) {
+ timer_del(q->tx_timer);
+ timer_free(q->tx_timer);
+ } else {
+ qemu_bh_delete(q->tx_bh);
+ }
+ virtio_del_queue(vdev, index * 2 + 1);
+}
+
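+/* Virtqueues are laid out as rx/tx pairs: queue pair i uses vqs 2*i and
+ * 2*i + 1, and the control vq always comes last.
+ */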
+static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(n);
+ int old_num_queues = virtio_get_num_queues(vdev);
+ int new_num_queues = new_max_queues * 2 + 1;
+ int i;
+
+ assert(old_num_queues >= 3);
+ assert(old_num_queues % 2 == 1);
+
+ if (old_num_queues == new_num_queues) {
+ return;
+ }
+
+ /*
+ * We always need to remove and add ctrl vq if
+ * old_num_queues != new_num_queues. Remove ctrl_vq first,
+     * and then we only enter one of the following two loops.
+ */
+ virtio_del_queue(vdev, old_num_queues - 1);
+
+ for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
+ /* new_num_queues < old_num_queues */
+ virtio_net_del_queue(n, i / 2);
+ }
+
+ for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
+ /* new_num_queues > old_num_queues */
+ virtio_net_add_queue(n, i / 2);
+ }
+
+ /* add ctrl_vq last */
+ n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
+}
+
+static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
+{
+ int max = multiqueue ? n->max_queues : 1;
+
+ n->multiqueue = multiqueue;
+ virtio_net_change_num_queues(n, max);
+
+ virtio_net_set_queues(n);
}
static void virtio_net_save_device(VirtIODevice *vdev, QEMUFile *f)
}
}
- if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
+ if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
qemu_put_be64(f, n->curr_guest_offloads);
}
}
-static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
-{
- VirtIONet *n = opaque;
- VirtIODevice *vdev = VIRTIO_DEVICE(n);
-
- if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
- return -EINVAL;
-
- return virtio_load(vdev, f, version_id);
-}
-
static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
int version_id)
{
n->vqs[0].tx_waiting = qemu_get_be32(f);
virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f),
- virtio_has_feature(vdev, VIRTIO_F_VERSION_1));
+ virtio_vdev_has_feature(vdev,
+ VIRTIO_F_VERSION_1));
- if (version_id >= 3)
- n->status = qemu_get_be16(f);
+ n->status = qemu_get_be16(f);
- if (version_id >= 4) {
- if (version_id < 8) {
- n->promisc = qemu_get_be32(f);
- n->allmulti = qemu_get_be32(f);
- } else {
- n->promisc = qemu_get_byte(f);
- n->allmulti = qemu_get_byte(f);
- }
- }
+ n->promisc = qemu_get_byte(f);
+ n->allmulti = qemu_get_byte(f);
- if (version_id >= 5) {
- n->mac_table.in_use = qemu_get_be32(f);
- /* MAC_TABLE_ENTRIES may be different from the saved image */
- if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
- qemu_get_buffer(f, n->mac_table.macs,
- n->mac_table.in_use * ETH_ALEN);
- } else {
- int64_t i;
-
- /* Overflow detected - can happen if source has a larger MAC table.
- * We simply set overflow flag so there's no need to maintain the
- * table of addresses, discard them all.
- * Note: 64 bit math to avoid integer overflow.
- */
- for (i = 0; i < (int64_t)n->mac_table.in_use * ETH_ALEN; ++i) {
- qemu_get_byte(f);
- }
- n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
- n->mac_table.in_use = 0;
+ n->mac_table.in_use = qemu_get_be32(f);
+ /* MAC_TABLE_ENTRIES may be different from the saved image */
+ if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
+ qemu_get_buffer(f, n->mac_table.macs,
+ n->mac_table.in_use * ETH_ALEN);
+ } else {
+ int64_t i;
+
+ /* Overflow detected - can happen if source has a larger MAC table.
+ * We simply set overflow flag so there's no need to maintain the
+ * table of addresses, discard them all.
+ * Note: 64 bit math to avoid integer overflow.
+ */
+ for (i = 0; i < (int64_t)n->mac_table.in_use * ETH_ALEN; ++i) {
+ qemu_get_byte(f);
}
+ n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
+ n->mac_table.in_use = 0;
}
- if (version_id >= 6)
- qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
+ qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
- if (version_id >= 7) {
- if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
- error_report("virtio-net: saved image requires vnet_hdr=on");
- return -1;
- }
+ if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
+ error_report("virtio-net: saved image requires vnet_hdr=on");
+ return -1;
}
- if (version_id >= 9) {
- n->mac_table.multi_overflow = qemu_get_byte(f);
- n->mac_table.uni_overflow = qemu_get_byte(f);
- }
+ n->mac_table.multi_overflow = qemu_get_byte(f);
+ n->mac_table.uni_overflow = qemu_get_byte(f);
- if (version_id >= 10) {
- n->alluni = qemu_get_byte(f);
- n->nomulti = qemu_get_byte(f);
- n->nouni = qemu_get_byte(f);
- n->nobcast = qemu_get_byte(f);
- }
+ n->alluni = qemu_get_byte(f);
+ n->nomulti = qemu_get_byte(f);
+ n->nouni = qemu_get_byte(f);
+ n->nobcast = qemu_get_byte(f);
- if (version_id >= 11) {
- if (qemu_get_byte(f) && !peer_has_ufo(n)) {
- error_report("virtio-net: saved image requires TUN_F_UFO support");
- return -1;
- }
+ if (qemu_get_byte(f) && !peer_has_ufo(n)) {
+ error_report("virtio-net: saved image requires TUN_F_UFO support");
+ return -1;
}
if (n->max_queues > 1) {
}
}
- if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
+ if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
n->curr_guest_offloads = qemu_get_be64(f);
} else {
n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
qemu_get_subqueue(n->nic, i)->link_down = link_down;
}
- if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
- virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
+ if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
+ virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
n->announce_counter = SELF_ANNOUNCE_ROUNDS;
timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
}
}
static NetClientInfo net_virtio_info = {
- .type = NET_CLIENT_OPTIONS_KIND_NIC,
+ .type = NET_CLIENT_DRIVER_NIC,
.size = sizeof(NICState),
.can_receive = virtio_net_can_receive,
.receive = virtio_net_receive,
virtio_net_set_config_size(n, n->host_features);
virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
+ /*
+ * We set a lower limit on RX queue size to what it always was.
+ * Guests that want a smaller ring can always resize it without
+ * help from us (using virtio 1 and up).
+ */
+ if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
+ n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
+ (n->net_conf.rx_queue_size & (n->net_conf.rx_queue_size - 1))) {
+ error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
+ "must be a power of 2 between %d and %d.",
+ n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
+ VIRTQUEUE_MAX_SIZE);
+ virtio_cleanup(vdev);
+ return;
+ }
+
n->max_queues = MAX(n->nic_conf.peers.queues, 1);
if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
}
for (i = 0; i < n->max_queues; i++) {
- n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
- if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
- n->vqs[i].tx_vq =
- virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
- n->vqs[i].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
- virtio_net_tx_timer,
- &n->vqs[i]);
- } else {
- n->vqs[i].tx_vq =
- virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
- n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
- }
-
- n->vqs[i].tx_waiting = 0;
- n->vqs[i].n = n;
+ virtio_net_add_queue(n, i);
}
n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
nc->rxfilter_notify_enabled = 1;
n->qdev = dev;
- register_savevm(dev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
- virtio_net_save, virtio_net_load, n);
}
static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VirtIONet *n = VIRTIO_NET(dev);
- int i;
+ int i, max_queues;
/* This will stop vhost backend if appropriate. */
virtio_net_set_status(vdev, 0);
- unregister_savevm(dev, "virtio-net", n);
-
g_free(n->netclient_name);
n->netclient_name = NULL;
g_free(n->netclient_type);
g_free(n->mac_table.macs);
g_free(n->vlans);
- for (i = 0; i < n->max_queues; i++) {
- VirtIONetQueue *q = &n->vqs[i];
- NetClientState *nc = qemu_get_subqueue(n->nic, i);
-
- qemu_purge_queued_packets(nc);
-
- if (q->tx_timer) {
- timer_del(q->tx_timer);
- timer_free(q->tx_timer);
- } else if (q->tx_bh) {
- qemu_bh_delete(q->tx_bh);
- }
+ max_queues = n->multiqueue ? n->max_queues : 1;
+ for (i = 0; i < max_queues; i++) {
+ virtio_net_del_queue(n, i);
}
timer_del(n->announce_timer);
DEVICE(n), NULL);
}
+static void virtio_net_pre_save(void *opaque)
+{
+ VirtIONet *n = opaque;
+
+ /* At this point, backend must be stopped, otherwise
+ * it might keep writing to memory. */
+ assert(!n->vhost_started);
+}
+
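+/* A thin VMState wrapper: VMSTATE_VIRTIO_DEVICE delegates the actual work
+ * to the virtio core's save/load callbacks, replacing the explicit
+ * register_savevm()/virtio_save() calls removed above.
+ */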
+static const VMStateDescription vmstate_virtio_net = {
+ .name = "virtio-net",
+ .minimum_version_id = VIRTIO_NET_VM_VERSION,
+ .version_id = VIRTIO_NET_VM_VERSION,
+ .fields = (VMStateField[]) {
+ VMSTATE_VIRTIO_DEVICE,
+ VMSTATE_END_OF_LIST()
+ },
+ .pre_save = virtio_net_pre_save,
+};
+
static Property virtio_net_properties[] = {
- DEFINE_PROP_BIT("any_layout", VirtIONet, host_features,
- VIRTIO_F_ANY_LAYOUT, true),
DEFINE_PROP_BIT("csum", VirtIONet, host_features, VIRTIO_NET_F_CSUM, true),
DEFINE_PROP_BIT("guest_csum", VirtIONet, host_features,
VIRTIO_NET_F_GUEST_CSUM, true),
TX_TIMER_INTERVAL),
DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
+ DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
+ VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
DEFINE_PROP_END_OF_LIST(),
};
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
dc->props = virtio_net_properties;
+ dc->vmsd = &vmstate_virtio_net;
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
vdc->realize = virtio_net_device_realize;
vdc->unrealize = virtio_net_device_unrealize;