#include "qapi/qmp/qjson.h"
#include "qapi-event.h"
#include "hw/virtio/virtio-access.h"
+#include "migration/misc.h"
#define VIRTIO_NET_VM_VERSION 11
#define MAC_TABLE_ENTRIES 64
#define MAX_VLAN (1 << 12) /* Per 802.1Q definition */
+/* Previously the queue size was fixed at 256 entries. */
+#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
+#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256
+
+/* For now, only allow sizes at or above the default; with virtio 1.0 the
+ * guest can shrink the ring itself. */
+#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
+#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
+
/*
* Calculate the number of bytes up to and including the given 'field' of
* 'container'.
.end = endof(struct virtio_net_config, status)},
{.flags = 1 << VIRTIO_NET_F_MQ,
.end = endof(struct virtio_net_config, max_virtqueue_pairs)},
+ {.flags = 1 << VIRTIO_NET_F_MTU,
+ .end = endof(struct virtio_net_config, mtu)},
{}
};
virtio_stw_p(vdev, &netcfg.status, n->status);
virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
+ virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
memcpy(netcfg.mac, n->mac, ETH_ALEN);
memcpy(config, &netcfg, n->config_size);
}
qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
}
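+ /* If the guest negotiated VIRTIO_NET_F_MTU, push the configured MTU
+ * down to the vhost backend before starting it. */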
+ if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
+ r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
+ if (r < 0) {
+ error_report("%uBytes MTU not supported by the backend",
+ n->net_conf.mtu);
+
+ return;
+ }
+ }
+
n->vhost_started = 1;
r = vhost_net_start(vdev, n->nic->ncs, queues);
if (r < 0) {
}
}
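+/* Drop every buffer the guest has queued on a tx virtqueue; if anything
+ * was dropped, notify the guest so it can reclaim the buffers. */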
+static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
+{
+ unsigned int dropped = virtqueue_drop_all(vq);
+ if (dropped) {
+ virtio_notify(vdev, vq);
+ }
+}
+
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
VirtIONet *n = VIRTIO_NET(vdev);
} else {
qemu_bh_cancel(q->tx_bh);
}
+ if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
+ (queue_status & VIRTIO_CONFIG_S_DRIVER_OK)) {
+ /* If tx is waiting, we likely have packets in the tx queue and
+ * notification disabled. */
+ q->tx_waiting = 0;
+ virtio_queue_set_notification(q->tx_vq, 1);
+ virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
+ }
}
}
}
}
}
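+/* Largest tx queue size we can offer, which depends on the peer backend. */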
+static int virtio_net_max_tx_queue_size(VirtIONet *n)
+{
+ NetClientState *peer = n->nic_conf.peers.ncs[0];
+
+ /*
+ * Backends other than vhost-user don't support a tx queue size
+ * larger than the default.
+ */
+ if (!peer) {
+ return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
+ }
+
+ if (peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
+ return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
+ }
+
+ return VIRTQUEUE_MAX_SIZE;
+}
+
static int peer_attach(VirtIONet *n, int index)
{
NetClientState *nc = qemu_get_subqueue(n->nic, index);
return 0;
}
- if (nc->peer->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
+ if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
vhost_set_vring_enable(nc->peer, 1);
}
- if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
+ if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
+ return 0;
+ }
+
+ if (n->max_queues == 1) {
return 0;
}
return 0;
}
- if (nc->peer->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
+ if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
vhost_set_vring_enable(nc->peer, 0);
}
- if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
+ if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
return 0;
}
int i;
int r;
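+ /* Nothing to (re)attach if the peer has already been deleted. */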
+ if (n->nic->peer_deleted) {
+ return;
+ }
+
for (i = 0; i < n->max_queues; i++) {
if (i < n->curr_queues) {
r = peer_attach(n, i);
if (!get_vhost_net(nc->peer)) {
return features;
}
- return vhost_net_get_features(get_vhost_net(nc->peer), features);
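+ /* Remember what the backend actually accepts so set_features can
+ * check it later, then optionally re-advertise VIRTIO_NET_F_MTU. */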
+ features = vhost_net_get_features(get_vhost_net(nc->peer), features);
+ vdev->backend_features = features;
+
+ if (n->mtu_bypass_backend &&
+ (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
+ features |= (1ULL << VIRTIO_NET_F_MTU);
+ }
+
+ return features;
}
static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
VirtIONet *n = VIRTIO_NET(vdev);
int i;
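+ /* If we only offered VIRTIO_NET_F_MTU because of mtu_bypass_backend,
+ * drop it again when the backend didn't accept it. */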
+ if (n->mtu_bypass_backend &&
+ !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) {
+ features &= ~(1ULL << VIRTIO_NET_F_MTU);
+ }
+
virtio_net_set_multiqueue(n,
virtio_has_feature(features, VIRTIO_NET_F_MQ));
return VIRTIO_NET_OK;
}
+
static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIONet *n = VIRTIO_NET(vdev);
}
if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
- error_report("virtio-net ctrl missing headers");
- exit(1);
+ virtio_error(vdev, "virtio-net ctrl missing headers");
+ virtqueue_detach_element(vq, elem, 0);
+ g_free(elem);
+ break;
}
iov_cnt = elem->out_num;
return 0;
}
-static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
+ size_t size)
{
VirtIONet *n = qemu_get_nic_opaque(nc);
VirtIONetQueue *q = virtio_net_get_subqueue(nc);
elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
if (!elem) {
- if (i == 0)
- return -1;
- error_report("virtio-net unexpected empty queue: "
- "i %zd mergeable %d offset %zd, size %zd, "
- "guest hdr len %zd, host hdr len %zd "
- "guest features 0x%" PRIx64,
- i, n->mergeable_rx_bufs, offset, size,
- n->guest_hdr_len, n->host_hdr_len,
- vdev->guest_features);
- exit(1);
+ if (i) {
+ virtio_error(vdev, "virtio-net unexpected empty queue: "
+ "i %zd mergeable %d offset %zd, size %zd, "
+ "guest hdr len %zd, host hdr len %zd "
+ "guest features 0x%" PRIx64,
+ i, n->mergeable_rx_bufs, offset, size,
+ n->guest_hdr_len, n->host_hdr_len,
+ vdev->guest_features);
+ }
+ return -1;
}
if (elem->in_num < 1) {
- error_report("virtio-net receive queue contains no in buffers");
- exit(1);
+ virtio_error(vdev,
+ "virtio-net receive queue contains no in buffers");
+ virtqueue_detach_element(q->rx_vq, elem, 0);
+ g_free(elem);
+ return -1;
}
sg = elem->in_sg;
* must have consumed the complete packet.
* Otherwise, drop it. */
if (!n->mergeable_rx_bufs && offset < size) {
- virtqueue_discard(q->rx_vq, elem, total);
+ virtqueue_unpop(q->rx_vq, elem, total);
g_free(elem);
return size;
}
return size;
}
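+/* Thin wrapper that holds the RCU read lock around the actual receive path. */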
+static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
+ size_t size)
+{
+ ssize_t r;
+
+ rcu_read_lock();
+ r = virtio_net_receive_rcu(nc, buf, size);
+ rcu_read_unlock();
+ return r;
+}
+
static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
out_num = elem->out_num;
out_sg = elem->out_sg;
if (out_num < 1) {
- error_report("virtio-net header not in first element");
- exit(1);
+ virtio_error(vdev, "virtio-net header not in first element");
+ virtqueue_detach_element(q->tx_vq, elem, 0);
+ g_free(elem);
+ return -EINVAL;
}
if (n->has_vnet_hdr) {
if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
n->guest_hdr_len) {
- error_report("virtio-net header incorrect");
- exit(1);
+ virtio_error(vdev, "virtio-net header incorrect");
+ virtqueue_detach_element(q->tx_vq, elem, 0);
+ g_free(elem);
+ return -EINVAL;
}
if (n->needs_vnet_hdr_swap) {
virtio_net_hdr_swap(vdev, (void *) &mhdr);
VirtIONet *n = VIRTIO_NET(vdev);
VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
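+ /* With the link down the guest should not be transmitting; drop
+ * whatever it queued instead of flushing it. */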
+ if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
+ virtio_net_drop_tx_queue_data(vdev, vq);
+ return;
+ }
+
/* This happens when device was stopped but VCPU wasn't. */
if (!vdev->vm_running) {
q->tx_waiting = 1;
virtio_queue_set_notification(vq, 1);
timer_del(q->tx_timer);
q->tx_waiting = 0;
- virtio_net_flush_tx(q);
+ if (virtio_net_flush_tx(q) == -EINVAL) {
+ return;
+ }
} else {
timer_mod(q->tx_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
VirtIONet *n = VIRTIO_NET(vdev);
VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
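+ /* As in the timer path: drop queued packets while the link is down. */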
+ if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
+ virtio_net_drop_tx_queue_data(vdev, vq);
+ return;
+ }
+
if (unlikely(q->tx_waiting)) {
return;
}
}
ret = virtio_net_flush_tx(q);
- if (ret == -EBUSY) {
- return; /* Notification re-enable handled by tx_complete */
+ if (ret == -EBUSY || ret == -EINVAL) {
+ return; /* Notification re-enable is handled by tx_complete, or the
+ * device is broken */
}
/* If less than a full burst, re-enable notification and flush
* anything that may have come in while we weren't looking. If
* we find something, assume the guest is still active and reschedule */
virtio_queue_set_notification(q->tx_vq, 1);
- if (virtio_net_flush_tx(q) > 0) {
+ ret = virtio_net_flush_tx(q);
+ if (ret == -EINVAL) {
+ return;
+ } else if (ret > 0) {
virtio_queue_set_notification(q->tx_vq, 0);
qemu_bh_schedule(q->tx_bh);
q->tx_waiting = 1;
{
VirtIODevice *vdev = VIRTIO_DEVICE(n);
- n->vqs[index].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
+ n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
+ virtio_net_handle_rx);
+
if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
n->vqs[index].tx_vq =
- virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
+ virtio_add_queue(vdev, n->net_conf.tx_queue_size,
+ virtio_net_handle_tx_timer);
n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
virtio_net_tx_timer,
&n->vqs[index]);
} else {
n->vqs[index].tx_vq =
- virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
+ virtio_add_queue(vdev, n->net_conf.tx_queue_size,
+ virtio_net_handle_tx_bh);
n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
}
if (q->tx_timer) {
timer_del(q->tx_timer);
timer_free(q->tx_timer);
+ q->tx_timer = NULL;
} else {
qemu_bh_delete(q->tx_bh);
+ q->tx_bh = NULL;
}
+ q->tx_waiting = 0;
virtio_del_queue(vdev, index * 2 + 1);
}
virtio_net_set_queues(n);
}
-static void virtio_net_save(QEMUFile *f, void *opaque)
+static int virtio_net_post_load_device(void *opaque, int version_id)
{
VirtIONet *n = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
+ int i, link_down;
- /* At this point, backend must be stopped, otherwise
- * it might keep writing to memory. */
- assert(!n->vhost_started);
- virtio_save(vdev, f);
-}
-
-static void virtio_net_save_device(VirtIODevice *vdev, QEMUFile *f)
-{
- VirtIONet *n = VIRTIO_NET(vdev);
- int i;
+ virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
+ virtio_vdev_has_feature(vdev,
+ VIRTIO_F_VERSION_1));
- qemu_put_buffer(f, n->mac, ETH_ALEN);
- qemu_put_be32(f, n->vqs[0].tx_waiting);
- qemu_put_be32(f, n->mergeable_rx_bufs);
- qemu_put_be16(f, n->status);
- qemu_put_byte(f, n->promisc);
- qemu_put_byte(f, n->allmulti);
- qemu_put_be32(f, n->mac_table.in_use);
- qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
- qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
- qemu_put_be32(f, n->has_vnet_hdr);
- qemu_put_byte(f, n->mac_table.multi_overflow);
- qemu_put_byte(f, n->mac_table.uni_overflow);
- qemu_put_byte(f, n->alluni);
- qemu_put_byte(f, n->nomulti);
- qemu_put_byte(f, n->nouni);
- qemu_put_byte(f, n->nobcast);
- qemu_put_byte(f, n->has_ufo);
- if (n->max_queues > 1) {
- qemu_put_be16(f, n->max_queues);
- qemu_put_be16(f, n->curr_queues);
- for (i = 1; i < n->curr_queues; i++) {
- qemu_put_be32(f, n->vqs[i].tx_waiting);
- }
+ /* MAC_TABLE_ENTRIES may be different from the saved image */
+ if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
+ n->mac_table.in_use = 0;
}
- if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
- qemu_put_be64(f, n->curr_guest_offloads);
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
+ n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
}
-}
-
-static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
-{
- VirtIONet *n = opaque;
- VirtIODevice *vdev = VIRTIO_DEVICE(n);
- int ret;
- if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
- return -EINVAL;
-
- ret = virtio_load(vdev, f, version_id);
- if (ret) {
- return ret;
+ if (peer_has_vnet_hdr(n)) {
+ virtio_net_apply_guest_offloads(n);
}
- if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
- n->curr_guest_offloads = qemu_get_be64(f);
- } else {
- n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
+ virtio_net_set_queues(n);
+
+ /* Find the first multicast entry in the saved MAC filter */
+ for (i = 0; i < n->mac_table.in_use; i++) {
+ if (n->mac_table.macs[i * ETH_ALEN] & 1) {
+ break;
+ }
}
+ n->mac_table.first_multi = i;
- if (peer_has_vnet_hdr(n)) {
- virtio_net_apply_guest_offloads(n);
+ /* nc.link_down can't be migrated, so infer it from the link status
+ * bit in n->status */
+ link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
+ for (i = 0; i < n->max_queues; i++) {
+ qemu_get_subqueue(n->nic, i)->link_down = link_down;
}
if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
return 0;
}
-static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
- int version_id)
+/* tx_waiting field of a VirtIONetQueue */
+static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
+ .name = "virtio-net-queue-tx_waiting",
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(tx_waiting, VirtIONetQueue),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static bool max_queues_gt_1(void *opaque, int version_id)
{
- VirtIONet *n = VIRTIO_NET(vdev);
- int i, link_down;
+ return VIRTIO_NET(opaque)->max_queues > 1;
+}
- qemu_get_buffer(f, n->mac, ETH_ALEN);
- n->vqs[0].tx_waiting = qemu_get_be32(f);
+static bool has_ctrl_guest_offloads(void *opaque, int version_id)
+{
+ return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque),
+ VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
+}
- virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f),
- virtio_vdev_has_feature(vdev,
- VIRTIO_F_VERSION_1));
+static bool mac_table_fits(void *opaque, int version_id)
+{
+ return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES;
+}
- if (version_id >= 3)
- n->status = qemu_get_be16(f);
+static bool mac_table_doesnt_fit(void *opaque, int version_id)
+{
+ return !mac_table_fits(opaque, version_id);
+}
- if (version_id >= 4) {
- if (version_id < 8) {
- n->promisc = qemu_get_be32(f);
- n->allmulti = qemu_get_be32(f);
- } else {
- n->promisc = qemu_get_byte(f);
- n->allmulti = qemu_get_byte(f);
- }
- }
+/* This temporary type is shared by all the WITH_TMP methods
+ * although only some fields are used by each.
+ */
+struct VirtIONetMigTmp {
+ VirtIONet *parent;
+ VirtIONetQueue *vqs_1;
+ uint16_t curr_queues_1;
+ uint8_t has_ufo;
+ uint32_t has_vnet_hdr;
+};
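+/* VMSTATE_WITH_TMP allocates a VirtIONetMigTmp around each wrapped
+ * subsection's save/load and points 'parent' back at the VirtIONet. */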
- if (version_id >= 5) {
- n->mac_table.in_use = qemu_get_be32(f);
- /* MAC_TABLE_ENTRIES may be different from the saved image */
- if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
- qemu_get_buffer(f, n->mac_table.macs,
- n->mac_table.in_use * ETH_ALEN);
- } else {
- int64_t i;
-
- /* Overflow detected - can happen if source has a larger MAC table.
- * We simply set overflow flag so there's no need to maintain the
- * table of addresses, discard them all.
- * Note: 64 bit math to avoid integer overflow.
- */
- for (i = 0; i < (int64_t)n->mac_table.in_use * ETH_ALEN; ++i) {
- qemu_get_byte(f);
- }
- n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
- n->mac_table.in_use = 0;
- }
- }
-
- if (version_id >= 6)
- qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
+/* The 2nd and subsequent tx_waiting flags are loaded later than
+ * the 1st entry in the queues and only if there's more than one
+ * entry. We use the tmp mechanism to calculate a temporary
+ * pointer and count and also validate the count.
+ */
- if (version_id >= 7) {
- if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
- error_report("virtio-net: saved image requires vnet_hdr=on");
- return -1;
- }
- }
+static void virtio_net_tx_waiting_pre_save(void *opaque)
+{
+ struct VirtIONetMigTmp *tmp = opaque;
- if (version_id >= 9) {
- n->mac_table.multi_overflow = qemu_get_byte(f);
- n->mac_table.uni_overflow = qemu_get_byte(f);
+ tmp->vqs_1 = tmp->parent->vqs + 1;
+ tmp->curr_queues_1 = tmp->parent->curr_queues - 1;
+ if (tmp->parent->curr_queues == 0) {
+ tmp->curr_queues_1 = 0;
}
+}
- if (version_id >= 10) {
- n->alluni = qemu_get_byte(f);
- n->nomulti = qemu_get_byte(f);
- n->nouni = qemu_get_byte(f);
- n->nobcast = qemu_get_byte(f);
- }
+static int virtio_net_tx_waiting_pre_load(void *opaque)
+{
+ struct VirtIONetMigTmp *tmp = opaque;
- if (version_id >= 11) {
- if (qemu_get_byte(f) && !peer_has_ufo(n)) {
- error_report("virtio-net: saved image requires TUN_F_UFO support");
- return -1;
- }
- }
+ /* Reuse the pointer setup from save */
+ virtio_net_tx_waiting_pre_save(opaque);
- if (n->max_queues > 1) {
- if (n->max_queues != qemu_get_be16(f)) {
- error_report("virtio-net: different max_queues ");
- return -1;
- }
+ if (tmp->parent->curr_queues > tmp->parent->max_queues) {
+ error_report("virtio-net: curr_queues %x > max_queues %x",
+ tmp->parent->curr_queues, tmp->parent->max_queues);
- n->curr_queues = qemu_get_be16(f);
- if (n->curr_queues > n->max_queues) {
- error_report("virtio-net: curr_queues %x > max_queues %x",
- n->curr_queues, n->max_queues);
- return -1;
- }
- for (i = 1; i < n->curr_queues; i++) {
- n->vqs[i].tx_waiting = qemu_get_be32(f);
- }
+ return -EINVAL;
}
- virtio_net_set_queues(n);
+ return 0; /* all good */
+}
- /* Find the first multicast entry in the saved MAC filter */
- for (i = 0; i < n->mac_table.in_use; i++) {
- if (n->mac_table.macs[i * ETH_ALEN] & 1) {
- break;
- }
+static const VMStateDescription vmstate_virtio_net_tx_waiting = {
+ .name = "virtio-net-tx_waiting",
+ .pre_load = virtio_net_tx_waiting_pre_load,
+ .pre_save = virtio_net_tx_waiting_pre_save,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
+ curr_queues_1,
+ vmstate_virtio_net_queue_tx_waiting,
+ struct VirtIONetQueue),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+/* The 'has_ufo' flag is only validated; if the incoming stream has the
+ * flag set, we must check that this side supports UFO too.
+ */
+static int virtio_net_ufo_post_load(void *opaque, int version_id)
+{
+ struct VirtIONetMigTmp *tmp = opaque;
+
+ if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
+ error_report("virtio-net: saved image requires TUN_F_UFO support");
+ return -EINVAL;
}
- n->mac_table.first_multi = i;
- /* nc.link_down can't be migrated, so infer link_down according
- * to link status bit in n->status */
- link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
- for (i = 0; i < n->max_queues; i++) {
- qemu_get_subqueue(n->nic, i)->link_down = link_down;
+ return 0;
+}
+
+static void virtio_net_ufo_pre_save(void *opaque)
+{
+ struct VirtIONetMigTmp *tmp = opaque;
+
+ tmp->has_ufo = tmp->parent->has_ufo;
+}
+
+static const VMStateDescription vmstate_virtio_net_has_ufo = {
+ .name = "virtio-net-ufo",
+ .post_load = virtio_net_ufo_post_load,
+ .pre_save = virtio_net_ufo_pre_save,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+/* The 'has_vnet_hdr' flag is only validated; if the incoming stream has
+ * the flag set, we must check that the peer supports vnet headers too.
+ */
+static int virtio_net_vnet_post_load(void *opaque, int version_id)
+{
+ struct VirtIONetMigTmp *tmp = opaque;
+
+ if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
+ error_report("virtio-net: saved image requires vnet_hdr=on");
+ return -EINVAL;
}
return 0;
}
+static void virtio_net_vnet_pre_save(void *opaque)
+{
+ struct VirtIONetMigTmp *tmp = opaque;
+
+ tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;
+}
+
+static const VMStateDescription vmstate_virtio_net_has_vnet = {
+ .name = "virtio-net-vnet",
+ .post_load = virtio_net_vnet_post_load,
+ .pre_save = virtio_net_vnet_pre_save,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static const VMStateDescription vmstate_virtio_net_device = {
+ .name = "virtio-net-device",
+ .version_id = VIRTIO_NET_VM_VERSION,
+ .minimum_version_id = VIRTIO_NET_VM_VERSION,
+ .post_load = virtio_net_post_load_device,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN),
+ VMSTATE_STRUCT_POINTER(vqs, VirtIONet,
+ vmstate_virtio_net_queue_tx_waiting,
+ VirtIONetQueue),
+ VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet),
+ VMSTATE_UINT16(status, VirtIONet),
+ VMSTATE_UINT8(promisc, VirtIONet),
+ VMSTATE_UINT8(allmulti, VirtIONet),
+ VMSTATE_UINT32(mac_table.in_use, VirtIONet),
+
+ /* Guarded pair: if the table fits we load it, else we discard it
+ * (this can happen if the source has a larger MAC table); post_load
+ * sets the overflow flags in that case.
+ */
+ VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet,
+ 0, mac_table_fits, mac_table.in_use,
+ ETH_ALEN),
+ VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0,
+ mac_table.in_use, ETH_ALEN),
+
+ /* Note: this is an array of uint32_t that has always been saved as a
+ * raw buffer, so the stored bytes keep host byte order; it is actually
+ * used as a bitmap built on those uint32_t words.
+ */
+ VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3),
+ VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
+ vmstate_virtio_net_has_vnet),
+ VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet),
+ VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet),
+ VMSTATE_UINT8(alluni, VirtIONet),
+ VMSTATE_UINT8(nomulti, VirtIONet),
+ VMSTATE_UINT8(nouni, VirtIONet),
+ VMSTATE_UINT8(nobcast, VirtIONet),
+ VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
+ vmstate_virtio_net_has_ufo),
+ VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0,
+ vmstate_info_uint16_equal, uint16_t),
+ VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1),
+ VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
+ vmstate_virtio_net_tx_waiting),
+ VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet,
+ has_ctrl_guest_offloads),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
static NetClientInfo net_virtio_info = {
- .type = NET_CLIENT_OPTIONS_KIND_NIC,
+ .type = NET_CLIENT_DRIVER_NIC,
.size = sizeof(NICState),
.can_receive = virtio_net_can_receive,
.receive = virtio_net_receive,
{
int i, config_size = 0;
virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
+
for (i = 0; feature_sizes[i].flags != 0; i++) {
if (host_features & feature_sizes[i].flags) {
config_size = MAX(feature_sizes[i].end, config_size);
NetClientState *nc;
int i;
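+ /* A nonzero host_mtu property means we offer VIRTIO_NET_F_MTU. */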
+ if (n->net_conf.mtu) {
+ n->host_features |= (0x1 << VIRTIO_NET_F_MTU);
+ }
+
virtio_net_set_config_size(n, n->host_features);
virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
+ /*
+ * Keep the lower limit on the rx queue size at its historical value.
+ * Guests that want a smaller ring can resize it themselves (with
+ * virtio 1.0 and up) without help from us.
+ */
+ if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
+ n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
+ !is_power_of_2(n->net_conf.rx_queue_size)) {
+ error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
+ "must be a power of 2 between %d and %d.",
+ n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
+ VIRTQUEUE_MAX_SIZE);
+ virtio_cleanup(vdev);
+ return;
+ }
+
+ if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
+ n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
+ !is_power_of_2(n->net_conf.tx_queue_size)) {
+ error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
+ "must be a power of 2 between %d and %d",
+ n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
+ VIRTQUEUE_MAX_SIZE);
+ virtio_cleanup(vdev);
+ return;
+ }
+
n->max_queues = MAX(n->nic_conf.peers.queues, 1);
if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
error_report("Defaulting to \"bh\"");
}
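+ /* Clamp the user-requested tx queue size to what the backend can take. */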
+ n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n),
+ n->net_conf.tx_queue_size);
+
for (i = 0; i < n->max_queues; i++) {
virtio_net_add_queue(n, i);
}
nc->rxfilter_notify_enabled = 1;
n->qdev = dev;
- register_savevm(dev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
- virtio_net_save, virtio_net_load, n);
}
static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
/* This will stop vhost backend if appropriate. */
virtio_net_set_status(vdev, 0);
- unregister_savevm(dev, "virtio-net", n);
-
g_free(n->netclient_name);
n->netclient_name = NULL;
g_free(n->netclient_type);
DEVICE(n), NULL);
}
+static void virtio_net_pre_save(void *opaque)
+{
+ VirtIONet *n = opaque;
+
+ /* At this point, backend must be stopped, otherwise
+ * it might keep writing to memory. */
+ assert(!n->vhost_started);
+}
+
+static const VMStateDescription vmstate_virtio_net = {
+ .name = "virtio-net",
+ .minimum_version_id = VIRTIO_NET_VM_VERSION,
+ .version_id = VIRTIO_NET_VM_VERSION,
+ .fields = (VMStateField[]) {
+ VMSTATE_VIRTIO_DEVICE,
+ VMSTATE_END_OF_LIST()
+ },
+ .pre_save = virtio_net_pre_save,
+};
+
static Property virtio_net_properties[] = {
DEFINE_PROP_BIT("csum", VirtIONet, host_features, VIRTIO_NET_F_CSUM, true),
DEFINE_PROP_BIT("guest_csum", VirtIONet, host_features,
TX_TIMER_INTERVAL),
DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
+ DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
+ VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
+ DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
+ VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
+ DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
+ DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
+ true),
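+ /* Example (hypothetical values): -device virtio-net-pci,
+ * rx_queue_size=512,host_mtu=9000. A tx_queue_size above the default
+ * only takes effect with a vhost-user backend. */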
DEFINE_PROP_END_OF_LIST(),
};
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
dc->props = virtio_net_properties;
+ dc->vmsd = &vmstate_virtio_net;
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
vdc->realize = virtio_net_device_realize;
vdc->unrealize = virtio_net_device_unrealize;
vdc->set_status = virtio_net_set_status;
vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
- vdc->load = virtio_net_load_device;
- vdc->save = virtio_net_save_device;
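+ /* VIRTIO_NET_F_GSO is a legacy feature bit, deprecated in virtio 1.0. */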
+ vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
+ vdc->vmsd = &vmstate_virtio_net_device;
}
static const TypeInfo virtio_net_info = {