memcpy(&netcfg, config, n->config_size);
- if (!(vdev->guest_features >> VIRTIO_NET_F_CTRL_MAC_ADDR & 1) &&
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
+ !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
memcpy(n->mac, netcfg.mac, ETH_ALEN);
qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
return;
}
- if (!!n->vhost_started ==
- (virtio_net_started(n, status) && !nc->peer->link_down)) {
+ if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
+ !!n->vhost_started) {
return;
}
if (!n->vhost_started) {
int r, i;
- if (!vhost_net_query(get_vhost_net(nc->peer), vdev)) {
- return;
- }
-
/* Any packets outstanding? Purge them to avoid touching rings
* when vhost is running.
*/
virtio_net_vhost_status(n, status);
for (i = 0; i < n->max_queues; i++) {
+ NetClientState *ncs = qemu_get_subqueue(n->nic, i);
+ bool queue_started;
q = &n->vqs[i];
if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
    queue_status = 0;
} else {
queue_status = status;
}
+ queue_started =
+ virtio_net_started(n, queue_status) && !n->vhost_started;
+
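+        /* Deliver any packets the peer queued up while this queue could
+         * not receive them, now that the queue has (re)started. */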
+ if (queue_started) {
+ qemu_flush_queued_packets(ncs);
+ }
if (!q->tx_waiting) {
continue;
}
- if (virtio_net_started(n, queue_status) && !n->vhost_started) {
+ if (queue_started) {
if (q->tx_timer) {
timer_mod(q->tx_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
}
}
-static char *mac_strdup_printf(const uint8_t *mac)
-{
- return g_strdup_printf("%.2x:%.2x:%.2x:%.2x:%.2x:%.2x", mac[0],
- mac[1], mac[2], mac[3], mac[4], mac[5]);
-}
-
static intList *get_vlan_table(VirtIONet *n)
{
intList *list, *entry;
info->multicast_overflow = n->mac_table.multi_overflow;
info->unicast_overflow = n->mac_table.uni_overflow;
- info->main_mac = mac_strdup_printf(n->mac);
+ info->main_mac = qemu_mac_strdup_printf(n->mac);
str_list = NULL;
for (i = 0; i < n->mac_table.first_multi; i++) {
entry = g_malloc0(sizeof(*entry));
- entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
+ entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
entry->next = str_list;
str_list = entry;
}
str_list = NULL;
for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
entry = g_malloc0(sizeof(*entry));
- entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
+ entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
entry->next = str_list;
str_list = entry;
}
info->multicast_table = str_list;
info->vlan_table = get_vlan_table(n);
- if (!((1 << VIRTIO_NET_F_CTRL_VLAN) & vdev->guest_features)) {
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
info->vlan = RX_STATE_ALL;
} else if (!info->vlan_table) {
info->vlan = RX_STATE_NONE;
return n->has_ufo;
}
-static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
+static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
+ int version_1)
{
int i;
NetClientState *nc;
n->mergeable_rx_bufs = mergeable_rx_bufs;
- n->guest_hdr_len = n->mergeable_rx_bufs ?
- sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);
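+    /* With VIRTIO_F_VERSION_1 the header always carries the num_buffers
+     * field, so the mrg_rxbuf-sized header is used even when
+     * VIRTIO_NET_F_MRG_RXBUF itself was not negotiated. */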
+ if (version_1) {
+ n->guest_hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ } else {
+ n->guest_hdr_len = n->mergeable_rx_bufs ?
+ sizeof(struct virtio_net_hdr_mrg_rxbuf) :
+ sizeof(struct virtio_net_hdr);
+ }
for (i = 0; i < n->max_queues; i++) {
nc = qemu_get_subqueue(n->nic, i);
return 0;
}
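+    /* vhost-user backends have no tap fd to attach, so the rings for this
+     * queue pair are enabled through the vhost backend instead. */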
+ if (nc->peer->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
+ vhost_set_vring_enable(nc->peer, 1);
+ }
+
if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
return 0;
}
return 0;
}
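+    /* Likewise, vhost-user rings are disabled through the vhost backend. */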
+ if (nc->peer->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
+ vhost_set_vring_enable(nc->peer, 0);
+ }
+
if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
return 0;
}
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);
-static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
+static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
+ Error **errp)
{
VirtIONet *n = VIRTIO_NET(vdev);
NetClientState *nc = qemu_get_queue(n->nic);
- features |= (1 << VIRTIO_NET_F_MAC);
+    /* First, sync all the features that virtio-net could possibly support */
+ features |= n->host_features;
+
+ virtio_add_feature(&features, VIRTIO_NET_F_MAC);
if (!peer_has_vnet_hdr(n)) {
- features &= ~(0x1 << VIRTIO_NET_F_CSUM);
- features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO4);
- features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO6);
- features &= ~(0x1 << VIRTIO_NET_F_HOST_ECN);
+ virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
+ virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
+ virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
+ virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);
- features &= ~(0x1 << VIRTIO_NET_F_GUEST_CSUM);
- features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO4);
- features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO6);
- features &= ~(0x1 << VIRTIO_NET_F_GUEST_ECN);
+ virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
+ virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
+ virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
+ virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
}
if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
- features &= ~(0x1 << VIRTIO_NET_F_GUEST_UFO);
- features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
+ virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
+ virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
}
if (!get_vhost_net(nc->peer)) {
    return features;
}
return vhost_net_get_features(get_vhost_net(nc->peer), features);
}
-static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
+static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
{
- uint32_t features = 0;
+ uint64_t features = 0;
/* Linux kernel 2.6.25. It understood MAC (as everyone must),
* but also these: */
- features |= (1 << VIRTIO_NET_F_MAC);
- features |= (1 << VIRTIO_NET_F_CSUM);
- features |= (1 << VIRTIO_NET_F_HOST_TSO4);
- features |= (1 << VIRTIO_NET_F_HOST_TSO6);
- features |= (1 << VIRTIO_NET_F_HOST_ECN);
+ virtio_add_feature(&features, VIRTIO_NET_F_MAC);
+ virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
+ virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
+ virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
+ virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);
return features;
}
return virtio_net_guest_offloads_by_features(vdev->guest_features);
}
-static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
+static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
{
VirtIONet *n = VIRTIO_NET(vdev);
int i;
- virtio_net_set_multiqueue(n, !!(features & (1 << VIRTIO_NET_F_MQ)));
+ virtio_net_set_multiqueue(n,
+ virtio_has_feature(features, VIRTIO_NET_F_MQ));
- virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF)));
+ virtio_net_set_mrg_rx_bufs(n,
+ virtio_has_feature(features,
+ VIRTIO_NET_F_MRG_RXBUF),
+ virtio_has_feature(features,
+ VIRTIO_F_VERSION_1));
if (n->has_vnet_hdr) {
n->curr_guest_offloads =
vhost_net_ack_features(get_vhost_net(nc->peer), features);
}
- if ((1 << VIRTIO_NET_F_CTRL_VLAN) & features) {
+ if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
memset(n->vlans, 0, MAX_VLAN >> 3);
} else {
memset(n->vlans, 0xff, MAX_VLAN >> 3);
uint64_t offloads;
size_t s;
- if (!((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features)) {
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
return VIRTIO_NET_ERR;
}
virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
VirtQueueElement elem;
size_t s;
- struct iovec *iov;
+ struct iovec *iov, *iov2;
unsigned int iov_cnt;
while (virtqueue_pop(vq, &elem)) {
    if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
        iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) {
        error_report("virtio-net ctrl missing headers");
        exit(1);
}
- iov = elem.out_sg;
iov_cnt = elem.out_num;
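+        /* Work on a copy of the descriptor list: iov_discard_front() below
+         * modifies the vector and elem.out_sg must stay intact; iov2 keeps
+         * the pointer so the copy can be freed once the command is done. */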
+ iov2 = iov = g_memdup(elem.out_sg, sizeof(struct iovec) * elem.out_num);
s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
if (s != sizeof(ctrl)) {
virtqueue_push(vq, &elem, sizeof(status));
virtio_notify(vdev, vq);
+ g_free(iov2);
}
}
if (i == 0)
return -1;
error_report("virtio-net unexpected empty queue: "
- "i %zd mergeable %d offset %zd, size %zd, "
- "guest hdr len %zd, host hdr len %zd guest features 0x%x",
- i, n->mergeable_rx_bufs, offset, size,
- n->guest_hdr_len, n->host_hdr_len, vdev->guest_features);
+ "i %zd mergeable %d offset %zd, size %zd, "
+ "guest hdr len %zd, host hdr len %zd "
+ "guest features 0x%" PRIx64,
+ i, n->mergeable_rx_bufs, offset, size,
+ n->guest_hdr_len, n->host_hdr_len,
+ vdev->guest_features);
exit(1);
}
* must have consumed the complete packet.
* Otherwise, drop it. */
if (!n->mergeable_rx_bufs && offset < size) {
-#if 0
- error_report("virtio-net truncated non-mergeable packet: "
- "i %zd mergeable %d offset %zd, size %zd, "
- "guest hdr len %zd, host hdr len %zd",
- i, n->mergeable_rx_bufs,
- offset, size, n->guest_hdr_len, n->host_hdr_len);
-#endif
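+        /* Hand the element back to the queue so its buffers can be reused;
+         * the truncated packet is dropped instead of being pushed short. */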
+ virtqueue_discard(q->rx_vq, &elem, total);
return size;
}
virtqueue_push(q->tx_vq, &q->async_tx.elem, 0);
virtio_notify(vdev, q->tx_vq);
- q->async_tx.elem.out_num = q->async_tx.len = 0;
+ q->async_tx.elem.out_num = 0;
virtio_queue_set_notification(q->tx_vq, 1);
virtio_net_flush_tx(q);
}
while (virtqueue_pop(q->tx_vq, &elem)) {
- ssize_t ret, len;
+ ssize_t ret;
unsigned int out_num = elem.out_num;
struct iovec *out_sg = &elem.out_sg[0];
- struct iovec sg[VIRTQUEUE_MAX_SIZE];
+ struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1];
+ struct virtio_net_hdr_mrg_rxbuf mhdr;
if (out_num < 1) {
error_report("virtio-net header not in first element");
exit(1);
}
if (n->has_vnet_hdr) {
- if (out_sg[0].iov_len < n->guest_hdr_len) {
+ if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
+ n->guest_hdr_len) {
error_report("virtio-net header incorrect");
exit(1);
}
- virtio_net_hdr_swap(vdev, (void *) out_sg[0].iov_base);
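+            /* Never byteswap the header in guest memory: swap a local copy
+             * (mhdr) and splice it in front of the remaining descriptors
+             * via sg2, leaving the guest-visible buffer untouched. */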
+ if (virtio_needs_swap(vdev)) {
+ virtio_net_hdr_swap(vdev, (void *) &mhdr);
+ sg2[0].iov_base = &mhdr;
+ sg2[0].iov_len = n->guest_hdr_len;
+ out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
+ out_sg, out_num,
+ n->guest_hdr_len, -1);
+ if (out_num == VIRTQUEUE_MAX_SIZE) {
+ goto drop;
+ }
+ out_num += 1;
+ out_sg = sg2;
+ }
}
-
/*
* If host wants to see the guest header as is, we can
* pass it on unchanged. Otherwise, copy just the parts
out_sg = sg;
}
- len = n->guest_hdr_len;
-
ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
out_sg, out_num, virtio_net_tx_complete);
if (ret == 0) {
virtio_queue_set_notification(q->tx_vq, 0);
q->async_tx.elem = elem;
- q->async_tx.len = len;
return -EBUSY;
}
- len += ret;
-
+drop:
virtqueue_push(q->tx_vq, &elem, 0);
virtio_notify(vdev, q->tx_vq);
}
}
-static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
+static void virtio_net_add_queue(VirtIONet *n, int index)
{
VirtIODevice *vdev = VIRTIO_DEVICE(n);
- int i, max = multiqueue ? n->max_queues : 1;
- n->multiqueue = multiqueue;
+ n->vqs[index].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
+ if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
+ n->vqs[index].tx_vq =
+ virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
+ n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ virtio_net_tx_timer,
+ &n->vqs[index]);
+ } else {
+ n->vqs[index].tx_vq =
+ virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
+ n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
+ }
+
+ n->vqs[index].tx_waiting = 0;
+ n->vqs[index].n = n;
+}
- for (i = 2; i <= n->max_queues * 2 + 1; i++) {
- virtio_del_queue(vdev, i);
+static void virtio_net_del_queue(VirtIONet *n, int index)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(n);
+ VirtIONetQueue *q = &n->vqs[index];
+ NetClientState *nc = qemu_get_subqueue(n->nic, index);
+
+ qemu_purge_queued_packets(nc);
+
+ virtio_del_queue(vdev, index * 2);
+ if (q->tx_timer) {
+ timer_del(q->tx_timer);
+ timer_free(q->tx_timer);
+ } else {
+ qemu_bh_delete(q->tx_bh);
}
+ virtio_del_queue(vdev, index * 2 + 1);
+}
- for (i = 1; i < max; i++) {
- n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
- if (n->vqs[i].tx_timer) {
- n->vqs[i].tx_vq =
- virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
- n->vqs[i].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
- virtio_net_tx_timer,
- &n->vqs[i]);
- } else {
- n->vqs[i].tx_vq =
- virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
- n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
- }
+static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(n);
+ int old_num_queues = virtio_get_num_queues(vdev);
+ int new_num_queues = new_max_queues * 2 + 1;
+ int i;
+
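+    /* Queue pair i uses vq 2 * i for rx and 2 * i + 1 for tx, with the
+     * ctrl vq last, so a valid total is always odd and at least 3. */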
+ assert(old_num_queues >= 3);
+ assert(old_num_queues % 2 == 1);
- n->vqs[i].tx_waiting = 0;
- n->vqs[i].n = n;
+ if (old_num_queues == new_num_queues) {
+ return;
}
- /* Note: Minux Guests (version 3.2.1) use ctrl vq but don't ack
- * VIRTIO_NET_F_CTRL_VQ. Create ctrl vq unconditionally to avoid
- * breaking them.
+ /*
+ * We always need to remove and add ctrl vq if
+ * old_num_queues != new_num_queues. Remove ctrl_vq first,
+     * and then we only enter one of the following two loops.
*/
+ virtio_del_queue(vdev, old_num_queues - 1);
+
+ for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
+ /* new_num_queues < old_num_queues */
+ virtio_net_del_queue(n, i / 2);
+ }
+
+ for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
+ /* new_num_queues > old_num_queues */
+ virtio_net_add_queue(n, i / 2);
+ }
+
+ /* add ctrl_vq last */
n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
+}
+
+static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
+{
+ int max = multiqueue ? n->max_queues : 1;
+
+ n->multiqueue = multiqueue;
+ virtio_net_change_num_queues(n, max);
virtio_net_set_queues(n);
}
}
}
- if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
+ if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
qemu_put_be64(f, n->curr_guest_offloads);
}
}
{
VirtIONet *n = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
+ int ret;
if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
return -EINVAL;
- return virtio_load(vdev, f, version_id);
+ ret = virtio_load(vdev, f, version_id);
+ if (ret) {
+ return ret;
+ }
+
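+    /* The guest feature bits are only fully restored once virtio_load()
+     * has returned, so feature-dependent state is brought back here rather
+     * than in virtio_net_load_device(). */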
+ if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
+ n->curr_guest_offloads = qemu_get_be64(f);
+ } else {
+ n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
+ }
+
+ if (peer_has_vnet_hdr(n)) {
+ virtio_net_apply_guest_offloads(n);
+ }
+
+ if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
+ virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
+ n->announce_counter = SELF_ANNOUNCE_ROUNDS;
+ timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
+ }
+
+ return 0;
}
static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
qemu_get_buffer(f, n->mac, ETH_ALEN);
n->vqs[0].tx_waiting = qemu_get_be32(f);
- virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f));
+ virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f),
+ virtio_vdev_has_feature(vdev,
+ VIRTIO_F_VERSION_1));
if (version_id >= 3)
n->status = qemu_get_be16(f);
}
}
- if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
- n->curr_guest_offloads = qemu_get_be64(f);
- } else {
- n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
- }
-
- if (peer_has_vnet_hdr(n)) {
- virtio_net_apply_guest_offloads(n);
- }
-
virtio_net_set_queues(n);
/* Find the first multicast entry in the saved MAC filter */
qemu_get_subqueue(n->nic, i)->link_down = link_down;
}
- if (vdev->guest_features & (0x1 << VIRTIO_NET_F_GUEST_ANNOUNCE) &&
- vdev->guest_features & (0x1 << VIRTIO_NET_F_CTRL_VQ)) {
- n->announce_counter = SELF_ANNOUNCE_ROUNDS;
- timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
- }
-
return 0;
}
-static void virtio_net_cleanup(NetClientState *nc)
-{
- VirtIONet *n = qemu_get_nic_opaque(nc);
-
- n->nic = NULL;
-}
-
static NetClientInfo net_virtio_info = {
.type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = virtio_net_can_receive,
.receive = virtio_net_receive,
- .cleanup = virtio_net_cleanup,
.link_status_changed = virtio_net_set_link_status,
.query_rx_filter = virtio_net_query_rxfilter,
};
vdev, idx, mask);
}
-void virtio_net_set_config_size(VirtIONet *n, uint32_t host_features)
+static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
{
int i, config_size = 0;
- host_features |= (1 << VIRTIO_NET_F_MAC);
+ virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
for (i = 0; feature_sizes[i].flags != 0; i++) {
if (host_features & feature_sizes[i].flags) {
config_size = MAX(feature_sizes[i].end, config_size);
NetClientState *nc;
int i;
+ virtio_net_set_config_size(n, n->host_features);
virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
n->max_queues = MAX(n->nic_conf.peers.queues, 1);
+ if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
+ error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
+ "must be a positive integer less than %d.",
+ n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2);
+ virtio_cleanup(vdev);
+ return;
+ }
n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
- n->vqs[0].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
n->curr_queues = 1;
- n->vqs[0].n = n;
n->tx_timeout = n->net_conf.txtimer;
if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
    && strcmp(n->net_conf.tx, "bh")) {
    error_report("virtio-net: "
                 "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                 n->net_conf.tx);
error_report("Defaulting to \"bh\"");
}
- if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
- n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
- virtio_net_handle_tx_timer);
- n->vqs[0].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, virtio_net_tx_timer,
- &n->vqs[0]);
- } else {
- n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
- virtio_net_handle_tx_bh);
- n->vqs[0].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[0]);
+ for (i = 0; i < n->max_queues; i++) {
+ virtio_net_add_queue(n, i);
}
+
n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
n->vqs[0].tx_waiting = 0;
n->tx_burst = n->net_conf.txburst;
- virtio_net_set_mrg_rx_bufs(n, 0);
+ virtio_net_set_mrg_rx_bufs(n, 0, 0);
n->promisc = 1; /* for compatibility */
n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
n->qdev = dev;
register_savevm(dev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
virtio_net_save, virtio_net_load, n);
-
- add_boot_device_path(n->nic_conf.bootindex, dev, "/ethernet-phy@0");
}
static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VirtIONet *n = VIRTIO_NET(dev);
- int i;
+ int i, max_queues;
/* This will stop vhost backend if appropriate. */
virtio_net_set_status(vdev, 0);
g_free(n->mac_table.macs);
g_free(n->vlans);
- for (i = 0; i < n->max_queues; i++) {
- VirtIONetQueue *q = &n->vqs[i];
- NetClientState *nc = qemu_get_subqueue(n->nic, i);
-
- qemu_purge_queued_packets(nc);
-
- if (q->tx_timer) {
- timer_del(q->tx_timer);
- timer_free(q->tx_timer);
- } else if (q->tx_bh) {
- qemu_bh_delete(q->tx_bh);
- }
+ max_queues = n->multiqueue ? n->max_queues : 1;
+ for (i = 0; i < max_queues; i++) {
+ virtio_net_del_queue(n, i);
}
timer_del(n->announce_timer);
}
static Property virtio_net_properties[] = {
+ DEFINE_PROP_BIT("csum", VirtIONet, host_features, VIRTIO_NET_F_CSUM, true),
+ DEFINE_PROP_BIT("guest_csum", VirtIONet, host_features,
+ VIRTIO_NET_F_GUEST_CSUM, true),
+ DEFINE_PROP_BIT("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
+ DEFINE_PROP_BIT("guest_tso4", VirtIONet, host_features,
+ VIRTIO_NET_F_GUEST_TSO4, true),
+ DEFINE_PROP_BIT("guest_tso6", VirtIONet, host_features,
+ VIRTIO_NET_F_GUEST_TSO6, true),
+ DEFINE_PROP_BIT("guest_ecn", VirtIONet, host_features,
+ VIRTIO_NET_F_GUEST_ECN, true),
+ DEFINE_PROP_BIT("guest_ufo", VirtIONet, host_features,
+ VIRTIO_NET_F_GUEST_UFO, true),
+ DEFINE_PROP_BIT("guest_announce", VirtIONet, host_features,
+ VIRTIO_NET_F_GUEST_ANNOUNCE, true),
+ DEFINE_PROP_BIT("host_tso4", VirtIONet, host_features,
+ VIRTIO_NET_F_HOST_TSO4, true),
+ DEFINE_PROP_BIT("host_tso6", VirtIONet, host_features,
+ VIRTIO_NET_F_HOST_TSO6, true),
+ DEFINE_PROP_BIT("host_ecn", VirtIONet, host_features,
+ VIRTIO_NET_F_HOST_ECN, true),
+ DEFINE_PROP_BIT("host_ufo", VirtIONet, host_features,
+ VIRTIO_NET_F_HOST_UFO, true),
+ DEFINE_PROP_BIT("mrg_rxbuf", VirtIONet, host_features,
+ VIRTIO_NET_F_MRG_RXBUF, true),
+ DEFINE_PROP_BIT("status", VirtIONet, host_features,
+ VIRTIO_NET_F_STATUS, true),
+ DEFINE_PROP_BIT("ctrl_vq", VirtIONet, host_features,
+ VIRTIO_NET_F_CTRL_VQ, true),
+ DEFINE_PROP_BIT("ctrl_rx", VirtIONet, host_features,
+ VIRTIO_NET_F_CTRL_RX, true),
+ DEFINE_PROP_BIT("ctrl_vlan", VirtIONet, host_features,
+ VIRTIO_NET_F_CTRL_VLAN, true),
+ DEFINE_PROP_BIT("ctrl_rx_extra", VirtIONet, host_features,
+ VIRTIO_NET_F_CTRL_RX_EXTRA, true),
+ DEFINE_PROP_BIT("ctrl_mac_addr", VirtIONet, host_features,
+ VIRTIO_NET_F_CTRL_MAC_ADDR, true),
+ DEFINE_PROP_BIT("ctrl_guest_offloads", VirtIONet, host_features,
+ VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
+ DEFINE_PROP_BIT("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
- TX_TIMER_INTERVAL),
+ TX_TIMER_INTERVAL),
DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
DEFINE_PROP_END_OF_LIST(),