/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "qemu/iov.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/qmp/qjson.h"
#include "monitor/monitor.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))

typedef struct VirtIOFeature {
    uint32_t flags;
    size_t end;
} VirtIOFeature;

static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {}
};

static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}

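/* Virtqueues are laid out in (RX, TX) pairs: virtqueue 2i is the RX queue
 * and 2i + 1 the TX queue of network queue i, so dividing by two recovers
 * the queue-pair index. */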
static int vq2q(int queue_index)
{
    return queue_index / 2;
}

/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;

    stw_p(&netcfg.status, n->status);
    stw_p(&netcfg.max_virtqueue_pairs, n->max_queues);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, n->config_size);
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    if (!(vdev->guest_features >> VIRTIO_NET_F_CTRL_MAC_ADDR & 1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}

static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}

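/* Start or stop the vhost-net backend so that its running state matches
 * what virtio_net_started() reports for the current device status. */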
static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!nc->peer) {
        return;
    }
    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return;
    }

    if (!tap_get_vhost_net(nc->peer)) {
        return;
    }

    if (!!n->vhost_started ==
        (virtio_net_started(n, status) && !nc->peer->link_down)) {
        return;
    }
    if (!n->vhost_started) {
        int r;
        if (!vhost_net_query(tap_get_vhost_net(nc->peer), vdev)) {
            return;
        }
        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}

static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (virtio_net_started(n, queue_status) && !n->vhost_started) {
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
        }
    }
}

static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t old_status = n->status;

    if (nc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(vdev);

    virtio_net_set_status(vdev, vdev->status);
}

static void rxfilter_notify(NetClientState *nc)
{
    QObject *event_data;
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        gchar *path = object_get_canonical_path(OBJECT(n->qdev));
        if (n->netclient_name) {
            event_data = qobject_from_jsonf("{ 'name': %s, 'path': %s }",
                                            n->netclient_name, path);
        } else {
            event_data = qobject_from_jsonf("{ 'path': %s }", path);
        }
        monitor_protocol_event(QEVENT_NIC_RX_FILTER_CHANGED, event_data);
        qobject_decref(event_data);
        g_free(path);

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}

static char *mac_strdup_printf(const uint8_t *mac)
{
    return g_strdup_printf("%.2x:%.2x:%.2x:%.2x:%.2x:%.2x", mac[0],
                           mac[1], mac[2], mac[3], mac[4], mac[5]);
}

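/* Handler for the QMP query-rx-filter command: snapshot the device's
 * current RX filtering state (promiscuous mode, unicast/multicast policy,
 * MAC and VLAN tables) into a freshly allocated RxFilterInfo. */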
static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    RxFilterInfo *info;
    strList *str_list, *entry;
    intList *int_list, *int_entry;
    int i, j;

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    /* n->nobcast is set when the guest disabled broadcast reception, so
     * the reported value is its negation. */
    info->broadcast_allowed = !n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

    info->main_mac = mac_strdup_printf(n->mac);

    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->multicast_table = str_list;

    int_list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        /* <= 0x1f so that bit 31 of each word is also reported */
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                int_entry = g_malloc0(sizeof(*int_entry));
                int_entry->value = (i << 5) + j;
                int_entry->next = int_list;
                int_list = int_entry;
            }
        }
    }
    info->vlan_table = int_list;

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);
}

static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return;
    }

    n->has_vnet_hdr = tap_has_vnet_hdr(nc->peer);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

    n->has_ufo = tap_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}

static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    n->guest_hdr_len = n->mergeable_rx_bufs ?
        sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            tap_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            tap_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}

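/* For multiqueue-capable tap backends, attaching or detaching a subqueue's
 * peer enables or disables the corresponding tap queue, so only the first
 * curr_queues queues actually carry traffic. */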
static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_enable(nc->peer);
}

static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}

static void virtio_net_set_queues(VirtIONet *n)
{
    int i;

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            assert(!peer_attach(n, i));
        } else {
            assert(!peer_detach(n, i));
        }
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);

static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    features |= (1 << VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_ECN);

        features &= ~(0x1 << VIRTIO_NET_F_GUEST_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_UFO);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
    }

    if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return features;
    }
    if (!tap_get_vhost_net(nc->peer)) {
        return features;
    }
    return vhost_net_get_features(tap_get_vhost_net(nc->peer), features);
}

static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint32_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    features |= (1 << VIRTIO_NET_F_MAC);
    features |= (1 << VIRTIO_NET_F_CSUM);
    features |= (1 << VIRTIO_NET_F_HOST_TSO4);
    features |= (1 << VIRTIO_NET_F_HOST_TSO6);
    features |= (1 << VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    tap_set_offload(qemu_get_subqueue(n->nic, 0)->peer,
                    !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
                    !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
                    !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
                    !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
                    !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}

static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
{
    static const uint64_t guest_offloads_mask =
        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
        (1ULL << VIRTIO_NET_F_GUEST_ECN) |
        (1ULL << VIRTIO_NET_F_GUEST_UFO);

    return guest_offloads_mask & features;
}

static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}

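/* Called when the guest acks its feature set: propagate the negotiated
 * features to the virtqueue layout, the receive buffer format, the tap
 * offload settings, and any vhost backend. */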
static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    virtio_net_set_multiqueue(n, !!(features & (1 << VIRTIO_NET_F_MQ)));

    virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF)));

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
            continue;
        }
        if (!tap_get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(tap_get_vhost_net(nc->peer), features);
    }
}

static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

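/* VIRTIO_NET_CTRL_GUEST_OFFLOADS: the guest dynamically re-negotiates which
 * receive offloads (checksum, TSO, ECN, UFO) it wants, within the set that
 * was offered at feature negotiation time. */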
static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

    if (!((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features)) {
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = ldl_p(&mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    first_multi = in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = ldl_p(&mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (in_use + mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        multi_overflow = 1;
    }

    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    g_free(macs);
    return VIRTIO_NET_ERR;
}

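/* The VLAN filter is a MAX_VLAN-bit bitmap stored as 32-bit words: VLAN id
 * 'vid' lives at bit (vid & 0x1f) of word (vid >> 5).  For example, vid 100
 * maps to bit 4 of n->vlans[3]. */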
static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    uint16_t vid;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = lduw_p(&vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = lduw_p(&mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}

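/* Control virtqueue: each request carries a virtio_net_ctrl_hdr (class and
 * command), an optional command-specific payload in the out buffers, and a
 * one-byte in-buffer where the device writes VIRTIO_NET_OK or
 * VIRTIO_NET_ERR. */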
static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement elem;
    size_t s;
    struct iovec *iov;
    unsigned int iov_cnt;

    while (virtqueue_pop(vq, &elem)) {
        if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
            iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) {
            error_report("virtio-net ctrl missing headers");
            exit(1);
        }

        iov = elem.out_sg;
        iov_cnt = elem.out_num;
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, &elem, sizeof(status));
        virtio_notify(vdev, vq);
    }
}

/* RX */

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}

static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!vdev->vm_running) {
        return 0;
    }

    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    return 1;
}

static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}

/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}

static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}

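/* Decide whether an incoming frame should be delivered to the guest.
 * Order matters: promiscuous mode short-circuits everything, then the VLAN
 * bitmap is consulted, and finally the broadcast flag, the multicast table
 * or the unicast table depending on the destination address class. */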
static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc)
        return 1;

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

    if (ptr[0] & 1) { // multicast
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { // unicast
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}

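/* Deliver one frame to the guest.  With VIRTIO_NET_F_MRG_RXBUF the packet
 * may be scattered over several descriptor chains, and the header begins
 * with (per the virtio spec):
 *
 *     struct virtio_net_hdr_mrg_rxbuf {
 *         struct virtio_net_hdr hdr;
 *         uint16_t num_buffers;   // how many chains the packet used
 *     };
 *
 * num_buffers is only known once the whole packet has been placed, so the
 * iovec covering it (mhdr_sg) is remembered and patched at the end. */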
static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size))
        return size;

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement elem;
        int len, total;
        const struct iovec *sg = elem.in_sg;

        total = 0;

        if (virtqueue_pop(q->rx_vq, &elem) == 0) {
            if (i == 0)
                return -1;
            error_report("virtio-net unexpected empty queue: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd guest features 0x%x",
                         i, n->mergeable_rx_bufs, offset, size,
                         n->guest_hdr_len, n->host_hdr_len, vdev->guest_features);
            exit(1);
        }

        if (elem.in_num < 1) {
            error_report("virtio-net receive queue contains no in buffers");
            exit(1);
        }

        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem.in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem.in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem.in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
            error_report("virtio-net truncated non-mergeable packet: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd",
                         i, n->mergeable_rx_bufs,
                         offset, size, n->guest_hdr_len, n->host_hdr_len);
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, &elem, total, i++);
    }

    if (mhdr_cnt) {
        stw_p(&mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;
}

static int32_t virtio_net_flush_tx(VirtIONetQueue *q);

static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    virtqueue_push(q->tx_vq, &q->async_tx.elem, 0);
    virtio_notify(vdev, q->tx_vq);

    q->async_tx.elem.out_num = q->async_tx.len = 0;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

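/* TX */

/* Drain up to n->tx_burst packets from the TX virtqueue and hand them to
 * the backend.  Returns the number of packets flushed, or -EBUSY when the
 * backend could not take a packet and completion will arrive later via
 * virtio_net_tx_complete(). */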
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    assert(vdev->vm_running);

    if (q->async_tx.elem.out_num) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    while (virtqueue_pop(q->tx_vq, &elem)) {
        ssize_t ret, len;
        unsigned int out_num = elem.out_num;
        struct iovec *out_sg = &elem.out_sg[0];
        struct iovec sg[VIRTQUEUE_MAX_SIZE];

        if (out_num < 1) {
            error_report("virtio-net header not in first element");
            exit(1);
        }

        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        len = n->guest_hdr_len;

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            q->async_tx.len = len;
            return -EBUSY;
        }

        len += ret;

        virtqueue_push(q->tx_vq, &elem, 0);
        virtio_notify(vdev, q->tx_vq);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}

static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        q->tx_waiting = 1;
        return;
    }

    if (q->tx_waiting) {
        virtio_queue_set_notification(vq, 1);
        timer_del(q->tx_timer);
        q->tx_waiting = 0;
        virtio_net_flush_tx(q);
    } else {
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        q->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}

static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely(q->tx_waiting)) {
        return;
    }
    q->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->tx_bh);
}

static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    assert(vdev->vm_running);

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int32_t ret;

    assert(vdev->vm_running);

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY) {
        return; /* Notification re-enable handled by tx_complete */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    if (virtio_net_flush_tx(q) > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}

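/* Re-plumb the virtqueue layout after the guest toggles VIRTIO_NET_F_MQ:
 * delete every queue beyond the first RX/TX pair, recreate a pair for each
 * active queue, and put the control queue back at the end. */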
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int i, max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;

    for (i = 2; i <= n->max_queues * 2 + 1; i++) {
        virtio_del_queue(vdev, i);
    }

    for (i = 1; i < max; i++) {
        n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
        if (n->vqs[i].tx_timer) {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
            n->vqs[i].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                              virtio_net_tx_timer,
                                              &n->vqs[i]);
        } else {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
            n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
        }

        n->vqs[i].tx_waiting = 0;
        n->vqs[i].n = n;
    }

    /* Note: Minix guests (version 3.2.1) use the ctrl vq but don't ack
     * VIRTIO_NET_F_CTRL_VQ. Create the ctrl vq unconditionally to avoid
     * breaking them.
     */
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);

    virtio_net_set_queues(n);
}

static void virtio_net_save(QEMUFile *f, void *opaque)
{
    int i;
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);
    virtio_save(vdev, f);

    qemu_put_buffer(f, n->mac, ETH_ALEN);
    qemu_put_be32(f, n->vqs[0].tx_waiting);
    qemu_put_be32(f, n->mergeable_rx_bufs);
    qemu_put_be16(f, n->status);
    qemu_put_byte(f, n->promisc);
    qemu_put_byte(f, n->allmulti);
    qemu_put_be32(f, n->mac_table.in_use);
    qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
    qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    qemu_put_be32(f, n->has_vnet_hdr);
    qemu_put_byte(f, n->mac_table.multi_overflow);
    qemu_put_byte(f, n->mac_table.uni_overflow);
    qemu_put_byte(f, n->alluni);
    qemu_put_byte(f, n->nomulti);
    qemu_put_byte(f, n->nouni);
    qemu_put_byte(f, n->nobcast);
    qemu_put_byte(f, n->has_ufo);
    if (n->max_queues > 1) {
        qemu_put_be16(f, n->max_queues);
        qemu_put_be16(f, n->curr_queues);
        for (i = 1; i < n->curr_queues; i++) {
            qemu_put_be32(f, n->vqs[i].tx_waiting);
        }
    }

    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
        qemu_put_be64(f, n->curr_guest_offloads);
    }
}

static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int ret, i, link_down;

    if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
        return -EINVAL;

    ret = virtio_load(vdev, f);
    if (ret) {
        return ret;
    }

    qemu_get_buffer(f, n->mac, ETH_ALEN);
    n->vqs[0].tx_waiting = qemu_get_be32(f);

    virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f));

    if (version_id >= 3)
        n->status = qemu_get_be16(f);

    if (version_id >= 4) {
        if (version_id < 8) {
            n->promisc = qemu_get_be32(f);
            n->allmulti = qemu_get_be32(f);
        } else {
            n->promisc = qemu_get_byte(f);
            n->allmulti = qemu_get_byte(f);
        }
    }

    if (version_id >= 5) {
        n->mac_table.in_use = qemu_get_be32(f);
        /* MAC_TABLE_ENTRIES may be different from the saved image */
        if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
            qemu_get_buffer(f, n->mac_table.macs,
                            n->mac_table.in_use * ETH_ALEN);
        } else if (n->mac_table.in_use) {
            /* Discard the oversized table; the scratch buffer must hold
             * in_use * ETH_ALEN bytes, not just in_use bytes. */
            uint8_t *buf = g_malloc0(n->mac_table.in_use * ETH_ALEN);
            qemu_get_buffer(f, buf, n->mac_table.in_use * ETH_ALEN);
            g_free(buf);
            n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
            n->mac_table.in_use = 0;
        }
    }

    if (version_id >= 6)
        qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);

    if (version_id >= 7) {
        if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
            error_report("virtio-net: saved image requires vnet_hdr=on");
            return -1;
        }
    }

    if (version_id >= 9) {
        n->mac_table.multi_overflow = qemu_get_byte(f);
        n->mac_table.uni_overflow = qemu_get_byte(f);
    }

    if (version_id >= 10) {
        n->alluni = qemu_get_byte(f);
        n->nomulti = qemu_get_byte(f);
        n->nouni = qemu_get_byte(f);
        n->nobcast = qemu_get_byte(f);
    }

    if (version_id >= 11) {
        if (qemu_get_byte(f) && !peer_has_ufo(n)) {
            error_report("virtio-net: saved image requires TUN_F_UFO support");
            return -1;
        }
    }

    if (n->max_queues > 1) {
        if (n->max_queues != qemu_get_be16(f)) {
            error_report("virtio-net: different max_queues");
            return -1;
        }

        n->curr_queues = qemu_get_be16(f);
        for (i = 1; i < n->curr_queues; i++) {
            n->vqs[i].tx_waiting = qemu_get_be32(f);
        }
    }

    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
        n->curr_guest_offloads = qemu_get_be64(f);
    } else {
        n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
    }

    if (peer_has_vnet_hdr(n)) {
        virtio_net_apply_guest_offloads(n);
    }

    virtio_net_set_queues(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queues; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    return 0;
}

static void virtio_net_cleanup(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    n->nic = NULL;
}

static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .cleanup = virtio_net_cleanup,
    .link_status_changed = virtio_net_set_link_status,
    .query_rx_filter = virtio_net_query_rxfilter,
};

static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(tap_get_vhost_net(nc->peer), idx);
}

static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    vhost_net_virtqueue_mask(tap_get_vhost_net(nc->peer),
                             vdev, idx, mask);
}

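/* The device config space only needs to cover fields whose feature bits are
 * offered, so its size is the end offset of the last enabled field in
 * feature_sizes (VIRTIO_NET_F_MAC is always implied). */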
void virtio_net_set_config_size(VirtIONet *n, uint32_t host_features)
{
    int i, config_size = 0;
    host_features |= (1 << VIRTIO_NET_F_MAC);
    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }
    n->config_size = config_size;
}

void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
                                   const char *type)
{
    /*
     * The name can be NULL, the netclient name will be type.x.
     */
    assert(type != NULL);

    if (n->netclient_name) {
        g_free(n->netclient_name);
        n->netclient_name = NULL;
    }
    if (n->netclient_type) {
        g_free(n->netclient_type);
        n->netclient_type = NULL;
    }

    if (name != NULL) {
        n->netclient_name = g_strdup(name);
    }
    n->netclient_type = g_strdup(type);
}

static int virtio_net_device_init(VirtIODevice *vdev)
{
    int i;

    DeviceState *qdev = DEVICE(vdev);
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc;

    virtio_init(VIRTIO_DEVICE(n), "virtio-net", VIRTIO_ID_NET,
                n->config_size);

    n->max_queues = MAX(n->nic_conf.queues, 1);
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->vqs[0].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
    n->curr_queues = 1;
    n->vqs[0].n = n;
    n->tx_timeout = n->net_conf.txtimer;

    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
                       && strcmp(n->net_conf.tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     n->net_conf.tx);
        error_report("Defaulting to \"bh\"");
    }

    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_timer);
        n->vqs[0].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          virtio_net_tx_timer, &n->vqs[0]);
    } else {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_bh);
        n->vqs[0].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[0]);
    }
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;

    if (n->netclient_type) {
        /*
         * This happens when virtio_net_set_netclient_name has been called.
         */
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              n->netclient_type, n->netclient_name, n);
    } else {
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              object_get_typename(OBJECT(qdev)), qdev->id, n);
    }

    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            tap_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    nc = qemu_get_queue(n->nic);
    nc->rxfilter_notify_enabled = 1;

    n->qdev = qdev;
    register_savevm(qdev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
                    virtio_net_save, virtio_net_load, n);

    add_boot_device_path(n->nic_conf.bootindex, qdev, "/ethernet-phy@0");

    return 0;
}

static int virtio_net_device_exit(DeviceState *qdev)
{
    VirtIONet *n = VIRTIO_NET(qdev);
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    int i;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    unregister_savevm(qdev, "virtio-net", n);

    if (n->netclient_name) {
        g_free(n->netclient_name);
        n->netclient_name = NULL;
    }
    if (n->netclient_type) {
        g_free(n->netclient_type);
        n->netclient_type = NULL;
    }

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    for (i = 0; i < n->max_queues; i++) {
        VirtIONetQueue *q = &n->vqs[i];
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        qemu_purge_queued_packets(nc);

        if (q->tx_timer) {
            timer_del(q->tx_timer);
            timer_free(q->tx_timer);
        } else if (q->tx_bh) {
            qemu_bh_delete(q->tx_bh);
        }
    }

    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_cleanup(vdev);

    return 0;
}

static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
}

static Property virtio_net_properties[] = {
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    dc->exit = virtio_net_device_exit;
    dc->props = virtio_net_properties;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    vdc->init = virtio_net_device_init;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
}

static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)