X-Git-Url: https://repo.jachan.dev/qemu.git/blobdiff_plain/283e2c2adcb80148c7f67d71d52134af80d2fbae..bfec359afba088aaacc7d316f43302f28c6e642a:/hw/net/virtio-net.c

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 6f98eab2ba..7d091c9259 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -55,6 +55,8 @@ static VirtIOFeature feature_sizes[] = {
      .end = endof(struct virtio_net_config, status)},
     {.flags = 1 << VIRTIO_NET_F_MQ,
      .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
+    {.flags = 1 << VIRTIO_NET_F_MTU,
+     .end = endof(struct virtio_net_config, mtu)},
     {}
 };
 
@@ -81,6 +83,7 @@ static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
 
     virtio_stw_p(vdev, &netcfg.status, n->status);
     virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
+    virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
     memcpy(netcfg.mac, n->mac, ETH_ALEN);
     memcpy(config, &netcfg, n->config_size);
 }
@@ -152,6 +155,16 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
             qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
         }
 
+        if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
+            r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
+            if (r < 0) {
+                error_report("%uBytes MTU not supported by the backend",
+                             n->net_conf.mtu);
+
+                return;
+            }
+        }
+
         n->vhost_started = 1;
         r = vhost_net_start(vdev, n->nic->ncs, queues);
         if (r < 0) {
@@ -497,6 +510,10 @@ static int peer_attach(VirtIONet *n, int index)
         return 0;
     }
 
+    if (n->max_queues == 1) {
+        return 0;
+    }
+
     return tap_enable(nc->peer);
 }
 
@@ -1117,7 +1134,8 @@ static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
     return 0;
 }
 
-static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
+                                      size_t size)
 {
     VirtIONet *n = qemu_get_nic_opaque(nc);
     VirtIONetQueue *q = virtio_net_get_subqueue(nc);
@@ -1220,6 +1238,17 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t
     return size;
 }
 
+static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
+                                  size_t size)
+{
+    ssize_t r;
+
+    rcu_read_lock();
+    r = virtio_net_receive_rcu(nc, buf, size);
+    rcu_read_unlock();
+    return r;
+}
+
 static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
 
 static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
@@ -1544,119 +1573,22 @@ static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
     virtio_net_set_queues(n);
 }
 
-static void virtio_net_save_device(VirtIODevice *vdev, QEMUFile *f)
-{
-    VirtIONet *n = VIRTIO_NET(vdev);
-    int i;
-
-    qemu_put_buffer(f, n->mac, ETH_ALEN);
-    qemu_put_be32(f, n->vqs[0].tx_waiting);
-    qemu_put_be32(f, n->mergeable_rx_bufs);
-    qemu_put_be16(f, n->status);
-    qemu_put_byte(f, n->promisc);
-    qemu_put_byte(f, n->allmulti);
-    qemu_put_be32(f, n->mac_table.in_use);
-    qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
-    qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
-    qemu_put_be32(f, n->has_vnet_hdr);
-    qemu_put_byte(f, n->mac_table.multi_overflow);
-    qemu_put_byte(f, n->mac_table.uni_overflow);
-    qemu_put_byte(f, n->alluni);
-    qemu_put_byte(f, n->nomulti);
-    qemu_put_byte(f, n->nouni);
-    qemu_put_byte(f, n->nobcast);
-    qemu_put_byte(f, n->has_ufo);
-    if (n->max_queues > 1) {
-        qemu_put_be16(f, n->max_queues);
-        qemu_put_be16(f, n->curr_queues);
-        for (i = 1; i < n->curr_queues; i++) {
-            qemu_put_be32(f, n->vqs[i].tx_waiting);
-        }
-    }
-
-    if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
-        qemu_put_be64(f, n->curr_guest_offloads);
-    }
-}
-
-static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
-                                  int version_id)
+static int virtio_net_post_load_device(void *opaque, int version_id)
 {
-    VirtIONet *n = VIRTIO_NET(vdev);
+    VirtIONet *n = opaque;
+    VirtIODevice *vdev = VIRTIO_DEVICE(n);
     int i, link_down;
 
-    qemu_get_buffer(f, n->mac, ETH_ALEN);
-    n->vqs[0].tx_waiting = qemu_get_be32(f);
-
-    virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f),
+    virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
                                virtio_vdev_has_feature(vdev,
                                                        VIRTIO_F_VERSION_1));
 
-    n->status = qemu_get_be16(f);
-
-    n->promisc = qemu_get_byte(f);
-    n->allmulti = qemu_get_byte(f);
-
-    n->mac_table.in_use = qemu_get_be32(f);
     /* MAC_TABLE_ENTRIES may be different from the saved image */
-    if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
-        qemu_get_buffer(f, n->mac_table.macs,
-                        n->mac_table.in_use * ETH_ALEN);
-    } else {
-        int64_t i;
-
-        /* Overflow detected - can happen if source has a larger MAC table.
-         * We simply set overflow flag so there's no need to maintain the
-         * table of addresses, discard them all.
-         * Note: 64 bit math to avoid integer overflow.
-         */
-        for (i = 0; i < (int64_t)n->mac_table.in_use * ETH_ALEN; ++i) {
-            qemu_get_byte(f);
-        }
-        n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
+    if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
        n->mac_table.in_use = 0;
     }
-
-    qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
-    if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
-        error_report("virtio-net: saved image requires vnet_hdr=on");
-        return -1;
-    }
-
-    n->mac_table.multi_overflow = qemu_get_byte(f);
-    n->mac_table.uni_overflow = qemu_get_byte(f);
-
-    n->alluni = qemu_get_byte(f);
-    n->nomulti = qemu_get_byte(f);
-    n->nouni = qemu_get_byte(f);
-    n->nobcast = qemu_get_byte(f);
-
-    if (qemu_get_byte(f) && !peer_has_ufo(n)) {
-        error_report("virtio-net: saved image requires TUN_F_UFO support");
-        return -1;
-    }
-
-    if (n->max_queues > 1) {
-        if (n->max_queues != qemu_get_be16(f)) {
-            error_report("virtio-net: different max_queues ");
-            return -1;
-        }
-
-        n->curr_queues = qemu_get_be16(f);
-        if (n->curr_queues > n->max_queues) {
-            error_report("virtio-net: curr_queues %x > max_queues %x",
-                         n->curr_queues, n->max_queues);
-            return -1;
-        }
-        for (i = 1; i < n->curr_queues; i++) {
-            n->vqs[i].tx_waiting = qemu_get_be32(f);
-        }
-    }
-
-    if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
-        n->curr_guest_offloads = qemu_get_be64(f);
-    } else {
+    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
         n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
     }
 
@@ -1690,6 +1622,210 @@ static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
     return 0;
 }
 
+/* tx_waiting field of a VirtIONetQueue */
+static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
+    .name = "virtio-net-queue-tx_waiting",
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(tx_waiting, VirtIONetQueue),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static bool max_queues_gt_1(void *opaque, int version_id)
+{
+    return VIRTIO_NET(opaque)->max_queues > 1;
+}
+
+static bool has_ctrl_guest_offloads(void *opaque, int version_id)
+{
+    return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque),
+                                   VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
+}
+
+static bool mac_table_fits(void *opaque, int version_id)
+{
+    return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES;
+}
+
+static bool mac_table_doesnt_fit(void *opaque, int version_id)
+{
+    return !mac_table_fits(opaque, version_id);
+}
+
+/* This temporary type is shared by all the WITH_TMP methods
+ * although only some fields are used by each.
+ */
+struct VirtIONetMigTmp {
+    VirtIONet      *parent;
+    VirtIONetQueue *vqs_1;
+    uint16_t        curr_queues_1;
+    uint8_t         has_ufo;
+    uint32_t        has_vnet_hdr;
+};
+
+/* The 2nd and subsequent tx_waiting flags are loaded later than
+ * the 1st entry in the queues and only if there's more than one
+ * entry.  We use the tmp mechanism to calculate a temporary
+ * pointer and count and also validate the count.
+ */
+
+static void virtio_net_tx_waiting_pre_save(void *opaque)
+{
+    struct VirtIONetMigTmp *tmp = opaque;
+
+    tmp->vqs_1 = tmp->parent->vqs + 1;
+    tmp->curr_queues_1 = tmp->parent->curr_queues - 1;
+    if (tmp->parent->curr_queues == 0) {
+        tmp->curr_queues_1 = 0;
+    }
+}
+
+static int virtio_net_tx_waiting_pre_load(void *opaque)
+{
+    struct VirtIONetMigTmp *tmp = opaque;
+
+    /* Reuse the pointer setup from save */
+    virtio_net_tx_waiting_pre_save(opaque);
+
+    if (tmp->parent->curr_queues > tmp->parent->max_queues) {
+        error_report("virtio-net: curr_queues %x > max_queues %x",
+                     tmp->parent->curr_queues, tmp->parent->max_queues);
+
+        return -EINVAL;
+    }
+
+    return 0; /* all good */
+}
+
+static const VMStateDescription vmstate_virtio_net_tx_waiting = {
+    .name = "virtio-net-tx_waiting",
+    .pre_load = virtio_net_tx_waiting_pre_load,
+    .pre_save = virtio_net_tx_waiting_pre_save,
+    .fields = (VMStateField[]) {
+        VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
+                                             curr_queues_1,
+                                             vmstate_virtio_net_queue_tx_waiting,
+                                             struct VirtIONetQueue),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+/* the 'has_ufo' flag is just tested; if the incoming stream has the
+ * flag set we need to check that we have it
+ */
+static int virtio_net_ufo_post_load(void *opaque, int version_id)
+{
+    struct VirtIONetMigTmp *tmp = opaque;
+
+    if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
+        error_report("virtio-net: saved image requires TUN_F_UFO support");
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+static void virtio_net_ufo_pre_save(void *opaque)
+{
+    struct VirtIONetMigTmp *tmp = opaque;
+
+    tmp->has_ufo = tmp->parent->has_ufo;
+}
+
+static const VMStateDescription vmstate_virtio_net_has_ufo = {
+    .name = "virtio-net-ufo",
+    .post_load = virtio_net_ufo_post_load,
+    .pre_save = virtio_net_ufo_pre_save,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+/* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the
+ * flag set we need to check that we have it
+ */
+static int virtio_net_vnet_post_load(void *opaque, int version_id)
+{
+    struct VirtIONetMigTmp *tmp = opaque;
+
+    if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
+        error_report("virtio-net: saved image requires vnet_hdr=on");
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+static void virtio_net_vnet_pre_save(void *opaque)
+{
+    struct VirtIONetMigTmp *tmp = opaque;
+
+    tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;
+}
+
+static const VMStateDescription vmstate_virtio_net_has_vnet = {
+    .name = "virtio-net-vnet",
+    .post_load = virtio_net_vnet_post_load,
+    .pre_save = virtio_net_vnet_pre_save,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static const VMStateDescription vmstate_virtio_net_device = {
+    .name = "virtio-net-device",
+    .version_id = VIRTIO_NET_VM_VERSION,
+    .minimum_version_id = VIRTIO_NET_VM_VERSION,
+    .post_load = virtio_net_post_load_device,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN),
+        VMSTATE_STRUCT_POINTER(vqs, VirtIONet,
+                               vmstate_virtio_net_queue_tx_waiting,
+                               VirtIONetQueue),
+        VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet),
+        VMSTATE_UINT16(status, VirtIONet),
+        VMSTATE_UINT8(promisc, VirtIONet),
+        VMSTATE_UINT8(allmulti, VirtIONet),
+        VMSTATE_UINT32(mac_table.in_use, VirtIONet),
+
+        /* Guarded pair: If it fits we load it, else we throw it away
+         * - can happen if source has a larger MAC table.; post-load
+         * sets flags in this case.
+         */
+        VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet,
+                                 0, mac_table_fits, mac_table.in_use,
+                                 ETH_ALEN),
+        VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0,
+                                     mac_table.in_use, ETH_ALEN),
+
+        /* Note: This is an array of uint32's that's always been saved as a
+         * buffer; hold onto your endiannesses; it's actually used as a bitmap
+         * but based on the uint.
+         */
+        VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3),
+        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
+                         vmstate_virtio_net_has_vnet),
+        VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet),
+        VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet),
+        VMSTATE_UINT8(alluni, VirtIONet),
+        VMSTATE_UINT8(nomulti, VirtIONet),
+        VMSTATE_UINT8(nouni, VirtIONet),
+        VMSTATE_UINT8(nobcast, VirtIONet),
+        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
+                         vmstate_virtio_net_has_ufo),
+        VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0,
+                            vmstate_info_uint16_equal, uint16_t),
+        VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1),
+        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
+                         vmstate_virtio_net_tx_waiting),
+        VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet,
+                            has_ctrl_guest_offloads),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
 static NetClientInfo net_virtio_info = {
     .type = NET_CLIENT_DRIVER_NIC,
     .size = sizeof(NICState),
@@ -1721,6 +1857,7 @@ static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
 {
     int i, config_size = 0;
     virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
+
     for (i = 0; feature_sizes[i].flags != 0; i++) {
         if (host_features & feature_sizes[i].flags) {
             config_size = MAX(feature_sizes[i].end, config_size);
         }
@@ -1750,6 +1887,10 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
     NetClientState *nc;
     int i;
 
+    if (n->net_conf.mtu) {
+        n->host_features |= (0x1 << VIRTIO_NET_F_MTU);
+    }
+
     virtio_net_set_config_size(n, n->host_features);
     virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
 
@@ -1948,6 +2089,7 @@ static Property virtio_net_properties[] = {
     DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
     DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
                        VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
+    DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
     DEFINE_PROP_END_OF_LIST(),
 };
 
@@ -1970,9 +2112,8 @@ static void virtio_net_class_init(ObjectClass *klass, void *data)
     vdc->set_status = virtio_net_set_status;
     vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
-    vdc->load = virtio_net_load_device;
-    vdc->save = virtio_net_save_device;
     vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
+    vdc->vmsd = &vmstate_virtio_net_device;
 }
 
 static const TypeInfo virtio_net_info = {
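
Usage sketch (not part of the patch above; the netdev id, backend options and MTU value are illustrative). With the "host_mtu" property added by this diff, a non-zero value turns on VIRTIO_NET_F_MTU, exposes the MTU in the device config space, and for vhost backends is pushed down via vhost_net_set_mtu() once the guest negotiates the feature:

    qemu-system-x86_64 ... \
        -netdev tap,id=net0,vhost=on \
        -device virtio-net-pci,netdev=net0,host_mtu=9000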