#include "net/vhost_net.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/qmp/qjson.h"
-#include "monitor/monitor.h"
+#include "qapi-event.h"
+#include "hw/virtio/virtio-access.h"
#define VIRTIO_NET_VM_VERSION 11
VirtIONet *n = VIRTIO_NET(vdev);
struct virtio_net_config netcfg;
- stw_p(&netcfg.status, n->status);
- stw_p(&netcfg.max_virtqueue_pairs, n->max_queues);
+ virtio_stw_p(vdev, &netcfg.status, n->status);
+ virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
memcpy(netcfg.mac, n->mac, ETH_ALEN);
memcpy(config, &netcfg, n->config_size);
}
(n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}
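+/* Self-announce timer: each round raises VIRTIO_NET_S_ANNOUNCE and kicks a
+ * config interrupt so the guest sends gratuitous packets and acks the
+ * request through the control queue.
+ */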
+static void virtio_net_announce_timer(void *opaque)
+{
+ VirtIONet *n = opaque;
+ VirtIODevice *vdev = VIRTIO_DEVICE(n);
+
+ n->announce_counter--;
+ n->status |= VIRTIO_NET_S_ANNOUNCE;
+ virtio_notify_config(vdev);
+}
+
static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
VirtIODevice *vdev = VIRTIO_DEVICE(n);
NetClientState *nc = qemu_get_queue(n->nic);
int queues = n->multiqueue ? n->max_queues : 1;
- if (!nc->peer) {
- return;
- }
- if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
- return;
- }
-
- if (!tap_get_vhost_net(nc->peer)) {
+ if (!get_vhost_net(nc->peer)) {
return;
}
return;
}
if (!n->vhost_started) {
- int r;
- if (!vhost_net_query(tap_get_vhost_net(nc->peer), vdev)) {
+ int r, i;
+
+ if (!vhost_net_query(get_vhost_net(nc->peer), vdev)) {
return;
}
+
+ /* Any packets outstanding? Purge them to avoid touching rings
+ * when vhost is running.
+ */
+ for (i = 0; i < queues; i++) {
+ NetClientState *qnc = qemu_get_subqueue(n->nic, i);
+
+ /* Purge both directions: TX and RX. */
+ qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
+ qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
+ }
+
n->vhost_started = 1;
r = vhost_net_start(vdev, n->nic->ncs, queues);
if (r < 0) {
static void rxfilter_notify(NetClientState *nc)
{
- QObject *event_data;
VirtIONet *n = qemu_get_nic_opaque(nc);
if (nc->rxfilter_notify_enabled) {
gchar *path = object_get_canonical_path(OBJECT(n->qdev));
- if (n->netclient_name) {
- event_data = qobject_from_jsonf("{ 'name': %s, 'path': %s }",
- n->netclient_name, path);
- } else {
- event_data = qobject_from_jsonf("{ 'path': %s }", path);
- }
- monitor_protocol_event(QEVENT_NIC_RX_FILTER_CHANGED, event_data);
- qobject_decref(event_data);
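+ /* Emit the QAPI-generated NIC_RX_FILTER_CHANGED event; 'name' is an
+ * optional member, hence the has_name flag derived from netclient_name.
+ */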
+ qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
+ n->netclient_name, path, &error_abort);
g_free(path);
/* disable event notification to avoid events flooding */
mac[1], mac[2], mac[3], mac[4], mac[5]);
}
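+/* Build an intList of the VLAN ids currently set in the n->vlans bitmap,
+ * for reporting via the query-rx-filter QMP command.
+ */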
+static intList *get_vlan_table(VirtIONet *n)
+{
+ intList *list, *entry;
+ int i, j;
+
+ list = NULL;
+ for (i = 0; i < MAX_VLAN >> 5; i++) {
+ for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
+ if (n->vlans[i] & (1U << j)) {
+ entry = g_malloc0(sizeof(*entry));
+ entry->value = (i << 5) + j;
+ entry->next = list;
+ list = entry;
+ }
+ }
+ }
+
+ return list;
+}
+
static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
VirtIONet *n = qemu_get_nic_opaque(nc);
+ VirtIODevice *vdev = VIRTIO_DEVICE(n);
RxFilterInfo *info;
strList *str_list, *entry;
- intList *int_list, *int_entry;
- int i, j;
+ int i;
info = g_malloc0(sizeof(*info));
info->name = g_strdup(nc->name);
str_list = entry;
}
info->multicast_table = str_list;
+ info->vlan_table = get_vlan_table(n);
- int_list = NULL;
- for (i = 0; i < MAX_VLAN >> 5; i++) {
- for (j = 0; n->vlans[i] && j < 0x1f; j++) {
- if (n->vlans[i] & (1U << j)) {
- int_entry = g_malloc0(sizeof(*int_entry));
- int_entry->value = (i << 5) + j;
- int_entry->next = int_list;
- int_list = int_entry;
- }
- }
+ if (!((1 << VIRTIO_NET_F_CTRL_VLAN) & vdev->guest_features)) {
+ info->vlan = RX_STATE_ALL;
+ } else if (!info->vlan_table) {
+ info->vlan = RX_STATE_NONE;
+ } else {
+ info->vlan = RX_STATE_NORMAL;
}
- info->vlan_table = int_list;
/* enable event notification after query */
nc->rxfilter_notify_enabled = 1;
n->nobcast = 0;
/* multiqueue is disabled by default */
n->curr_queues = 1;
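+ /* Reset also cancels any self-announcement still in progress. */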
+ timer_del(n->announce_timer);
+ n->announce_counter = 0;
+ n->status &= ~VIRTIO_NET_S_ANNOUNCE;
/* Flush any MAC and VLAN filter table state */
n->mac_table.in_use = 0;
static void virtio_net_set_queues(VirtIONet *n)
{
int i;
+ int r;
for (i = 0; i < n->max_queues; i++) {
if (i < n->curr_queues) {
- assert(!peer_attach(n, i));
+ r = peer_attach(n, i);
+ assert(!r);
} else {
- assert(!peer_detach(n, i));
+ r = peer_detach(n, i);
+ assert(!r);
}
}
}
features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
}
- if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
- return features;
- }
- if (!tap_get_vhost_net(nc->peer)) {
+ if (!get_vhost_net(nc->peer)) {
return features;
}
- return vhost_net_get_features(tap_get_vhost_net(nc->peer), features);
+ return vhost_net_get_features(get_vhost_net(nc->peer), features);
}
static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
for (i = 0; i < n->max_queues; i++) {
NetClientState *nc = qemu_get_subqueue(n->nic, i);
- if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
+ if (!get_vhost_net(nc->peer)) {
continue;
}
- if (!tap_get_vhost_net(nc->peer)) {
- continue;
- }
- vhost_net_ack_features(tap_get_vhost_net(nc->peer), features);
+ vhost_net_ack_features(get_vhost_net(nc->peer), features);
+ }
+
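+ /* With VIRTIO_NET_F_CTRL_VLAN the guest manages the filter, so start with
+ * all VLANs blocked; without it, let every VLAN through.
+ */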
+ if ((1 << VIRTIO_NET_F_CTRL_VLAN) & features) {
+ memset(n->vlans, 0, MAX_VLAN >> 3);
+ } else {
+ memset(n->vlans, 0xff, MAX_VLAN >> 3);
}
}
static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
struct iovec *iov, unsigned int iov_cnt)
{
+ VirtIODevice *vdev = VIRTIO_DEVICE(n);
struct virtio_net_ctrl_mac mac_data;
size_t s;
NetClientState *nc = qemu_get_queue(n->nic);
s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
sizeof(mac_data.entries));
- mac_data.entries = ldl_p(&mac_data.entries);
+ mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
if (s != sizeof(mac_data.entries)) {
goto error;
}
s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
sizeof(mac_data.entries));
- mac_data.entries = ldl_p(&mac_data.entries);
+ mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
if (s != sizeof(mac_data.entries)) {
goto error;
}
goto error;
}
- if (in_use + mac_data.entries <= MAC_TABLE_ENTRIES) {
+ if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
mac_data.entries * ETH_ALEN);
if (s != mac_data.entries * ETH_ALEN) {
static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
struct iovec *iov, unsigned int iov_cnt)
{
+ VirtIODevice *vdev = VIRTIO_DEVICE(n);
uint16_t vid;
size_t s;
NetClientState *nc = qemu_get_queue(n->nic);
s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
- vid = lduw_p(&vid);
+ vid = virtio_lduw_p(vdev, &vid);
if (s != sizeof(vid)) {
return VIRTIO_NET_ERR;
}
return VIRTIO_NET_OK;
}
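+/* Guest acked VIRTIO_NET_S_ANNOUNCE: clear the status bit and, while
+ * announce rounds remain, re-arm the timer with an increasing delay.
+ */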
+static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
+ struct iovec *iov, unsigned int iov_cnt)
+{
+ if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
+ n->status & VIRTIO_NET_S_ANNOUNCE) {
+ n->status &= ~VIRTIO_NET_S_ANNOUNCE;
+ if (n->announce_counter) {
+ timer_mod(n->announce_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
+ self_announce_delay(n->announce_counter));
+ }
+ return VIRTIO_NET_OK;
+ } else {
+ return VIRTIO_NET_ERR;
+ }
+}
+
static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
struct iovec *iov, unsigned int iov_cnt)
{
return VIRTIO_NET_ERR;
}
- queues = lduw_p(&mq.virtqueue_pairs);
+ queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);
if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
VirtQueueElement elem;
size_t s;
- struct iovec *iov;
+ struct iovec *iov, *iov2;
unsigned int iov_cnt;
while (virtqueue_pop(vq, &elem)) {
exit(1);
}
- iov = elem.out_sg;
iov_cnt = elem.out_num;
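+ /* Work on a copy of the descriptor chain: iov_discard_front() modifies
+ * the vector, and elem.out_sg must stay intact for virtqueue_push().
+ */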
+ iov2 = iov = g_memdup(elem.out_sg, sizeof(struct iovec) * elem.out_num);
s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
if (s != sizeof(ctrl)) {
status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
} else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
+ } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
+ status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
} else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
} else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
virtqueue_push(vq, &elem, sizeof(status));
virtio_notify(vdev, vq);
+ g_free(iov2);
}
}
return 1;
}
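+/* Byte-swap the multi-byte virtio_net_hdr fields to/from the device's
+ * endianness, as needed for cross-endian guests.
+ */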
+static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
+{
+ virtio_tswap16s(vdev, &hdr->hdr_len);
+ virtio_tswap16s(vdev, &hdr->gso_size);
+ virtio_tswap16s(vdev, &hdr->csum_start);
+ virtio_tswap16s(vdev, &hdr->csum_offset);
+}
+
/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
* it never finds out that the packets don't have valid checksums. This
* causes dhclient to get upset. Fedora's carried a patch for ages to
void *wbuf = (void *)buf;
work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
size - n->host_hdr_len);
+ virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
} else {
struct virtio_net_hdr hdr = {
}
if (mhdr_cnt) {
- stw_p(&mhdr.num_buffers, i);
+ virtio_stw_p(vdev, &mhdr.num_buffers, i);
iov_from_buf(mhdr_sg, mhdr_cnt,
0,
&mhdr.num_buffers, sizeof mhdr.num_buffers);
return num_packets;
}
- assert(vdev->vm_running);
-
if (q->async_tx.elem.out_num) {
virtio_queue_set_notification(q->tx_vq, 0);
return num_packets;
exit(1);
}
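+ /* Fix up the endianness of the header the guest queued for transmission
+ * before handing the packet to the backend.
+ */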
+ if (n->has_vnet_hdr) {
+ if (out_sg[0].iov_len < n->guest_hdr_len) {
+ error_report("virtio-net header incorrect");
+ exit(1);
+ }
+ virtio_net_hdr_swap(vdev, (void *) out_sg[0].iov_base);
+ }
+
/*
* If host wants to see the guest header as is, we can
* pass it on unchanged. Otherwise, copy just the parts
VirtIONetQueue *q = opaque;
VirtIONet *n = q->n;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
- assert(vdev->vm_running);
+ /* This happens when the device was stopped but the timer wasn't cancelled. */
+ if (!vdev->vm_running) {
+ /* Make sure tx waiting is set, so we'll run when restarted. */
+ assert(q->tx_waiting);
+ return;
+ }
q->tx_waiting = 0;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
int32_t ret;
- assert(vdev->vm_running);
+ /* This happens when the device was stopped but the BH wasn't cancelled. */
+ if (!vdev->vm_running) {
+ /* Make sure tx waiting is set, so we'll run when restarted. */
+ assert(q->tx_waiting);
+ return;
+ }
q->tx_waiting = 0;
static void virtio_net_save(QEMUFile *f, void *opaque)
{
- int i;
VirtIONet *n = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
* it might keep writing to memory. */
assert(!n->vhost_started);
virtio_save(vdev, f);
+}
+
+static void virtio_net_save_device(VirtIODevice *vdev, QEMUFile *f)
+{
+ VirtIONet *n = VIRTIO_NET(vdev);
+ int i;
qemu_put_buffer(f, n->mac, ETH_ALEN);
qemu_put_be32(f, n->vqs[0].tx_waiting);
{
VirtIONet *n = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
- int ret, i, link_down;
if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
return -EINVAL;
- ret = virtio_load(vdev, f);
- if (ret) {
- return ret;
- }
+ return virtio_load(vdev, f, version_id);
+}
+
+static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
+ int version_id)
+{
+ VirtIONet *n = VIRTIO_NET(vdev);
+ int i, link_down;
qemu_get_buffer(f, n->mac, ETH_ALEN);
n->vqs[0].tx_waiting = qemu_get_be32(f);
if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
qemu_get_buffer(f, n->mac_table.macs,
n->mac_table.in_use * ETH_ALEN);
- } else if (n->mac_table.in_use) {
- uint8_t *buf = g_malloc0(n->mac_table.in_use);
- qemu_get_buffer(f, buf, n->mac_table.in_use * ETH_ALEN);
- g_free(buf);
+ } else {
+ int64_t i;
+
+ /* Overflow detected - can happen if the source has a larger MAC table.
+ * We simply set the overflow flags; there is no need to maintain the
+ * table of addresses, so discard them all.
+ * Note: 64 bit math to avoid integer overflow.
+ */
+ for (i = 0; i < (int64_t)n->mac_table.in_use * ETH_ALEN; ++i) {
+ qemu_get_byte(f);
+ }
n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
n->mac_table.in_use = 0;
}
}
n->curr_queues = qemu_get_be16(f);
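+ /* Sanity-check the migrated value before it is used to index n->vqs[]
+ * below.
+ */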
+ if (n->curr_queues > n->max_queues) {
+ error_report("virtio-net: curr_queues %x > max_queues %x",
+ n->curr_queues, n->max_queues);
+ return -1;
+ }
for (i = 1; i < n->curr_queues; i++) {
n->vqs[i].tx_waiting = qemu_get_be32(f);
}
qemu_get_subqueue(n->nic, i)->link_down = link_down;
}
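+ /* If the guest negotiated GUEST_ANNOUNCE and has a control queue, start
+ * the self-announce rounds as soon as the migrated state is loaded.
+ */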
+ if (vdev->guest_features & (0x1 << VIRTIO_NET_F_GUEST_ANNOUNCE) &&
+ vdev->guest_features & (0x1 << VIRTIO_NET_F_CTRL_VQ)) {
+ n->announce_counter = SELF_ANNOUNCE_ROUNDS;
+ timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
+ }
+
return 0;
}
VirtIONet *n = VIRTIO_NET(vdev);
NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
assert(n->vhost_started);
- return vhost_net_virtqueue_pending(tap_get_vhost_net(nc->peer), idx);
+ return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
}
static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
VirtIONet *n = VIRTIO_NET(vdev);
NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
assert(n->vhost_started);
- vhost_net_virtqueue_mask(tap_get_vhost_net(nc->peer),
+ vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
vdev, idx, mask);
}
*/
assert(type != NULL);
- if (n->netclient_name) {
- g_free(n->netclient_name);
- n->netclient_name = NULL;
- }
- if (n->netclient_type) {
- g_free(n->netclient_type);
- n->netclient_type = NULL;
- }
-
- if (name != NULL) {
- n->netclient_name = g_strdup(name);
- }
+ g_free(n->netclient_name);
+ g_free(n->netclient_type);
+ n->netclient_name = g_strdup(name);
n->netclient_type = g_strdup(type);
}
virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
- n->max_queues = MAX(n->nic_conf.queues, 1);
+ n->max_queues = MAX(n->nic_conf.peers.queues, 1);
n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
n->vqs[0].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
n->curr_queues = 1;
qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
n->status = VIRTIO_NET_S_LINK_UP;
+ n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
+ virtio_net_announce_timer, n);
if (n->netclient_type) {
/*
n->qdev = dev;
register_savevm(dev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
virtio_net_save, virtio_net_load, n);
-
- add_boot_device_path(n->nic_conf.bootindex, dev, "/ethernet-phy@0");
}
static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
unregister_savevm(dev, "virtio-net", n);
- if (n->netclient_name) {
- g_free(n->netclient_name);
- n->netclient_name = NULL;
- }
- if (n->netclient_type) {
- g_free(n->netclient_type);
- n->netclient_type = NULL;
- }
+ g_free(n->netclient_name);
+ n->netclient_name = NULL;
+ g_free(n->netclient_type);
+ n->netclient_type = NULL;
g_free(n->mac_table.macs);
g_free(n->vlans);
}
}
+ timer_del(n->announce_timer);
+ timer_free(n->announce_timer);
g_free(n->vqs);
qemu_del_nic(n->nic);
virtio_cleanup(vdev);
* Can be overriden with virtio_net_set_config_size.
*/
n->config_size = sizeof(struct virtio_net_config);
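+ /* Expose "bootindex" as a settable QOM property instead of registering
+ * the boot device path at realize time.
+ */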
+ device_add_bootindex_property(obj, &n->nic_conf.bootindex,
+ "bootindex", "/ethernet-phy@0",
+ DEVICE(n), NULL);
}
static Property virtio_net_properties[] = {
vdc->set_status = virtio_net_set_status;
vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
+ vdc->load = virtio_net_load_device;
+ vdc->save = virtio_net_save_device;
}
static const TypeInfo virtio_net_info = {