#include "net/vhost_net.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/qmp/qjson.h"
-#include "monitor/monitor.h"
+#include "qapi-event.h"
+#include "hw/virtio/virtio-access.h"
#define VIRTIO_NET_VM_VERSION 11
VirtIONet *n = VIRTIO_NET(vdev);
struct virtio_net_config netcfg;
- stw_p(&netcfg.status, n->status);
- stw_p(&netcfg.max_virtqueue_pairs, n->max_queues);
+ virtio_stw_p(vdev, &netcfg.status, n->status);
+ virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
memcpy(netcfg.mac, n->mac, ETH_ALEN);
memcpy(config, &netcfg, n->config_size);
}
memcpy(&netcfg, config, n->config_size);
- if (!(vdev->guest_features >> VIRTIO_NET_F_CTRL_MAC_ADDR & 1) &&
+ if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
memcpy(n->mac, netcfg.mac, ETH_ALEN);
qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
return;
}
- if (!!n->vhost_started ==
- (virtio_net_started(n, status) && !nc->peer->link_down)) {
+ if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
+ !!n->vhost_started) {
return;
}
if (!n->vhost_started) {
- int r;
+ int r, i;
+
if (!vhost_net_query(get_vhost_net(nc->peer), vdev)) {
return;
}
+
+ /* Any packets outstanding? Purge them to avoid touching rings
+ * when vhost is running.
+ */
+ for (i = 0; i < queues; i++) {
+ NetClientState *qnc = qemu_get_subqueue(n->nic, i);
+
+ /* Purge both directions: TX and RX. */
+ qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
+ qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
+ }
+
n->vhost_started = 1;
r = vhost_net_start(vdev, n->nic->ncs, queues);
if (r < 0) {
static void rxfilter_notify(NetClientState *nc)
{
- QObject *event_data;
VirtIONet *n = qemu_get_nic_opaque(nc);
if (nc->rxfilter_notify_enabled) {
gchar *path = object_get_canonical_path(OBJECT(n->qdev));
- if (n->netclient_name) {
- event_data = qobject_from_jsonf("{ 'name': %s, 'path': %s }",
- n->netclient_name, path);
- } else {
- event_data = qobject_from_jsonf("{ 'path': %s }", path);
- }
- monitor_protocol_event(QEVENT_NIC_RX_FILTER_CHANGED, event_data);
- qobject_decref(event_data);
+ qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
+ n->netclient_name, path, &error_abort);
g_free(path);
/* disable event notification to avoid event flooding */
info->multicast_table = str_list;
info->vlan_table = get_vlan_table(n);
- if (!((1 << VIRTIO_NET_F_CTRL_VLAN) & vdev->guest_features)) {
+ if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
info->vlan = RX_STATE_ALL;
} else if (!info->vlan_table) {
info->vlan = RX_STATE_NONE;
VirtIONet *n = VIRTIO_NET(vdev);
NetClientState *nc = qemu_get_queue(n->nic);
- features |= (1 << VIRTIO_NET_F_MAC);
+ virtio_add_feature(&features, VIRTIO_NET_F_MAC);
if (!peer_has_vnet_hdr(n)) {
- features &= ~(0x1 << VIRTIO_NET_F_CSUM);
- features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO4);
- features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO6);
- features &= ~(0x1 << VIRTIO_NET_F_HOST_ECN);
+ virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
+ virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
+ virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
+ virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);
- features &= ~(0x1 << VIRTIO_NET_F_GUEST_CSUM);
- features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO4);
- features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO6);
- features &= ~(0x1 << VIRTIO_NET_F_GUEST_ECN);
+ virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
+ virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
+ virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
+ virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
}
if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
- features &= ~(0x1 << VIRTIO_NET_F_GUEST_UFO);
- features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
+ virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
+ virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
}
if (!get_vhost_net(nc->peer)) {
/* Linux kernel 2.6.25. It understood MAC (as everyone must),
* but also these: */
- features |= (1 << VIRTIO_NET_F_MAC);
- features |= (1 << VIRTIO_NET_F_CSUM);
- features |= (1 << VIRTIO_NET_F_HOST_TSO4);
- features |= (1 << VIRTIO_NET_F_HOST_TSO6);
- features |= (1 << VIRTIO_NET_F_HOST_ECN);
+ virtio_add_feature(&features, VIRTIO_NET_F_MAC);
+ virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
+ virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
+ virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
+ virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);
return features;
}
VirtIONet *n = VIRTIO_NET(vdev);
int i;
- virtio_net_set_multiqueue(n, !!(features & (1 << VIRTIO_NET_F_MQ)));
+ virtio_net_set_multiqueue(n,
+ __virtio_has_feature(features, VIRTIO_NET_F_MQ));
- virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF)));
+ virtio_net_set_mrg_rx_bufs(n,
+ __virtio_has_feature(features,
+ VIRTIO_NET_F_MRG_RXBUF));
if (n->has_vnet_hdr) {
n->curr_guest_offloads =
vhost_net_ack_features(get_vhost_net(nc->peer), features);
}
- if ((1 << VIRTIO_NET_F_CTRL_VLAN) & features) {
+ if (__virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
memset(n->vlans, 0, MAX_VLAN >> 3);
} else {
memset(n->vlans, 0xff, MAX_VLAN >> 3);
uint64_t offloads;
size_t s;
- if (!((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features)) {
+ if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
return VIRTIO_NET_ERR;
}
static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
struct iovec *iov, unsigned int iov_cnt)
{
+ VirtIODevice *vdev = VIRTIO_DEVICE(n);
struct virtio_net_ctrl_mac mac_data;
size_t s;
NetClientState *nc = qemu_get_queue(n->nic);
s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
sizeof(mac_data.entries));
- mac_data.entries = ldl_p(&mac_data.entries);
+ mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
if (s != sizeof(mac_data.entries)) {
goto error;
}
s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
sizeof(mac_data.entries));
- mac_data.entries = ldl_p(&mac_data.entries);
+ mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
if (s != sizeof(mac_data.entries)) {
goto error;
}
static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
struct iovec *iov, unsigned int iov_cnt)
{
+ VirtIODevice *vdev = VIRTIO_DEVICE(n);
uint16_t vid;
size_t s;
NetClientState *nc = qemu_get_queue(n->nic);
s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
- vid = lduw_p(&vid);
+ vid = virtio_lduw_p(vdev, &vid);
if (s != sizeof(vid)) {
return VIRTIO_NET_ERR;
}
return VIRTIO_NET_ERR;
}
- queues = lduw_p(&mq.virtqueue_pairs);
+ queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);
if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
VirtQueueElement elem;
size_t s;
- struct iovec *iov;
+ struct iovec *iov, *iov2;
unsigned int iov_cnt;
while (virtqueue_pop(vq, &elem)) {
exit(1);
}
- iov = elem.out_sg;
iov_cnt = elem.out_num;
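+ /* Work on a copy of the descriptors: iov_discard_front() below modifies
+ * the array, while elem.out_sg must stay intact for virtqueue_push().
+ */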
+ iov2 = iov = g_memdup(elem.out_sg, sizeof(struct iovec) * elem.out_num);
s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
if (s != sizeof(ctrl)) {
virtqueue_push(vq, &elem, sizeof(status));
virtio_notify(vdev, vq);
+ g_free(iov2);
}
}
return 1;
}
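+/* Swap the multi-byte vnet header fields into the device's byte order;
+ * virtio_tswap16s() only swaps when device and host endianness differ.
+ */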
+static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
+{
+ virtio_tswap16s(vdev, &hdr->hdr_len);
+ virtio_tswap16s(vdev, &hdr->gso_size);
+ virtio_tswap16s(vdev, &hdr->csum_start);
+ virtio_tswap16s(vdev, &hdr->csum_offset);
+}
+
/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
* it never finds out that the packets don't have valid checksums. This
* causes dhclient to get upset. Fedora's carried a patch for ages to
void *wbuf = (void *)buf;
work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
size - n->host_hdr_len);
+ virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
} else {
struct virtio_net_hdr hdr = {
}
if (mhdr_cnt) {
- stw_p(&mhdr.num_buffers, i);
+ virtio_stw_p(vdev, &mhdr.num_buffers, i);
iov_from_buf(mhdr_sg, mhdr_cnt,
0,
&mhdr.num_buffers, sizeof mhdr.num_buffers);
return num_packets;
}
- assert(vdev->vm_running);
-
if (q->async_tx.elem.out_num) {
virtio_queue_set_notification(q->tx_vq, 0);
return num_packets;
exit(1);
}
+ if (n->has_vnet_hdr) {
+ if (out_sg[0].iov_len < n->guest_hdr_len) {
+ error_report("virtio-net header incorrect");
+ exit(1);
+ }
+ virtio_net_hdr_swap(vdev, (void *) out_sg[0].iov_base);
+ }
+
/*
* If host wants to see the guest header as is, we can
* pass it on unchanged. Otherwise, copy just the parts
VirtIONetQueue *q = opaque;
VirtIONet *n = q->n;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
- assert(vdev->vm_running);
+ /* This happens when device was stopped but BH wasn't. */
+ if (!vdev->vm_running) {
+ /* Make sure tx waiting is set, so we'll run when restarted. */
+ assert(q->tx_waiting);
+ return;
+ }
q->tx_waiting = 0;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
int32_t ret;
- assert(vdev->vm_running);
+ /* This happens when device was stopped but BH wasn't. */
+ if (!vdev->vm_running) {
+ /* Make sure tx waiting is set, so we'll run when restarted. */
+ assert(q->tx_waiting);
+ return;
+ }
q->tx_waiting = 0;
static void virtio_net_save(QEMUFile *f, void *opaque)
{
- int i;
VirtIONet *n = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
* it might keep writing to memory. */
assert(!n->vhost_started);
virtio_save(vdev, f);
+}
+
+static void virtio_net_save_device(VirtIODevice *vdev, QEMUFile *f)
+{
+ VirtIONet *n = VIRTIO_NET(vdev);
+ int i;
qemu_put_buffer(f, n->mac, ETH_ALEN);
qemu_put_be32(f, n->vqs[0].tx_waiting);
}
}
- if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
qemu_put_be64(f, n->curr_guest_offloads);
}
}
{
VirtIONet *n = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
- int ret, i, link_down;
if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
return -EINVAL;
- ret = virtio_load(vdev, f);
- if (ret) {
- return ret;
- }
+ return virtio_load(vdev, f, version_id);
+}
+
+static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
+ int version_id)
+{
+ VirtIONet *n = VIRTIO_NET(vdev);
+ int i, link_down;
qemu_get_buffer(f, n->mac, ETH_ALEN);
n->vqs[0].tx_waiting = qemu_get_be32(f);
}
}
- if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
n->curr_guest_offloads = qemu_get_be64(f);
} else {
n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
qemu_get_subqueue(n->nic, i)->link_down = link_down;
}
- if (vdev->guest_features & (0x1 << VIRTIO_NET_F_GUEST_ANNOUNCE) &&
- vdev->guest_features & (0x1 << VIRTIO_NET_F_CTRL_VQ)) {
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
+ virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
n->announce_counter = SELF_ANNOUNCE_ROUNDS;
timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
}
return 0;
}
-static void virtio_net_cleanup(NetClientState *nc)
-{
- VirtIONet *n = qemu_get_nic_opaque(nc);
-
- n->nic = NULL;
-}
-
static NetClientInfo net_virtio_info = {
.type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = virtio_net_can_receive,
.receive = virtio_net_receive,
- .cleanup = virtio_net_cleanup,
.link_status_changed = virtio_net_set_link_status,
.query_rx_filter = virtio_net_query_rxfilter,
};
void virtio_net_set_config_size(VirtIONet *n, uint32_t host_features)
{
int i, config_size = 0;
- host_features |= (1 << VIRTIO_NET_F_MAC);
+ virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
for (i = 0; feature_sizes[i].flags != 0; i++) {
if (host_features & feature_sizes[i].flags) {
config_size = MAX(feature_sizes[i].end, config_size);
virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
- n->max_queues = MAX(n->nic_conf.queues, 1);
+ n->max_queues = MAX(n->nic_conf.peers.queues, 1);
+ if (n->max_queues * 2 + 1 > VIRTIO_PCI_QUEUE_MAX) {
+ error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
+ "must be a postive integer less than %d.",
+ n->max_queues, (VIRTIO_PCI_QUEUE_MAX - 1) / 2);
+ virtio_cleanup(vdev);
+ return;
+ }
n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
n->vqs[0].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
n->curr_queues = 1;
n->qdev = dev;
register_savevm(dev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
virtio_net_save, virtio_net_load, n);
-
- add_boot_device_path(n->nic_conf.bootindex, dev, "/ethernet-phy@0");
}
static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
* Can be overridden with virtio_net_set_config_size.
*/
n->config_size = sizeof(struct virtio_net_config);
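+ /* Expose bootindex as a device property so it can be read and updated
+ * at run time.
+ */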
+ device_add_bootindex_property(obj, &n->nic_conf.bootindex,
+ "bootindex", "/ethernet-phy@0",
+ DEVICE(n), NULL);
}
static Property virtio_net_properties[] = {
vdc->set_status = virtio_net_set_status;
vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
+ vdc->load = virtio_net_load_device;
+ vdc->save = virtio_net_save_device;
}
static const TypeInfo virtio_net_info = {