void *cvq_cmd_out_buffer;
virtio_net_ctrl_ack *status;
+    /* The device always has SVQ enabled */
+ bool always_svq;
bool started;
} VhostVDPAState;
BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
BIT_ULL(VIRTIO_NET_F_STANDBY);
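+/* Address space id used for the shadowed CVQ mappings */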
+#define VHOST_VDPA_NET_CVQ_ASID 1
+
VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
if (invalid_dev_features) {
error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
invalid_dev_features);
+ return false;
}
- return !invalid_dev_features;
+ return vhost_svq_valid_features(features, errp);
}
static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
.check_peer_type = vhost_vdpa_check_peer_type,
};
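+/*
+ * Return the vq group of virtqueue vq_index, or a negative value if the
+ * VHOST_VDPA_GET_VRING_GROUP ioctl fails.
+ */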
+static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index)
+{
+ struct vhost_vring_state state = {
+ .index = vq_index,
+ };
+ int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);
+
+ if (unlikely(r < 0)) {
+ error_report("Cannot get VQ %u group: %s", vq_index,
+ g_strerror(errno));
+ return r;
+ }
+
+ return state.num;
+}
+
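+/*
+ * Assign address space asid_num to virtqueue group vq_group. Returns 0 on
+ * success or a negative value on failure.
+ */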
+static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
+ unsigned vq_group,
+ unsigned asid_num)
+{
+ struct vhost_vring_state asid = {
+ .index = vq_group,
+ .num = asid_num,
+ };
+ int r;
+
+ r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
+ if (unlikely(r < 0)) {
+ error_report("Can't set vq group %u asid %u, errno=%d (%s)",
+ asid.index, asid.num, errno, g_strerror(errno));
+ }
+ return r;
+}
+
static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
VhostIOVATree *tree = v->iova_tree;
return;
}
- r = vhost_vdpa_dma_unmap(v, map->iova, map->size + 1);
+ r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
if (unlikely(r != 0)) {
error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
}
return r;
}
- r = vhost_vdpa_dma_map(v, map.iova, vhost_vdpa_net_cvq_cmd_page_len(), buf,
- !write);
+ r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
+ vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
if (unlikely(r < 0)) {
goto dma_map_err;
}
static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
VhostVDPAState *s;
- int r;
+ struct vhost_vdpa *v;
+ uint64_t backend_features;
+ int64_t cvq_group;
+ int cvq_index, r;
assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
s = DO_UPCAST(VhostVDPAState, nc, nc);
+ v = &s->vhost_vdpa;
+
+ v->shadow_data = s->always_svq;
+ v->shadow_vqs_enabled = s->always_svq;
+ s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;
+
+ if (s->always_svq) {
+ /* SVQ is already configured for all virtqueues */
+ goto out;
+ }
+
+ /*
+     * If we return early in these cases, SVQ will not be enabled. Migration
+     * will be blocked as long as vhost-vdpa backends do not offer _F_LOG.
+     *
+     * Call VHOST_GET_BACKEND_FEATURES here, as the backend features are not
+     * available in v->dev yet.
+ */
+ r = ioctl(v->device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
+ if (unlikely(r < 0)) {
+ error_report("Cannot get vdpa backend_features: %s(%d)",
+ g_strerror(errno), errno);
+ return -1;
+ }
+    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID)) ||
+ !vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
+ return 0;
+ }
+
+ /*
+     * Check if all the virtqueues of the virtio device are in a different vq
+     * group than the last vq. The vq group of the last vq (the CVQ) is stored
+     * in cvq_group.
+ */
+ cvq_index = v->dev->vq_index_end - 1;
+ cvq_group = vhost_vdpa_get_vring_group(v->device_fd, cvq_index);
+ if (unlikely(cvq_group < 0)) {
+ return cvq_group;
+ }
+ for (int i = 0; i < cvq_index; ++i) {
+ int64_t group = vhost_vdpa_get_vring_group(v->device_fd, i);
+
+ if (unlikely(group < 0)) {
+ return group;
+ }
+
+ if (group == cvq_group) {
+ return 0;
+ }
+ }
+
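+    /* No data vq shares the CVQ group: the CVQ can be moved to its own ASID */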
+ r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
+ if (unlikely(r < 0)) {
+ return r;
+ }
+
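+    /* Dedicated IOVA tree for the shadowed CVQ mappings in their own ASID */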
+ v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
+ v->iova_range.last);
+ v->shadow_vqs_enabled = true;
+ s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;
+
+out:
if (!s->vhost_vdpa.shadow_vqs_enabled) {
return 0;
}
if (s->vhost_vdpa.shadow_vqs_enabled) {
vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
+ if (!s->always_svq) {
+ /*
+             * If only the CVQ is shadowed, we can delete this safely.
+             * If all the VQs are shadowed, this will be needed by the time the
+             * device is started again to register SVQ vrings and similar.
+ */
+ g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
+ }
}
}
out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
s->cvq_cmd_out_buffer,
vhost_vdpa_net_cvq_cmd_len());
- dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
- if (unlikely(dev_written < 0)) {
- goto out;
+ if (*(uint8_t *)s->cvq_cmd_out_buffer == VIRTIO_NET_CTRL_ANNOUNCE) {
+ /*
+ * Guest announce capability is emulated by qemu, so don't forward to
+ * the device.
+ */
+ dev_written = sizeof(status);
+ *s->status = VIRTIO_NET_OK;
+ } else {
+ dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
+ if (unlikely(dev_written < 0)) {
+ goto out;
+ }
}
if (unlikely(dev_written < sizeof(status))) {
};
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
- const char *device,
- const char *name,
- int vdpa_device_fd,
- int queue_pair_index,
- int nvqs,
- bool is_datapath,
- bool svq,
- VhostIOVATree *iova_tree)
+ const char *device,
+ const char *name,
+ int vdpa_device_fd,
+ int queue_pair_index,
+ int nvqs,
+ bool is_datapath,
+ bool svq,
+ struct vhost_vdpa_iova_range iova_range,
+ VhostIOVATree *iova_tree)
{
NetClientState *nc = NULL;
VhostVDPAState *s;
s->vhost_vdpa.device_fd = vdpa_device_fd;
s->vhost_vdpa.index = queue_pair_index;
+ s->always_svq = svq;
s->vhost_vdpa.shadow_vqs_enabled = svq;
+ s->vhost_vdpa.iova_range = iova_range;
+ s->vhost_vdpa.shadow_data = svq;
s->vhost_vdpa.iova_tree = iova_tree;
if (!is_datapath) {
s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
int vdpa_device_fd;
g_autofree NetClientState **ncs = NULL;
g_autoptr(VhostIOVATree) iova_tree = NULL;
+ struct vhost_vdpa_iova_range iova_range;
NetClientState *nc;
int queue_pairs, r, i = 0, has_cvq = 0;
return queue_pairs;
}
+ vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
if (opts->x_svq) {
- struct vhost_vdpa_iova_range iova_range;
-
if (!vhost_vdpa_net_valid_svq_features(features, errp)) {
goto err_svq;
}
- vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
}
for (i = 0; i < queue_pairs; i++) {
ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
vdpa_device_fd, i, 2, true, opts->x_svq,
- iova_tree);
+ iova_range, iova_tree);
if (!ncs[i])
goto err;
}
if (has_cvq) {
nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
vdpa_device_fd, i, 1, false,
- opts->x_svq, iova_tree);
+ opts->x_svq, iova_range, iova_tree);
if (!nc)
goto err;
}