* GNU GPL, version 2 or (at your option) any later version.
*/
+#include "qemu/osdep.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
}
if (!dev->log_enabled) {
- r = dev->vhost_ops->vhost_call(dev, VHOST_SET_MEM_TABLE, dev->mem);
+ r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
assert(r >= 0);
dev->memory_changed = false;
        return;
    }

    log_size = vhost_get_log_size(dev);
if (dev->log_size < log_size) {
vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
}
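    /* Grow the log before installing the new table, so dirty logging never
     * runs with a log that is too small for the regions in effect. */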
- r = dev->vhost_ops->vhost_call(dev, VHOST_SET_MEM_TABLE, dev->mem);
+ r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
assert(r >= 0);
    /* To log less, we can only decrease the log size after the table
     * update, for the same reason. */
if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
.log_guest_addr = vq->used_phys,
.flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
};
- int r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_ADDR, &addr);
+ int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
if (r < 0) {
return -errno;
}
if (enable_log) {
features |= 0x1ULL << VHOST_F_LOG_ALL;
}
- r = dev->vhost_ops->vhost_call(dev, VHOST_SET_FEATURES, &features);
+ r = dev->vhost_ops->vhost_set_features(dev, features);
return r < 0 ? -errno : 0;
}
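
/* Dirty logging is switched by re-negotiating VHOST_F_LOG_ALL with the
 * backend and then re-programming every vring, rolling back on failure.
 */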
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
- int r, t, i;
+ int r, t, i, idx;
r = vhost_dev_set_features(dev, enable_log);
if (r < 0) {
goto err_features;
}
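    /* The feature bit alone is not enough: each vring latches its own
     * log enable and log address via vhost_virtqueue_set_addr(). */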
for (i = 0; i < dev->nvqs; ++i) {
- r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
+ idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
+ r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
enable_log);
if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
for (; i >= 0; --i) {
- t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
+ idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
+ t = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
dev->log_enabled);
assert(t >= 0);
}
/* FIXME: implement */
}
+/* The vhost driver natively knows how to handle the vrings of modern
+ * devices and of legacy devices that are not cross-endian. Only legacy
+ * devices exposed to a bi-endian guest may require the vhost driver to
+ * use a specific endianness.
+ */
+static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
+{
+ if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
+ return false;
+ }
+#ifdef TARGET_IS_BIENDIAN
+#ifdef HOST_WORDS_BIGENDIAN
+ return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
+#else
+ return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
+#endif
+#else
+ return false;
+#endif
+}
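+/* Example: a legacy device on a big-endian host whose guest runs
+ * little-endian reports VIRTIO_DEVICE_ENDIAN_LITTLE, so this returns true;
+ * a VIRTIO_F_VERSION_1 device is always little-endian and never needs the
+ * fixup.
+ */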
+
static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
bool is_big_endian,
int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
};
- if (!dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_ENDIAN, &s)) {
+ if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
return 0;
}
{
hwaddr s, l, a;
int r;
- int vhost_vq_index = dev->vhost_ops->vhost_backend_get_vq_index(dev, idx);
+ int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
struct vhost_vring_file file = {
.index = vhost_vq_index
};
vq->num = state.num = virtio_queue_get_num(vdev, idx);
- r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_NUM, &state);
+ r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
if (r) {
return -errno;
}
state.num = virtio_queue_get_last_avail_idx(vdev, idx);
- r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_BASE, &state);
+ r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
if (r) {
return -errno;
}
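    /* The ring resumes at QEMU's saved last_avail_idx, so the backend picks
     * up processing exactly where the previous owner left off. */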
- if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
- virtio_legacy_is_cross_endian(vdev)) {
+ if (vhost_needs_vring_endian(vdev)) {
r = vhost_virtqueue_set_vring_endian_legacy(dev,
virtio_is_big_endian(vdev),
vhost_vq_index);
}
file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
- r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_KICK, &file);
+ r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
if (r) {
r = -errno;
goto fail_kick;
/* Clear and discard previous events if any. */
event_notifier_test_and_clear(&vq->masked_notifier);
+ /* Init vring in unmasked state, unless guest_notifier_mask
+ * will do it later.
+ */
+ if (!vdev->use_guest_notifier_mask) {
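+        /* With masking unused, route call events straight to the guest
+         * notifier now; vhost_virtqueue_mask(..., false) below does the
+         * actual eventfd rewiring. */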
+ /* TODO: check and handle errors. */
+ vhost_virtqueue_mask(dev, vdev, idx, false);
+ }
+
return 0;
fail_kick:
struct vhost_virtqueue *vq,
unsigned idx)
{
- int vhost_vq_index = dev->vhost_ops->vhost_backend_get_vq_index(dev, idx);
+ int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
struct vhost_vring_state state = {
.index = vhost_vq_index,
};
int r;
- r = dev->vhost_ops->vhost_call(dev, VHOST_GET_VRING_BASE, &state);
+ r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
if (r < 0) {
fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
fflush(stderr);
    /* In the cross-endian case, we need to reset the vring endianness back
     * to native, which is what legacy devices expect by default.
     */
- if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
- virtio_legacy_is_cross_endian(vdev)) {
+ if (vhost_needs_vring_endian(vdev)) {
r = vhost_virtqueue_set_vring_endian_legacy(dev,
!virtio_is_big_endian(vdev),
vhost_vq_index);
static int vhost_virtqueue_init(struct vhost_dev *dev,
struct vhost_virtqueue *vq, int n)
{
- int vhost_vq_index = dev->vhost_ops->vhost_backend_get_vq_index(dev, n);
+ int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
struct vhost_vring_file file = {
.index = vhost_vq_index,
};
}
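    /* The call eventfd initially targets the masked notifier; unmasking in
     * vhost_virtqueue_mask() redirects it to the guest notifier. */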
file.fd = event_notifier_get_fd(&vq->masked_notifier);
- r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_CALL, &file);
+ r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
if (r) {
r = -errno;
goto fail_call;
}
QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
- r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_OWNER, NULL);
+ r = hdev->vhost_ops->vhost_set_owner(hdev);
if (r < 0) {
goto fail;
}
- r = hdev->vhost_ops->vhost_call(hdev, VHOST_GET_FEATURES, &features);
+ r = hdev->vhost_ops->vhost_get_features(hdev, &features);
if (r < 0) {
goto fail;
}
if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
error_setg(&hdev->migration_blocker,
"Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
+ } else if (!qemu_memfd_check()) {
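+        /* The dirty log may need to be backed by shared memory (memfd) so
+         * the backend can map it; if memfd is unavailable, block migration
+         * up front rather than fail later. */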
+ error_setg(&hdev->migration_blocker,
+ "Migration disabled: failed to allocate shared memory");
}
}
struct vhost_vring_file file;
if (mask) {
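+        /* Masking is only legal for devices that opted into guest
+         * notifier masking; others had their vring unmasked once at
+         * start and must stay that way. */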
+ assert(vdev->use_guest_notifier_mask);
file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
} else {
file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
}
- file.index = hdev->vhost_ops->vhost_backend_get_vq_index(hdev, n);
- r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_VRING_CALL, &file);
+ file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
+ r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
assert(r >= 0);
}
if (r < 0) {
goto fail_features;
}
- r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_MEM_TABLE, hdev->mem);
+ r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
if (r < 0) {
r = -errno;
goto fail_mem;