// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Thanks to Michael S. Tsirkin for the valuable comments and
 * suggestions, and thanks to Cunming Liang and Zhihong Wang for all
 * their support.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>

#include "vhost.h"

enum {
	VHOST_VDPA_BACKEND_FEATURES =
		(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
		(1ULL << VHOST_BACKEND_F_IOTLB_BATCH) |
		(1ULL << VHOST_BACKEND_F_IOTLB_ASID),
};

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)

#define VHOST_VDPA_IOTLB_BUCKETS 16
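/*
 * Each address space identifier (ASID) used by the device gets its own
 * vhost IOTLB; instances live in the per-device "as" hash table below,
 * keyed by ASID.
 */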
struct vhost_vdpa_as {
	struct hlist_node hash_link;
	struct vhost_iotlb iotlb;
	u32 id;
};

struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct hlist_head as[VHOST_VDPA_IOTLB_BUCKETS];
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	u32 nvqs;
	int virtio_id;
	int minor;
	struct eventfd_ctx *config_ctx;
	bool in_batch;
	struct vdpa_iova_range range;
	u32 batch_asid;
	bool suspended;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
				   struct vhost_iotlb *iotlb, u64 start,
				   u64 last, u32 asid);

static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
{
	struct vhost_vdpa_as *as = container_of(iotlb, struct
						vhost_vdpa_as, iotlb);
	return as->id;
}

static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid)
{
	struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
	struct vhost_vdpa_as *as;

	hlist_for_each_entry(as, head, hash_link)
		if (as->id == asid)
			return as;

	return NULL;
}

static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid)
{
	struct vhost_vdpa_as *as = asid_to_as(v, asid);

	return as ? &as->iotlb : NULL;
}

static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
{
	struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
	struct vhost_vdpa_as *as;

	if (asid_to_as(v, asid))
		return NULL;

	if (asid >= v->vdpa->nas)
		return NULL;

	as = kmalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	vhost_iotlb_init(&as->iotlb, 0, 0);
	as->id = asid;
	hlist_add_head(&as->hash_link, head);

	return as;
}

static struct vhost_vdpa_as *vhost_vdpa_find_alloc_as(struct vhost_vdpa *v,
						      u32 asid)
{
	struct vhost_vdpa_as *as = asid_to_as(v, asid);

	if (as)
		return as;

	return vhost_vdpa_alloc_as(v, asid);
}

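/*
 * Ask the parent driver to restore its mapping state for this ASID.
 * Only devices with a vendor-specific IOMMU provide ->reset_map; for the
 * rest the full-range unmap in vhost_vdpa_remove_as() is sufficient.
 */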
static void vhost_vdpa_reset_map(struct vhost_vdpa *v, u32 asid)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->reset_map)
		ops->reset_map(vdpa, asid);
}

static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
{
	struct vhost_vdpa_as *as = asid_to_as(v, asid);

	if (!as)
		return -EINVAL;

	hlist_del(&as->hash_link);
	vhost_vdpa_iotlb_unmap(v, &as->iotlb, 0ULL, 0ULL - 1, asid);
	/*
	 * Devices with a vendor specific IOMMU may need to restore the
	 * iotlb to its initial or default state, which cannot be done by
	 * the full-range unmap call above. Give them a chance to clean up
	 * or reset the map to the desired state.
	 */
	vhost_vdpa_reset_map(v, asid);
	kfree(as);

	return 0;
}

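/* vhost work callback: forward a guest kick to the parent vDPA driver. */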
static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	ops->kick_vq(v->vdpa, vq - v->vqs);
}

static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;

	if (call_ctx)
		eventfd_signal(call_ctx);

	return IRQ_HANDLED;
}

static irqreturn_t vhost_vdpa_config_cb(void *private)
{
	struct vhost_vdpa *v = private;
	struct eventfd_ctx *config_ctx = v->config_ctx;

	if (config_ctx)
		eventfd_signal(config_ctx);

	return IRQ_HANDLED;
}

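/*
 * Try to wire the virtqueue interrupt directly to the call eventfd via the
 * IRQ bypass manager. Failure is not fatal: interrupts then simply keep
 * going through vhost_vdpa_virtqueue_cb().
 */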
static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];
	const struct vdpa_config_ops *ops = v->vdpa->config;
	struct vdpa_device *vdpa = v->vdpa;
	int ret, irq;

	if (!ops->get_vq_irq)
		return;

	irq = ops->get_vq_irq(vdpa, qid);
	if (irq < 0)
		return;

	irq_bypass_unregister_producer(&vq->call_ctx.producer);
	if (!vq->call_ctx.ctx)
		return;

	vq->call_ctx.producer.token = vq->call_ctx.ctx;
	vq->call_ctx.producer.irq = irq;
	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
	if (unlikely(ret))
		dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration fails, ret = %d\n",
			 qid, vq->call_ctx.producer.token, ret);
}

static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];

	irq_bypass_unregister_producer(&vq->call_ctx.producer);
}

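/*
 * Reset the device, additionally asking it to clean up its mappings unless
 * userspace negotiated VHOST_BACKEND_F_IOTLB_PERSIST and therefore expects
 * the IOTLB to survive the reset.
 */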
static int _compat_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	u32 flags = 0;

	v->suspended = false;

	if (v->vdev.vqs) {
		flags |= !vhost_backend_has_feature(v->vdev.vqs[0],
						    VHOST_BACKEND_F_IOTLB_PERSIST) ?
			 VDPA_RESET_F_CLEAN_MAP : 0;
	}

	return vdpa_reset(vdpa, flags);
}

static int vhost_vdpa_reset(struct vhost_vdpa *v)
{
	return _compat_vdpa_reset(v);
}

static long vhost_vdpa_bind_mm(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (!vdpa->use_va || !ops->bind_mm)
		return 0;

	return ops->bind_mm(vdpa, v->vdev.mm);
}

static void vhost_vdpa_unbind_mm(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (!vdpa->use_va || !ops->unbind_mm)
		return;

	ops->unbind_mm(vdpa);
}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	status = ops->get_status(vdpa);

	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}

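/*
 * VHOST_VDPA_SET_STATUS: validate the status transition, tear down the
 * virtqueue interrupts when DRIVER_OK is cleared and set them up again when
 * it becomes set, and treat a write of 0 as a device reset.
 */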
static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status, status_old;
	u32 nvqs = v->nvqs;
	int ret;
	u16 i;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	status_old = ops->get_status(vdpa);

	/*
	 * Userspace shouldn't remove status bits unless it resets the
	 * status to 0.
	 */
	if (status != 0 && (status_old & ~status) != 0)
		return -EINVAL;

	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_unsetup_vq_irq(v, i);

	if (status == 0) {
		ret = _compat_vdpa_reset(v);
		if (ret)
			return ret;
	} else
		vdpa_set_status(vdpa, status);

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_setup_vq_irq(v, i);

	return 0;
}

static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	size_t size = vdpa->config->get_config_size(vdpa);

	if (c->len == 0 || c->off > size)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}

static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vdpa_get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;

	buf = vmemdup_user(c->buf, config.len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	vdpa_set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}

static bool vhost_vdpa_can_suspend(const struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->suspend;
}

static bool vhost_vdpa_can_resume(const struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->resume;
}

static bool vhost_vdpa_has_desc_group(const struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->get_vq_desc_group;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_device_features(vdpa);

	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static u64 vhost_vdpa_get_backend_features(const struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (!ops->get_backend_features)
		return 0;

	return ops->get_backend_features(vdpa);
}

static bool vhost_vdpa_has_persistent_map(const struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	return (!ops->set_map && !ops->dma_map) || ops->reset_map ||
	       vhost_vdpa_get_backend_features(v) & BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST);
}

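/*
 * VHOST_SET_FEATURES: features can only be changed before FEATURES_OK is
 * set; the features actually accepted by the device are then propagated to
 * every vhost virtqueue.
 */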
static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_dev *d = &v->vdev;
	u64 actual_features;
	u64 features;
	int i;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (vdpa_set_features(vdpa, features))
		return -EINVAL;

	/* let the vqs know what has been configured */
	actual_features = ops->get_driver_features(vdpa);
	for (i = 0; i < d->nvqs; ++i) {
		struct vhost_virtqueue *vq = d->vqs[i];

		mutex_lock(&vq->mutex);
		vq->acked_features = actual_features;
		mutex_unlock(&vq->mutex);
	}

	return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}

static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
	if (v->config_ctx) {
		eventfd_ctx_put(v->config_ctx);
		v->config_ctx = NULL;
	}
}

static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_callback cb;
	int fd;
	struct eventfd_ctx *ctx;

	cb.callback = vhost_vdpa_config_cb;
	cb.private = v;
	if (copy_from_user(&fd, argp, sizeof(fd)))
		return -EFAULT;

	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
	swap(ctx, v->config_ctx);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);

	if (IS_ERR(v->config_ctx)) {
		long ret = PTR_ERR(v->config_ctx);

		v->config_ctx = NULL;
		return ret;
	}

	v->vdpa->config->set_config_cb(v->vdpa, &cb);

	return 0;
}

static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vhost_vdpa_iova_range range = {
		.first = v->range.first,
		.last = v->range.last,
	};

	if (copy_to_user(argp, &range, sizeof(range)))
		return -EFAULT;
	return 0;
}

static long vhost_vdpa_get_config_size(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 size;

	size = ops->get_config_size(vdpa);

	if (copy_to_user(argp, &size, sizeof(size)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;

	if (copy_to_user(argp, &vdpa->nvqs, sizeof(vdpa->nvqs)))
		return -EFAULT;

	return 0;
}

/* After a successful return of this ioctl the device must not process more
 * virtqueue descriptors. The device can still answer reads or writes of
 * config fields as if it were not suspended. In particular, writing to
 * "queue_enable" with a value of 1 will not make the device start
 * processing buffers.
 */
static long vhost_vdpa_suspend(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int ret;

	if (!(ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK))
		return 0;

	if (!ops->suspend)
		return -EOPNOTSUPP;

	ret = ops->suspend(vdpa);
	if (!ret)
		v->suspended = true;

	return ret;
}

/* After a successful return of this ioctl the device resumes processing
 * virtqueue descriptors. The device becomes fully operational the same way it
 * was before it was suspended.
 */
static long vhost_vdpa_resume(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int ret;

	if (!(ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK))
		return 0;

	if (!ops->resume)
		return -EOPNOTSUPP;

	ret = ops->resume(vdpa);
	if (!ret)
		v->suspended = false;

	return ret;
}

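/*
 * Per-virtqueue ioctls: the vring index is read from userspace first; the
 * request is then either handled directly against the vDPA device or passed
 * to the generic vhost_vring_ioctl(), with the result synced back to the
 * device afterwards.
 */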
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_vq_state vq_state;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	switch (cmd) {
	case VHOST_VDPA_SET_VRING_ENABLE:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	case VHOST_VDPA_GET_VRING_GROUP:
		if (!ops->get_vq_group)
			return -EOPNOTSUPP;
		s.index = idx;
		s.num = ops->get_vq_group(vdpa, idx);
		if (s.num >= vdpa->ngroups)
			return -EIO;
		else if (copy_to_user(argp, &s, sizeof(s)))
			return -EFAULT;
		return 0;
	case VHOST_VDPA_GET_VRING_DESC_GROUP:
		if (!vhost_vdpa_has_desc_group(v))
			return -EOPNOTSUPP;
		s.index = idx;
		s.num = ops->get_vq_desc_group(vdpa, idx);
		if (s.num >= vdpa->ngroups)
			return -EIO;
		else if (copy_to_user(argp, &s, sizeof(s)))
			return -EFAULT;
		return 0;
	case VHOST_VDPA_SET_GROUP_ASID:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		if (s.num >= vdpa->nas)
			return -EINVAL;
		if (!ops->set_group_asid)
			return -EOPNOTSUPP;
		return ops->set_group_asid(vdpa, idx, s.num);
	case VHOST_VDPA_GET_VRING_SIZE:
		if (!ops->get_vq_size)
			return -EOPNOTSUPP;
		s.index = idx;
		s.num = ops->get_vq_size(vdpa, idx);
		if (copy_to_user(argp, &s, sizeof(s)))
			return -EFAULT;
		return 0;
	case VHOST_GET_VRING_BASE:
		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
		if (r)
			return r;

		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
			vq->last_avail_idx = vq_state.packed.last_avail_idx |
					     (vq_state.packed.last_avail_counter << 15);
			vq->last_used_idx = vq_state.packed.last_used_idx |
					    (vq_state.packed.last_used_counter << 15);
		} else {
			vq->last_avail_idx = vq_state.split.avail_index;
		}
		break;
	}

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended)
			return -EINVAL;

		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_BASE:
		if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended)
			return -EINVAL;

		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
			vq_state.packed.last_avail_idx = vq->last_avail_idx & 0x7fff;
			vq_state.packed.last_avail_counter = !!(vq->last_avail_idx & 0x8000);
			vq_state.packed.last_used_idx = vq->last_used_idx & 0x7fff;
			vq_state.packed.last_used_counter = !!(vq->last_used_idx & 0x8000);
		} else {
			vq_state.split.avail_index = vq->last_avail_idx;
		}
		r = ops->set_vq_state(vdpa, idx, &vq_state);
		break;

	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx.ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
			cb.trigger = vq->call_ctx.ctx;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
			cb.trigger = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		vhost_vdpa_setup_vq_irq(v, idx);
		break;

	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}

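/*
 * Top-level ioctl dispatcher for the vhost-vdpa character device. Backend
 * feature negotiation is handled before taking the vhost device mutex;
 * everything else runs under it.
 */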
static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u64 features;
	long r = 0;

	if (cmd == VHOST_SET_BACKEND_FEATURES) {
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
				 BIT_ULL(VHOST_BACKEND_F_DESC_ASID) |
				 BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST) |
				 BIT_ULL(VHOST_BACKEND_F_SUSPEND) |
				 BIT_ULL(VHOST_BACKEND_F_RESUME) |
				 BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK)))
			return -EOPNOTSUPP;
		if ((features & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) &&
		    !vhost_vdpa_can_suspend(v))
			return -EOPNOTSUPP;
		if ((features & BIT_ULL(VHOST_BACKEND_F_RESUME)) &&
		    !vhost_vdpa_can_resume(v))
			return -EOPNOTSUPP;
		if ((features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) &&
		    !(features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID)))
			return -EINVAL;
		if ((features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) &&
		    !vhost_vdpa_has_desc_group(v))
			return -EOPNOTSUPP;
		if ((features & BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST)) &&
		    !vhost_vdpa_has_persistent_map(v))
			return -EOPNOTSUPP;
		vhost_set_backend_features(&v->vdev, features);
		return 0;
	}

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_VDPA_GET_GROUP_NUM:
		if (copy_to_user(argp, &v->vdpa->ngroups,
				 sizeof(v->vdpa->ngroups)))
			r = -EFAULT;
		break;
	case VHOST_VDPA_GET_AS_NUM:
		if (copy_to_user(argp, &v->vdpa->nas, sizeof(v->vdpa->nas)))
			r = -EFAULT;
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	case VHOST_VDPA_SET_CONFIG_CALL:
		r = vhost_vdpa_set_config_call(v, argp);
		break;
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VDPA_BACKEND_FEATURES;
		if (vhost_vdpa_can_suspend(v))
			features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
		if (vhost_vdpa_can_resume(v))
			features |= BIT_ULL(VHOST_BACKEND_F_RESUME);
		if (vhost_vdpa_has_desc_group(v))
			features |= BIT_ULL(VHOST_BACKEND_F_DESC_ASID);
		if (vhost_vdpa_has_persistent_map(v))
			features |= BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST);
		features |= vhost_vdpa_get_backend_features(v);
		if (copy_to_user(featurep, &features, sizeof(features)))
			r = -EFAULT;
		break;
	case VHOST_VDPA_GET_IOVA_RANGE:
		r = vhost_vdpa_get_iova_range(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG_SIZE:
		r = vhost_vdpa_get_config_size(v, argp);
		break;
	case VHOST_VDPA_GET_VQS_COUNT:
		r = vhost_vdpa_get_vqs_count(v, argp);
		break;
	case VHOST_VDPA_SUSPEND:
		r = vhost_vdpa_suspend(v);
		break;
	case VHOST_VDPA_RESUME:
		r = vhost_vdpa_resume(v);
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	if (r)
		goto out;

	switch (cmd) {
	case VHOST_SET_OWNER:
		r = vhost_vdpa_bind_mm(v);
		if (r)
			vhost_dev_reset_owner(d, NULL);
		break;
	}
out:
	mutex_unlock(&d->mutex);
	return r;
}

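/*
 * Remove a single mapping using whichever mechanism the parent device
 * provides: ->dma_unmap, a later ->set_map resync, or the platform IOMMU.
 */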
static void vhost_vdpa_general_unmap(struct vhost_vdpa *v,
				     struct vhost_iotlb_map *map, u32 asid)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->dma_map) {
		ops->dma_unmap(vdpa, asid, map->start, map->size);
	} else if (ops->set_map == NULL) {
		iommu_unmap(v->domain, map->start, map->size);
	}
}

static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
				u64 start, u64 last, u32 asid)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = PFN_DOWN(map->size);
		for (pfn = PFN_DOWN(map->addr);
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
		vhost_vdpa_general_unmap(v, map, asid);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
				u64 start, u64 last, u32 asid)
{
	struct vhost_iotlb_map *map;
	struct vdpa_map_file *map_file;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		map_file = (struct vdpa_map_file *)map->opaque;
		fput(map_file->file);
		kfree(map_file);
		vhost_vdpa_general_unmap(v, map, asid);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
				   struct vhost_iotlb *iotlb, u64 start,
				   u64 last, u32 asid)
{
	struct vdpa_device *vdpa = v->vdpa;

	if (vdpa->use_va)
		return vhost_vdpa_va_unmap(v, iotlb, start, last, asid);

	return vhost_vdpa_pa_unmap(v, iotlb, start, last, asid);
}

static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}

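/*
 * Insert one mapping into the IOTLB and program it into the device, either
 * through the driver's DMA/set_map ops or through the platform IOMMU. For
 * PA mappings the pinned pages are accounted in the owner mm's pinned_vm.
 */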
static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
			  u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 asid = iotlb_to_asid(iotlb);
	int r = 0;

	r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
				      pa, perm, opaque);
	if (r)
		return r;

	if (ops->dma_map) {
		r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque);
	} else if (ops->set_map) {
		if (!v->in_batch)
			r = ops->set_map(vdpa, asid, iotlb);
	} else {
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm),
			      GFP_KERNEL_ACCOUNT);
	}
	if (r) {
		vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
		return r;
	}

	if (!vdpa->use_va)
		atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);

	return 0;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v,
			     struct vhost_iotlb *iotlb,
			     u64 iova, u64 size)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 asid = iotlb_to_asid(iotlb);

	vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1, asid);

	if (ops->set_map) {
		if (!v->in_batch)
			ops->set_map(vdpa, asid, iotlb);
	}
}

static int vhost_vdpa_va_map(struct vhost_vdpa *v,
			     struct vhost_iotlb *iotlb,
			     u64 iova, u64 size, u64 uaddr, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	u64 offset, map_size, map_iova = iova;
	struct vdpa_map_file *map_file;
	struct vm_area_struct *vma;
	int ret = 0;

	mmap_read_lock(dev->mm);

	while (size) {
		vma = find_vma(dev->mm, uaddr);
		if (!vma) {
			ret = -EINVAL;
			break;
		}
		map_size = min(size, vma->vm_end - uaddr);
		if (!(vma->vm_file && (vma->vm_flags & VM_SHARED) &&
			!(vma->vm_flags & (VM_IO | VM_PFNMAP))))
			goto next;

		map_file = kzalloc(sizeof(*map_file), GFP_KERNEL);
		if (!map_file) {
			ret = -ENOMEM;
			break;
		}
		offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
		map_file->offset = offset;
		map_file->file = get_file(vma->vm_file);
		ret = vhost_vdpa_map(v, iotlb, map_iova, map_size, uaddr,
				     perm, map_file);
		if (ret) {
			fput(map_file->file);
			kfree(map_file);
			break;
		}
next:
		size -= map_size;
		uaddr += map_size;
		map_iova += map_size;
	}
	if (ret)
		vhost_vdpa_unmap(v, iotlb, iova, map_iova - iova);

	mmap_read_unlock(dev->mm);

	return ret;
}

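/*
 * Map an IOVA range backed by userspace memory: pin the pages in batches
 * bounded by one page worth of page pointers and hand each physically
 * contiguous run to vhost_vdpa_map(), honouring RLIMIT_MEMLOCK.
 */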
static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
			     struct vhost_iotlb *iotlb,
			     u64 iova, u64 size, u64 uaddr, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long lock_limit, sz2pin, nchunks, i;
	u64 start = iova;
	long pinned;
	int ret = 0;

	/* Limit the use of memory for bookkeeping */
	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PFN_UP(size + (iova & ~PAGE_MASK));
	if (!npages) {
		ret = -EINVAL;
		goto free;
	}

	mmap_read_lock(dev->mm);

	lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
		ret = -ENOMEM;
		goto unlock;
	}

	cur_base = uaddr & PAGE_MASK;
	iova &= PAGE_MASK;
	nchunks = 0;

	while (npages) {
		sz2pin = min_t(unsigned long, npages, list_size);
		pinned = pin_user_pages(cur_base, sz2pin,
					gup_flags, page_list);
		if (sz2pin != pinned) {
			if (pinned < 0) {
				ret = pinned;
			} else {
				unpin_user_pages(page_list, pinned);
				ret = -ENOMEM;
			}
			goto out;
		}
		nchunks++;

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < pinned; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Pin a contiguous chunk of memory */
				csize = PFN_PHYS(last_pfn - map_pfn + 1);
				ret = vhost_vdpa_map(v, iotlb, iova, csize,
						     PFN_PHYS(map_pfn),
						     perm, NULL);
				if (ret) {
					/*
					 * Unpin the pages that are left unmapped
					 * from this point on in the current
					 * page_list. The remaining outstanding
					 * ones which may stride across several
					 * chunks will be covered in the common
					 * error path subsequently.
					 */
					unpin_user_pages(&page_list[i],
							 pinned - i);
					goto out;
				}

				map_pfn = this_pfn;
				iova += csize;
				nchunks = 0;
			}

			last_pfn = this_pfn;
		}

		cur_base += PFN_PHYS(pinned);
		npages -= pinned;
	}

	/* Map the remaining chunk */
	ret = vhost_vdpa_map(v, iotlb, iova, PFN_PHYS(last_pfn - map_pfn + 1),
			     PFN_PHYS(map_pfn), perm, NULL);
out:
	if (ret) {
		if (nchunks) {
			unsigned long pfn;

			/*
			 * Unpin the outstanding pages which are yet to be
			 * mapped but haven't due to vdpa_map() or
			 * pin_user_pages() failure.
			 *
			 * Mapped pages are accounted in vdpa_map(), hence
			 * the corresponding unpinning will be handled by
			 * vdpa_unmap().
			 */
			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
				unpin_user_page(pfn_to_page(pfn));
		}
		vhost_vdpa_unmap(v, iotlb, start, size);
	}
unlock:
	mmap_read_unlock(dev->mm);
free:
	free_page((unsigned long)page_list);
	return ret;
}

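/*
 * Validate a VHOST_IOTLB_UPDATE message against the device IOVA range and
 * existing mappings, then dispatch to the VA or PA mapping path depending
 * on whether the parent device uses virtual addressing.
 */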
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb *iotlb,
					   struct vhost_iotlb_msg *msg)
{
	struct vdpa_device *vdpa = v->vdpa;

	if (msg->iova < v->range.first || !msg->size ||
	    msg->iova > U64_MAX - msg->size + 1 ||
	    msg->iova + msg->size - 1 > v->range.last)
		return -EINVAL;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	if (vdpa->use_va)
		return vhost_vdpa_va_map(v, iotlb, msg->iova, msg->size,
					 msg->uaddr, msg->perm);

	return vhost_vdpa_pa_map(v, iotlb, msg->iova, msg->size, msg->uaddr,
				 msg->perm);
}

static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_iotlb *iotlb = NULL;
	struct vhost_vdpa_as *as = NULL;
	int r = 0;

	mutex_lock(&dev->mutex);

	r = vhost_dev_check_owner(dev);
	if (r)
		goto unlock;

	if (msg->type == VHOST_IOTLB_UPDATE ||
	    msg->type == VHOST_IOTLB_BATCH_BEGIN) {
		as = vhost_vdpa_find_alloc_as(v, asid);
		if (!as) {
			dev_err(&v->dev, "can't find and alloc asid %d\n",
				asid);
			r = -EINVAL;
			goto unlock;
		}
		iotlb = &as->iotlb;
	} else
		iotlb = asid_to_iotlb(v, asid);

	if ((v->in_batch && v->batch_asid != asid) || !iotlb) {
		if (v->in_batch && v->batch_asid != asid) {
			dev_info(&v->dev, "batch id %d asid %d\n",
				 v->batch_asid, asid);
		}
		if (!iotlb)
			dev_err(&v->dev, "no iotlb for asid %d\n", asid);
		r = -EINVAL;
		goto unlock;
	}

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
		break;
	case VHOST_IOTLB_BATCH_BEGIN:
		v->batch_asid = asid;
		v->in_batch = true;
		break;
	case VHOST_IOTLB_BATCH_END:
		if (v->in_batch && ops->set_map)
			ops->set_map(vdpa, asid, iotlb);
		v->in_batch = false;
		break;
	default:
		r = -EINVAL;
		break;
	}
unlock:
	mutex_unlock(&dev->mutex);

	return r;
}

static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}

static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	int ret;

	/* Device wants to do DMA by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	if (!device_iommu_capable(dma_dev, IOMMU_CAP_CACHE_COHERENCY)) {
		dev_warn_once(&v->dev,
			      "Failed to allocate domain, device is not IOMMU cache coherent capable\n");
		return -ENOTSUPP;
	}

	v->domain = iommu_paging_domain_alloc(dma_dev);
	if (IS_ERR(v->domain)) {
		ret = PTR_ERR(v->domain);
		v->domain = NULL;
		return ret;
	}

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	v->domain = NULL;
	return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}

	v->domain = NULL;
}

static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
{
	struct vdpa_iova_range *range = &v->range;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->get_iova_range) {
		*range = ops->get_iova_range(vdpa);
	} else if (v->domain && v->domain->geometry.force_aperture) {
		range->first = v->domain->geometry.aperture_start;
		range->last = v->domain->geometry.aperture_end;
	} else {
		range->first = 0;
		range->last = ULLONG_MAX;
	}
}

static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
{
	struct vhost_vdpa_as *as;
	u32 asid;

	for (asid = 0; asid < v->vdpa->nas; asid++) {
		as = asid_to_as(v, asid);
		if (as)
			vhost_vdpa_remove_as(v, asid);
	}

	vhost_vdpa_free_domain(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
	v->vdev.vqs = NULL;
}

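/*
 * Char device open: a vhost-vdpa instance can only be opened once. The
 * vhost device and its virtqueues are (re)initialized here, together with
 * the IOMMU domain and the usable IOVA range.
 */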
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int r, opened;
	u32 i, nvqs;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	r = vhost_vdpa_reset(v);
	if (r)
		goto err;

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
		       vhost_vdpa_process_iotlb_msg);

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_alloc_domain;

	vhost_vdpa_set_iova_range(v);

	filep->private_data = v;

	return 0;

err_alloc_domain:
	vhost_vdpa_cleanup(v);
err:
	atomic_dec(&v->opened);
	return r;
}

static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{
	u32 i;

	for (i = 0; i < v->nvqs; i++)
		vhost_vdpa_unsetup_vq_irq(v, i);
}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_clean_irq(v);
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_unbind_mm(v);
	vhost_vdpa_config_put(v);
	vhost_vdpa_cleanup(v);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}

#ifdef CONFIG_MMU
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	struct vm_area_struct *vma = vmf->vma;
	u16 index = vma->vm_pgoff;

	notify = ops->get_vq_notification(vdpa, index);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
			    PFN_DOWN(notify.addr), PAGE_SIZE,
			    vma->vm_page_prot))
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct vhost_vdpa_vm_ops = {
	.fault = vhost_vdpa_fault,
};

static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vhost_vdpa *v = vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	unsigned long index = vma->vm_pgoff;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (vma->vm_flags & VM_READ)
		return -EINVAL;
	if (!ops->get_vq_notification)
		return -ENOTSUPP;

	/* To be safe and easily modelled by userspace, we only
	 * support the doorbell which sits on the page boundary and
	 * does not share the page with other registers.
	 */
	notify = ops->get_vq_notification(vdpa, index);
	if (notify.addr & (PAGE_SIZE - 1))
		return -EINVAL;
	if (vma->vm_end - vma->vm_start != notify.size)
		return -ENOTSUPP;

	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_ops = &vhost_vdpa_vm_ops;
	return 0;
}
#endif /* CONFIG_MMU */

static const struct file_operations vhost_vdpa_fops = {
	.owner		= THIS_MODULE,
	.open		= vhost_vdpa_open,
	.release	= vhost_vdpa_release,
	.write_iter	= vhost_vdpa_chr_write_iter,
	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
	.mmap		= vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
	.compat_ioctl	= compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_free(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}

static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor;
	int i, r;

	/* We can't support platform IOMMU devices with more than one
	 * group or address space.
	 */
	if (!ops->set_map && !ops->dma_map &&
	    (vdpa->ngroups > 1 || vdpa->nas > 1))
		return -EOPNOTSUPP;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_alloc_max(&vhost_vdpa_ida, VHOST_VDPA_DEV_MAX - 1,
			      GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = vdpa->nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	for (i = 0; i < VHOST_VDPA_IOTLB_BUCKETS; i++)
		INIT_HLIST_HEAD(&v->as[i]);

	return 0;

err:
	put_device(&v->dev);
	return r;
}

static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}

static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name	= "vhost_vdpa",
	},
	.probe	= vhost_vdpa_probe,
	.remove	= vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");