// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO-KVM bridge pseudo device
 *
 * Copyright (C) 2013 Red Hat, Inc. All rights reserved.
 */

#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include "vfio.h"

#ifdef CONFIG_SPAPR_TCE_IOMMU
#include <asm/kvm_ppc.h>
#endif

struct kvm_vfio_group {
	struct list_head node;
	struct file *file;
};

struct kvm_vfio {
	struct list_head group_list;
	struct mutex lock;
	bool noncoherent;
};

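/*
 * KVM may be built in while VFIO is built as a module, so the wrappers
 * below resolve the vfio_file_* symbols with symbol_get() at run time
 * rather than taking a hard link-time dependency; each wrapper fails
 * gracefully if the vfio module is not loaded.
 */
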
static void kvm_vfio_file_set_kvm(struct file *file, struct kvm *kvm)
{
	void (*fn)(struct file *file, struct kvm *kvm);

	fn = symbol_get(vfio_file_set_kvm);
	if (!fn)
		return;

	fn(file, kvm);

	symbol_put(vfio_file_set_kvm);
}

static bool kvm_vfio_file_enforced_coherent(struct file *file)
{
	bool (*fn)(struct file *file);
	bool ret;

	fn = symbol_get(vfio_file_enforced_coherent);
	if (!fn)
		return false;

	ret = fn(file);

	symbol_put(vfio_file_enforced_coherent);

	return ret;
}

static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file)
{
	struct iommu_group *(*fn)(struct file *file);
	struct iommu_group *ret;

	fn = symbol_get(vfio_file_iommu_group);
	if (!fn)
		return NULL;

	ret = fn(file);

	symbol_put(vfio_file_iommu_group);

	return ret;
}

#ifdef CONFIG_SPAPR_TCE_IOMMU
static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
					     struct kvm_vfio_group *kvg)
{
	struct iommu_group *grp = kvm_vfio_file_iommu_group(kvg->file);

	if (WARN_ON_ONCE(!grp))
		return;

	kvm_spapr_tce_release_iommu_group(kvm, grp);
}
#endif

/*
 * Groups can use the same or different IOMMU domains. If the same then
 * adding a new group may change the coherency of groups we've previously
 * been told about. We don't want to care about any of that so we retest
 * each group and bail as soon as we find one that's noncoherent. This
 * means we only ever [un]register_noncoherent_dma once for the whole device.
 */
static void kvm_vfio_update_coherency(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	bool noncoherent = false;
	struct kvm_vfio_group *kvg;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (!kvm_vfio_file_enforced_coherent(kvg->file)) {
			noncoherent = true;
			break;
		}
	}

	if (noncoherent != kv->noncoherent) {
		kv->noncoherent = noncoherent;

		if (kv->noncoherent)
			kvm_arch_register_noncoherent_dma(dev->kvm);
		else
			kvm_arch_unregister_noncoherent_dma(dev->kvm);
	}

	mutex_unlock(&kv->lock);
}

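/*
 * On x86, registering noncoherent DMA is what makes KVM honor guest
 * cache management (e.g. WBINVD) and guest PAT instead of assuming all
 * memory accesses are coherent; other architectures may implement these
 * hooks as no-ops.
 */
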
static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg;
	struct file *filp;
	int ret;

	filp = fget(fd);
	if (!filp)
		return -EBADF;

	/* Ensure the FD is a vfio group FD. */
	if (!kvm_vfio_file_iommu_group(filp)) {
		ret = -EINVAL;
		goto err_fput;
	}

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (kvg->file == filp) {
			ret = -EEXIST;
			goto err_unlock;
		}
	}

	kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
	if (!kvg) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* kvg keeps the file reference taken by fget() above */
	kvg->file = filp;
	list_add_tail(&kvg->node, &kv->group_list);

	kvm_arch_start_assignment(dev->kvm);

	mutex_unlock(&kv->lock);

	kvm_vfio_file_set_kvm(kvg->file, dev->kvm);
	kvm_vfio_update_coherency(dev);

	return 0;

err_unlock:
	mutex_unlock(&kv->lock);
err_fput:
	fput(filp);
	return ret;
}

static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg;
	struct fd f;
	int ret;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -ENOENT;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (kvg->file != f.file)
			continue;

		list_del(&kvg->node);
		kvm_arch_end_assignment(dev->kvm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
#endif
		kvm_vfio_file_set_kvm(kvg->file, NULL);
		fput(kvg->file);
		kfree(kvg);
		ret = 0;
		break;
	}

	mutex_unlock(&kv->lock);

	fdput(f);

	kvm_vfio_update_coherency(dev);

	return ret;
}

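/*
 * KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE (POWER only) binds a TCE table fd to
 * a previously added VFIO group, so TCE hypercalls (H_PUT_TCE and
 * friends) can be handled in the kernel against the group's IOMMU
 * tables. The uapi argument is struct kvm_vfio_spapr_tce, which carries
 * the groupfd and tablefd pair.
 */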
#ifdef CONFIG_SPAPR_TCE_IOMMU
static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
					void __user *arg)
{
	struct kvm_vfio_spapr_tce param;
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg;
	struct fd f;
	int ret;

	if (copy_from_user(&param, arg, sizeof(struct kvm_vfio_spapr_tce)))
		return -EFAULT;

	f = fdget(param.groupfd);
	if (!f.file)
		return -EBADF;

	ret = -ENOENT;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		struct iommu_group *grp;

		if (kvg->file != f.file)
			continue;

		grp = kvm_vfio_file_iommu_group(kvg->file);
		if (WARN_ON_ONCE(!grp)) {
			ret = -EIO;
			goto err_fdput;
		}

		ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd,
						       grp);
		break;
	}

err_fdput:
	mutex_unlock(&kv->lock);
	fdput(f);
	return ret;
}
#endif

static int kvm_vfio_set_group(struct kvm_device *dev, long attr,
			      void __user *arg)
{
	int32_t __user *argp = arg;
	int32_t fd;

	switch (attr) {
	case KVM_DEV_VFIO_GROUP_ADD:
		if (get_user(fd, argp))
			return -EFAULT;
		return kvm_vfio_group_add(dev, fd);

	case KVM_DEV_VFIO_GROUP_DEL:
		if (get_user(fd, argp))
			return -EFAULT;
		return kvm_vfio_group_del(dev, fd);

#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
		return kvm_vfio_group_set_spapr_tce(dev, arg);
#endif
	}

	return -ENXIO;
}

static int kvm_vfio_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		return kvm_vfio_set_group(dev, attr->attr,
					  u64_to_user_ptr(attr->addr));
	}

	return -ENXIO;
}

static int kvm_vfio_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		switch (attr->attr) {
		case KVM_DEV_VFIO_GROUP_ADD:
		case KVM_DEV_VFIO_GROUP_DEL:
#ifdef CONFIG_SPAPR_TCE_IOMMU
		case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
#endif
			return 0;
		}

		break;
	}

	return -ENXIO;
}

static void kvm_vfio_destroy(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg, *tmp;

	list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
#ifdef CONFIG_SPAPR_TCE_IOMMU
		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
#endif
		kvm_vfio_file_set_kvm(kvg->file, NULL);
		fput(kvg->file);
		list_del(&kvg->node);
		kfree(kvg);
		kvm_arch_end_assignment(dev->kvm);
	}

	kvm_vfio_update_coherency(dev);

	kfree(kv);
	kfree(dev); /* alloc by kvm_ioctl_create_device, free by .destroy */
}

static int kvm_vfio_create(struct kvm_device *dev, u32 type);

static struct kvm_device_ops kvm_vfio_ops = {
	.name = "kvm-vfio",
	.create = kvm_vfio_create,
	.destroy = kvm_vfio_destroy,
	.set_attr = kvm_vfio_set_attr,
	.has_attr = kvm_vfio_has_attr,
};

static int kvm_vfio_create(struct kvm_device *dev, u32 type)
{
	struct kvm_device *tmp;
	struct kvm_vfio *kv;

	/* Only one VFIO "device" per VM */
	list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
		if (tmp->ops == &kvm_vfio_ops)
			return -EBUSY;

	kv = kzalloc(sizeof(*kv), GFP_KERNEL_ACCOUNT);
	if (!kv)
		return -ENOMEM;

	INIT_LIST_HEAD(&kv->group_list);
	mutex_init(&kv->lock);

	dev->private = kv;

	return 0;
}

int kvm_vfio_ops_init(void)
{
	return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
}

void kvm_vfio_ops_exit(void)
{
	kvm_unregister_device_ops(KVM_DEV_TYPE_VFIO);
}
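
/*
 * A minimal userspace sketch (not part of this file) of how this device
 * is driven, assuming vm_fd is an open KVM VM fd and group_fd an open
 * /dev/vfio/<group> fd:
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_VFIO_GROUP,
 *		.attr = KVM_DEV_VFIO_GROUP_ADD,
 *		.addr = (__u64)(uintptr_t)&group_fd,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd))
 *		err(1, "KVM_CREATE_DEVICE");	// cd.fd holds the device fd
 *	if (ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr))
 *		err(1, "KVM_DEV_VFIO_GROUP_ADD");
 *
 * KVM_DEV_VFIO_GROUP_DEL takes the same int32_t fd pointer to drop the
 * group again; kvm_vfio_destroy() cleans up anything still attached when
 * the VM goes away.
 */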