1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
4 * Kernel side components to support tools/testing/selftests/iommu
6 #include <linux/anon_inodes.h>
7 #include <linux/debugfs.h>
8 #include <linux/fault-inject.h>
9 #include <linux/file.h>
10 #include <linux/iommu.h>
11 #include <linux/platform_device.h>
12 #include <linux/slab.h>
13 #include <linux/xarray.h>
14 #include <uapi/linux/iommufd.h>
16 #include "../iommu-priv.h"
17 #include "io_pagetable.h"
18 #include "iommufd_private.h"
19 #include "iommufd_test.h"
21 static DECLARE_FAULT_ATTR(fail_iommufd);
22 static struct dentry *dbgfs_root;
23 static struct platform_device *selftest_iommu_dev;
24 static const struct iommu_ops mock_ops;
25 static struct iommu_domain_ops domain_nested_ops;
27 size_t iommufd_test_memory_limit = 65536;
29 struct mock_bus_type {
31 struct notifier_block nb;
34 static struct mock_bus_type iommufd_mock_bus_type = {
36 .name = "iommufd_mock",
40 static DEFINE_IDA(mock_dev_ida);
44 MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
45 MOCK_HUGE_PAGE_SIZE = 512 * MOCK_IO_PAGE_SIZE,
48 * Like a real page table, alignment requires the low bits of the address
49 * to be zero. xarray also requires the high bit to be zero, so we store
50 * the pfns shifted. The upper bits are used for metadata.
52 MOCK_PFN_MASK = ULONG_MAX / MOCK_IO_PAGE_SIZE,
54 _MOCK_PFN_START = MOCK_PFN_MASK + 1,
55 MOCK_PFN_START_IOVA = _MOCK_PFN_START,
56 MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
57 MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1,
58 MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2,
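/*
 * Example (illustrative sketch) of the encoding described above: a mock PTE
 * is stored in the xarray as the shifted pfn OR-ed with flag bits, and the
 * physical address is recovered by masking the flags back off:
 *
 *	entry = xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) | MOCK_PFN_START_IOVA);
 *	paddr = (xa_to_value(entry) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE;
 *
 * MOCK_PFN_START_IOVA and MOCK_PFN_LAST_IOVA share one metadata bit, while
 * MOCK_PFN_DIRTY_IOVA and MOCK_PFN_HUGE_IOVA take the next two; all of them
 * sit above MOCK_PFN_MASK, so decoding strips them.
 */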
62 * Syzkaller has trouble randomizing the correct iova to use since it is linked
63 * to the map ioctl's output, and it has no idea about that. So, simplify things.
64 * In syzkaller mode the 64 bit IOVA is converted into an nth area and offset
65 * value. This has a much smaller randomization space and syzkaller can hit it.
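/*
 * Illustrative sketch of the syzkaller-mode encoding, assuming the (elided)
 * struct syz_layout is two __u32s, nth_area followed by offset: on a
 * little-endian machine a test can build the fake 64 bit IOVA as
 *
 *	__u64 syz_iova = ((__u64)offset_in_area << 32) | nth_area;
 *
 * and __iommufd_test_syz_conv_iova() below turns it back into
 * iopt_area_iova(nth area) + offset.
 */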
67 static unsigned long __iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
74 struct syz_layout *syz = (void *)iova;
75 unsigned int nth = syz->nth_area;
76 struct iopt_area *area;
78 down_read(&iopt->iova_rwsem);
79 for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
80 area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
82 up_read(&iopt->iova_rwsem);
83 return iopt_area_iova(area) + syz->offset;
87 up_read(&iopt->iova_rwsem);
92 static unsigned long iommufd_test_syz_conv_iova(struct iommufd_access *access,
97 mutex_lock(&access->ioas_lock);
99 mutex_unlock(&access->ioas_lock);
102 ret = __iommufd_test_syz_conv_iova(&access->ioas->iopt, iova);
103 mutex_unlock(&access->ioas_lock);
107 void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
108 unsigned int ioas_id, u64 *iova, u32 *flags)
110 struct iommufd_ioas *ioas;
112 if (!(*flags & MOCK_FLAGS_ACCESS_SYZ))
114 *flags &= ~(u32)MOCK_FLAGS_ACCESS_SYZ;
116 ioas = iommufd_get_ioas(ucmd->ictx, ioas_id);
119 *iova = __iommufd_test_syz_conv_iova(&ioas->iopt, iova);
120 iommufd_put_object(ucmd->ictx, &ioas->obj);
123 struct mock_iommu_domain {
125 struct iommu_domain domain;
129 static inline struct mock_iommu_domain *
130 to_mock_domain(struct iommu_domain *domain)
132 return container_of(domain, struct mock_iommu_domain, domain);
135 struct mock_iommu_domain_nested {
136 struct iommu_domain domain;
137 struct mock_viommu *mock_viommu;
138 struct mock_iommu_domain *parent;
139 u32 iotlb[MOCK_NESTED_DOMAIN_IOTLB_NUM];
142 static inline struct mock_iommu_domain_nested *
143 to_mock_nested(struct iommu_domain *domain)
145 return container_of(domain, struct mock_iommu_domain_nested, domain);
149 struct iommufd_viommu core;
150 struct mock_iommu_domain *s2_parent;
153 static inline struct mock_viommu *to_mock_viommu(struct iommufd_viommu *viommu)
155 return container_of(viommu, struct mock_viommu, core);
158 enum selftest_obj_type {
166 u32 cache[MOCK_DEV_CACHE_NUM];
169 static inline struct mock_dev *to_mock_dev(struct device *dev)
171 return container_of(dev, struct mock_dev, dev);
174 struct selftest_obj {
175 struct iommufd_object obj;
176 enum selftest_obj_type type;
180 struct iommufd_device *idev;
181 struct iommufd_ctx *ictx;
182 struct mock_dev *mock_dev;
187 static inline struct selftest_obj *to_selftest_obj(struct iommufd_object *obj)
189 return container_of(obj, struct selftest_obj, obj);
192 static int mock_domain_nop_attach(struct iommu_domain *domain,
195 struct mock_dev *mdev = to_mock_dev(dev);
197 if (domain->dirty_ops && (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY))
203 static const struct iommu_domain_ops mock_blocking_ops = {
204 .attach_dev = mock_domain_nop_attach,
207 static struct iommu_domain mock_blocking_domain = {
208 .type = IOMMU_DOMAIN_BLOCKED,
209 .ops = &mock_blocking_ops,
212 static void *mock_domain_hw_info(struct device *dev, u32 *length, u32 *type)
214 struct iommu_test_hw_info *info;
216 info = kzalloc(sizeof(*info), GFP_KERNEL);
218 return ERR_PTR(-ENOMEM);
220 info->test_reg = IOMMU_HW_INFO_SELFTEST_REGVAL;
221 *length = sizeof(*info);
222 *type = IOMMU_HW_INFO_TYPE_SELFTEST;
227 static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
230 struct mock_iommu_domain *mock = to_mock_domain(domain);
231 unsigned long flags = mock->flags;
233 if (enable && !domain->dirty_ops)
237 if (!(enable ^ !!(flags & MOCK_DIRTY_TRACK)))
240 flags = (enable ? flags | MOCK_DIRTY_TRACK : flags & ~MOCK_DIRTY_TRACK);
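/*
 * Worked example of the no-change test above: enable == true while
 * MOCK_DIRTY_TRACK is already set gives 1 ^ 1 == 0, so the call returns
 * early without touching mock->flags; only a real transition such as
 * enable == true with the bit clear (1 ^ 0 == 1) falls through and
 * updates the flags.
 */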
246 static bool mock_test_and_clear_dirty(struct mock_iommu_domain *mock,
247 unsigned long iova, size_t page_size,
250 unsigned long cur, end = iova + page_size - 1;
254 for (cur = iova; cur < end; cur += MOCK_IO_PAGE_SIZE) {
255 ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
256 if (!ent || !(xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA))
261 if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
264 val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
265 old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE,
266 xa_mk_value(val), GFP_KERNEL);
267 WARN_ON_ONCE(ent != old);
274 static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
275 unsigned long iova, size_t size,
277 struct iommu_dirty_bitmap *dirty)
279 struct mock_iommu_domain *mock = to_mock_domain(domain);
280 unsigned long end = iova + size;
283 if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap)
287 unsigned long pgsize = MOCK_IO_PAGE_SIZE;
290 ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
296 if (xa_to_value(ent) & MOCK_PFN_HUGE_IOVA)
297 pgsize = MOCK_HUGE_PAGE_SIZE;
298 head = iova & ~(pgsize - 1);
301 if (mock_test_and_clear_dirty(mock, head, pgsize, flags))
302 iommu_dirty_bitmap_record(dirty, iova, pgsize);
304 } while (iova < end);
309 static const struct iommu_dirty_ops dirty_ops = {
310 .set_dirty_tracking = mock_domain_set_dirty_tracking,
311 .read_and_clear_dirty = mock_domain_read_and_clear_dirty,
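/*
 * Illustrative sketch of the dirty-bit lifecycle behind these ops: the
 * IOMMU_TEST_OP_DIRTY path marks an entry dirty and read_and_clear reports
 * it, clearing it again unless IOMMU_DIRTY_NO_CLEAR is set:
 *
 *	mark dirty:  val = xa_to_value(ent) | MOCK_PFN_DIRTY_IOVA;
 *	clear dirty: val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
 *
 * with the new value written back via xa_store() on the same index.
 */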
314 static struct mock_iommu_domain_nested *
315 __mock_domain_alloc_nested(const struct iommu_user_data *user_data)
317 struct mock_iommu_domain_nested *mock_nested;
318 struct iommu_hwpt_selftest user_cfg;
321 if (user_data->type != IOMMU_HWPT_DATA_SELFTEST)
322 return ERR_PTR(-EOPNOTSUPP);
324 rc = iommu_copy_struct_from_user(&user_cfg, user_data,
325 IOMMU_HWPT_DATA_SELFTEST, iotlb);
329 mock_nested = kzalloc(sizeof(*mock_nested), GFP_KERNEL);
331 return ERR_PTR(-ENOMEM);
332 mock_nested->domain.ops = &domain_nested_ops;
333 mock_nested->domain.type = IOMMU_DOMAIN_NESTED;
334 for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++)
335 mock_nested->iotlb[i] = user_cfg.iotlb;
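/*
 * Illustrative sketch (uAPI layout hedged, see iommufd_test.h) of the
 * user_data this path expects: userspace allocates a nested HWPT with
 * IOMMU_HWPT_DATA_SELFTEST and a struct iommu_hwpt_selftest whose iotlb
 * value seeds every mock IOTLB entry, roughly
 *
 *	struct iommu_hwpt_selftest data = { .iotlb = 0xdead };
 *
 * passed through the data_type/data_len/data_uptr fields of the HWPT
 * allocation ioctl.
 */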
339 static struct iommu_domain *
340 mock_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
341 u32 flags, const struct iommu_user_data *user_data)
343 struct mock_iommu_domain_nested *mock_nested;
344 struct mock_iommu_domain *mock_parent;
347 return ERR_PTR(-EOPNOTSUPP);
348 if (!parent || parent->ops != mock_ops.default_domain_ops)
349 return ERR_PTR(-EINVAL);
351 mock_parent = to_mock_domain(parent);
353 return ERR_PTR(-EINVAL);
355 mock_nested = __mock_domain_alloc_nested(user_data);
356 if (IS_ERR(mock_nested))
357 return ERR_CAST(mock_nested);
358 mock_nested->parent = mock_parent;
359 return &mock_nested->domain;
362 static struct iommu_domain *
363 mock_domain_alloc_paging_flags(struct device *dev, u32 flags,
364 const struct iommu_user_data *user_data)
366 bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
367 const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
368 IOMMU_HWPT_ALLOC_NEST_PARENT;
369 struct mock_dev *mdev = to_mock_dev(dev);
370 bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY;
371 struct mock_iommu_domain *mock;
374 return ERR_PTR(-EOPNOTSUPP);
375 if ((flags & ~PAGING_FLAGS) || (has_dirty_flag && no_dirty_ops))
376 return ERR_PTR(-EOPNOTSUPP);
378 mock = kzalloc(sizeof(*mock), GFP_KERNEL);
380 return ERR_PTR(-ENOMEM);
381 mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
382 mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
383 mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
384 if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
385 mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
386 mock->domain.ops = mock_ops.default_domain_ops;
387 mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
388 xa_init(&mock->pfns);
391 mock->domain.dirty_ops = &dirty_ops;
392 return &mock->domain;
395 static void mock_domain_free(struct iommu_domain *domain)
397 struct mock_iommu_domain *mock = to_mock_domain(domain);
399 WARN_ON(!xa_empty(&mock->pfns));
403 static int mock_domain_map_pages(struct iommu_domain *domain,
404 unsigned long iova, phys_addr_t paddr,
405 size_t pgsize, size_t pgcount, int prot,
406 gfp_t gfp, size_t *mapped)
408 struct mock_iommu_domain *mock = to_mock_domain(domain);
409 unsigned long flags = MOCK_PFN_START_IOVA;
410 unsigned long start_iova = iova;
413 * xarray does not reliably work with fault injection because it retries
414 * allocations internally, so put our own failure point here.
416 if (iommufd_should_fail())
419 WARN_ON(iova % MOCK_IO_PAGE_SIZE);
420 WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
421 for (; pgcount; pgcount--) {
424 for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
427 if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
428 flags = MOCK_PFN_LAST_IOVA;
429 if (pgsize != MOCK_IO_PAGE_SIZE) {
430 flags |= MOCK_PFN_HUGE_IOVA;
432 old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,
433 xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) |
436 if (xa_is_err(old)) {
437 for (; start_iova != iova;
438 start_iova += MOCK_IO_PAGE_SIZE)
439 xa_erase(&mock->pfns,
445 iova += MOCK_IO_PAGE_SIZE;
446 paddr += MOCK_IO_PAGE_SIZE;
447 *mapped += MOCK_IO_PAGE_SIZE;
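/*
 * Example of the resulting xarray contents (illustrative): mapping a single
 * MOCK_HUGE_PAGE_SIZE range produces 512 entries,
 *
 *	xa[idx + 0]   = (pfn + 0)   | MOCK_PFN_START_IOVA | MOCK_PFN_HUGE_IOVA
 *	xa[idx + n]   = (pfn + n)   | MOCK_PFN_HUGE_IOVA        (0 < n < 511)
 *	xa[idx + 511] = (pfn + 511) | MOCK_PFN_LAST_IOVA  | MOCK_PFN_HUGE_IOVA
 *
 * while a single MOCK_IO_PAGE_SIZE map stores one entry carrying the shared
 * START/LAST bit. unmap_pages() below sanity-checks exactly this layout.
 */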
454 static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
455 unsigned long iova, size_t pgsize,
457 struct iommu_iotlb_gather *iotlb_gather)
459 struct mock_iommu_domain *mock = to_mock_domain(domain);
464 WARN_ON(iova % MOCK_IO_PAGE_SIZE);
465 WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
467 for (; pgcount; pgcount--) {
470 for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
471 ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
474 * iommufd generates unmaps that must be a strict
475 * superset of the maps performed, so every
476 * starting/ending IOVA should have been an iova passed
479 * This simple logic doesn't work when the HUGE_PAGE is
480 * turned on since the core code will automatically
481 * switch between the two page sizes creating a break in
482 * the unmap calls. The break can land in the middle of
485 if (!(domain->pgsize_bitmap & MOCK_HUGE_PAGE_SIZE)) {
487 WARN_ON(ent && !(xa_to_value(ent) &
488 MOCK_PFN_START_IOVA));
492 cur + MOCK_IO_PAGE_SIZE == pgsize)
493 WARN_ON(ent && !(xa_to_value(ent) &
494 MOCK_PFN_LAST_IOVA));
497 iova += MOCK_IO_PAGE_SIZE;
498 ret += MOCK_IO_PAGE_SIZE;
504 static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain,
507 struct mock_iommu_domain *mock = to_mock_domain(domain);
510 WARN_ON(iova % MOCK_IO_PAGE_SIZE);
511 ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
513 return (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE;
516 static bool mock_domain_capable(struct device *dev, enum iommu_cap cap)
518 struct mock_dev *mdev = to_mock_dev(dev);
521 case IOMMU_CAP_CACHE_COHERENCY:
523 case IOMMU_CAP_DIRTY_TRACKING:
524 return !(mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY);
532 static struct iopf_queue *mock_iommu_iopf_queue;
534 static struct mock_iommu_device {
535 struct iommu_device iommu_dev;
536 struct completion complete;
540 static struct iommu_device *mock_probe_device(struct device *dev)
542 if (dev->bus != &iommufd_mock_bus_type.bus)
543 return ERR_PTR(-ENODEV);
544 return &mock_iommu.iommu_dev;
547 static void mock_domain_page_response(struct device *dev, struct iopf_fault *evt,
548 struct iommu_page_response *msg)
552 static int mock_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
554 if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue)
557 return iopf_queue_add_device(mock_iommu_iopf_queue, dev);
560 static int mock_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
562 if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue)
565 iopf_queue_remove_device(mock_iommu_iopf_queue, dev);
570 static void mock_viommu_destroy(struct iommufd_viommu *viommu)
572 struct mock_iommu_device *mock_iommu = container_of(
573 viommu->iommu_dev, struct mock_iommu_device, iommu_dev);
575 if (refcount_dec_and_test(&mock_iommu->users))
576 complete(&mock_iommu->complete);
578 /* iommufd core frees mock_viommu and viommu */
581 static struct iommu_domain *
582 mock_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
583 const struct iommu_user_data *user_data)
585 struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
586 struct mock_iommu_domain_nested *mock_nested;
589 return ERR_PTR(-EOPNOTSUPP);
591 mock_nested = __mock_domain_alloc_nested(user_data);
592 if (IS_ERR(mock_nested))
593 return ERR_CAST(mock_nested);
594 mock_nested->mock_viommu = mock_viommu;
595 mock_nested->parent = mock_viommu->s2_parent;
596 return &mock_nested->domain;
599 static int mock_viommu_cache_invalidate(struct iommufd_viommu *viommu,
600 struct iommu_user_data_array *array)
602 struct iommu_viommu_invalidate_selftest *cmds;
603 struct iommu_viommu_invalidate_selftest *cur;
604 struct iommu_viommu_invalidate_selftest *end;
607 /* A zero-length array is allowed to validate the array type */
608 if (array->entry_num == 0 &&
609 array->type == IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST) {
610 array->entry_num = 0;
614 cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL);
618 end = cmds + array->entry_num;
620 static_assert(sizeof(*cmds) == 3 * sizeof(u32));
621 rc = iommu_copy_struct_from_full_user_array(
622 cmds, sizeof(*cmds), array,
623 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST);
628 struct mock_dev *mdev;
632 if (cur->flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
637 if (cur->cache_id > MOCK_DEV_CACHE_ID_MAX) {
642 xa_lock(&viommu->vdevs);
643 dev = iommufd_viommu_find_dev(viommu,
644 (unsigned long)cur->vdev_id);
646 xa_unlock(&viommu->vdevs);
650 mdev = container_of(dev, struct mock_dev, dev);
652 if (cur->flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
653 /* Invalidate all cache entries and ignore cache_id */
654 for (i = 0; i < MOCK_DEV_CACHE_NUM; i++)
657 mdev->cache[cur->cache_id] = 0;
659 xa_unlock(&viommu->vdevs);
664 array->entry_num = cur - cmds;
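/*
 * Illustrative sketch of one invalidation entry as userspace builds it
 * (field names as consumed above, exact uAPI layout hedged):
 *
 *	struct iommu_viommu_invalidate_selftest inv = {
 *		.flags = IOMMU_TEST_INVALIDATE_FLAG_ALL,
 *		.vdev_id = vdev_id,
 *	};
 *
 * wipes every cache entry of the device behind vdev_id, while .flags == 0
 * zeroes only .cache_id (which must be <= MOCK_DEV_CACHE_ID_MAX). On exit,
 * array->entry_num tells userspace how many entries were consumed.
 */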
669 static struct iommufd_viommu_ops mock_viommu_ops = {
670 .destroy = mock_viommu_destroy,
671 .alloc_domain_nested = mock_viommu_alloc_domain_nested,
672 .cache_invalidate = mock_viommu_cache_invalidate,
675 static struct iommufd_viommu *mock_viommu_alloc(struct device *dev,
676 struct iommu_domain *domain,
677 struct iommufd_ctx *ictx,
678 unsigned int viommu_type)
680 struct mock_iommu_device *mock_iommu =
681 iommu_get_iommu_dev(dev, struct mock_iommu_device, iommu_dev);
682 struct mock_viommu *mock_viommu;
684 if (viommu_type != IOMMU_VIOMMU_TYPE_SELFTEST)
685 return ERR_PTR(-EOPNOTSUPP);
687 mock_viommu = iommufd_viommu_alloc(ictx, struct mock_viommu, core,
689 if (IS_ERR(mock_viommu))
690 return ERR_CAST(mock_viommu);
692 refcount_inc(&mock_iommu->users);
693 return &mock_viommu->core;
696 static const struct iommu_ops mock_ops = {
698 * IOMMU_DOMAIN_BLOCKED cannot be returned from def_domain_type()
699 * because it is zero.
701 .default_domain = &mock_blocking_domain,
702 .blocked_domain = &mock_blocking_domain,
703 .owner = THIS_MODULE,
704 .pgsize_bitmap = MOCK_IO_PAGE_SIZE,
705 .hw_info = mock_domain_hw_info,
706 .domain_alloc_paging_flags = mock_domain_alloc_paging_flags,
707 .domain_alloc_nested = mock_domain_alloc_nested,
708 .capable = mock_domain_capable,
709 .device_group = generic_device_group,
710 .probe_device = mock_probe_device,
711 .page_response = mock_domain_page_response,
712 .dev_enable_feat = mock_dev_enable_feat,
713 .dev_disable_feat = mock_dev_disable_feat,
714 .user_pasid_table = true,
715 .viommu_alloc = mock_viommu_alloc,
716 .default_domain_ops =
717 &(struct iommu_domain_ops){
718 .free = mock_domain_free,
719 .attach_dev = mock_domain_nop_attach,
720 .map_pages = mock_domain_map_pages,
721 .unmap_pages = mock_domain_unmap_pages,
722 .iova_to_phys = mock_domain_iova_to_phys,
726 static void mock_domain_free_nested(struct iommu_domain *domain)
728 kfree(to_mock_nested(domain));
732 mock_domain_cache_invalidate_user(struct iommu_domain *domain,
733 struct iommu_user_data_array *array)
735 struct mock_iommu_domain_nested *mock_nested = to_mock_nested(domain);
736 struct iommu_hwpt_invalidate_selftest inv;
741 if (array->type != IOMMU_HWPT_INVALIDATE_DATA_SELFTEST) {
746 for ( ; i < array->entry_num; i++) {
747 rc = iommu_copy_struct_from_user_array(&inv, array,
748 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
753 if (inv.flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
758 if (inv.iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX) {
763 if (inv.flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
764 /* Invalidate all mock iotlb entries and ignore iotlb_id */
765 for (j = 0; j < MOCK_NESTED_DOMAIN_IOTLB_NUM; j++)
766 mock_nested->iotlb[j] = 0;
768 mock_nested->iotlb[inv.iotlb_id] = 0;
775 array->entry_num = processed;
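/*
 * Illustrative sketch of one entry (field names as consumed above, exact
 * uAPI layout hedged):
 *
 *	struct iommu_hwpt_invalidate_selftest inv = {
 *		.flags = 0,
 *		.iotlb_id = 2,
 *	};
 *
 * zeroes mock_nested->iotlb[2] only, whereas IOMMU_TEST_INVALIDATE_FLAG_ALL
 * ignores iotlb_id and clears every entry. array->entry_num reports how many
 * entries were processed before any error.
 */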
779 static struct iommu_domain_ops domain_nested_ops = {
780 .free = mock_domain_free_nested,
781 .attach_dev = mock_domain_nop_attach,
782 .cache_invalidate_user = mock_domain_cache_invalidate_user,
785 static inline struct iommufd_hw_pagetable *
786 __get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id, u32 hwpt_type)
788 struct iommufd_object *obj;
790 obj = iommufd_get_object(ucmd->ictx, mockpt_id, hwpt_type);
792 return ERR_CAST(obj);
793 return container_of(obj, struct iommufd_hw_pagetable, obj);
796 static inline struct iommufd_hw_pagetable *
797 get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
798 struct mock_iommu_domain **mock)
800 struct iommufd_hw_pagetable *hwpt;
802 hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_PAGING);
805 if (hwpt->domain->type != IOMMU_DOMAIN_UNMANAGED ||
806 hwpt->domain->ops != mock_ops.default_domain_ops) {
807 iommufd_put_object(ucmd->ictx, &hwpt->obj);
808 return ERR_PTR(-EINVAL);
810 *mock = to_mock_domain(hwpt->domain);
814 static inline struct iommufd_hw_pagetable *
815 get_md_pagetable_nested(struct iommufd_ucmd *ucmd, u32 mockpt_id,
816 struct mock_iommu_domain_nested **mock_nested)
818 struct iommufd_hw_pagetable *hwpt;
820 hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_NESTED);
823 if (hwpt->domain->type != IOMMU_DOMAIN_NESTED ||
824 hwpt->domain->ops != &domain_nested_ops) {
825 iommufd_put_object(ucmd->ictx, &hwpt->obj);
826 return ERR_PTR(-EINVAL);
828 *mock_nested = to_mock_nested(hwpt->domain);
832 static void mock_dev_release(struct device *dev)
834 struct mock_dev *mdev = to_mock_dev(dev);
836 ida_free(&mock_dev_ida, mdev->id);
840 static struct mock_dev *mock_dev_create(unsigned long dev_flags)
842 struct mock_dev *mdev;
846 ~(MOCK_FLAGS_DEVICE_NO_DIRTY | MOCK_FLAGS_DEVICE_HUGE_IOVA))
847 return ERR_PTR(-EINVAL);
849 mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
851 return ERR_PTR(-ENOMEM);
853 device_initialize(&mdev->dev);
854 mdev->flags = dev_flags;
855 mdev->dev.release = mock_dev_release;
856 mdev->dev.bus = &iommufd_mock_bus_type.bus;
857 for (i = 0; i < MOCK_DEV_CACHE_NUM; i++)
858 mdev->cache[i] = IOMMU_TEST_DEV_CACHE_DEFAULT;
860 rc = ida_alloc(&mock_dev_ida, GFP_KERNEL);
865 rc = dev_set_name(&mdev->dev, "iommufd_mock%u", mdev->id);
869 rc = device_add(&mdev->dev);
875 put_device(&mdev->dev);
879 static void mock_dev_destroy(struct mock_dev *mdev)
881 device_unregister(&mdev->dev);
884 bool iommufd_selftest_is_mock_dev(struct device *dev)
886 return dev->release == mock_dev_release;
889 /* Create an hw_pagetable with the mock domain so we can test the domain ops */
890 static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd,
891 struct iommu_test_cmd *cmd)
893 struct iommufd_device *idev;
894 struct selftest_obj *sobj;
900 sobj = iommufd_object_alloc(ucmd->ictx, sobj, IOMMUFD_OBJ_SELFTEST);
902 return PTR_ERR(sobj);
904 sobj->idev.ictx = ucmd->ictx;
905 sobj->type = TYPE_IDEV;
907 if (cmd->op == IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS)
908 dev_flags = cmd->mock_domain_flags.dev_flags;
910 sobj->idev.mock_dev = mock_dev_create(dev_flags);
911 if (IS_ERR(sobj->idev.mock_dev)) {
912 rc = PTR_ERR(sobj->idev.mock_dev);
916 idev = iommufd_device_bind(ucmd->ictx, &sobj->idev.mock_dev->dev,
922 sobj->idev.idev = idev;
924 rc = iommufd_device_attach(idev, &pt_id);
928 /* Userspace must destroy the device_id to destroy the object */
929 cmd->mock_domain.out_hwpt_id = pt_id;
930 cmd->mock_domain.out_stdev_id = sobj->obj.id;
931 cmd->mock_domain.out_idev_id = idev_id;
932 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
935 iommufd_object_finalize(ucmd->ictx, &sobj->obj);
939 iommufd_device_detach(idev);
941 iommufd_device_unbind(idev);
943 mock_dev_destroy(sobj->idev.mock_dev);
945 iommufd_object_abort(ucmd->ictx, &sobj->obj);
949 /* Replace the mock domain with a manually allocated hw_pagetable */
950 static int iommufd_test_mock_domain_replace(struct iommufd_ucmd *ucmd,
951 unsigned int device_id, u32 pt_id,
952 struct iommu_test_cmd *cmd)
954 struct iommufd_object *dev_obj;
955 struct selftest_obj *sobj;
959 * Prefer to use the OBJ_SELFTEST because the destroy_rwsem will ensure
960 * it doesn't race with detach, which is not allowed.
963 iommufd_get_object(ucmd->ictx, device_id, IOMMUFD_OBJ_SELFTEST);
965 return PTR_ERR(dev_obj);
967 sobj = to_selftest_obj(dev_obj);
968 if (sobj->type != TYPE_IDEV) {
973 rc = iommufd_device_replace(sobj->idev.idev, &pt_id);
977 cmd->mock_domain_replace.pt_id = pt_id;
978 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
981 iommufd_put_object(ucmd->ictx, dev_obj);
985 /* Add an additional reserved IOVA to the IOAS */
986 static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd,
987 unsigned int mockpt_id,
988 unsigned long start, size_t length)
990 struct iommufd_ioas *ioas;
993 ioas = iommufd_get_ioas(ucmd->ictx, mockpt_id);
995 return PTR_ERR(ioas);
996 down_write(&ioas->iopt.iova_rwsem);
997 rc = iopt_reserve_iova(&ioas->iopt, start, start + length - 1, NULL);
998 up_write(&ioas->iopt.iova_rwsem);
999 iommufd_put_object(ucmd->ictx, &ioas->obj);
1003 /* Check that every pfn under each iova matches the pfn under a user VA */
1004 static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
1005 unsigned int mockpt_id, unsigned long iova,
1006 size_t length, void __user *uptr)
1008 struct iommufd_hw_pagetable *hwpt;
1009 struct mock_iommu_domain *mock;
1013 if (iova % MOCK_IO_PAGE_SIZE || length % MOCK_IO_PAGE_SIZE ||
1014 (uintptr_t)uptr % MOCK_IO_PAGE_SIZE ||
1015 check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
1018 hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
1020 return PTR_ERR(hwpt);
1022 for (; length; length -= MOCK_IO_PAGE_SIZE) {
1023 struct page *pages[1];
1028 npages = get_user_pages_fast((uintptr_t)uptr & PAGE_MASK, 1, 0,
1034 if (WARN_ON(npages != 1)) {
1038 pfn = page_to_pfn(pages[0]);
1041 ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
1043 (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE !=
1044 pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) {
1048 iova += MOCK_IO_PAGE_SIZE;
1049 uptr += MOCK_IO_PAGE_SIZE;
1054 iommufd_put_object(ucmd->ictx, &hwpt->obj);
1058 /* Check that the page ref count matches, to look for missing pin/unpins */
1059 static int iommufd_test_md_check_refs(struct iommufd_ucmd *ucmd,
1060 void __user *uptr, size_t length,
1065 if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE ||
1066 check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
1069 for (; length; length -= PAGE_SIZE) {
1070 struct page *pages[1];
1073 npages = get_user_pages_fast((uintptr_t)uptr, 1, 0, pages);
1076 if (WARN_ON(npages != 1))
1078 if (!PageCompound(pages[0])) {
1081 count = page_ref_count(pages[0]);
1082 if (count / GUP_PIN_COUNTING_BIAS != refs) {
1093 static int iommufd_test_md_check_iotlb(struct iommufd_ucmd *ucmd,
1094 u32 mockpt_id, unsigned int iotlb_id,
1097 struct mock_iommu_domain_nested *mock_nested;
1098 struct iommufd_hw_pagetable *hwpt;
1101 hwpt = get_md_pagetable_nested(ucmd, mockpt_id, &mock_nested);
1103 return PTR_ERR(hwpt);
1105 mock_nested = to_mock_nested(hwpt->domain);
1107 if (iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX ||
1108 mock_nested->iotlb[iotlb_id] != iotlb)
1110 iommufd_put_object(ucmd->ictx, &hwpt->obj);
1114 static int iommufd_test_dev_check_cache(struct iommufd_ucmd *ucmd, u32 idev_id,
1115 unsigned int cache_id, u32 cache)
1117 struct iommufd_device *idev;
1118 struct mock_dev *mdev;
1121 idev = iommufd_get_device(ucmd, idev_id);
1123 return PTR_ERR(idev);
1124 mdev = container_of(idev->dev, struct mock_dev, dev);
1126 if (cache_id > MOCK_DEV_CACHE_ID_MAX || mdev->cache[cache_id] != cache)
1128 iommufd_put_object(ucmd->ictx, &idev->obj);
1132 struct selftest_access {
1133 struct iommufd_access *access;
1136 struct list_head items;
1137 unsigned int next_id;
1141 struct selftest_access_item {
1142 struct list_head items_elm;
1148 static const struct file_operations iommfd_test_staccess_fops;
1150 static struct selftest_access *iommufd_access_get(int fd)
1156 return ERR_PTR(-EBADFD);
1158 if (file->f_op != &iommfd_test_staccess_fops) {
1160 return ERR_PTR(-EBADFD);
1162 return file->private_data;
1165 static void iommufd_test_access_unmap(void *data, unsigned long iova,
1166 unsigned long length)
1168 unsigned long iova_last = iova + length - 1;
1169 struct selftest_access *staccess = data;
1170 struct selftest_access_item *item;
1171 struct selftest_access_item *tmp;
1173 mutex_lock(&staccess->lock);
1174 list_for_each_entry_safe(item, tmp, &staccess->items, items_elm) {
1175 if (iova > item->iova + item->length - 1 ||
1176 iova_last < item->iova)
1178 list_del(&item->items_elm);
1179 iommufd_access_unpin_pages(staccess->access, item->iova,
1183 mutex_unlock(&staccess->lock);
1186 static int iommufd_test_access_item_destroy(struct iommufd_ucmd *ucmd,
1187 unsigned int access_id,
1188 unsigned int item_id)
1190 struct selftest_access_item *item;
1191 struct selftest_access *staccess;
1193 staccess = iommufd_access_get(access_id);
1194 if (IS_ERR(staccess))
1195 return PTR_ERR(staccess);
1197 mutex_lock(&staccess->lock);
1198 list_for_each_entry(item, &staccess->items, items_elm) {
1199 if (item->id == item_id) {
1200 list_del(&item->items_elm);
1201 iommufd_access_unpin_pages(staccess->access, item->iova,
1203 mutex_unlock(&staccess->lock);
1205 fput(staccess->file);
1209 mutex_unlock(&staccess->lock);
1210 fput(staccess->file);
1214 static int iommufd_test_staccess_release(struct inode *inode,
1217 struct selftest_access *staccess = filep->private_data;
1219 if (staccess->access) {
1220 iommufd_test_access_unmap(staccess, 0, ULONG_MAX);
1221 iommufd_access_destroy(staccess->access);
1223 mutex_destroy(&staccess->lock);
1228 static const struct iommufd_access_ops selftest_access_ops_pin = {
1229 .needs_pin_pages = 1,
1230 .unmap = iommufd_test_access_unmap,
1233 static const struct iommufd_access_ops selftest_access_ops = {
1234 .unmap = iommufd_test_access_unmap,
1237 static const struct file_operations iommfd_test_staccess_fops = {
1238 .release = iommufd_test_staccess_release,
1241 static struct selftest_access *iommufd_test_alloc_access(void)
1243 struct selftest_access *staccess;
1246 staccess = kzalloc(sizeof(*staccess), GFP_KERNEL_ACCOUNT);
1248 return ERR_PTR(-ENOMEM);
1249 INIT_LIST_HEAD(&staccess->items);
1250 mutex_init(&staccess->lock);
1252 filep = anon_inode_getfile("[iommufd_test_staccess]",
1253 &iommfd_test_staccess_fops, staccess,
1255 if (IS_ERR(filep)) {
1257 return ERR_CAST(filep);
1259 staccess->file = filep;
1263 static int iommufd_test_create_access(struct iommufd_ucmd *ucmd,
1264 unsigned int ioas_id, unsigned int flags)
1266 struct iommu_test_cmd *cmd = ucmd->cmd;
1267 struct selftest_access *staccess;
1268 struct iommufd_access *access;
1273 if (flags & ~MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES)
1276 staccess = iommufd_test_alloc_access();
1277 if (IS_ERR(staccess))
1278 return PTR_ERR(staccess);
1280 fdno = get_unused_fd_flags(O_CLOEXEC);
1283 goto out_free_staccess;
1286 access = iommufd_access_create(
1288 (flags & MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES) ?
1289 &selftest_access_ops_pin :
1290 &selftest_access_ops,
1292 if (IS_ERR(access)) {
1293 rc = PTR_ERR(access);
1296 rc = iommufd_access_attach(access, ioas_id);
1299 cmd->create_access.out_access_fd = fdno;
1300 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1304 staccess->access = access;
1305 fd_install(fdno, staccess->file);
1309 iommufd_access_destroy(access);
1311 put_unused_fd(fdno);
1313 fput(staccess->file);
1317 static int iommufd_test_access_replace_ioas(struct iommufd_ucmd *ucmd,
1318 unsigned int access_id,
1319 unsigned int ioas_id)
1321 struct selftest_access *staccess;
1324 staccess = iommufd_access_get(access_id);
1325 if (IS_ERR(staccess))
1326 return PTR_ERR(staccess);
1328 rc = iommufd_access_replace(staccess->access, ioas_id);
1329 fput(staccess->file);
1333 /* Check that the pages in a page array match the pages in the user VA */
1334 static int iommufd_test_check_pages(void __user *uptr, struct page **pages,
1337 for (; npages; npages--) {
1338 struct page *tmp_pages[1];
1341 rc = get_user_pages_fast((uintptr_t)uptr, 1, 0, tmp_pages);
1344 if (WARN_ON(rc != 1))
1346 put_page(tmp_pages[0]);
1347 if (tmp_pages[0] != *pages)
1355 static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
1356 unsigned int access_id, unsigned long iova,
1357 size_t length, void __user *uptr,
1360 struct iommu_test_cmd *cmd = ucmd->cmd;
1361 struct selftest_access_item *item;
1362 struct selftest_access *staccess;
1363 struct page **pages;
1367 /* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
1368 if (length > 16 * 1024 * 1024)
1371 if (flags & ~(MOCK_FLAGS_ACCESS_WRITE | MOCK_FLAGS_ACCESS_SYZ))
1374 staccess = iommufd_access_get(access_id);
1375 if (IS_ERR(staccess))
1376 return PTR_ERR(staccess);
1378 if (staccess->access->ops != &selftest_access_ops_pin) {
1383 if (flags & MOCK_FLAGS_ACCESS_SYZ)
1384 iova = iommufd_test_syz_conv_iova(staccess->access,
1385 &cmd->access_pages.iova);
1387 npages = (ALIGN(iova + length, PAGE_SIZE) -
1388 ALIGN_DOWN(iova, PAGE_SIZE)) /
1390 pages = kvcalloc(npages, sizeof(*pages), GFP_KERNEL_ACCOUNT);
1397 * Drivers will need to think very carefully about this locking. The
1398 * core code can do multiple unmaps instantaneously after
1399 * iommufd_access_pin_pages() and *all* the unmaps must not return until
1400 * the range is unpinned. This simple implementation puts a global lock
1401 * around the pin, which may not suit drivers that want this to be a
1402 * performance path. Drivers that get this wrong will trigger WARN_ON
1403 * races and cause EDEADLOCK failures to userspace.
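 *
 * In sketch form, the pattern a driver's unmap callback must follow (and
 * what iommufd_test_access_unmap() above does), using hypothetical names
 * for the driver-private state:
 *
 *	mutex_lock(&priv->lock);
 *	for each pinned range overlapping [iova, iova + length - 1]:
 *		iommufd_access_unpin_pages(access, range->iova, range->length);
 *	mutex_unlock(&priv->lock);
 *
 * i.e. the callback may not return while any overlapping pin remains.
 */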
1405 mutex_lock(&staccess->lock);
1406 rc = iommufd_access_pin_pages(staccess->access, iova, length, pages,
1407 flags & MOCK_FLAGS_ACCESS_WRITE);
1411 /* For syzkaller allow uptr to be NULL to skip this check */
1413 rc = iommufd_test_check_pages(
1414 uptr - (iova - ALIGN_DOWN(iova, PAGE_SIZE)), pages,
1420 item = kzalloc(sizeof(*item), GFP_KERNEL_ACCOUNT);
1427 item->length = length;
1428 item->id = staccess->next_id++;
1429 list_add_tail(&item->items_elm, &staccess->items);
1431 cmd->access_pages.out_access_pages_id = item->id;
1432 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1438 list_del(&item->items_elm);
1441 iommufd_access_unpin_pages(staccess->access, iova, length);
1443 mutex_unlock(&staccess->lock);
1446 fput(staccess->file);
1450 static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
1451 unsigned int access_id, unsigned long iova,
1452 size_t length, void __user *ubuf,
1455 struct iommu_test_cmd *cmd = ucmd->cmd;
1456 struct selftest_access *staccess;
1460 /* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
1461 if (length > 16 * 1024 * 1024)
1464 if (flags & ~(MOCK_ACCESS_RW_WRITE | MOCK_ACCESS_RW_SLOW_PATH |
1465 MOCK_FLAGS_ACCESS_SYZ))
1468 staccess = iommufd_access_get(access_id);
1469 if (IS_ERR(staccess))
1470 return PTR_ERR(staccess);
1472 tmp = kvzalloc(length, GFP_KERNEL_ACCOUNT);
1478 if (flags & MOCK_ACCESS_RW_WRITE) {
1479 if (copy_from_user(tmp, ubuf, length)) {
1485 if (flags & MOCK_FLAGS_ACCESS_SYZ)
1486 iova = iommufd_test_syz_conv_iova(staccess->access,
1487 &cmd->access_rw.iova);
1489 rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags);
1492 if (!(flags & MOCK_ACCESS_RW_WRITE)) {
1493 if (copy_to_user(ubuf, tmp, length)) {
1502 fput(staccess->file);
1505 static_assert((unsigned int)MOCK_ACCESS_RW_WRITE == IOMMUFD_ACCESS_RW_WRITE);
1506 static_assert((unsigned int)MOCK_ACCESS_RW_SLOW_PATH ==
1507 __IOMMUFD_ACCESS_RW_SLOW_PATH);
1509 static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
1510 unsigned long iova, size_t length,
1511 unsigned long page_size, void __user *uptr,
1514 unsigned long i, max;
1515 struct iommu_test_cmd *cmd = ucmd->cmd;
1516 struct iommufd_hw_pagetable *hwpt;
1517 struct mock_iommu_domain *mock;
1521 if (!page_size || !length || iova % page_size || length % page_size ||
1525 hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
1527 return PTR_ERR(hwpt);
1529 if (!(mock->flags & MOCK_DIRTY_TRACK)) {
1534 max = length / page_size;
1535 tmp = kvzalloc(DIV_ROUND_UP(max, BITS_PER_LONG) * sizeof(unsigned long),
1536 GFP_KERNEL_ACCOUNT);
1542 if (copy_from_user(tmp, uptr, DIV_ROUND_UP(max, BITS_PER_BYTE))) {
1547 for (i = 0; i < max; i++) {
1548 unsigned long cur = iova + i * page_size;
1551 if (!test_bit(i, (unsigned long *)tmp))
1554 ent = xa_load(&mock->pfns, cur / page_size);
1558 val = xa_to_value(ent) | MOCK_PFN_DIRTY_IOVA;
1559 old = xa_store(&mock->pfns, cur / page_size,
1560 xa_mk_value(val), GFP_KERNEL);
1561 WARN_ON_ONCE(ent != old);
1566 cmd->dirty.out_nr_dirty = count;
1567 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1571 iommufd_put_object(ucmd->ictx, &hwpt->obj);
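/*
 * Worked example for the bitmap walk above (illustrative): with
 * page_size == PAGE_SIZE and length == 8 * PAGE_SIZE, one byte is read from
 * uptr; if bits 0 and 3 are set and both IOVAs are present in the mock page
 * table, the entries backing iova and iova + 3 * PAGE_SIZE get
 * MOCK_PFN_DIRTY_IOVA OR-ed in and out_nr_dirty is reported as 2. Bits whose
 * IOVA has no xarray entry are silently skipped.
 */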
1575 static int iommufd_test_trigger_iopf(struct iommufd_ucmd *ucmd,
1576 struct iommu_test_cmd *cmd)
1578 struct iopf_fault event = { };
1579 struct iommufd_device *idev;
1581 idev = iommufd_get_device(ucmd, cmd->trigger_iopf.dev_id);
1583 return PTR_ERR(idev);
1585 event.fault.prm.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
1586 if (cmd->trigger_iopf.pasid != IOMMU_NO_PASID)
1587 event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
1588 event.fault.type = IOMMU_FAULT_PAGE_REQ;
1589 event.fault.prm.addr = cmd->trigger_iopf.addr;
1590 event.fault.prm.pasid = cmd->trigger_iopf.pasid;
1591 event.fault.prm.grpid = cmd->trigger_iopf.grpid;
1592 event.fault.prm.perm = cmd->trigger_iopf.perm;
1594 iommu_report_device_fault(idev->dev, &event);
1595 iommufd_put_object(ucmd->ictx, &idev->obj);
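/*
 * Illustrative sketch of the command that lands here (field names as read
 * above, exact uAPI layout hedged):
 *
 *	struct iommu_test_cmd cmd = {
 *		.op = IOMMU_TEST_OP_TRIGGER_IOPF,
 *		.trigger_iopf = {
 *			.dev_id = idev_id,
 *			.pasid = IOMMU_NO_PASID,
 *			.grpid = 1,
 *			.perm = 0,
 *			.addr = 0x1000,
 *		},
 *	};
 *
 * which is reported to the fault path as a last-page IOMMU_FAULT_PAGE_REQ
 * via iommu_report_device_fault().
 */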
1600 void iommufd_selftest_destroy(struct iommufd_object *obj)
1602 struct selftest_obj *sobj = to_selftest_obj(obj);
1604 switch (sobj->type) {
1606 iommufd_device_detach(sobj->idev.idev);
1607 iommufd_device_unbind(sobj->idev.idev);
1608 mock_dev_destroy(sobj->idev.mock_dev);
1613 int iommufd_test(struct iommufd_ucmd *ucmd)
1615 struct iommu_test_cmd *cmd = ucmd->cmd;
1618 case IOMMU_TEST_OP_ADD_RESERVED:
1619 return iommufd_test_add_reserved(ucmd, cmd->id,
1620 cmd->add_reserved.start,
1621 cmd->add_reserved.length);
1622 case IOMMU_TEST_OP_MOCK_DOMAIN:
1623 case IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS:
1624 return iommufd_test_mock_domain(ucmd, cmd);
1625 case IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE:
1626 return iommufd_test_mock_domain_replace(
1627 ucmd, cmd->id, cmd->mock_domain_replace.pt_id, cmd);
1628 case IOMMU_TEST_OP_MD_CHECK_MAP:
1629 return iommufd_test_md_check_pa(
1630 ucmd, cmd->id, cmd->check_map.iova,
1631 cmd->check_map.length,
1632 u64_to_user_ptr(cmd->check_map.uptr));
1633 case IOMMU_TEST_OP_MD_CHECK_REFS:
1634 return iommufd_test_md_check_refs(
1635 ucmd, u64_to_user_ptr(cmd->check_refs.uptr),
1636 cmd->check_refs.length, cmd->check_refs.refs);
1637 case IOMMU_TEST_OP_MD_CHECK_IOTLB:
1638 return iommufd_test_md_check_iotlb(ucmd, cmd->id,
1639 cmd->check_iotlb.id,
1640 cmd->check_iotlb.iotlb);
1641 case IOMMU_TEST_OP_DEV_CHECK_CACHE:
1642 return iommufd_test_dev_check_cache(ucmd, cmd->id,
1643 cmd->check_dev_cache.id,
1644 cmd->check_dev_cache.cache);
1645 case IOMMU_TEST_OP_CREATE_ACCESS:
1646 return iommufd_test_create_access(ucmd, cmd->id,
1647 cmd->create_access.flags);
1648 case IOMMU_TEST_OP_ACCESS_REPLACE_IOAS:
1649 return iommufd_test_access_replace_ioas(
1650 ucmd, cmd->id, cmd->access_replace_ioas.ioas_id);
1651 case IOMMU_TEST_OP_ACCESS_PAGES:
1652 return iommufd_test_access_pages(
1653 ucmd, cmd->id, cmd->access_pages.iova,
1654 cmd->access_pages.length,
1655 u64_to_user_ptr(cmd->access_pages.uptr),
1656 cmd->access_pages.flags);
1657 case IOMMU_TEST_OP_ACCESS_RW:
1658 return iommufd_test_access_rw(
1659 ucmd, cmd->id, cmd->access_rw.iova,
1660 cmd->access_rw.length,
1661 u64_to_user_ptr(cmd->access_rw.uptr),
1662 cmd->access_rw.flags);
1663 case IOMMU_TEST_OP_DESTROY_ACCESS_PAGES:
1664 return iommufd_test_access_item_destroy(
1665 ucmd, cmd->id, cmd->destroy_access_pages.access_pages_id);
1666 case IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT:
1667 /* Protect _batch_init(); the limit cannot be less than elmsz */
1668 if (cmd->memory_limit.limit <
1669 sizeof(unsigned long) + sizeof(u32))
1671 iommufd_test_memory_limit = cmd->memory_limit.limit;
1673 case IOMMU_TEST_OP_DIRTY:
1674 return iommufd_test_dirty(ucmd, cmd->id, cmd->dirty.iova,
1676 cmd->dirty.page_size,
1677 u64_to_user_ptr(cmd->dirty.uptr),
1679 case IOMMU_TEST_OP_TRIGGER_IOPF:
1680 return iommufd_test_trigger_iopf(ucmd, cmd);
1686 bool iommufd_should_fail(void)
1688 return should_fail(&fail_iommufd, 1);
1691 int __init iommufd_test_init(void)
1693 struct platform_device_info pdevinfo = {
1694 .name = "iommufd_selftest_iommu",
1699 fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd);
1701 selftest_iommu_dev = platform_device_register_full(&pdevinfo);
1702 if (IS_ERR(selftest_iommu_dev)) {
1703 rc = PTR_ERR(selftest_iommu_dev);
1707 rc = bus_register(&iommufd_mock_bus_type.bus);
1711 rc = iommu_device_sysfs_add(&mock_iommu.iommu_dev,
1712 &selftest_iommu_dev->dev, NULL, "%s",
1713 dev_name(&selftest_iommu_dev->dev));
1717 rc = iommu_device_register_bus(&mock_iommu.iommu_dev, &mock_ops,
1718 &iommufd_mock_bus_type.bus,
1719 &iommufd_mock_bus_type.nb);
1723 refcount_set(&mock_iommu.users, 1);
1724 init_completion(&mock_iommu.complete);
1726 mock_iommu_iopf_queue = iopf_queue_alloc("mock-iopfq");
1731 iommu_device_sysfs_remove(&mock_iommu.iommu_dev);
1733 bus_unregister(&iommufd_mock_bus_type.bus);
1735 platform_device_unregister(selftest_iommu_dev);
1737 debugfs_remove_recursive(dbgfs_root);
1741 static void iommufd_test_wait_for_users(void)
1743 if (refcount_dec_and_test(&mock_iommu.users))
1746 * Time out waiting for iommu device user count to become 0.
1748 * Note that this wait is only an example pattern, since the selftest is
1749 * built into the iommufd module, i.e. it only unplugs the iommu device
1750 * when unloading the module. So this WARN_ON is not expected to trigger,
1751 * since the module cannot be unloaded while any iommufd FDs are still open.
1753 WARN_ON(!wait_for_completion_timeout(&mock_iommu.complete,
1754 msecs_to_jiffies(10000)));
1757 void iommufd_test_exit(void)
1759 if (mock_iommu_iopf_queue) {
1760 iopf_queue_free(mock_iommu_iopf_queue);
1761 mock_iommu_iopf_queue = NULL;
1764 iommufd_test_wait_for_users();
1765 iommu_device_sysfs_remove(&mock_iommu.iommu_dev);
1766 iommu_device_unregister_bus(&mock_iommu.iommu_dev,
1767 &iommufd_mock_bus_type.bus,
1768 &iommufd_mock_bus_type.nb);
1769 bus_unregister(&iommufd_mock_bus_type.bus);
1770 platform_device_unregister(selftest_iommu_dev);
1771 debugfs_remove_recursive(dbgfs_root);