]> Git Repo - linux.git/blame - include/linux/vfio.h
Merge tag 'modules-6.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/mcgrof...
[linux.git] / include / linux / vfio.h
CommitLineData
d2912cb1 1/* SPDX-License-Identifier: GPL-2.0-only */
cba3345c
AW
2/*
3 * VFIO API definition
4 *
5 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
6 * Author: Alex Williamson <[email protected]>
cba3345c
AW
7 */
8#ifndef VFIO_H
9#define VFIO_H
10
cba3345c
AW
11
12#include <linux/iommu.h>
13#include <linux/mm.h>
7e992d69
AM
14#include <linux/workqueue.h>
15#include <linux/poll.h>
607ca46e 16#include <uapi/linux/vfio.h>
80c4b92a 17#include <linux/iova_bitmap.h>
cba3345c 18
ba70a89f 19struct kvm;
a4d1f91d
JG
20struct iommufd_ctx;
21struct iommufd_device;
4741f2e9 22struct iommufd_access;
ba70a89f 23
2fd585f4
JG
/*
 * VFIO devices can be placed in a set, this allows all devices to share this
 * structure and the VFIO core will provide a lock that is held around
 * open_device()/close_device() for all devices in the set.
 */
struct vfio_device_set {
	void *set_id;			/* opaque key naming the set (see vfio_assign_device_set()) */
	struct mutex lock;		/* held around open_device()/close_device() */
	struct list_head device_list;	/* devices in the set; presumably linked via vfio_device.dev_set_list — verify in vfio core */
	unsigned int device_count;	/* number of entries on device_list */
};
35
0bfc6a4e
JG
struct vfio_device {
	struct device *dev;		/* the bus driver's physical/parent device */
	const struct vfio_device_ops *ops;
	/*
	 * mig_ops/log_ops is a static property of the vfio_device which must
	 * be set prior to registering the vfio_device.
	 */
	const struct vfio_migration_ops *mig_ops;	/* live-migration callbacks, optional */
	const struct vfio_log_ops *log_ops;		/* DMA dirty-logging callbacks, optional */
	struct vfio_group *group;
	struct vfio_device_set *dev_set;	/* optional set sharing the open/close lock */
	struct list_head dev_set_list;		/* membership link for dev_set */
	unsigned int migration_flags;
	struct kvm *kvm;			/* associated KVM, set via vfio_file_set_kvm() */

	/* Members below here are private, not for driver use */
	unsigned int index;
	struct device device;	/* device.kref covers object life circle */
	refcount_t refcount;	/* user count on registered device */
	unsigned int open_count;	/* nr of open file descriptors; gates open_device()/close_device() */
	struct completion comp;
	struct list_head group_next;
	struct list_head iommu_entry;
	struct iommufd_access *iommufd_access;
	void (*put_kvm)(struct kvm *kvm);	/* how to drop the reference held on @kvm */
#if IS_ENABLED(CONFIG_IOMMUFD)
	struct iommufd_device *iommufd_device;
	bool iommufd_attached;	/* device currently attached to an IOAS/HWPT */
#endif
};
66
cba3345c
AW
/**
 * struct vfio_device_ops - VFIO bus driver device callbacks
 *
 * @name: Driver name exposed for this device type
 * @init: initialize private fields in device structure
 * @release: Reclaim private fields in device structure
 * @bind_iommufd: Called when binding the device to an iommufd
 * @unbind_iommufd: Opposite of bind_iommufd
 * @attach_ioas: Called when attaching device to an IOAS/HWPT managed by the
 *		 bound iommufd. Undo in unbind_iommufd.
 * @open_device: Called when the first file descriptor is opened for this device
 * @close_device: Opposite of open_device
 * @read: Perform read(2) on device file descriptor
 * @write: Perform write(2) on device file descriptor
 * @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_*
 *	   operations documented below
 * @mmap: Perform mmap(2) on a region of the device file descriptor
 * @request: Request for the bus driver to release the device
 * @match: Optional device name match callback (return: 0 for no-match, >0 for
 *	   match, -errno for abort (ex. match with insufficient or incorrect
 *	   additional args))
 * @dma_unmap: Called when userspace unmaps IOVA from the container
 *	       this device is attached to.
 * @device_feature: Optional, fill in the VFIO_DEVICE_FEATURE ioctl
 */
struct vfio_device_ops {
	char *name;
	int (*init)(struct vfio_device *vdev);
	void (*release)(struct vfio_device *vdev);
	int (*bind_iommufd)(struct vfio_device *vdev,
			    struct iommufd_ctx *ictx, u32 *out_device_id);
	void (*unbind_iommufd)(struct vfio_device *vdev);
	int (*attach_ioas)(struct vfio_device *vdev, u32 *pt_id);
	int (*open_device)(struct vfio_device *vdev);
	void (*close_device)(struct vfio_device *vdev);
	ssize_t (*read)(struct vfio_device *vdev, char __user *buf,
			size_t count, loff_t *ppos);
	ssize_t (*write)(struct vfio_device *vdev, const char __user *buf,
			 size_t count, loff_t *size);
	long (*ioctl)(struct vfio_device *vdev, unsigned int cmd,
		      unsigned long arg);
	int (*mmap)(struct vfio_device *vdev, struct vm_area_struct *vma);
	void (*request)(struct vfio_device *vdev, unsigned int count);
	int (*match)(struct vfio_device *vdev, char *buf);
	void (*dma_unmap)(struct vfio_device *vdev, u64 iova, u64 length);
	int (*device_feature)(struct vfio_device *device, u32 flags,
			      void __user *arg, size_t argsz);
};
114
a4d1f91d
JG
#if IS_ENABLED(CONFIG_IOMMUFD)
/* Default implementations for physically-backed (IOMMU-translated) devices */
int vfio_iommufd_physical_bind(struct vfio_device *vdev,
			       struct iommufd_ctx *ictx, u32 *out_device_id);
void vfio_iommufd_physical_unbind(struct vfio_device *vdev);
int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id);
/* Default implementations for mdev-style devices using iommufd access emulation */
int vfio_iommufd_emulated_bind(struct vfio_device *vdev,
			       struct iommufd_ctx *ictx, u32 *out_device_id);
void vfio_iommufd_emulated_unbind(struct vfio_device *vdev);
int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id);
#else
/*
 * With CONFIG_IOMMUFD disabled the helpers become NULL function pointers of
 * the correct type, so drivers can assign them into struct vfio_device_ops
 * unconditionally without #ifdefs at each use site.
 */
#define vfio_iommufd_physical_bind                                      \
	((int (*)(struct vfio_device *vdev, struct iommufd_ctx *ictx,   \
		  u32 *out_device_id)) NULL)
#define vfio_iommufd_physical_unbind                                    \
	((void (*)(struct vfio_device *vdev)) NULL)
#define vfio_iommufd_physical_attach_ioas                               \
	((int (*)(struct vfio_device *vdev, u32 *pt_id)) NULL)
#define vfio_iommufd_emulated_bind                                      \
	((int (*)(struct vfio_device *vdev, struct iommufd_ctx *ictx,   \
		  u32 *out_device_id)) NULL)
#define vfio_iommufd_emulated_unbind                                    \
	((void (*)(struct vfio_device *vdev)) NULL)
#define vfio_iommufd_emulated_attach_ioas                               \
	((int (*)(struct vfio_device *vdev, u32 *pt_id)) NULL)
#endif
140
6e97eba8
YH
/**
 * struct vfio_migration_ops - VFIO bus driver device migration callbacks
 *
 * @migration_set_state: Optional callback to change the migration state for
 *         devices that support migration. It's mandatory for
 *         VFIO_DEVICE_FEATURE_MIGRATION migration support.
 *         The returned FD is used for data transfer according to the FSM
 *         definition. The driver is responsible to ensure that FD reaches end
 *         of stream or error whenever the migration FSM leaves a data transfer
 *         state or before close_device() returns.
 * @migration_get_state: Optional callback to get the migration state for
 *         devices that support migration. It's mandatory for
 *         VFIO_DEVICE_FEATURE_MIGRATION migration support.
 * @migration_get_data_size: Optional callback to get the estimated data
 *         length that will be required to complete stop copy. It's mandatory
 *         for VFIO_DEVICE_FEATURE_MIGRATION migration support.
 */
struct vfio_migration_ops {
	struct file *(*migration_set_state)(
		struct vfio_device *device,
		enum vfio_device_mig_state new_state);
	int (*migration_get_state)(struct vfio_device *device,
				   enum vfio_device_mig_state *curr_state);
	int (*migration_get_data_size)(struct vfio_device *device,
				       unsigned long *stop_copy_length);
};
165
80c4b92a
YH
/**
 * struct vfio_log_ops - VFIO bus driver device DMA logging callbacks
 *
 * @log_start: Optional callback to ask the device start DMA logging.
 * @log_stop: Optional callback to ask the device stop DMA logging.
 * @log_read_and_clear: Optional callback to ask the device read
 *         and clear the dirty DMAs in some given range.
 *
 * The vfio core implementation of the DEVICE_FEATURE_DMA_LOGGING_ set
 * of features does not track logging state relative to the device,
 * therefore the device implementation of vfio_log_ops must handle
 * arbitrary user requests. This includes rejecting subsequent calls
 * to log_start without an intervening log_stop, as well as graceful
 * handling of log_stop and log_read_and_clear from invalid states.
 */
struct vfio_log_ops {
	int (*log_start)(struct vfio_device *device,
		struct rb_root_cached *ranges, u32 nnodes, u64 *page_size);
	int (*log_stop)(struct vfio_device *device);
	int (*log_read_and_clear)(struct vfio_device *device,
		unsigned long iova, unsigned long length,
		struct iova_bitmap *dirty);
};
187
445ad495
JG
/**
 * vfio_check_feature - Validate user input for the VFIO_DEVICE_FEATURE ioctl
 * @flags: Arg from the device_feature op
 * @argsz: Arg from the device_feature op
 * @supported_ops: Combination of VFIO_DEVICE_FEATURE_GET and SET the driver
 *                 supports
 * @minsz: Minimum data size the driver accepts
 *
 * For use in a driver's device_feature op. Checks that the inputs to the
 * VFIO_DEVICE_FEATURE ioctl are correct for the driver's feature. Returns 1 if
 * the driver should execute the get or set, otherwise the relevant
 * value should be returned.
 */
static inline int vfio_check_feature(u32 flags, size_t argsz, u32 supported_ops,
				     size_t minsz)
{
	u32 ops = flags & (VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_SET);

	/* The user may only request operations the driver supports */
	if (ops & ~supported_ops)
		return -EINVAL;
	/* A PROBE-only call validates support without executing anything */
	if (flags & VFIO_DEVICE_FEATURE_PROBE)
		return 0;
	/* Without PROBE one of GET or SET must be requested */
	if (!ops)
		return -EINVAL;
	/* The user's payload must be at least as large as the driver needs */
	if (argsz < minsz)
		return -EINVAL;
	return 1;
}
216
cb9ff3f3
KT
struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev,
				       const struct vfio_device_ops *ops);
/*
 * Allocate a driver structure that embeds a struct vfio_device as @member,
 * returning a pointer to the containing dev_struct.  BUILD_BUG_ON_ZERO()
 * enforces at compile time that @member sits at offset 0 of dev_struct.
 */
#define vfio_alloc_device(dev_struct, member, dev, ops)				\
	container_of(_vfio_alloc_device(sizeof(struct dev_struct) +		\
					BUILD_BUG_ON_ZERO(offsetof(		\
						struct dev_struct, member)),	\
					dev, ops),				\
		     struct dev_struct, member)
225
cb9ff3f3
KT
/*
 * Drop a reference on a vfio_device obtained from vfio_alloc_device(); the
 * embedded device.kref covers the object's lifetime, so the final put frees
 * the containing structure.
 */
static inline void vfio_put_device(struct vfio_device *device)
{
	put_device(&device->device);
}
230
0bfc6a4e 231int vfio_register_group_dev(struct vfio_device *device);
c68ea0d0 232int vfio_register_emulated_iommu_dev(struct vfio_device *device);
0bfc6a4e 233void vfio_unregister_group_dev(struct vfio_device *device);
cba3345c 234
2fd585f4 235int vfio_assign_device_set(struct vfio_device *device, void *set_id);
5cd189e4 236unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set);
2fd585f4 237
115dcec6
JG
238int vfio_mig_get_next_state(struct vfio_device *device,
239 enum vfio_device_mig_state cur_fsm,
240 enum vfio_device_mig_state new_fsm,
241 enum vfio_device_mig_state *next_fsm);
242
6cdd9782
AK
/*
 * External user API: helpers for other kernel subsystems that are handed a
 * VFIO file descriptor from userspace (e.g. KVM — see vfio_file_set_kvm()).
 */
struct iommu_group *vfio_file_iommu_group(struct file *file);
bool vfio_file_is_group(struct file *file);
bool vfio_file_enforced_coherent(struct file *file);
void vfio_file_set_kvm(struct file *file, struct kvm *kvm);
bool vfio_file_has_dev(struct file *file, struct vfio_device *device);

/* Upper bound on pages per vfio_pin_pages() call */
#define VFIO_PIN_PAGES_MAX_ENTRIES	(PAGE_SIZE/sizeof(unsigned long))

/* Pin/unpin user pages by IOVA, and read/write device-visible memory */
int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
		   int npage, int prot, struct page **pages);
void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage);
int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova,
		void *data, size_t len, bool write);
8d46c0cc 259
d7a8d5ed
AW
/*
 * Sub-module helpers
 */
/* Growable buffer of capability chain entries for VFIO_*_INFO ioctls */
struct vfio_info_cap {
	struct vfio_info_cap_header *buf;	/* chain of capability headers */
	size_t size;				/* total bytes used in buf */
};
struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
					       size_t size, u16 id,
					       u16 version);
/* Adjust the chain's next offsets by @offset, e.g. after copying to user */
void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset);

int vfio_info_add_capability(struct vfio_info_cap *caps,
			     struct vfio_info_cap_header *cap, size_t size);

int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr,
				       int num_irqs, int max_irq_type,
				       size_t *data_size);
c747f08a 278
7e992d69
AM
/*
 * IRQfd - generic
 */
struct virqfd {
	void *opaque;			/* caller context passed to handler/thread */
	struct eventfd_ctx *eventfd;	/* eventfd being waited on */
	int (*handler)(void *, void *);	/* called with (opaque, data) on signal */
	void (*thread)(void *, void *);	/* deferred work variant of handler */
	void *data;
	struct work_struct inject;	/* workqueue item running thread() */
	wait_queue_entry_t wait;	/* waitqueue entry hooked to the eventfd */
	poll_table pt;
	struct work_struct shutdown;	/* deferred teardown work */
	struct virqfd **pvirqfd;	/* caller's slot, cleared on shutdown */
};

/* Arm a virqfd on @fd; disable tears it down and clears *pvirqfd */
int vfio_virqfd_enable(void *opaque, int (*handler)(void *, void *),
		       void (*thread)(void *, void *), void *data,
		       struct virqfd **pvirqfd, int fd);
void vfio_virqfd_disable(struct virqfd **pvirqfd);
7e992d69 299
cba3345c 300#endif /* VFIO_H */
This page took 1.583735 seconds and 4 git commands to generate.