/*
 * VFIO API definition
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef VFIO_H
#define VFIO_H

#include <linux/types.h>
#include <linux/ioctl.h>

#define VFIO_API_VERSION 0


/* Kernel & User level defines for VFIO IOCTLs. */

/* Extensions */

#define VFIO_TYPE1_IOMMU 1
#define VFIO_SPAPR_TCE_IOMMU 2
#define VFIO_TYPE1v2_IOMMU 3
/*
 * IOMMU enforces DMA cache coherence (ex. PCIe NoSnoop stripping). This
 * capability is subject to change as groups are added or removed.
 */
#define VFIO_DMA_CC_IOMMU 4

/* Check if EEH is supported */
#define VFIO_EEH 5

/* Two-stage IOMMU */
#define VFIO_TYPE1_NESTING_IOMMU 6 /* Implies v2 */

#define VFIO_SPAPR_TCE_v2_IOMMU 7

/*
 * The IOCTL interface is designed for extensibility by embedding the
 * structure length (argsz) and flags into structures passed between
 * kernel and userspace. We therefore use the _IO() macro for these
 * defines to avoid implicitly embedding a size into the ioctl request.
 * As structure fields are added, argsz will increase to match and flag
 * bits will be defined to indicate additional fields with valid data.
 * It's *always* the caller's responsibility to indicate the size of
 * the structure passed by setting argsz appropriately.
 */

#define VFIO_TYPE (';')
#define VFIO_BASE 100
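
/*
 * Example (illustrative sketch, not part of the UAPI): the argsz
 * convention above means userspace fills in the structure size before
 * every call. Using struct vfio_device_info defined later in this
 * header, and a device_fd assumed to come from VFIO_GROUP_GET_DEVICE_FD:
 *
 *      struct vfio_device_info info = { .argsz = sizeof(info) };
 *
 *      ioctl(device_fd, VFIO_DEVICE_GET_INFO, &info);
 */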

/* -------- IOCTLs for VFIO file descriptor (/dev/vfio/vfio) -------- */

/**
 * VFIO_GET_API_VERSION - _IO(VFIO_TYPE, VFIO_BASE + 0)
 *
 * Report the version of the VFIO API. This allows us to bump the entire
 * API version should we later need to add or change features in incompatible
 * ways.
 * Return: VFIO_API_VERSION
 * Availability: Always
 */
#define VFIO_GET_API_VERSION _IO(VFIO_TYPE, VFIO_BASE + 0)

/**
 * VFIO_CHECK_EXTENSION - _IOW(VFIO_TYPE, VFIO_BASE + 1, __u32)
 *
 * Check whether an extension is supported.
 * Return: 0 if not supported, 1 (or some other positive integer) if supported.
 * Availability: Always
 */
#define VFIO_CHECK_EXTENSION _IO(VFIO_TYPE, VFIO_BASE + 1)
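
/*
 * Example (illustrative sketch, not part of the UAPI): a typical
 * userspace probe of the container, assuming the usual userspace
 * headers (<fcntl.h>, <sys/ioctl.h>) and with error handling omitted.
 * An unexpected API version or a missing extension should abort setup.
 *
 *      int container = open("/dev/vfio/vfio", O_RDWR);
 *
 *      if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
 *              return -1;
 *
 *      if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
 *              return -1;
 */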

/**
 * VFIO_SET_IOMMU - _IOW(VFIO_TYPE, VFIO_BASE + 2, __s32)
 *
 * Set the iommu to the given type. The type must be supported by an
 * iommu driver as verified by calling CHECK_EXTENSION using the same
 * type. A group must be set to this file descriptor before this
 * ioctl is available. The IOMMU interfaces enabled by this call are
 * specific to the value set.
 * Return: 0 on success, -errno on failure
 * Availability: When VFIO group attached
 */
#define VFIO_SET_IOMMU _IO(VFIO_TYPE, VFIO_BASE + 2)

/* -------- IOCTLs for GROUP file descriptors (/dev/vfio/$GROUP) -------- */

/**
 * VFIO_GROUP_GET_STATUS - _IOR(VFIO_TYPE, VFIO_BASE + 3,
 *                              struct vfio_group_status)
 *
 * Retrieve information about the group. Fills in provided
 * struct vfio_group_status. Caller sets argsz.
 * Return: 0 on success, -errno on failure.
 * Availability: Always
 */
struct vfio_group_status {
        __u32 argsz;
        __u32 flags;
#define VFIO_GROUP_FLAGS_VIABLE (1 << 0)
#define VFIO_GROUP_FLAGS_CONTAINER_SET (1 << 1)
};
#define VFIO_GROUP_GET_STATUS _IO(VFIO_TYPE, VFIO_BASE + 3)
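
/*
 * Example (illustrative sketch, not part of the UAPI): checking that a
 * group is viable before using it, i.e. that all devices in the group
 * are bound to a VFIO driver. The group number (26) is a made-up value
 * taken from the device's iommu_group sysfs link.
 *
 *      struct vfio_group_status status = { .argsz = sizeof(status) };
 *      int group = open("/dev/vfio/26", O_RDWR);
 *
 *      ioctl(group, VFIO_GROUP_GET_STATUS, &status);
 *      if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
 *              return -1;
 */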

/**
 * VFIO_GROUP_SET_CONTAINER - _IOW(VFIO_TYPE, VFIO_BASE + 4, __s32)
 *
 * Set the container for the VFIO group to the open VFIO file
 * descriptor provided. Groups may only belong to a single
 * container. Containers may, at their discretion, support multiple
 * groups. Only when a container is set are all of the interfaces
 * of the VFIO file descriptor and the VFIO group file descriptor
 * available to the user.
 * Return: 0 on success, -errno on failure.
 * Availability: Always
 */
#define VFIO_GROUP_SET_CONTAINER _IO(VFIO_TYPE, VFIO_BASE + 4)
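
/*
 * Example (illustrative sketch, not part of the UAPI): attaching a
 * group to a container and then selecting the IOMMU backend, reusing
 * the container and group descriptors from the sketches above. Note
 * the ordering: VFIO_SET_IOMMU only becomes available once a group is
 * attached to the container.
 *
 *      ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *      ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 */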

/**
 * VFIO_GROUP_UNSET_CONTAINER - _IO(VFIO_TYPE, VFIO_BASE + 5)
 *
 * Remove the group from the attached container. This is the
 * opposite of the SET_CONTAINER call and returns the group to
 * an initial state. All device file descriptors must be released
 * prior to calling this interface. When removing the last group
 * from a container, the IOMMU will be disabled and all state lost,
 * effectively also returning the VFIO file descriptor to an initial
 * state.
 * Return: 0 on success, -errno on failure.
 * Availability: When attached to container
 */
#define VFIO_GROUP_UNSET_CONTAINER _IO(VFIO_TYPE, VFIO_BASE + 5)

/**
 * VFIO_GROUP_GET_DEVICE_FD - _IOW(VFIO_TYPE, VFIO_BASE + 6, char)
 *
 * Return a new file descriptor for the device object described by
 * the provided string. The string should match a device listed in
 * the devices subdirectory of the IOMMU group sysfs entry. The
 * group containing the device must already be added to this context.
 * Return: new file descriptor on success, -errno on failure.
 * Availability: When attached to container
 */
#define VFIO_GROUP_GET_DEVICE_FD _IO(VFIO_TYPE, VFIO_BASE + 6)
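
/*
 * Example (illustrative sketch, not part of the UAPI): the string
 * passed to VFIO_GROUP_GET_DEVICE_FD is the device name as it appears
 * under /sys/kernel/iommu_groups/$GROUP/devices/; the PCI address used
 * here is only an example.
 *
 *      int device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 */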

/* --------------- IOCTLs for DEVICE file descriptors --------------- */

/**
 * VFIO_DEVICE_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 7,
 *                             struct vfio_device_info)
 *
 * Retrieve information about the device. Fills in provided
 * struct vfio_device_info. Caller sets argsz.
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_info {
        __u32 argsz;
        __u32 flags;
#define VFIO_DEVICE_FLAGS_RESET (1 << 0)        /* Device supports reset */
#define VFIO_DEVICE_FLAGS_PCI (1 << 1)          /* vfio-pci device */
#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2)     /* vfio-platform device */
#define VFIO_DEVICE_FLAGS_AMBA (1 << 3)         /* vfio-amba device */
        __u32 num_regions;      /* Max region index + 1 */
        __u32 num_irqs;         /* Max IRQ index + 1 */
};
#define VFIO_DEVICE_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 7)
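
/*
 * Example (illustrative sketch, not part of the UAPI): querying the
 * device before walking its regions and IRQs, continuing the sketches
 * above. num_regions and num_irqs then bound the indexes that may be
 * passed to the GET_REGION_INFO and GET_IRQ_INFO ioctls below.
 *
 *      struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
 *
 *      ioctl(device, VFIO_DEVICE_GET_INFO, &device_info);
 */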

/**
 * VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
 *                                     struct vfio_region_info)
 *
 * Retrieve information about a device region. Caller provides
 * struct vfio_region_info with index value set. Caller sets argsz.
 * Implementation of region mapping is bus driver specific. This is
 * intended to describe MMIO, I/O port, as well as bus specific
 * regions (ex. PCI config space). Zero sized regions may be used
 * to describe unimplemented regions (ex. unimplemented PCI BARs).
 * Return: 0 on success, -errno on failure.
 */
struct vfio_region_info {
        __u32 argsz;
        __u32 flags;
#define VFIO_REGION_INFO_FLAG_READ (1 << 0)     /* Region supports read */
#define VFIO_REGION_INFO_FLAG_WRITE (1 << 1)    /* Region supports write */
#define VFIO_REGION_INFO_FLAG_MMAP (1 << 2)     /* Region supports mmap */
        __u32 index;            /* Region index */
        __u32 resv;             /* Reserved for alignment */
        __u64 size;             /* Region size (bytes) */
        __u64 offset;           /* Region offset from start of device fd */
};
#define VFIO_DEVICE_GET_REGION_INFO _IO(VFIO_TYPE, VFIO_BASE + 8)
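
/*
 * Example (illustrative sketch, not part of the UAPI): region contents
 * are accessed through the device file descriptor at region_info.offset.
 * Here the first dword of PCI config space (vendor/device ID) is read
 * via pread(); the use of pread() and offset 0 are userspace-side
 * assumptions, not part of this header.
 *
 *      struct vfio_region_info reg = { .argsz = sizeof(reg) };
 *      __u32 id;
 *
 *      reg.index = VFIO_PCI_CONFIG_REGION_INDEX;
 *      ioctl(device, VFIO_DEVICE_GET_REGION_INFO, &reg);
 *      pread(device, &id, sizeof(id), reg.offset);
 */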

/**
 * VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
 *                                  struct vfio_irq_info)
 *
 * Retrieve information about a device IRQ. Caller provides
 * struct vfio_irq_info with index value set. Caller sets argsz.
 * Implementation of IRQ mapping is bus driver specific. Indexes
 * using multiple IRQs are primarily intended to support MSI-like
 * interrupt blocks. Zero count irq blocks may be used to describe
 * unimplemented interrupt types.
 *
 * The EVENTFD flag indicates the interrupt index supports eventfd based
 * signaling.
 *
 * The MASKABLE flag indicates the index supports MASK and UNMASK
 * actions described below.
 *
 * AUTOMASKED indicates that after signaling, the interrupt line is
 * automatically masked by VFIO and the user needs to unmask the line
 * to receive new interrupts. This is primarily intended to distinguish
 * level triggered interrupts.
 *
 * The NORESIZE flag indicates that the interrupt lines within the index
 * are setup as a set and new subindexes cannot be enabled without first
 * disabling the entire index. This is used for interrupts like PCI MSI
 * and MSI-X where the driver may only use a subset of the available
 * indexes, but VFIO needs to enable a specific number of vectors
 * upfront. In the case of MSI-X, where the user can enable MSI-X and
 * then add and unmask vectors, it's up to userspace to make the decision
 * whether to allocate the maximum supported number of vectors or tear
 * down setup and incrementally increase the vectors as each is enabled.
 */
struct vfio_irq_info {
        __u32 argsz;
        __u32 flags;
#define VFIO_IRQ_INFO_EVENTFD (1 << 0)
#define VFIO_IRQ_INFO_MASKABLE (1 << 1)
#define VFIO_IRQ_INFO_AUTOMASKED (1 << 2)
#define VFIO_IRQ_INFO_NORESIZE (1 << 3)
        __u32 index;            /* IRQ index */
        __u32 count;            /* Number of IRQs within this index */
};
#define VFIO_DEVICE_GET_IRQ_INFO _IO(VFIO_TYPE, VFIO_BASE + 9)
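
/*
 * Example (illustrative sketch, not part of the UAPI): discovering how
 * many MSI-X vectors a vfio-pci device exposes; count is 0 if MSI-X is
 * unimplemented on this device.
 *
 *      struct vfio_irq_info irq = { .argsz = sizeof(irq) };
 *
 *      irq.index = VFIO_PCI_MSIX_IRQ_INDEX;
 *      ioctl(device, VFIO_DEVICE_GET_IRQ_INFO, &irq);
 */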

/**
 * VFIO_DEVICE_SET_IRQS - _IOW(VFIO_TYPE, VFIO_BASE + 10, struct vfio_irq_set)
 *
 * Set signaling, masking, and unmasking of interrupts. Caller provides
 * struct vfio_irq_set with all fields set. 'start' and 'count' indicate
 * the range of subindexes being specified.
 *
 * The DATA flags specify the type of data provided. If DATA_NONE, the
 * operation performs the specified action immediately on the specified
 * interrupt(s). For example, to unmask AUTOMASKED interrupt [0,0]:
 * flags = (DATA_NONE|ACTION_UNMASK), index = 0, start = 0, count = 1.
 *
 * DATA_BOOL allows sparse support for the same on arrays of interrupts.
 * For example, to mask interrupts [0,1] and [0,3] (but not [0,2]):
 * flags = (DATA_BOOL|ACTION_MASK), index = 0, start = 1, count = 3,
 * data = {1,0,1}
 *
 * DATA_EVENTFD binds the specified ACTION to the provided __s32 eventfd.
 * A value of -1 can be used to either de-assign interrupts if already
 * assigned or skip un-assigned interrupts. For example, to set an eventfd
 * to be triggered for interrupts [0,0] and [0,2]:
 * flags = (DATA_EVENTFD|ACTION_TRIGGER), index = 0, start = 0, count = 3,
 * data = {fd1, -1, fd2}
 * If index [0,1] is previously set, two count = 1 ioctl calls would be
 * required to set [0,0] and [0,2] without changing [0,1].
 *
 * Once a signaling mechanism is set, DATA_BOOL or DATA_NONE can be used
 * with ACTION_TRIGGER to perform kernel level interrupt loopback testing
 * from userspace (ie. simulate hardware triggering).
 *
 * Setting of an event triggering mechanism to userspace for ACTION_TRIGGER
 * enables the interrupt index for the device. Individual subindex interrupts
 * can be disabled using the -1 value for DATA_EVENTFD or the index can be
 * disabled as a whole with: flags = (DATA_NONE|ACTION_TRIGGER), count = 0.
 *
 * Note that ACTION_[UN]MASK specify user->kernel signaling (irqfds) while
 * ACTION_TRIGGER specifies kernel->user signaling.
 */
struct vfio_irq_set {
        __u32 argsz;
        __u32 flags;
#define VFIO_IRQ_SET_DATA_NONE (1 << 0)         /* Data not present */
#define VFIO_IRQ_SET_DATA_BOOL (1 << 1)         /* Data is bool (u8) */
#define VFIO_IRQ_SET_DATA_EVENTFD (1 << 2)      /* Data is eventfd (s32) */
#define VFIO_IRQ_SET_ACTION_MASK (1 << 3)       /* Mask interrupt */
#define VFIO_IRQ_SET_ACTION_UNMASK (1 << 4)     /* Unmask interrupt */
#define VFIO_IRQ_SET_ACTION_TRIGGER (1 << 5)    /* Trigger interrupt */
        __u32 index;
        __u32 start;
        __u32 count;
        __u8 data[];
};
#define VFIO_DEVICE_SET_IRQS _IO(VFIO_TYPE, VFIO_BASE + 10)
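
/*
 * Example (illustrative sketch, not part of the UAPI): binding an
 * eventfd as the trigger for a single MSI vector. eventfd(2) and
 * malloc() are userspace-side assumptions; argsz covers the fixed
 * header plus the variable-length data[] payload.
 *
 *      int efd = eventfd(0, 0);
 *      size_t sz = sizeof(struct vfio_irq_set) + sizeof(__s32);
 *      struct vfio_irq_set *set = malloc(sz);
 *
 *      set->argsz = sz;
 *      set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *      set->index = VFIO_PCI_MSI_IRQ_INDEX;
 *      set->start = 0;
 *      set->count = 1;
 *      memcpy(set->data, &efd, sizeof(__s32));
 *      ioctl(device, VFIO_DEVICE_SET_IRQS, set);
 */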

#define VFIO_IRQ_SET_DATA_TYPE_MASK     (VFIO_IRQ_SET_DATA_NONE | \
                                         VFIO_IRQ_SET_DATA_BOOL | \
                                         VFIO_IRQ_SET_DATA_EVENTFD)
#define VFIO_IRQ_SET_ACTION_TYPE_MASK   (VFIO_IRQ_SET_ACTION_MASK | \
                                         VFIO_IRQ_SET_ACTION_UNMASK | \
                                         VFIO_IRQ_SET_ACTION_TRIGGER)
/**
 * VFIO_DEVICE_RESET - _IO(VFIO_TYPE, VFIO_BASE + 11)
 *
 * Reset a device.
 */
#define VFIO_DEVICE_RESET _IO(VFIO_TYPE, VFIO_BASE + 11)

/*
 * The VFIO-PCI bus driver makes use of the following fixed region and
 * IRQ index mapping. Unimplemented regions return a size of zero.
 * Unimplemented IRQ types return a count of zero.
 */

enum {
        VFIO_PCI_BAR0_REGION_INDEX,
        VFIO_PCI_BAR1_REGION_INDEX,
        VFIO_PCI_BAR2_REGION_INDEX,
        VFIO_PCI_BAR3_REGION_INDEX,
        VFIO_PCI_BAR4_REGION_INDEX,
        VFIO_PCI_BAR5_REGION_INDEX,
        VFIO_PCI_ROM_REGION_INDEX,
        VFIO_PCI_CONFIG_REGION_INDEX,
        /*
         * Expose VGA regions defined for PCI base class 03, subclass 00.
         * This includes I/O port ranges 0x3b0 to 0x3bb and 0x3c0 to 0x3df
         * as well as the MMIO range 0xa0000 to 0xbffff. Each implemented
         * range is found at its identity-mapped offset from the region
         * offset, for example 0x3b0 is region_info.offset + 0x3b0. Areas
         * between described ranges are unimplemented.
         */
        VFIO_PCI_VGA_REGION_INDEX,
        VFIO_PCI_NUM_REGIONS
};

enum {
        VFIO_PCI_INTX_IRQ_INDEX,
        VFIO_PCI_MSI_IRQ_INDEX,
        VFIO_PCI_MSIX_IRQ_INDEX,
        VFIO_PCI_ERR_IRQ_INDEX,
        VFIO_PCI_REQ_IRQ_INDEX,
        VFIO_PCI_NUM_IRQS
};

/**
 * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
 *                                            struct vfio_pci_hot_reset_info)
 *
 * Return: 0 on success, -errno on failure:
 *      -ENOSPC = insufficient buffer, -ENODEV = unsupported for device.
 */
struct vfio_pci_dependent_device {
        __u32 group_id;
        __u16 segment;
        __u8 bus;
        __u8 devfn;             /* Use PCI_SLOT/PCI_FUNC */
};

struct vfio_pci_hot_reset_info {
        __u32 argsz;
        __u32 flags;
        __u32 count;
        struct vfio_pci_dependent_device devices[];
};

#define VFIO_DEVICE_GET_PCI_HOT_RESET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)

/**
 * VFIO_DEVICE_PCI_HOT_RESET - _IOW(VFIO_TYPE, VFIO_BASE + 13,
 *                                  struct vfio_pci_hot_reset)
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_pci_hot_reset {
        __u32 argsz;
        __u32 flags;
        __u32 count;
        __s32 group_fds[];
};

#define VFIO_DEVICE_PCI_HOT_RESET _IO(VFIO_TYPE, VFIO_BASE + 13)
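
/*
 * Example (illustrative sketch, not part of the UAPI): the hot reset
 * info ioctl is typically called twice. The first call with only the
 * fixed-size header fails with ENOSPC but fills in count, after which
 * the caller allocates a large enough buffer and repeats the call.
 * malloc() and errno are userspace-side assumptions.
 *
 *      struct vfio_pci_hot_reset_info info = { .argsz = sizeof(info) };
 *      struct vfio_pci_hot_reset_info *full;
 *
 *      if (ioctl(device, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, &info) &&
 *          errno == ENOSPC) {
 *              size_t sz = sizeof(*full) +
 *                      info.count * sizeof(struct vfio_pci_dependent_device);
 *              full = malloc(sz);
 *              full->argsz = sz;
 *              ioctl(device, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, full);
 *      }
 */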

/* -------- API for Type1 VFIO IOMMU -------- */

/**
 * VFIO_IOMMU_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 12,
 *                            struct vfio_iommu_type1_info)
 *
 * Retrieve information about the IOMMU object. Fills in provided
 * struct vfio_iommu_type1_info. Caller sets argsz.
 *
 * XXX Should we do these by CHECK_EXTENSION too?
 */
struct vfio_iommu_type1_info {
        __u32 argsz;
        __u32 flags;
#define VFIO_IOMMU_INFO_PGSIZES (1 << 0)        /* supported page sizes info */
        __u64 iova_pgsizes;     /* Bitmap of supported page sizes */
};

#define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
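
/*
 * Example (illustrative sketch, not part of the UAPI): querying the
 * Type1 IOMMU for its supported IOVA page sizes after VFIO_SET_IOMMU.
 * If VFIO_IOMMU_INFO_PGSIZES is set in flags, iova_pgsizes is valid.
 *
 *      struct vfio_iommu_type1_info iommu_info = {
 *              .argsz = sizeof(iommu_info),
 *      };
 *
 *      ioctl(container, VFIO_IOMMU_GET_INFO, &iommu_info);
 */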

/**
 * VFIO_IOMMU_MAP_DMA - _IOW(VFIO_TYPE, VFIO_BASE + 13, struct vfio_dma_map)
 *
 * Map process virtual addresses to IO virtual addresses using the
 * provided struct vfio_dma_map. Caller sets argsz. READ and/or WRITE
 * flags are required.
 */
struct vfio_iommu_type1_dma_map {
        __u32 argsz;
        __u32 flags;
#define VFIO_DMA_MAP_FLAG_READ (1 << 0)         /* readable from device */
#define VFIO_DMA_MAP_FLAG_WRITE (1 << 1)        /* writable from device */
        __u64 vaddr;            /* Process virtual address */
        __u64 iova;             /* IO virtual address */
        __u64 size;             /* Size of mapping (bytes) */
};

#define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13)
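
/*
 * Example (illustrative sketch, not part of the UAPI): mapping an
 * anonymous, page-aligned 1 MB buffer at IOVA 0 for bidirectional DMA.
 * The mmap() flags shown are userspace-side assumptions.
 *
 *      struct vfio_iommu_type1_dma_map dma_map = { .argsz = sizeof(dma_map) };
 *
 *      dma_map.vaddr = (__u64)(uintptr_t)mmap(0, 1024 * 1024,
 *                              PROT_READ | PROT_WRITE,
 *                              MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
 *      dma_map.size = 1024 * 1024;
 *      dma_map.iova = 0;
 *      dma_map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
 *      ioctl(container, VFIO_IOMMU_MAP_DMA, &dma_map);
 */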

/**
 * VFIO_IOMMU_UNMAP_DMA - _IOWR(VFIO_TYPE, VFIO_BASE + 14,
 *                              struct vfio_dma_unmap)
 *
 * Unmap IO virtual addresses using the provided struct vfio_dma_unmap.
 * Caller sets argsz. The actual unmapped size is returned in the size
 * field. No guarantee is made to the user that arbitrary unmaps of iova
 * or size different from those used in the original mapping call will
 * succeed.
 */
struct vfio_iommu_type1_dma_unmap {
        __u32 argsz;
        __u32 flags;
        __u64 iova;             /* IO virtual address */
        __u64 size;             /* Size of mapping (bytes) */
};

#define VFIO_IOMMU_UNMAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 14)
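
/*
 * Example (illustrative sketch, not part of the UAPI): undoing the
 * mapping from the VFIO_IOMMU_MAP_DMA sketch above with the same iova
 * and size. On return, dma_unmap.size holds the bytes actually unmapped.
 *
 *      struct vfio_iommu_type1_dma_unmap dma_unmap = {
 *              .argsz = sizeof(dma_unmap),
 *              .iova = 0,
 *              .size = 1024 * 1024,
 *      };
 *
 *      ioctl(container, VFIO_IOMMU_UNMAP_DMA, &dma_unmap);
 */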

/*
 * IOCTLs to enable/disable IOMMU container usage.
 * No parameters are supported.
 */
#define VFIO_IOMMU_ENABLE _IO(VFIO_TYPE, VFIO_BASE + 15)
#define VFIO_IOMMU_DISABLE _IO(VFIO_TYPE, VFIO_BASE + 16)

/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */

/*
 * The SPAPR TCE DDW info struct provides the information about
 * the details of Dynamic DMA window capability.
 *
 * @pgsizes contains a page size bitmask, 4K/64K/16M are supported.
 * @max_dynamic_windows_supported tells the maximum number of windows
 * which the platform can create.
 * @levels tells the maximum number of levels in multi-level IOMMU tables;
 * this allows splitting a table into smaller chunks which reduces
 * the amount of physically contiguous memory required for the table.
 */
struct vfio_iommu_spapr_tce_ddw_info {
        __u64 pgsizes;          /* Bitmap of supported page sizes */
        __u32 max_dynamic_windows_supported;
        __u32 levels;
};

/*
 * The SPAPR TCE info struct provides the information about the PCI bus
 * address ranges available for DMA; these values are programmed into
 * the hardware so the guest has to know that information.
 *
 * The DMA 32 bit window start is an absolute PCI bus address.
 * The IOVA addresses passed via map/unmap ioctls are absolute PCI bus
 * addresses too so the window works as a filter rather than an offset
 * for IOVA addresses.
 *
 * Flags supported:
 * - VFIO_IOMMU_SPAPR_INFO_DDW: informs the userspace that dynamic DMA windows
 *   (DDW) support is present. @ddw is only supported when DDW is present.
 */
struct vfio_iommu_spapr_tce_info {
        __u32 argsz;
        __u32 flags;
#define VFIO_IOMMU_SPAPR_INFO_DDW (1 << 0)      /* DDW supported */
        __u32 dma32_window_start;       /* 32 bit window start (bytes) */
        __u32 dma32_window_size;        /* 32 bit window size (bytes) */
        struct vfio_iommu_spapr_tce_ddw_info ddw;
};

#define VFIO_IOMMU_SPAPR_TCE_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)

/*
 * EEH PE operation struct provides ways to:
 * - enable/disable EEH functionality;
 * - unfreeze IO/DMA for frozen PE;
 * - read PE state;
 * - reset PE;
 * - configure PE;
 * - inject EEH error.
 */
struct vfio_eeh_pe_err {
        __u32 type;
        __u32 func;
        __u64 addr;
        __u64 mask;
};

struct vfio_eeh_pe_op {
        __u32 argsz;
        __u32 flags;
        __u32 op;
        union {
                struct vfio_eeh_pe_err err;
        };
};

#define VFIO_EEH_PE_DISABLE 0           /* Disable EEH functionality */
#define VFIO_EEH_PE_ENABLE 1            /* Enable EEH functionality */
#define VFIO_EEH_PE_UNFREEZE_IO 2       /* Enable IO for frozen PE */
#define VFIO_EEH_PE_UNFREEZE_DMA 3      /* Enable DMA for frozen PE */
#define VFIO_EEH_PE_GET_STATE 4         /* PE state retrieval */
#define VFIO_EEH_PE_STATE_NORMAL 0      /* PE in functional state */
#define VFIO_EEH_PE_STATE_RESET 1       /* PE reset in progress */
#define VFIO_EEH_PE_STATE_STOPPED 2     /* Stopped DMA and IO */
#define VFIO_EEH_PE_STATE_STOPPED_DMA 4 /* Stopped DMA only */
#define VFIO_EEH_PE_STATE_UNAVAIL 5     /* State unavailable */
#define VFIO_EEH_PE_RESET_DEACTIVATE 5  /* Deassert PE reset */
#define VFIO_EEH_PE_RESET_HOT 6         /* Assert hot reset */
#define VFIO_EEH_PE_RESET_FUNDAMENTAL 7 /* Assert fundamental reset */
#define VFIO_EEH_PE_CONFIGURE 8         /* PE configuration */
#define VFIO_EEH_PE_INJECT_ERR 9        /* Inject EEH error */

#define VFIO_EEH_PE_OP _IO(VFIO_TYPE, VFIO_BASE + 21)
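
/*
 * Example (illustrative sketch, not part of the UAPI): enabling EEH on
 * a container and reading back the PE state. This assumes the container
 * uses a SPAPR TCE IOMMU backend; availability of the EEH interface is
 * reported by the VFIO_EEH extension above, and the GET_STATE result is
 * returned as the ioctl return value.
 *
 *      struct vfio_eeh_pe_op pe_op = { .argsz = sizeof(pe_op) };
 *
 *      pe_op.op = VFIO_EEH_PE_ENABLE;
 *      ioctl(container, VFIO_EEH_PE_OP, &pe_op);
 *
 *      pe_op.op = VFIO_EEH_PE_GET_STATE;
 *      int state = ioctl(container, VFIO_EEH_PE_OP, &pe_op);
 */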

/**
 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 17, struct vfio_iommu_spapr_register_memory)
 *
 * Registers user space memory where DMA is allowed. It pins
 * user pages and performs the locked memory accounting so that
 * subsequent VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA calls are faster.
 */
struct vfio_iommu_spapr_register_memory {
        __u32 argsz;
        __u32 flags;
        __u64 vaddr;            /* Process virtual address */
        __u64 size;             /* Size of mapping (bytes) */
};
#define VFIO_IOMMU_SPAPR_REGISTER_MEMORY _IO(VFIO_TYPE, VFIO_BASE + 17)
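
/*
 * Example (illustrative sketch, not part of the UAPI): pre-registering
 * a DMA buffer with the SPAPR TCE v2 IOMMU backend before mapping it.
 * buf and size stand for a page-aligned userspace allocation and are
 * assumptions of this example.
 *
 *      struct vfio_iommu_spapr_register_memory reg = {
 *              .argsz = sizeof(reg),
 *              .vaddr = (__u64)(uintptr_t)buf,
 *              .size = size,
 *      };
 *
 *      ioctl(container, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
 */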

/**
 * VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 18, struct vfio_iommu_spapr_register_memory)
 *
 * Unregisters user space memory registered with
 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY.
 * Uses vfio_iommu_spapr_register_memory for parameters.
 */
#define VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY _IO(VFIO_TYPE, VFIO_BASE + 18)

/**
 * VFIO_IOMMU_SPAPR_TCE_CREATE - _IOWR(VFIO_TYPE, VFIO_BASE + 19, struct vfio_iommu_spapr_tce_create)
 *
 * Creates an additional TCE table and programs it (sets a new DMA window)
 * to every IOMMU group in the container. It receives page shift, window
 * size and number of levels in the TCE table being created.
 *
 * It allocates the new DMA window and returns its offset on the PCI bus.
 */
struct vfio_iommu_spapr_tce_create {
        __u32 argsz;
        __u32 flags;
        /* in */
        __u32 page_shift;
        __u64 window_size;
        __u32 levels;
        /* out */
        __u64 start_addr;
};
#define VFIO_IOMMU_SPAPR_TCE_CREATE _IO(VFIO_TYPE, VFIO_BASE + 19)

/**
 * VFIO_IOMMU_SPAPR_TCE_REMOVE - _IOW(VFIO_TYPE, VFIO_BASE + 20, struct vfio_iommu_spapr_tce_remove)
 *
 * Unprograms a TCE table from all groups in the container and destroys it.
 * It receives a PCI bus offset as a window id.
 */
struct vfio_iommu_spapr_tce_remove {
        __u32 argsz;
        __u32 flags;
        /* in */
        __u64 start_addr;
};
#define VFIO_IOMMU_SPAPR_TCE_REMOVE _IO(VFIO_TYPE, VFIO_BASE + 20)
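
/*
 * Example (illustrative sketch, not part of the UAPI): creating an
 * additional DMA window and removing it again. The page shift, window
 * size and level count below are example values only; the supported
 * values are reported by VFIO_IOMMU_SPAPR_TCE_GET_INFO via the ddw field.
 *
 *      struct vfio_iommu_spapr_tce_create create = {
 *              .argsz = sizeof(create),
 *              .page_shift = 16,
 *              .window_size = 1ULL << 30,
 *              .levels = 1,
 *      };
 *      struct vfio_iommu_spapr_tce_remove remove = { .argsz = sizeof(remove) };
 *
 *      ioctl(container, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
 *      remove.start_addr = create.start_addr;
 *      ioctl(container, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
 */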

/* ***************************************************************** */

#endif /* VFIO_H */