/*
 * MSI-X device support
 *
 * This module includes support for MSI-X in pci devices.
 *
 * Author: Michael S. Tsirkin <[email protected]>
 *
 * Copyright (c) 2009, Red Hat Inc, Michael S. Tsirkin ([email protected])
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "hw.h"
#include "msix.h"
#include "pci.h"
#include "range.h"

#define MSIX_CAP_LENGTH 12

/* MSI enable bit and maskall bit are in byte 1 in FLAGS register */
#define MSIX_CONTROL_OFFSET (PCI_MSIX_FLAGS + 1)
#define MSIX_ENABLE_MASK (PCI_MSIX_FLAGS_ENABLE >> 8)
#define MSIX_MASKALL_MASK (PCI_MSIX_FLAGS_MASKALL >> 8)

/* How much space does an MSIX table need. */
/* The spec requires giving the table structure
 * a 4K aligned region all by itself. */
#define MSIX_PAGE_SIZE 0x1000
/* Reserve second half of the page for pending bits */
#define MSIX_PAGE_PENDING (MSIX_PAGE_SIZE / 2)
#define MSIX_MAX_ENTRIES 32

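/* Layout note (annotation derived from the definitions above and from
 * msix_init() below): both guest-visible structures live in the single
 * MSIX_PAGE_SIZE page, msix_table_page.  The vector table starts at offset
 * 0x0 (PCI_MSIX_ENTRY_SIZE is 16 bytes per entry, so at most
 * 32 * 16 = 0x200 bytes here) and the pending bit array (PBA) starts at
 * MSIX_PAGE_PENDING == 0x800. */
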
/* Flag for interrupt controller to declare MSI-X support */
int msix_supported;

/* Add MSI-X capability to the config space for the device. */
/* Given a bar and its size, add MSI-X table on top of it
 * and fill MSI-X capability in the config space.
 * Original bar size must be a power of 2 or 0.
 * New bar size is returned. */
static int msix_add_config(struct PCIDevice *pdev, unsigned short nentries,
                           unsigned bar_nr, unsigned bar_size)
{
    int config_offset;
    uint8_t *config;
    uint32_t new_size;

    if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1)
        return -EINVAL;
    if (bar_size > 0x80000000)
        return -ENOSPC;

    /* Add space for MSI-X structures */
    if (!bar_size) {
        new_size = MSIX_PAGE_SIZE;
    } else if (bar_size < MSIX_PAGE_SIZE) {
        bar_size = MSIX_PAGE_SIZE;
        new_size = MSIX_PAGE_SIZE * 2;
    } else {
        new_size = bar_size * 2;
    }

    pdev->msix_bar_size = new_size;
    config_offset = pci_add_capability(pdev, PCI_CAP_ID_MSIX,
                                       0, MSIX_CAP_LENGTH);
    if (config_offset < 0)
        return config_offset;
    config = pdev->config + config_offset;

    pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1);
    /* Table on top of BAR */
    pci_set_long(config + PCI_MSIX_TABLE, bar_size | bar_nr);
    /* Pending bits on top of that */
    pci_set_long(config + PCI_MSIX_PBA, (bar_size + MSIX_PAGE_PENDING) |
                 bar_nr);
    pdev->msix_cap = config_offset;
    /* Make flags bit writable. */
    pdev->wmask[config_offset + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK |
            MSIX_MASKALL_MASK;
    return 0;
}
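
/* Worked example (annotation; the numbers follow directly from
 * msix_add_config() above): with bar_size == 0 the BAR becomes a single
 * MSIX_PAGE_SIZE page, the table is reported at offset 0x0 and the PBA at
 * 0x800; with an original bar_size of 0x1000 the BAR doubles to 0x2000,
 * the table is reported at offset 0x1000 and the PBA at 0x1800. */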

static uint32_t msix_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    PCIDevice *dev = opaque;
    unsigned int offset = addr & (MSIX_PAGE_SIZE - 1) & ~0x3;
    void *page = dev->msix_table_page;

    return pci_get_long(page + offset);
}

static uint32_t msix_mmio_read_unallowed(void *opaque, target_phys_addr_t addr)
{
    fprintf(stderr, "MSI-X: only dword read is allowed!\n");
    return 0;
}

static uint8_t msix_pending_mask(int vector)
{
    return 1 << (vector % 8);
}

static uint8_t *msix_pending_byte(PCIDevice *dev, int vector)
{
    return dev->msix_table_page + MSIX_PAGE_PENDING + vector / 8;
}

static int msix_is_pending(PCIDevice *dev, int vector)
{
    return *msix_pending_byte(dev, vector) & msix_pending_mask(vector);
}

static void msix_set_pending(PCIDevice *dev, int vector)
{
    *msix_pending_byte(dev, vector) |= msix_pending_mask(vector);
}

static void msix_clr_pending(PCIDevice *dev, int vector)
{
    *msix_pending_byte(dev, vector) &= ~msix_pending_mask(vector);
}

static int msix_function_masked(PCIDevice *dev)
{
    return dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & MSIX_MASKALL_MASK;
}

static int msix_is_masked(PCIDevice *dev, int vector)
{
    unsigned offset =
        vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
    return msix_function_masked(dev) ||
           dev->msix_table_page[offset] & PCI_MSIX_ENTRY_CTRL_MASKBIT;
}

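/* Called whenever a vector's per-vector mask bit or the function-wide
 * MASKALL bit may have changed: if the vector is now unmasked and a message
 * is pending for it, clear the pending bit and deliver the message. */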
static void msix_handle_mask_update(PCIDevice *dev, int vector)
{
    if (!msix_is_masked(dev, vector) && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}

/* Handle MSI-X capability config write. */
void msix_write_config(PCIDevice *dev, uint32_t addr,
                       uint32_t val, int len)
{
    unsigned enable_pos = dev->msix_cap + MSIX_CONTROL_OFFSET;
    int vector;

    if (!range_covers_byte(addr, len, enable_pos)) {
        return;
    }

    if (!msix_enabled(dev)) {
        return;
    }

    pci_device_deassert_intx(dev);

    if (msix_function_masked(dev)) {
        return;
    }

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_handle_mask_update(dev, vector);
    }
}

static void msix_mmio_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    PCIDevice *dev = opaque;
    unsigned int offset = addr & (MSIX_PAGE_SIZE - 1) & ~0x3;
    int vector = offset / PCI_MSIX_ENTRY_SIZE;
    pci_set_long(dev->msix_table_page + offset, val);
    msix_handle_mask_update(dev, vector);
}

static void msix_mmio_write_unallowed(void *opaque, target_phys_addr_t addr,
                                      uint32_t val)
{
    fprintf(stderr, "MSI-X: only dword write is allowed!\n");
}

static CPUWriteMemoryFunc * const msix_mmio_write[] = {
    msix_mmio_write_unallowed, msix_mmio_write_unallowed, msix_mmio_writel
};

static CPUReadMemoryFunc * const msix_mmio_read[] = {
    msix_mmio_read_unallowed, msix_mmio_read_unallowed, msix_mmio_readl
};

/* Should be called from device's map method. */
void msix_mmio_map(PCIDevice *d, int region_num,
                   pcibus_t addr, pcibus_t size, int type)
{
    uint8_t *config = d->config + d->msix_cap;
    uint32_t table = pci_get_long(config + PCI_MSIX_TABLE);
    uint32_t offset = table & ~(MSIX_PAGE_SIZE - 1);
    /* TODO: for assigned devices, we'll want to make it possible to map
     * pending bits separately in case they are in a separate bar. */
    int table_bir = table & PCI_MSIX_FLAGS_BIRMASK;

    if (table_bir != region_num)
        return;
    if (size <= offset)
        return;
    cpu_register_physical_memory(addr + offset, size - offset,
                                 d->msix_mmio_index);
}

static void msix_mask_all(struct PCIDevice *dev, unsigned nentries)
{
    int vector;
    for (vector = 0; vector < nentries; ++vector) {
        unsigned offset =
            vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
        dev->msix_table_page[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
    }
}

/* Initialize the MSI-X structures.  Note: if MSI-X is supported, the BAR
 * size is modified; it should be retrieved with msix_bar_size(). */
int msix_init(struct PCIDevice *dev, unsigned short nentries,
              unsigned bar_nr, unsigned bar_size)
{
    int ret;
    /* Nothing to do if MSI is not supported by interrupt controller */
    if (!msix_supported)
        return -ENOTSUP;

    if (nentries > MSIX_MAX_ENTRIES)
        return -EINVAL;

    dev->msix_entry_used = qemu_mallocz(MSIX_MAX_ENTRIES *
                                        sizeof *dev->msix_entry_used);

    dev->msix_table_page = qemu_mallocz(MSIX_PAGE_SIZE);
    msix_mask_all(dev, nentries);

    dev->msix_mmio_index = cpu_register_io_memory(msix_mmio_read,
                                                  msix_mmio_write, dev,
                                                  DEVICE_NATIVE_ENDIAN);
    if (dev->msix_mmio_index == -1) {
        ret = -EBUSY;
        goto err_index;
    }

    dev->msix_entries_nr = nentries;
    ret = msix_add_config(dev, nentries, bar_nr, bar_size);
    if (ret)
        goto err_config;

    dev->cap_present |= QEMU_PCI_CAP_MSIX;
    return 0;

err_config:
    dev->msix_entries_nr = 0;
    cpu_unregister_io_memory(dev->msix_mmio_index);
err_index:
    qemu_free(dev->msix_table_page);
    dev->msix_table_page = NULL;
    qemu_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    return ret;
}
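
/*
 * Usage sketch (illustrative only; "foo" is a hypothetical device state, not
 * a symbol from this tree): a device that wants 4 vectors behind BAR 1 might
 * do roughly the following in its init function, silently falling back to
 * legacy interrupts when msix_init() fails:
 *
 *     if (msix_init(&foo->pci_dev, 4, 1, 0) == 0) {
 *         pci_register_bar(&foo->pci_dev, 1, msix_bar_size(&foo->pci_dev),
 *                          PCI_BASE_ADDRESS_SPACE_MEMORY, msix_mmio_map);
 *     }
 *
 * msix_mmio_map() matches the PCIMapIORegionFunc signature, so it can be
 * registered directly as the map callback when the BAR contains nothing but
 * the MSI-X page.
 */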

static void msix_free_irq_entries(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        dev->msix_entry_used[vector] = 0;
        msix_clr_pending(dev, vector);
    }
}

/* Clean up resources for the device. */
int msix_uninit(PCIDevice *dev)
{
    if (!(dev->cap_present & QEMU_PCI_CAP_MSIX))
        return 0;
    pci_del_capability(dev, PCI_CAP_ID_MSIX, MSIX_CAP_LENGTH);
    dev->msix_cap = 0;
    msix_free_irq_entries(dev);
    dev->msix_entries_nr = 0;
    cpu_unregister_io_memory(dev->msix_mmio_index);
    qemu_free(dev->msix_table_page);
    dev->msix_table_page = NULL;
    qemu_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    dev->cap_present &= ~QEMU_PCI_CAP_MSIX;
    return 0;
}

void msix_save(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;

    if (!(dev->cap_present & QEMU_PCI_CAP_MSIX)) {
        return;
    }

    qemu_put_buffer(f, dev->msix_table_page, n * PCI_MSIX_ENTRY_SIZE);
    qemu_put_buffer(f, dev->msix_table_page + MSIX_PAGE_PENDING, (n + 7) / 8);
}

/* Should be called after restoring the config space. */
void msix_load(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;

    if (!(dev->cap_present & QEMU_PCI_CAP_MSIX)) {
        return;
    }

    msix_free_irq_entries(dev);
    qemu_get_buffer(f, dev->msix_table_page, n * PCI_MSIX_ENTRY_SIZE);
    qemu_get_buffer(f, dev->msix_table_page + MSIX_PAGE_PENDING, (n + 7) / 8);
}
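
/* Annotation: the buffers above only cover the table and the pending bits;
 * the capability itself lives in config space.  Devices are therefore
 * expected to save/restore PCI config space first (for instance via
 * pci_device_save()/pci_device_load()) and only then call
 * msix_save()/msix_load(), as the comment on msix_load() above requires for
 * the load side. */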

/* Does device support MSI-X? */
int msix_present(PCIDevice *dev)
{
    return dev->cap_present & QEMU_PCI_CAP_MSIX;
}

/* Is MSI-X enabled? */
int msix_enabled(PCIDevice *dev)
{
    return (dev->cap_present & QEMU_PCI_CAP_MSIX) &&
        (dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
         MSIX_ENABLE_MASK);
}

/* Size of bar where MSI-X table resides, or 0 if MSI-X not supported. */
uint32_t msix_bar_size(PCIDevice *dev)
{
    return (dev->cap_present & QEMU_PCI_CAP_MSIX) ?
        dev->msix_bar_size : 0;
}

/* Send an MSI-X message */
void msix_notify(PCIDevice *dev, unsigned vector)
{
    uint8_t *table_entry = dev->msix_table_page + vector * PCI_MSIX_ENTRY_SIZE;
    uint64_t address;
    uint32_t data;

    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector])
        return;
    if (msix_is_masked(dev, vector)) {
        msix_set_pending(dev, vector);
        return;
    }

    address = pci_get_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR);
    data = pci_get_long(table_entry + PCI_MSIX_ENTRY_DATA);
    stl_le_phys(address, data);
}
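
/*
 * Usage sketch (illustrative only; "foo" is a hypothetical device state):
 * on the data path a device would typically guard delivery with
 * msix_enabled() and keep its legacy INTx path as a fallback, e.g.:
 *
 *     if (msix_enabled(&foo->pci_dev)) {
 *         msix_notify(&foo->pci_dev, vector);
 *     } else {
 *         qemu_set_irq(foo->pci_dev.irq[0], 1);
 *     }
 *
 * Masked vectors are handled inside msix_notify() by latching the pending
 * bit instead of sending the message.
 */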

void msix_reset(PCIDevice *dev)
{
    if (!(dev->cap_present & QEMU_PCI_CAP_MSIX))
        return;
    msix_free_irq_entries(dev);
    dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &=
            ~dev->wmask[dev->msix_cap + MSIX_CONTROL_OFFSET];
    memset(dev->msix_table_page, 0, MSIX_PAGE_SIZE);
    msix_mask_all(dev, dev->msix_entries_nr);
}

/* The PCI spec suggests that devices make it possible for software to
 * configure fewer vectors than supported by the device, but does not specify
 * a standard mechanism for devices to do so.
 *
 * We support this by asking devices to declare vectors software is going to
 * actually use, and checking this on the notification path. Devices that
 * don't want to follow the spec suggestion can declare all vectors as used. */

/* Mark vector as used. */
int msix_vector_use(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr)
        return -EINVAL;
    dev->msix_entry_used[vector]++;
    return 0;
}

/* Mark vector as unused. */
void msix_vector_unuse(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
        return;
    }
    if (--dev->msix_entry_used[vector]) {
        return;
    }
    msix_clr_pending(dev, vector);
}
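
/*
 * Usage sketch (illustrative only; "foo", old_vector and new_vector are
 * hypothetical): msix_vector_use()/msix_vector_unuse() maintain a per-vector
 * reference count, so a device that lets the guest reassign a vector would
 * typically do:
 *
 *     msix_vector_unuse(&foo->pci_dev, old_vector);
 *     msix_vector_use(&foo->pci_dev, new_vector);
 *
 * msix_notify() silently drops messages for vectors that were never marked
 * as used.
 */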

void msix_unuse_all_vectors(PCIDevice *dev)
{
    if (!(dev->cap_present & QEMU_PCI_CAP_MSIX))
        return;
    msix_free_irq_entries(dev);
}