/*
 * MSI-X device support
 *
 * This module includes support for MSI-X in pci devices.
 *
 * Author: Michael S. Tsirkin <[email protected]>
 *
 * Copyright (c) 2009, Red Hat Inc, Michael S. Tsirkin ([email protected])
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include "hw.h"
#include "msix.h"
#include "pci.h"
#include "range.h"

/* MSI-X capability structure */
#define MSIX_TABLE_OFFSET 4
#define MSIX_PBA_OFFSET 8
#define MSIX_CAP_LENGTH 12

/* MSI enable bit and maskall bit are in byte 1 in FLAGS register */
#define MSIX_CONTROL_OFFSET (PCI_MSIX_FLAGS + 1)
#define MSIX_ENABLE_MASK (PCI_MSIX_FLAGS_ENABLE >> 8)
#define MSIX_MASKALL_MASK (PCI_MSIX_FLAGS_MASKALL >> 8)

/* MSI-X table format */
#define MSIX_MSG_ADDR 0
#define MSIX_MSG_UPPER_ADDR 4
#define MSIX_MSG_DATA 8
#define MSIX_VECTOR_CTRL 12
#define MSIX_ENTRY_SIZE 16
#define MSIX_VECTOR_MASK 0x1

/* How much space does an MSI-X table need? */
/* The spec requires giving the table structure
 * a 4K aligned region all by itself. */
#define MSIX_PAGE_SIZE 0x1000
/* Reserve second half of the page for pending bits */
#define MSIX_PAGE_PENDING (MSIX_PAGE_SIZE / 2)
#define MSIX_MAX_ENTRIES 32
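
/* The vector table and the pending bit array (PBA) share one page of backing
 * memory (msix_table_page): table entries start at offset 0, the PBA at
 * MSIX_PAGE_PENDING. */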


/* Flag for interrupt controller to declare MSI-X support */
int msix_supported;

/* Add MSI-X capability to the config space for the device. */
/* Given a bar and its size, add MSI-X table on top of it
 * and fill MSI-X capability in the config space.
 * Original bar size must be a power of 2 or 0.
 * New bar size is returned. */
static int msix_add_config(struct PCIDevice *pdev, unsigned short nentries,
                           unsigned bar_nr, unsigned bar_size)
{
    int config_offset;
    uint8_t *config;
    uint32_t new_size;

    if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1)
        return -EINVAL;
    if (bar_size > 0x80000000)
        return -ENOSPC;

    /* Add space for MSI-X structures */
    if (!bar_size) {
        new_size = MSIX_PAGE_SIZE;
    } else if (bar_size < MSIX_PAGE_SIZE) {
        bar_size = MSIX_PAGE_SIZE;
        new_size = MSIX_PAGE_SIZE * 2;
    } else {
        new_size = bar_size * 2;
    }

    pdev->msix_bar_size = new_size;
    config_offset = pci_add_capability(pdev, PCI_CAP_ID_MSIX,
                                       0, MSIX_CAP_LENGTH);
    if (config_offset < 0)
        return config_offset;
    config = pdev->config + config_offset;

    pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1);
    /* Table on top of BAR */
    pci_set_long(config + MSIX_TABLE_OFFSET, bar_size | bar_nr);
    /* Pending bits on top of that */
    pci_set_long(config + MSIX_PBA_OFFSET, (bar_size + MSIX_PAGE_PENDING) |
                 bar_nr);
    pdev->msix_cap = config_offset;
    /* Make flags bit writable. */
    pdev->wmask[config_offset + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK |
        MSIX_MASKALL_MASK;
    return 0;
}
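
/* Example with illustrative numbers: a device that already has a 0x1000 byte
 * BAR ends up with a 0x2000 byte BAR, the vector table at offset 0x1000 and
 * the pending bit array at offset 0x1800. */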

static uint32_t msix_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    PCIDevice *dev = opaque;
    unsigned int offset = addr & (MSIX_PAGE_SIZE - 1) & ~0x3;
    void *page = dev->msix_table_page;

    return pci_get_long(page + offset);
}

static uint32_t msix_mmio_read_unallowed(void *opaque, target_phys_addr_t addr)
{
    fprintf(stderr, "MSI-X: only dword read is allowed!\n");
    return 0;
}

static uint8_t msix_pending_mask(int vector)
{
    return 1 << (vector % 8);
}

static uint8_t *msix_pending_byte(PCIDevice *dev, int vector)
{
    return dev->msix_table_page + MSIX_PAGE_PENDING + vector / 8;
}

static int msix_is_pending(PCIDevice *dev, int vector)
{
    return *msix_pending_byte(dev, vector) & msix_pending_mask(vector);
}

static void msix_set_pending(PCIDevice *dev, int vector)
{
    *msix_pending_byte(dev, vector) |= msix_pending_mask(vector);
}

static void msix_clr_pending(PCIDevice *dev, int vector)
{
    *msix_pending_byte(dev, vector) &= ~msix_pending_mask(vector);
}

static int msix_function_masked(PCIDevice *dev)
{
    return dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & MSIX_MASKALL_MASK;
}

static int msix_is_masked(PCIDevice *dev, int vector)
{
    unsigned offset = vector * MSIX_ENTRY_SIZE + MSIX_VECTOR_CTRL;
    return msix_function_masked(dev) ||
           dev->msix_table_page[offset] & MSIX_VECTOR_MASK;
}

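/* When a mask bit is cleared for a vector that latched a message while it
 * was masked, deliver that message now and clear the pending bit. */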
static void msix_handle_mask_update(PCIDevice *dev, int vector)
{
    if (!msix_is_masked(dev, vector) && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}

/* Handle MSI-X capability config write. */
void msix_write_config(PCIDevice *dev, uint32_t addr,
                       uint32_t val, int len)
{
    unsigned enable_pos = dev->msix_cap + MSIX_CONTROL_OFFSET;
    int vector;

    if (!range_covers_byte(addr, len, enable_pos)) {
        return;
    }

    if (!msix_enabled(dev)) {
        return;
    }

    pci_device_deassert_intx(dev);

    if (msix_function_masked(dev)) {
        return;
    }

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_handle_mask_update(dev, vector);
    }
}

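/* Guest write into the MSI-X table/PBA page; the write may change a vector's
 * mask bit, so re-check whether a pending message can now be delivered. */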
static void msix_mmio_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    PCIDevice *dev = opaque;
    unsigned int offset = addr & (MSIX_PAGE_SIZE - 1) & ~0x3;
    int vector = offset / MSIX_ENTRY_SIZE;
    pci_set_long(dev->msix_table_page + offset, val);
    msix_handle_mask_update(dev, vector);
}

static void msix_mmio_write_unallowed(void *opaque, target_phys_addr_t addr,
                                      uint32_t val)
{
    fprintf(stderr, "MSI-X: only dword write is allowed!\n");
}

static CPUWriteMemoryFunc * const msix_mmio_write[] = {
    msix_mmio_write_unallowed, msix_mmio_write_unallowed, msix_mmio_writel
};

static CPUReadMemoryFunc * const msix_mmio_read[] = {
    msix_mmio_read_unallowed, msix_mmio_read_unallowed, msix_mmio_readl
};

/* Should be called from device's map method. */
void msix_mmio_map(PCIDevice *d, int region_num,
                   pcibus_t addr, pcibus_t size, int type)
{
    uint8_t *config = d->config + d->msix_cap;
    uint32_t table = pci_get_long(config + MSIX_TABLE_OFFSET);
    uint32_t offset = table & ~(MSIX_PAGE_SIZE - 1);
    /* TODO: for assigned devices, we'll want to make it possible to map
     * pending bits separately in case they are in a separate bar. */
    int table_bir = table & PCI_MSIX_FLAGS_BIRMASK;

    if (table_bir != region_num)
        return;
    if (size <= offset)
        return;
    cpu_register_physical_memory(addr + offset, size - offset,
                                 d->msix_mmio_index);
}

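/* Set the per-vector mask bit in every table entry; masked is the reset
 * default that the PCI spec requires for MSI-X vectors. */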
static void msix_mask_all(struct PCIDevice *dev, unsigned nentries)
{
    int vector;
    for (vector = 0; vector < nentries; ++vector) {
        unsigned offset = vector * MSIX_ENTRY_SIZE + MSIX_VECTOR_CTRL;
        dev->msix_table_page[offset] |= MSIX_VECTOR_MASK;
    }
}

/* Initialize the MSI-X structures. Note: if MSI-X is supported, the BAR size
 * is modified; the new size should be retrieved with msix_bar_size. */
int msix_init(struct PCIDevice *dev, unsigned short nentries,
              unsigned bar_nr, unsigned bar_size)
{
    int ret;
    /* Nothing to do if MSI-X is not supported by interrupt controller */
    if (!msix_supported)
        return -ENOTSUP;

    if (nentries > MSIX_MAX_ENTRIES)
        return -EINVAL;

    dev->msix_entry_used = qemu_mallocz(MSIX_MAX_ENTRIES *
                                        sizeof *dev->msix_entry_used);

    dev->msix_table_page = qemu_mallocz(MSIX_PAGE_SIZE);
    msix_mask_all(dev, nentries);

    dev->msix_mmio_index = cpu_register_io_memory(msix_mmio_read,
                                                  msix_mmio_write, dev,
                                                  DEVICE_NATIVE_ENDIAN);
    if (dev->msix_mmio_index == -1) {
        ret = -EBUSY;
        goto err_index;
    }

    dev->msix_entries_nr = nentries;
    ret = msix_add_config(dev, nentries, bar_nr, bar_size);
    if (ret)
        goto err_config;

    dev->cap_present |= QEMU_PCI_CAP_MSIX;
    return 0;

err_config:
    dev->msix_entries_nr = 0;
    cpu_unregister_io_memory(dev->msix_mmio_index);
err_index:
    qemu_free(dev->msix_table_page);
    dev->msix_table_page = NULL;
    qemu_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    return ret;
}

static void msix_free_irq_entries(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        dev->msix_entry_used[vector] = 0;
        msix_clr_pending(dev, vector);
    }
}

/* Clean up resources for the device. */
int msix_uninit(PCIDevice *dev)
{
    if (!(dev->cap_present & QEMU_PCI_CAP_MSIX))
        return 0;
    pci_del_capability(dev, PCI_CAP_ID_MSIX, MSIX_CAP_LENGTH);
    dev->msix_cap = 0;
    msix_free_irq_entries(dev);
    dev->msix_entries_nr = 0;
    cpu_unregister_io_memory(dev->msix_mmio_index);
    qemu_free(dev->msix_table_page);
    dev->msix_table_page = NULL;
    qemu_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    dev->cap_present &= ~QEMU_PCI_CAP_MSIX;
    return 0;
}

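/* Save MSI-X state: the vector table followed by the pending bit array. */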
void msix_save(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;

    if (!(dev->cap_present & QEMU_PCI_CAP_MSIX)) {
        return;
    }

    qemu_put_buffer(f, dev->msix_table_page, n * MSIX_ENTRY_SIZE);
    qemu_put_buffer(f, dev->msix_table_page + MSIX_PAGE_PENDING, (n + 7) / 8);
}

/* Should be called after restoring the config space. */
void msix_load(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;

    if (!(dev->cap_present & QEMU_PCI_CAP_MSIX)) {
        return;
    }

    msix_free_irq_entries(dev);
    qemu_get_buffer(f, dev->msix_table_page, n * MSIX_ENTRY_SIZE);
    qemu_get_buffer(f, dev->msix_table_page + MSIX_PAGE_PENDING, (n + 7) / 8);
}

/* Does device support MSI-X? */
int msix_present(PCIDevice *dev)
{
    return dev->cap_present & QEMU_PCI_CAP_MSIX;
}

/* Is MSI-X enabled? */
int msix_enabled(PCIDevice *dev)
{
    return (dev->cap_present & QEMU_PCI_CAP_MSIX) &&
        (dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
         MSIX_ENABLE_MASK);
}

/* Size of bar where MSI-X table resides, or 0 if MSI-X not supported. */
uint32_t msix_bar_size(PCIDevice *dev)
{
    return (dev->cap_present & QEMU_PCI_CAP_MSIX) ?
        dev->msix_bar_size : 0;
}

/* Send an MSI-X message */
void msix_notify(PCIDevice *dev, unsigned vector)
{
    uint8_t *table_entry = dev->msix_table_page + vector * MSIX_ENTRY_SIZE;
    uint64_t address;
    uint32_t data;

    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector])
        return;
    if (msix_is_masked(dev, vector)) {
        msix_set_pending(dev, vector);
        return;
    }

    address = pci_get_long(table_entry + MSIX_MSG_UPPER_ADDR);
    address = (address << 32) | pci_get_long(table_entry + MSIX_MSG_ADDR);
    data = pci_get_long(table_entry + MSIX_MSG_DATA);
    stl_phys(address, data);
}

void msix_reset(PCIDevice *dev)
{
    if (!(dev->cap_present & QEMU_PCI_CAP_MSIX))
        return;
    msix_free_irq_entries(dev);
    dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &=
        ~dev->wmask[dev->msix_cap + MSIX_CONTROL_OFFSET];
    memset(dev->msix_table_page, 0, MSIX_PAGE_SIZE);
    msix_mask_all(dev, dev->msix_entries_nr);
}

/* The PCI spec suggests that devices make it possible for software to
 * configure fewer vectors than the device supports, but it does not specify
 * a standard mechanism for doing so.
 *
 * We support this by asking devices to declare the vectors software is
 * actually going to use, and checking this on the notification path. Devices
 * that don't want to follow the spec suggestion can declare all vectors as
 * used. */
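
/* Illustrative sketch only (hypothetical device state "s"; the vector count
 * and BAR number are arbitrary, and error handling and BAR registration are
 * omitted):
 *
 *     msix_init(&s->dev, 4, 1, 0);        // 4 vectors, table in BAR 1
 *     for (i = 0; i < 4; ++i) {
 *         msix_vector_use(&s->dev, i);    // declare the vectors we will use
 *     }
 *     ...
 *     msix_notify(&s->dev, 2);            // raise vector 2
 */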

/* Mark vector as used. */
int msix_vector_use(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr)
        return -EINVAL;
    dev->msix_entry_used[vector]++;
    return 0;
}

/* Mark vector as unused. */
void msix_vector_unuse(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
        return;
    }
    if (--dev->msix_entry_used[vector]) {
        return;
    }
    msix_clr_pending(dev, vector);
}

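/* Mark every vector as unused and clear its pending bit. */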
void msix_unuse_all_vectors(PCIDevice *dev)
{
    if (!(dev->cap_present & QEMU_PCI_CAP_MSIX))
        return;
    msix_free_irq_entries(dev);
}