/*
 * MSI-X device support
 *
 * This module includes support for MSI-X in pci devices.
 *
 * Author: Michael S. Tsirkin <[email protected]>
 *
 * Copyright (c) 2009, Red Hat Inc, Michael S. Tsirkin ([email protected])
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include "hw.h"
#include "msix.h"
#include "pci.h"

/* MSI-X capability structure */
#define MSIX_TABLE_OFFSET 4
#define MSIX_PBA_OFFSET 8
#define MSIX_CAP_LENGTH 12

/* MSI enable bit and maskall bit are in byte 1 in FLAGS register */
#define MSIX_CONTROL_OFFSET (PCI_MSIX_FLAGS + 1)
#define MSIX_ENABLE_MASK (PCI_MSIX_FLAGS_ENABLE >> 8)
#define MSIX_MASKALL_MASK (PCI_MSIX_FLAGS_MASKALL >> 8)

/* MSI-X table format */
#define MSIX_MSG_ADDR 0
#define MSIX_MSG_UPPER_ADDR 4
#define MSIX_MSG_DATA 8
#define MSIX_VECTOR_CTRL 12
#define MSIX_ENTRY_SIZE 16
#define MSIX_VECTOR_MASK 0x1

/* How much space does an MSIX table need. */
/* The spec requires giving the table structure
 * a 4K aligned region all by itself. */
#define MSIX_PAGE_SIZE 0x1000
/* Reserve second half of the page for pending bits */
#define MSIX_PAGE_PENDING (MSIX_PAGE_SIZE / 2)
#define MSIX_MAX_ENTRIES 32


/* Flag for interrupt controller to declare MSI-X support */
int msix_supported;

/* Add MSI-X capability to the config space for the device. */
/* Given a bar and its size, add MSI-X table on top of it
 * and fill MSI-X capability in the config space.
 * Original bar size must be a power of 2 or 0.
 * New bar size is returned. */
static int msix_add_config(struct PCIDevice *pdev, unsigned short nentries,
                           unsigned bar_nr, unsigned bar_size)
{
    int config_offset;
    uint8_t *config;
    uint32_t new_size;

    if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1)
        return -EINVAL;
    if (bar_size > 0x80000000)
        return -ENOSPC;

    /* Add space for MSI-X structures */
    if (!bar_size) {
        new_size = MSIX_PAGE_SIZE;
    } else if (bar_size < MSIX_PAGE_SIZE) {
        bar_size = MSIX_PAGE_SIZE;
        new_size = MSIX_PAGE_SIZE * 2;
    } else {
        new_size = bar_size * 2;
    }

    pdev->msix_bar_size = new_size;
    config_offset = pci_add_capability(pdev, PCI_CAP_ID_MSIX, MSIX_CAP_LENGTH);
    if (config_offset < 0)
        return config_offset;
    config = pdev->config + config_offset;

    pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1);
    /* Table on top of BAR */
    pci_set_long(config + MSIX_TABLE_OFFSET, bar_size | bar_nr);
    /* Pending bits on top of that */
    pci_set_long(config + MSIX_PBA_OFFSET, (bar_size + MSIX_PAGE_PENDING) |
                 bar_nr);
    pdev->msix_cap = config_offset;
    /* Make flags bit writeable. */
    pdev->wmask[config_offset + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK |
            MSIX_MASKALL_MASK;
    return 0;
}
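
/*
 * Worked layout example (illustrative, derived from the logic above): with
 * bar_size == 0, the BAR becomes a single MSIX_PAGE_SIZE (4K) region holding
 * the vector table at offset 0 and the pending bit array (PBA) at
 * MSIX_PAGE_PENDING (0x800).  With an existing 4K BAR, the BAR doubles to 8K,
 * the table lands at offset 0x1000 and the PBA at 0x1800.  In both cases the
 * low bits of the table/PBA registers carry the BAR number (BIR).
 */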

static uint32_t msix_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    PCIDevice *dev = opaque;
    unsigned int offset = addr & (MSIX_PAGE_SIZE - 1) & ~0x3;
    void *page = dev->msix_table_page;

    return pci_get_long(page + offset);
}

static uint32_t msix_mmio_read_unallowed(void *opaque, target_phys_addr_t addr)
{
    fprintf(stderr, "MSI-X: only dword read is allowed!\n");
    return 0;
}

static uint8_t msix_pending_mask(int vector)
{
    return 1 << (vector % 8);
}

static uint8_t *msix_pending_byte(PCIDevice *dev, int vector)
{
    return dev->msix_table_page + MSIX_PAGE_PENDING + vector / 8;
}

static int msix_is_pending(PCIDevice *dev, int vector)
{
    return *msix_pending_byte(dev, vector) & msix_pending_mask(vector);
}

static void msix_set_pending(PCIDevice *dev, int vector)
{
    *msix_pending_byte(dev, vector) |= msix_pending_mask(vector);
}

static void msix_clr_pending(PCIDevice *dev, int vector)
{
    *msix_pending_byte(dev, vector) &= ~msix_pending_mask(vector);
}

static int msix_function_masked(PCIDevice *dev)
{
    return dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & MSIX_MASKALL_MASK;
}

static int msix_is_masked(PCIDevice *dev, int vector)
{
    unsigned offset = vector * MSIX_ENTRY_SIZE + MSIX_VECTOR_CTRL;
    return msix_function_masked(dev) ||
           dev->msix_table_page[offset] & MSIX_VECTOR_MASK;
}

static void msix_handle_mask_update(PCIDevice *dev, int vector)
{
    if (!msix_is_masked(dev, vector) && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}

/* Handle MSI-X capability config write. */
void msix_write_config(PCIDevice *dev, uint32_t addr,
                       uint32_t val, int len)
{
    unsigned enable_pos = dev->msix_cap + MSIX_CONTROL_OFFSET;
    int vector;

    if (!range_covers_byte(addr, len, enable_pos)) {
        return;
    }

    if (!msix_enabled(dev)) {
        return;
    }

    qemu_set_irq(dev->irq[0], 0);

    if (msix_function_masked(dev)) {
        return;
    }

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_handle_mask_update(dev, vector);
    }
}

static void msix_mmio_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    PCIDevice *dev = opaque;
    unsigned int offset = addr & (MSIX_PAGE_SIZE - 1) & ~0x3;
    int vector = offset / MSIX_ENTRY_SIZE;
    pci_set_long(dev->msix_table_page + offset, val);
    msix_handle_mask_update(dev, vector);
}

static void msix_mmio_write_unallowed(void *opaque, target_phys_addr_t addr,
                                      uint32_t val)
{
    fprintf(stderr, "MSI-X: only dword write is allowed!\n");
}

static CPUWriteMemoryFunc * const msix_mmio_write[] = {
    msix_mmio_write_unallowed, msix_mmio_write_unallowed, msix_mmio_writel
};

static CPUReadMemoryFunc * const msix_mmio_read[] = {
    msix_mmio_read_unallowed, msix_mmio_read_unallowed, msix_mmio_readl
};

/* Should be called from device's map method. */
void msix_mmio_map(PCIDevice *d, int region_num,
                   pcibus_t addr, pcibus_t size, int type)
{
    uint8_t *config = d->config + d->msix_cap;
    uint32_t table = pci_get_long(config + MSIX_TABLE_OFFSET);
    uint32_t offset = table & ~(MSIX_PAGE_SIZE - 1);
    /* TODO: for assigned devices, we'll want to make it possible to map
     * pending bits separately in case they are in a separate bar. */
    int table_bir = table & PCI_MSIX_FLAGS_BIRMASK;

    if (table_bir != region_num)
        return;
    if (size <= offset)
        return;
    cpu_register_physical_memory(addr + offset, size - offset,
                                 d->msix_mmio_index);
}

static void msix_mask_all(struct PCIDevice *dev, unsigned nentries)
{
    int vector;
    for (vector = 0; vector < nentries; ++vector) {
        unsigned offset = vector * MSIX_ENTRY_SIZE + MSIX_VECTOR_CTRL;
        dev->msix_table_page[offset] |= MSIX_VECTOR_MASK;
    }
}

/* Initialize the MSI-X structures.  Note: if MSI-X is supported, the BAR size
 * is modified, so it should be retrieved with msix_bar_size. */
int msix_init(struct PCIDevice *dev, unsigned short nentries,
              unsigned bar_nr, unsigned bar_size)
{
    int ret;
    /* Nothing to do if MSI is not supported by interrupt controller */
    if (!msix_supported)
        return -ENOTSUP;

    if (nentries > MSIX_MAX_ENTRIES)
        return -EINVAL;

    dev->msix_entry_used = qemu_mallocz(MSIX_MAX_ENTRIES *
                                        sizeof *dev->msix_entry_used);

    dev->msix_table_page = qemu_mallocz(MSIX_PAGE_SIZE);
    msix_mask_all(dev, nentries);

    dev->msix_mmio_index = cpu_register_io_memory(msix_mmio_read,
                                                  msix_mmio_write, dev);
    if (dev->msix_mmio_index == -1) {
        ret = -EBUSY;
        goto err_index;
    }

    dev->msix_entries_nr = nentries;
    ret = msix_add_config(dev, nentries, bar_nr, bar_size);
    if (ret)
        goto err_config;

    dev->cap_present |= QEMU_PCI_CAP_MSIX;
    return 0;

err_config:
    dev->msix_entries_nr = 0;
    cpu_unregister_io_memory(dev->msix_mmio_index);
err_index:
    qemu_free(dev->msix_table_page);
    dev->msix_table_page = NULL;
    qemu_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    return ret;
}
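
/*
 * Usage sketch (illustrative only; the device state "d", the vector count and
 * the BAR number are hypothetical, and the pci_register_bar() call assumes
 * the BAR registration interface used elsewhere in this tree):
 *
 *     if (msix_init(&d->pci_dev, 4, 1, 0) >= 0) {
 *         pci_register_bar(&d->pci_dev, 1, msix_bar_size(&d->pci_dev),
 *                          PCI_BASE_ADDRESS_SPACE_MEMORY, msix_mmio_map);
 *     }
 *
 * The BAR is registered with msix_bar_size() rather than the original size,
 * since msix_add_config() may have grown it.
 */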

static void msix_free_irq_entries(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        dev->msix_entry_used[vector] = 0;
        msix_clr_pending(dev, vector);
    }
}

/* Clean up resources for the device. */
int msix_uninit(PCIDevice *dev)
{
    if (!(dev->cap_present & QEMU_PCI_CAP_MSIX))
        return 0;
    pci_del_capability(dev, PCI_CAP_ID_MSIX, MSIX_CAP_LENGTH);
    dev->msix_cap = 0;
    msix_free_irq_entries(dev);
    dev->msix_entries_nr = 0;
    cpu_unregister_io_memory(dev->msix_mmio_index);
    qemu_free(dev->msix_table_page);
    dev->msix_table_page = NULL;
    qemu_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    dev->cap_present &= ~QEMU_PCI_CAP_MSIX;
    return 0;
}

void msix_save(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;

    if (!(dev->cap_present & QEMU_PCI_CAP_MSIX)) {
        return;
    }

    qemu_put_buffer(f, dev->msix_table_page, n * MSIX_ENTRY_SIZE);
    qemu_put_buffer(f, dev->msix_table_page + MSIX_PAGE_PENDING, (n + 7) / 8);
}

/* Should be called after restoring the config space. */
void msix_load(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;

    if (!(dev->cap_present & QEMU_PCI_CAP_MSIX)) {
        return;
    }

    msix_free_irq_entries(dev);
    qemu_get_buffer(f, dev->msix_table_page, n * MSIX_ENTRY_SIZE);
    qemu_get_buffer(f, dev->msix_table_page + MSIX_PAGE_PENDING, (n + 7) / 8);
}

/* Does device support MSI-X? */
int msix_present(PCIDevice *dev)
{
    return dev->cap_present & QEMU_PCI_CAP_MSIX;
}

/* Is MSI-X enabled? */
int msix_enabled(PCIDevice *dev)
{
    return (dev->cap_present & QEMU_PCI_CAP_MSIX) &&
        (dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
         MSIX_ENABLE_MASK);
}

/* Size of bar where MSI-X table resides, or 0 if MSI-X not supported. */
uint32_t msix_bar_size(PCIDevice *dev)
{
    return (dev->cap_present & QEMU_PCI_CAP_MSIX) ?
        dev->msix_bar_size : 0;
}

/* Send an MSI-X message */
void msix_notify(PCIDevice *dev, unsigned vector)
{
    uint8_t *table_entry = dev->msix_table_page + vector * MSIX_ENTRY_SIZE;
    uint64_t address;
    uint32_t data;

    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector])
        return;
    if (msix_is_masked(dev, vector)) {
        msix_set_pending(dev, vector);
        return;
    }

    address = pci_get_long(table_entry + MSIX_MSG_UPPER_ADDR);
    address = (address << 32) | pci_get_long(table_entry + MSIX_MSG_ADDR);
    data = pci_get_long(table_entry + MSIX_MSG_DATA);
    stl_phys(address, data);
}

void msix_reset(PCIDevice *dev)
{
    if (!(dev->cap_present & QEMU_PCI_CAP_MSIX))
        return;
    msix_free_irq_entries(dev);
    dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &=
        ~dev->wmask[dev->msix_cap + MSIX_CONTROL_OFFSET];
    memset(dev->msix_table_page, 0, MSIX_PAGE_SIZE);
    msix_mask_all(dev, dev->msix_entries_nr);
}

/* The PCI spec suggests that devices make it possible for software to
 * configure fewer vectors than the device supports, but does not specify a
 * standard mechanism for devices to do so.
 *
 * We support this by asking devices to declare the vectors software is going
 * to actually use, and checking this on the notification path.  Devices that
 * don't want to follow the spec suggestion can declare all vectors as used. */

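/*
 * Calling pattern sketch (illustrative; the vector number is made up): after
 * msix_init(), a device marks each vector it will actually signal and then
 * raises interrupts on the data path:
 *
 *     msix_vector_use(dev, 0);
 *     ...
 *     msix_notify(dev, 0);
 *
 * It calls msix_vector_unuse() (or msix_unuse_all_vectors() on reset) when it
 * is done with a vector.  msix_notify() silently ignores vectors that were
 * never marked used.
 */
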
/* Mark vector as used. */
int msix_vector_use(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr)
        return -EINVAL;
    dev->msix_entry_used[vector]++;
    return 0;
}

/* Mark vector as unused. */
void msix_vector_unuse(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
        return;
    }
    if (--dev->msix_entry_used[vector]) {
        return;
    }
    msix_clr_pending(dev, vector);
}

void msix_unuse_all_vectors(PCIDevice *dev)
{
    if (!(dev->cap_present & QEMU_PCI_CAP_MSIX))
        return;
    msix_free_irq_entries(dev);
}