/*
 * MSI-X device support
 *
 * This module includes support for MSI-X in pci devices.
 *
 * Author: Michael S. Tsirkin <[email protected]>
 *
 * Copyright (c) 2009, Red Hat Inc, Michael S. Tsirkin ([email protected])
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/xen/xen.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "qemu/range.h"
#include "qapi/error.h"
#include "trace.h"

/* MSI enable bit and maskall bit are in byte 1 in FLAGS register */
#define MSIX_CONTROL_OFFSET (PCI_MSIX_FLAGS + 1)
#define MSIX_ENABLE_MASK (PCI_MSIX_FLAGS_ENABLE >> 8)
#define MSIX_MASKALL_MASK (PCI_MSIX_FLAGS_MASKALL >> 8)

MSIMessage msix_get_message(PCIDevice *dev, unsigned vector)
{
    uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;
    MSIMessage msg;

    msg.address = pci_get_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR);
    msg.data = pci_get_long(table_entry + PCI_MSIX_ENTRY_DATA);
    return msg;
}
/*
 * Special API for POWER to configure the vectors through
 * a side channel. Should never be used by devices.
 */
void msix_set_message(PCIDevice *dev, int vector, struct MSIMessage msg)
{
    uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;

    pci_set_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR, msg.address);
    pci_set_long(table_entry + PCI_MSIX_ENTRY_DATA, msg.data);
    table_entry[PCI_MSIX_ENTRY_VECTOR_CTRL] &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
}

static uint8_t msix_pending_mask(int vector)
{
    return 1 << (vector % 8);
}

static uint8_t *msix_pending_byte(PCIDevice *dev, int vector)
{
    return dev->msix_pba + vector / 8;
}

static int msix_is_pending(PCIDevice *dev, int vector)
{
    return *msix_pending_byte(dev, vector) & msix_pending_mask(vector);
}

void msix_set_pending(PCIDevice *dev, unsigned int vector)
{
    *msix_pending_byte(dev, vector) |= msix_pending_mask(vector);
}

void msix_clr_pending(PCIDevice *dev, int vector)
{
    *msix_pending_byte(dev, vector) &= ~msix_pending_mask(vector);
}

static bool msix_vector_masked(PCIDevice *dev, unsigned int vector, bool fmask)
{
    unsigned offset = vector * PCI_MSIX_ENTRY_SIZE;
    uint8_t *data = &dev->msix_table[offset + PCI_MSIX_ENTRY_DATA];
    /* MSIs on Xen can be remapped into pirqs. In those cases, masking
     * and unmasking go through the PV evtchn path. */
    if (xen_enabled() && xen_is_pirq_msi(pci_get_long(data))) {
        return false;
    }
    return fmask || dev->msix_table[offset + PCI_MSIX_ENTRY_VECTOR_CTRL] &
        PCI_MSIX_ENTRY_CTRL_MASKBIT;
}

bool msix_is_masked(PCIDevice *dev, unsigned int vector)
{
    return msix_vector_masked(dev, vector, dev->msix_function_masked);
}

static void msix_fire_vector_notifier(PCIDevice *dev,
                                      unsigned int vector, bool is_masked)
{
    MSIMessage msg;
    int ret;

    if (!dev->msix_vector_use_notifier) {
        return;
    }
    if (is_masked) {
        dev->msix_vector_release_notifier(dev, vector);
    } else {
        msg = msix_get_message(dev, vector);
        ret = dev->msix_vector_use_notifier(dev, vector, msg);
        assert(ret >= 0);
    }
}
static void msix_handle_mask_update(PCIDevice *dev, int vector, bool was_masked)
{
    bool is_masked = msix_is_masked(dev, vector);

    if (is_masked == was_masked) {
        return;
    }

    msix_fire_vector_notifier(dev, vector, is_masked);

    if (!is_masked && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}

static bool msix_masked(PCIDevice *dev)
{
    return dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & MSIX_MASKALL_MASK;
}

static void msix_update_function_masked(PCIDevice *dev)
{
    dev->msix_function_masked = !msix_enabled(dev) || msix_masked(dev);
}

/* Handle MSI-X capability config write. */
void msix_write_config(PCIDevice *dev, uint32_t addr,
                       uint32_t val, int len)
{
    unsigned enable_pos = dev->msix_cap + MSIX_CONTROL_OFFSET;
    int vector;
    bool was_masked;

    if (!msix_present(dev) || !range_covers_byte(addr, len, enable_pos)) {
        return;
    }

    trace_msix_write_config(dev->name, msix_enabled(dev), msix_masked(dev));

    was_masked = dev->msix_function_masked;
    msix_update_function_masked(dev);

    if (!msix_enabled(dev)) {
        return;
    }

    pci_device_deassert_intx(dev);

    if (dev->msix_function_masked == was_masked) {
        return;
    }

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_handle_mask_update(dev, vector,
                                msix_vector_masked(dev, vector, was_masked));
    }
}
static uint64_t msix_table_mmio_read(void *opaque, hwaddr addr,
                                     unsigned size)
{
    PCIDevice *dev = opaque;

    return pci_get_long(dev->msix_table + addr);
}

static void msix_table_mmio_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PCIDevice *dev = opaque;
    int vector = addr / PCI_MSIX_ENTRY_SIZE;
    bool was_masked;

    was_masked = msix_is_masked(dev, vector);
    pci_set_long(dev->msix_table + addr, val);
    msix_handle_mask_update(dev, vector, was_masked);
}

static const MemoryRegionOps msix_table_mmio_ops = {
    .read = msix_table_mmio_read,
    .write = msix_table_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static uint64_t msix_pba_mmio_read(void *opaque, hwaddr addr,
                                   unsigned size)
{
    PCIDevice *dev = opaque;
    if (dev->msix_vector_poll_notifier) {
        unsigned vector_start = addr * 8;
        unsigned vector_end = MIN(addr + size * 8, dev->msix_entries_nr);
        dev->msix_vector_poll_notifier(dev, vector_start, vector_end);
    }

    return pci_get_long(dev->msix_pba + addr);
}

static void msix_pba_mmio_write(void *opaque, hwaddr addr,
                                uint64_t val, unsigned size)
{
}

static const MemoryRegionOps msix_pba_mmio_ops = {
    .read = msix_pba_mmio_read,
    .write = msix_pba_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void msix_mask_all(struct PCIDevice *dev, unsigned nentries)
{
    int vector;

    for (vector = 0; vector < nentries; ++vector) {
        unsigned offset =
            vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
        bool was_masked = msix_is_masked(dev, vector);

        dev->msix_table[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
        msix_handle_mask_update(dev, vector, was_masked);
    }
}
/*
 * Make PCI device @dev MSI-X capable
 * @nentries is the max number of MSI-X vectors that the device supports.
 * @table_bar is the MemoryRegion in which the MSI-X table structure resides.
 * @table_bar_nr is the number of the base address register corresponding to
 * @table_bar.
 * @table_offset indicates the offset at which the MSI-X table structure
 * starts in @table_bar.
 * @pba_bar is the MemoryRegion in which the Pending Bit Array structure
 * resides.
 * @pba_bar_nr is the number of the base address register corresponding to
 * @pba_bar.
 * @pba_offset indicates the offset at which the Pending Bit Array structure
 * starts in @pba_bar.
 * A non-zero @cap_pos puts the MSI-X capability at that offset in PCI config
 * space.
 * @errp is for returning errors.
 *
 * Return 0 on success; set @errp and return -errno on error:
 * -ENOTSUP means MSI is not supported by the platform's interrupt controller.
 * -EINVAL means the parameters are invalid or the capability overlaps an
 * existing one (possible when @cap_pos is non-zero); this normally indicates
 * a programming error, except for device assignment, which can use it to
 * check whether real hardware is broken.
 */
int msix_init(struct PCIDevice *dev, unsigned short nentries,
              MemoryRegion *table_bar, uint8_t table_bar_nr,
              unsigned table_offset, MemoryRegion *pba_bar,
              uint8_t pba_bar_nr, unsigned pba_offset, uint8_t cap_pos,
              Error **errp)
{
    int cap;
    unsigned table_size, pba_size;
    uint8_t *config;

    /* Nothing to do if MSI is not supported by interrupt controller */
    if (!msi_nonbroken) {
        error_setg(errp, "MSI-X is not supported by interrupt controller");
        return -ENOTSUP;
    }

    if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1) {
        error_setg(errp, "The number of MSI-X vectors is invalid");
        return -EINVAL;
    }

    table_size = nentries * PCI_MSIX_ENTRY_SIZE;
    pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;

    /* Sanity test: table & pba don't overlap, fit within BARs, min aligned */
    if ((table_bar_nr == pba_bar_nr &&
         ranges_overlap(table_offset, table_size, pba_offset, pba_size)) ||
        table_offset + table_size > memory_region_size(table_bar) ||
        pba_offset + pba_size > memory_region_size(pba_bar) ||
        (table_offset | pba_offset) & PCI_MSIX_FLAGS_BIRMASK) {
        error_setg(errp, "table & pba overlap, or they don't fit in BARs,"
                   " or don't align");
        return -EINVAL;
    }

    cap = pci_add_capability(dev, PCI_CAP_ID_MSIX,
                             cap_pos, MSIX_CAP_LENGTH, errp);
    if (cap < 0) {
        return cap;
    }

    dev->msix_cap = cap;
    dev->cap_present |= QEMU_PCI_CAP_MSIX;
    config = dev->config + cap;

    pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1);
    dev->msix_entries_nr = nentries;
    dev->msix_function_masked = true;

    pci_set_long(config + PCI_MSIX_TABLE, table_offset | table_bar_nr);
    pci_set_long(config + PCI_MSIX_PBA, pba_offset | pba_bar_nr);

    /* Make flags bit writable. */
    dev->wmask[cap + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK |
                                             MSIX_MASKALL_MASK;

    dev->msix_table = g_malloc0(table_size);
    dev->msix_pba = g_malloc0(pba_size);
    dev->msix_entry_used = g_malloc0(nentries * sizeof *dev->msix_entry_used);

    msix_mask_all(dev, nentries);

    memory_region_init_io(&dev->msix_table_mmio, OBJECT(dev), &msix_table_mmio_ops, dev,
                          "msix-table", table_size);
    memory_region_add_subregion(table_bar, table_offset, &dev->msix_table_mmio);
    memory_region_init_io(&dev->msix_pba_mmio, OBJECT(dev), &msix_pba_mmio_ops, dev,
                          "msix-pba", pba_size);
    memory_region_add_subregion(pba_bar, pba_offset, &dev->msix_pba_mmio);

    return 0;
}
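/*
 * Illustrative usage sketch (not part of the original file): a device
 * model that reserves its own MemoryRegion for the MSI-X structures
 * might call msix_init() from its realize callback roughly like this.
 * The "s->msix_bar" field, the BAR number, the offsets, the cap_pos
 * value and the vector count are assumptions made for the example,
 * not requirements of the API.
 *
 *     Error *err = NULL;
 *
 *     memory_region_init(&s->msix_bar, OBJECT(dev), "mydev-msix", 0x1000);
 *     if (msix_init(dev, 8, &s->msix_bar, 2, 0,
 *                   &s->msix_bar, 2, 0x800, 0x70, &err) < 0) {
 *         error_propagate(errp, err);
 *         return;
 *     }
 *     pci_register_bar(dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->msix_bar);
 *
 * Devices that do not need a custom layout can use
 * msix_init_exclusive_bar() below instead.
 */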
int msix_init_exclusive_bar(PCIDevice *dev, unsigned short nentries,
                            uint8_t bar_nr, Error **errp)
{
    int ret;
    char *name;
    uint32_t bar_size = 4096;
    uint32_t bar_pba_offset = bar_size / 2;
    uint32_t bar_pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;

    /*
     * Migration compatibility dictates that this remains a 4k
     * BAR with the vector table in the lower half and PBA in
     * the upper half for nentries less than or equal to 128.
     * There is no need to care about using more than 65 entries for
     * legacy machine types, which have at most 64 queues.
     */
    if (nentries * PCI_MSIX_ENTRY_SIZE > bar_pba_offset) {
        bar_pba_offset = nentries * PCI_MSIX_ENTRY_SIZE;
    }

    if (bar_pba_offset + bar_pba_size > 4096) {
        bar_size = bar_pba_offset + bar_pba_size;
    }

    bar_size = pow2ceil(bar_size);
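    /*
     * Worked example (illustrative numbers, not from the original code):
     * with nentries = 256 the table needs 256 * 16 = 4096 bytes, so
     * bar_pba_offset is bumped to 4096; the PBA then needs
     * QEMU_ALIGN_UP(256, 64) / 8 = 32 bytes, giving a raw size of 4128
     * bytes, which pow2ceil() rounds up to an 8K BAR.  With the default
     * nentries <= 128, everything fits and the compatible 4K layout is
     * kept.
     */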
    name = g_strdup_printf("%s-msix", dev->name);
    memory_region_init(&dev->msix_exclusive_bar, OBJECT(dev), name, bar_size);
    g_free(name);

    ret = msix_init(dev, nentries, &dev->msix_exclusive_bar, bar_nr,
                    0, &dev->msix_exclusive_bar,
                    bar_nr, bar_pba_offset,
                    0, errp);
    if (ret) {
        return ret;
    }

    pci_register_bar(dev, bar_nr, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     &dev->msix_exclusive_bar);

    return 0;
}
static void msix_free_irq_entries(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        dev->msix_entry_used[vector] = 0;
        msix_clr_pending(dev, vector);
    }
}

static void msix_clear_all_vectors(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_clr_pending(dev, vector);
    }
}

/* Clean up resources for the device. */
void msix_uninit(PCIDevice *dev, MemoryRegion *table_bar, MemoryRegion *pba_bar)
{
    if (!msix_present(dev)) {
        return;
    }
    pci_del_capability(dev, PCI_CAP_ID_MSIX, MSIX_CAP_LENGTH);
    dev->msix_cap = 0;
    msix_free_irq_entries(dev);
    dev->msix_entries_nr = 0;
    memory_region_del_subregion(pba_bar, &dev->msix_pba_mmio);
    g_free(dev->msix_pba);
    dev->msix_pba = NULL;
    memory_region_del_subregion(table_bar, &dev->msix_table_mmio);
    g_free(dev->msix_table);
    dev->msix_table = NULL;
    g_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    dev->cap_present &= ~QEMU_PCI_CAP_MSIX;
}

void msix_uninit_exclusive_bar(PCIDevice *dev)
{
    if (msix_present(dev)) {
        msix_uninit(dev, &dev->msix_exclusive_bar, &dev->msix_exclusive_bar);
    }
}

void msix_save(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;

    if (!msix_present(dev)) {
        return;
    }

    qemu_put_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
    qemu_put_buffer(f, dev->msix_pba, DIV_ROUND_UP(n, 8));
}

/* Should be called after restoring the config space. */
void msix_load(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;
    unsigned int vector;

    if (!msix_present(dev)) {
        return;
    }

    msix_clear_all_vectors(dev);
    qemu_get_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
    qemu_get_buffer(f, dev->msix_pba, DIV_ROUND_UP(n, 8));
    msix_update_function_masked(dev);

    for (vector = 0; vector < n; vector++) {
        msix_handle_mask_update(dev, vector, true);
    }
}

/* Does device support MSI-X? */
int msix_present(PCIDevice *dev)
{
    return dev->cap_present & QEMU_PCI_CAP_MSIX;
}

/* Is MSI-X enabled? */
int msix_enabled(PCIDevice *dev)
{
    return (dev->cap_present & QEMU_PCI_CAP_MSIX) &&
        (dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
         MSIX_ENABLE_MASK);
}

/* Send an MSI-X message */
void msix_notify(PCIDevice *dev, unsigned vector)
{
    MSIMessage msg;

    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
        return;
    }

    if (msix_is_masked(dev, vector)) {
        msix_set_pending(dev, vector);
        return;
    }

    msg = msix_get_message(dev, vector);

    msi_send_message(dev, msg);
}

void msix_reset(PCIDevice *dev)
{
    if (!msix_present(dev)) {
        return;
    }
    msix_clear_all_vectors(dev);
    dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &=
        ~dev->wmask[dev->msix_cap + MSIX_CONTROL_OFFSET];
    memset(dev->msix_table, 0, dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE);
    memset(dev->msix_pba, 0, QEMU_ALIGN_UP(dev->msix_entries_nr, 64) / 8);
    msix_mask_all(dev, dev->msix_entries_nr);
}
/* PCI spec suggests that devices make it possible for software to configure
 * fewer vectors than supported by the device, but does not specify a standard
 * mechanism for devices to do so.
 *
 * We support this by asking devices to declare vectors software is going to
 * actually use, and checking this on the notification path. Devices that
 * don't want to follow the spec suggestion can declare all vectors as used. */

/* Mark vector as used. */
int msix_vector_use(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr) {
        return -EINVAL;
    }

    dev->msix_entry_used[vector]++;
    return 0;
}
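/*
 * Illustrative sketch only (not derived from this file): a device that
 * actually delivers interrupts on two of its vectors would declare them
 * once after msix_init()/msix_init_exclusive_bar() and notify on them
 * later.  The vector numbers are arbitrary example values.
 *
 *     msix_vector_use(dev, 0);
 *     msix_vector_use(dev, 1);
 *     ...
 *     msix_notify(dev, 0);        // delivered, or marked pending if masked
 *     ...
 *     msix_vector_unuse(dev, 1);  // further notifications on 1 are dropped
 */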
/* Mark vector as unused. */
void msix_vector_unuse(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
        return;
    }
    if (--dev->msix_entry_used[vector]) {
        return;
    }
    msix_clr_pending(dev, vector);
}

void msix_unuse_all_vectors(PCIDevice *dev)
{
    if (!msix_present(dev)) {
        return;
    }
    msix_free_irq_entries(dev);
}

unsigned int msix_nr_vectors_allocated(const PCIDevice *dev)
{
    return dev->msix_entries_nr;
}

static int msix_set_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    MSIMessage msg;

    if (msix_is_masked(dev, vector)) {
        return 0;
    }
    msg = msix_get_message(dev, vector);
    return dev->msix_vector_use_notifier(dev, vector, msg);
}

static void msix_unset_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    if (msix_is_masked(dev, vector)) {
        return;
    }
    dev->msix_vector_release_notifier(dev, vector);
}

int msix_set_vector_notifiers(PCIDevice *dev,
                              MSIVectorUseNotifier use_notifier,
                              MSIVectorReleaseNotifier release_notifier,
                              MSIVectorPollNotifier poll_notifier)
{
    int vector, ret;

    assert(use_notifier && release_notifier);

    dev->msix_vector_use_notifier = use_notifier;
    dev->msix_vector_release_notifier = release_notifier;
    dev->msix_vector_poll_notifier = poll_notifier;

    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            ret = msix_set_notifier_for_vector(dev, vector);
            if (ret < 0) {
                goto undo;
            }
        }
    }
    if (dev->msix_vector_poll_notifier) {
        dev->msix_vector_poll_notifier(dev, 0, dev->msix_entries_nr);
    }
    return 0;

undo:
    while (--vector >= 0) {
        msix_unset_notifier_for_vector(dev, vector);
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    return ret;
}

void msix_unset_vector_notifiers(PCIDevice *dev)
{
    int vector;

    assert(dev->msix_vector_use_notifier &&
           dev->msix_vector_release_notifier);

    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            msix_unset_notifier_for_vector(dev, vector);
        }
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    dev->msix_vector_poll_notifier = NULL;
}

static int put_msix_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field, QJSON *vmdesc)
{
    msix_save(pv, f);

    return 0;
}

static int get_msix_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field)
{
    msix_load(pv, f);
    return 0;
}

static VMStateInfo vmstate_info_msix = {
    .name = "msix state",
    .get = get_msix_state,
    .put = put_msix_state,
};

const VMStateDescription vmstate_msix = {
    .name = "msix",
    .fields = (VMStateField[]) {
        {
            .name = "msix",
            .version_id = 0,
            .field_exists = NULL,
            .size = 0, /* ouch */
            .info = &vmstate_info_msix,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};