/*
 * Copyright (c) 2007, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Jiang Yunhong <[email protected]>
 *
 * This file implements direct PCI assignment to an HVM guest
 */

#include <sys/mman.h>

#include "hw/xen_backend.h"
#include "hw/xen_pt.h"
#include "hw/apic-msidef.h"


#define XEN_PT_AUTO_ASSIGN -1

/* shift count for gflags */
#define XEN_PT_GFLAGS_SHIFT_DEST_ID       0
#define XEN_PT_GFLAGS_SHIFT_RH            8
#define XEN_PT_GFLAGS_SHIFT_DM            9
#define XEN_PT_GFLAGSSHIFT_DELIV_MODE     12
#define XEN_PT_GFLAGSSHIFT_TRG_MODE       15


/*
 * Helpers
 */

static inline uint8_t msi_vector(uint32_t data)
{
    return (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
}

static inline uint8_t msi_dest_id(uint32_t addr)
{
    return (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
}

static inline uint32_t msi_ext_dest_id(uint32_t addr_hi)
{
    return addr_hi & 0xffffff00;
}

static uint32_t msi_gflags(uint32_t data, uint64_t addr)
{
    uint32_t result = 0;
    int rh, dm, dest_id, deliv_mode, trig_mode;

    rh = (addr >> MSI_ADDR_REDIRECTION_SHIFT) & 0x1;
    dm = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
    dest_id = msi_dest_id(addr);
    deliv_mode = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
    trig_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;

    result = dest_id | (rh << XEN_PT_GFLAGS_SHIFT_RH)
        | (dm << XEN_PT_GFLAGS_SHIFT_DM)
        | (deliv_mode << XEN_PT_GFLAGSSHIFT_DELIV_MODE)
        | (trig_mode << XEN_PT_GFLAGSSHIFT_TRG_MODE);

    return result;
}

static inline uint64_t msi_addr64(XenPTMSI *msi)
{
    return (uint64_t)msi->addr_hi << 32 | msi->addr_lo;
}

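/*
 * Flip an enable bit in the MSI or MSI-X control word of the real device.
 * 'address' is the config-space offset of the control word and 'flag' is
 * the bit to set or clear.
 */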
static int msi_msix_enable(XenPCIPassthroughState *s,
                           uint32_t address,
                           uint16_t flag,
                           bool enable)
{
    uint16_t val = 0;

    if (!address) {
        return -1;
    }

    xen_host_pci_get_word(&s->real_device, address, &val);
    if (enable) {
        val |= flag;
    } else {
        val &= ~flag;
    }
    xen_host_pci_set_word(&s->real_device, address, val);
    return 0;
}

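/*
 * Map a pirq for one MSI or MSI-X vector.  A zero guest vector requests a
 * specific pirq, encoded in the destination ID fields; the mapping itself
 * is done with xc_physdev_map_pirq_msi() when the vector is not yet mapped.
 */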
static int msi_msix_setup(XenPCIPassthroughState *s,
                          uint64_t addr,
                          uint32_t data,
                          int *ppirq,
                          bool is_msix,
                          int msix_entry,
                          bool is_not_mapped)
{
    uint8_t gvec = msi_vector(data);
    int rc = 0;

    assert((!is_msix && msix_entry == 0) || is_msix);

    if (gvec == 0) {
        /* if gvec is 0, the guest is asking for a particular pirq that
         * is passed as dest_id */
        *ppirq = msi_ext_dest_id(addr >> 32) | msi_dest_id(addr);
        if (!*ppirq) {
            /* this probably identifies a misconfiguration of the guest,
             * try the emulated path */
            *ppirq = XEN_PT_UNASSIGNED_PIRQ;
        } else {
            XEN_PT_LOG(&s->dev, "requested pirq %d for MSI%s"
                       " (vec: %#x, entry: %#x)\n",
                       *ppirq, is_msix ? "-X" : "", gvec, msix_entry);
        }
    }

    if (is_not_mapped) {
        uint64_t table_base = 0;

        if (is_msix) {
            table_base = s->msix->table_base;
        }

        rc = xc_physdev_map_pirq_msi(xen_xc, xen_domid, XEN_PT_AUTO_ASSIGN,
                                     ppirq, PCI_DEVFN(s->real_device.dev,
                                                      s->real_device.func),
                                     s->real_device.bus,
                                     msix_entry, table_base);
        if (rc) {
            XEN_PT_ERR(&s->dev,
                       "Mapping of MSI%s failed. (rc: %i, vec: %#x, entry %#x)\n",
                       is_msix ? "-X" : "", rc, gvec, msix_entry);
            return rc;
        }
    }

    return 0;
}

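/*
 * Bind an already mapped pirq to the guest's current vector and flags via
 * xc_domain_update_msi_irq().  On failure the pirq is unmapped again and
 * *old_pirq is reset to XEN_PT_UNASSIGNED_PIRQ.
 */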
static int msi_msix_update(XenPCIPassthroughState *s,
                           uint64_t addr,
                           uint32_t data,
                           int pirq,
                           bool is_msix,
                           int msix_entry,
                           int *old_pirq)
{
    PCIDevice *d = &s->dev;
    uint8_t gvec = msi_vector(data);
    uint32_t gflags = msi_gflags(data, addr);
    int rc = 0;
    uint64_t table_addr = 0;

    XEN_PT_LOG(d, "Updating MSI%s with pirq %d gvec %#x gflags %#x"
               " (entry: %#x)\n",
               is_msix ? "-X" : "", pirq, gvec, gflags, msix_entry);

    if (is_msix) {
        table_addr = s->msix->mmio_base_addr;
    }

    rc = xc_domain_update_msi_irq(xen_xc, xen_domid, gvec,
                                  pirq, gflags, table_addr);

    if (rc) {
        XEN_PT_ERR(d, "Updating of MSI%s failed. (rc: %d)\n",
                   is_msix ? "-X" : "", rc);

        if (xc_physdev_unmap_pirq(xen_xc, xen_domid, *old_pirq)) {
            XEN_PT_ERR(d, "Unmapping of MSI%s pirq %d failed.\n",
                       is_msix ? "-X" : "", *old_pirq);
        }
        *old_pirq = XEN_PT_UNASSIGNED_PIRQ;
    }
    return rc;
}

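/*
 * Tear down one MSI or MSI-X vector: unbind it from the guest if it is
 * currently bound, then unmap the pirq.
 */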
static int msi_msix_disable(XenPCIPassthroughState *s,
                            uint64_t addr,
                            uint32_t data,
                            int pirq,
                            bool is_msix,
                            bool is_bound)
{
    PCIDevice *d = &s->dev;
    uint8_t gvec = msi_vector(data);
    uint32_t gflags = msi_gflags(data, addr);
    int rc = 0;

    if (pirq == XEN_PT_UNASSIGNED_PIRQ) {
        return 0;
    }

    if (is_bound) {
        XEN_PT_LOG(d, "Unbind MSI%s with pirq %d, gvec %#x\n",
                   is_msix ? "-X" : "", pirq, gvec);
        rc = xc_domain_unbind_msi_irq(xen_xc, xen_domid, gvec, pirq, gflags);
        if (rc) {
            XEN_PT_ERR(d, "Unbinding of MSI%s failed. (pirq: %d, gvec: %#x)\n",
                       is_msix ? "-X" : "", pirq, gvec);
            return rc;
        }
    }

    XEN_PT_LOG(d, "Unmap MSI%s pirq %d\n", is_msix ? "-X" : "", pirq);
    rc = xc_physdev_unmap_pirq(xen_xc, xen_domid, pirq);
    if (rc) {
        XEN_PT_ERR(d, "Unmapping of MSI%s pirq %d failed. (rc: %i)\n",
                   is_msix ? "-X" : "", pirq, rc);
        return rc;
    }

    return 0;
}

/*
 * MSI virtualization functions
 */

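/* Enable or disable the MSI capability on the real device. */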
int xen_pt_msi_set_enable(XenPCIPassthroughState *s, bool enable)
{
    XEN_PT_LOG(&s->dev, "%s MSI.\n", enable ? "enabling" : "disabling");

    if (!s->msi) {
        return -1;
    }

    return msi_msix_enable(s, s->msi->ctrl_offset, PCI_MSI_FLAGS_ENABLE,
                           enable);
}

/* setup physical MSI, but don't enable it */
int xen_pt_msi_setup(XenPCIPassthroughState *s)
{
    int pirq = XEN_PT_UNASSIGNED_PIRQ;
    int rc = 0;
    XenPTMSI *msi = s->msi;

    if (msi->initialized) {
        XEN_PT_ERR(&s->dev,
                   "Setup physical MSI when it has already been initialized.\n");
        return -1;
    }

    rc = msi_msix_setup(s, msi_addr64(msi), msi->data, &pirq, false, 0, true);
    if (rc) {
        return rc;
    }

    if (pirq < 0) {
        XEN_PT_ERR(&s->dev, "Invalid pirq number: %d.\n", pirq);
        return -1;
    }

    msi->pirq = pirq;
    XEN_PT_LOG(&s->dev, "MSI mapped with pirq %d.\n", pirq);

    return 0;
}

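/* Rebind the mapped MSI pirq to the guest's current address/data pair. */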
int xen_pt_msi_update(XenPCIPassthroughState *s)
{
    XenPTMSI *msi = s->msi;
    return msi_msix_update(s, msi_addr64(msi), msi->data, msi->pirq,
                           false, 0, &msi->pirq);
}

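/* Disable MSI, tear down its binding and mapping, and reset the state. */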
void xen_pt_msi_disable(XenPCIPassthroughState *s)
{
    XenPTMSI *msi = s->msi;

    if (!msi) {
        return;
    }

    xen_pt_msi_set_enable(s, false);

    msi_msix_disable(s, msi_addr64(msi), msi->data, msi->pirq, false,
                     msi->initialized);

    /* clear MSI info */
    msi->flags = 0;
    msi->mapped = false;
    msi->pirq = XEN_PT_UNASSIGNED_PIRQ;
}

/*
 * MSI-X virtualization functions
 */

static int msix_set_enable(XenPCIPassthroughState *s, bool enabled)
{
    XEN_PT_LOG(&s->dev, "%s MSI-X.\n", enabled ? "enabling" : "disabling");

    if (!s->msix) {
        return -1;
    }

    return msi_msix_enable(s, s->msix->ctrl_offset, PCI_MSIX_FLAGS_ENABLE,
                           enabled);
}

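/*
 * Map and (re)bind a single MSI-X entry if the guest has updated its
 * address or data since the last bind.
 */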
static int xen_pt_msix_update_one(XenPCIPassthroughState *s, int entry_nr)
{
    XenPTMSIXEntry *entry = NULL;
    int pirq;
    int rc;

    if (entry_nr < 0 || entry_nr >= s->msix->total_entries) {
        return -EINVAL;
    }

    entry = &s->msix->msix_entry[entry_nr];

    if (!entry->updated) {
        return 0;
    }

    pirq = entry->pirq;

    rc = msi_msix_setup(s, entry->addr, entry->data, &pirq, true, entry_nr,
                        entry->pirq == XEN_PT_UNASSIGNED_PIRQ);
    if (rc) {
        return rc;
    }
    if (entry->pirq == XEN_PT_UNASSIGNED_PIRQ) {
        entry->pirq = pirq;
    }

    rc = msi_msix_update(s, entry->addr, entry->data, pirq, true,
                         entry_nr, &entry->pirq);

    if (!rc) {
        entry->updated = false;
    }

    return rc;
}

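/* Propagate pending guest updates for every MSI-X entry. */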
int xen_pt_msix_update(XenPCIPassthroughState *s)
{
    XenPTMSIX *msix = s->msix;
    int i;

    for (i = 0; i < msix->total_entries; i++) {
        xen_pt_msix_update_one(s, i);
    }

    return 0;
}

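/* Disable MSI-X and tear down the binding and mapping of every entry. */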
void xen_pt_msix_disable(XenPCIPassthroughState *s)
{
    int i = 0;

    msix_set_enable(s, false);

    for (i = 0; i < s->msix->total_entries; i++) {
        XenPTMSIXEntry *entry = &s->msix->msix_entry[i];

        msi_msix_disable(s, entry->addr, entry->data, entry->pirq, true, true);

        /* clear MSI-X info */
        entry->pirq = XEN_PT_UNASSIGNED_PIRQ;
        entry->updated = false;
    }
}

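/*
 * Re-establish MSI-X bindings after the BAR that holds the table has been
 * remapped: unbind every mapped entry, mark it as updated and rebind it
 * via xen_pt_msix_update().
 */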
int xen_pt_msix_update_remap(XenPCIPassthroughState *s, int bar_index)
{
    XenPTMSIXEntry *entry;
    int i, ret;

    if (!(s->msix && s->msix->bar_index == bar_index)) {
        return 0;
    }

    for (i = 0; i < s->msix->total_entries; i++) {
        entry = &s->msix->msix_entry[i];
        if (entry->pirq != XEN_PT_UNASSIGNED_PIRQ) {
            ret = xc_domain_unbind_pt_irq(xen_xc, xen_domid, entry->pirq,
                                          PT_IRQ_TYPE_MSI, 0, 0, 0, 0);
            if (ret) {
                XEN_PT_ERR(&s->dev, "unbind MSI-X pirq %d failed\n",
                           entry->pirq);
            }
            entry->updated = true;
        }
    }
    return xen_pt_msix_update(s);
}

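/*
 * Accessors for a virtual MSI-X table entry; 'offset' is the byte offset
 * of the field within the entry.
 */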
static uint32_t get_entry_value(XenPTMSIXEntry *e, int offset)
{
    switch (offset) {
    case PCI_MSIX_ENTRY_LOWER_ADDR:
        return e->addr & UINT32_MAX;
    case PCI_MSIX_ENTRY_UPPER_ADDR:
        return e->addr >> 32;
    case PCI_MSIX_ENTRY_DATA:
        return e->data;
    case PCI_MSIX_ENTRY_VECTOR_CTRL:
        return e->vector_ctrl;
    default:
        return 0;
    }
}

static void set_entry_value(XenPTMSIXEntry *e, int offset, uint32_t val)
{
    switch (offset) {
    case PCI_MSIX_ENTRY_LOWER_ADDR:
        e->addr = (e->addr & ((uint64_t)UINT32_MAX << 32)) | val;
        break;
    case PCI_MSIX_ENTRY_UPPER_ADDR:
        e->addr = (uint64_t)val << 32 | (e->addr & UINT32_MAX);
        break;
    case PCI_MSIX_ENTRY_DATA:
        e->data = val;
        break;
    case PCI_MSIX_ENTRY_VECTOR_CTRL:
        e->vector_ctrl = val;
        break;
    }
}

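/*
 * MMIO write handler for the emulated MSI-X table.  Address/data fields
 * may only change while the entry is masked or MSI-X is disabled;
 * clearing the mask bit of an updated entry triggers a rebind.
 */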
static void pci_msix_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    XenPCIPassthroughState *s = opaque;
    XenPTMSIX *msix = s->msix;
    XenPTMSIXEntry *entry;
    int entry_nr, offset;

    entry_nr = addr / PCI_MSIX_ENTRY_SIZE;
    if (entry_nr < 0 || entry_nr >= msix->total_entries) {
        XEN_PT_ERR(&s->dev, "requested MSI-X entry %i is invalid!\n", entry_nr);
        return;
    }
    entry = &msix->msix_entry[entry_nr];
    offset = addr % PCI_MSIX_ENTRY_SIZE;

    if (offset != PCI_MSIX_ENTRY_VECTOR_CTRL) {
        const volatile uint32_t *vec_ctrl;

        if (get_entry_value(entry, offset) == val) {
            return;
        }

        /*
         * If Xen intercepts the mask bit access, entry->vec_ctrl may not be
         * up-to-date. Read from hardware directly.
         */
        vec_ctrl = s->msix->phys_iomem_base + entry_nr * PCI_MSIX_ENTRY_SIZE
            + PCI_MSIX_ENTRY_VECTOR_CTRL;

        if (msix->enabled && !(*vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)) {
            XEN_PT_ERR(&s->dev, "Can't update MSI-X entry %d since MSI-X is"
                       " already enabled.\n", entry_nr);
            return;
        }

        entry->updated = true;
    }

    set_entry_value(entry, offset, val);

    if (offset == PCI_MSIX_ENTRY_VECTOR_CTRL) {
        if (msix->enabled && !(val & PCI_MSIX_ENTRY_CTRL_MASKBIT)) {
            xen_pt_msix_update_one(s, entry_nr);
        }
    }
}

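/*
 * MMIO read handler for the emulated MSI-X table.  Reads within the table
 * return the virtual entry; reads beyond it fall through to the physical
 * Pending Bit Array.
 */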
static uint64_t pci_msix_read(void *opaque, hwaddr addr,
                              unsigned size)
{
    XenPCIPassthroughState *s = opaque;
    XenPTMSIX *msix = s->msix;
    int entry_nr, offset;

    entry_nr = addr / PCI_MSIX_ENTRY_SIZE;
    if (entry_nr < 0) {
        XEN_PT_ERR(&s->dev, "requested MSI-X entry %i is invalid!\n", entry_nr);
        return 0;
    }

    offset = addr % PCI_MSIX_ENTRY_SIZE;

    if (addr < msix->total_entries * PCI_MSIX_ENTRY_SIZE) {
        return get_entry_value(&msix->msix_entry[entry_nr], offset);
    } else {
        /* Pending Bit Array (PBA) */
        return *(uint32_t *)(msix->phys_iomem_base + addr);
    }
}

static const MemoryRegionOps pci_msix_ops = {
    .read = pci_msix_read,
    .write = pci_msix_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
    },
};

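/*
 * Parse the MSI-X capability at config-space offset 'base', allocate the
 * per-entry state, map the physical table through /dev/mem and register
 * the emulated table as an MMIO region on top of the corresponding BAR.
 */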
int xen_pt_msix_init(XenPCIPassthroughState *s, uint32_t base)
{
    uint8_t id = 0;
    uint16_t control = 0;
    uint32_t table_off = 0;
    int i, total_entries, bar_index;
    XenHostPCIDevice *hd = &s->real_device;
    PCIDevice *d = &s->dev;
    int fd = -1;
    XenPTMSIX *msix = NULL;
    int rc = 0;

    rc = xen_host_pci_get_byte(hd, base + PCI_CAP_LIST_ID, &id);
    if (rc) {
        return rc;
    }

    if (id != PCI_CAP_ID_MSIX) {
        XEN_PT_ERR(d, "Invalid id %#x base %#x\n", id, base);
        return -1;
    }

    xen_host_pci_get_word(hd, base + PCI_MSIX_FLAGS, &control);
    total_entries = control & PCI_MSIX_FLAGS_QSIZE;
    total_entries += 1;

    s->msix = g_malloc0(sizeof (XenPTMSIX)
                        + total_entries * sizeof (XenPTMSIXEntry));
    msix = s->msix;

    msix->total_entries = total_entries;
    for (i = 0; i < total_entries; i++) {
        msix->msix_entry[i].pirq = XEN_PT_UNASSIGNED_PIRQ;
    }

    memory_region_init_io(&msix->mmio, &pci_msix_ops, s, "xen-pci-pt-msix",
                          (total_entries * PCI_MSIX_ENTRY_SIZE
                           + XC_PAGE_SIZE - 1)
                          & XC_PAGE_MASK);

    xen_host_pci_get_long(hd, base + PCI_MSIX_TABLE, &table_off);
    bar_index = msix->bar_index = table_off & PCI_MSIX_FLAGS_BIRMASK;
    table_off = table_off & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->table_base = s->real_device.io_regions[bar_index].base_addr;
    XEN_PT_LOG(d, "get MSI-X table BAR base 0x%"PRIx64"\n", msix->table_base);

    fd = open("/dev/mem", O_RDWR);
    if (fd == -1) {
        rc = -errno;
        XEN_PT_ERR(d, "Can't open /dev/mem: %s\n", strerror(errno));
        goto error_out;
    }
    XEN_PT_LOG(d, "table_off = %#x, total_entries = %d\n",
               table_off, total_entries);
    msix->table_offset_adjust = table_off & 0x0fff;
    msix->phys_iomem_base =
        mmap(NULL,
             total_entries * PCI_MSIX_ENTRY_SIZE + msix->table_offset_adjust,
             PROT_READ,
             MAP_SHARED | MAP_LOCKED,
             fd,
             msix->table_base + table_off - msix->table_offset_adjust);
    close(fd);
    if (msix->phys_iomem_base == MAP_FAILED) {
        rc = -errno;
        XEN_PT_ERR(d, "Can't map physical MSI-X table: %s\n", strerror(errno));
        goto error_out;
    }
    msix->phys_iomem_base = (char *)msix->phys_iomem_base
        + msix->table_offset_adjust;

    XEN_PT_LOG(d, "mapping physical MSI-X table to %p\n",
               msix->phys_iomem_base);

    memory_region_add_subregion_overlap(&s->bar[bar_index], table_off,
                                        &msix->mmio,
                                        2); /* Priority: pci default + 1 */

    return 0;

error_out:
    memory_region_destroy(&msix->mmio);
    g_free(s->msix);
    s->msix = NULL;
    return rc;
}

void xen_pt_msix_delete(XenPCIPassthroughState *s)
{
    XenPTMSIX *msix = s->msix;

    if (!msix) {
        return;
    }

    /* unmap the MSI-X memory mapped register area */
    if (msix->phys_iomem_base) {
        XEN_PT_LOG(&s->dev, "unmapping physical MSI-X table from %p\n",
                   msix->phys_iomem_base);
        munmap(msix->phys_iomem_base, msix->total_entries * PCI_MSIX_ENTRY_SIZE
               + msix->table_offset_adjust);
    }

    memory_region_del_subregion(&s->bar[msix->bar_index], &msix->mmio);
    memory_region_destroy(&msix->mmio);

    g_free(s->msix);
    s->msix = NULL;
}