#ifndef QEMU_HW_XEN_COMMON_H
#define QEMU_HW_XEN_COMMON_H

/*
 * If we have a new enough libxenctrl then we do not want/need these compat
 * interfaces, despite what the user-supplied cflags might say. They
 * must be undefined before including xenctrl.h.
 */
#undef XC_WANT_COMPAT_EVTCHN_API
#undef XC_WANT_COMPAT_GNTTAB_API
#undef XC_WANT_COMPAT_MAP_FOREIGN_API

#include <xenctrl.h>
#include <xenstore.h>
#include <xen/io/xenbus.h>

#include "hw/hw.h"
#include "hw/xen/xen.h"
#include "hw/pci/pci.h"
#include "qemu/queue.h"
#include "hw/xen/trace.h"

extern xc_interface *xen_xc;

/*
 * We don't support Xen prior to 4.2.0.
 */
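
/*
 * The rest of this header maps the stable Xen library interfaces
 * (xenevtchn, xengnttab, xenforeignmemory, and later xendevicemodel and
 * xentoolcore) onto whatever the configured
 * CONFIG_XEN_CTRL_INTERFACE_VERSION actually provides, so the rest of QEMU
 * can use the modern names unconditionally.
 */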

/* Xen 4.2 through 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701

typedef xc_interface xenforeignmemory_handle;
typedef xc_evtchn xenevtchn_handle;
typedef xc_gnttab xengnttab_handle;

#define xenevtchn_open(l, f) xc_evtchn_open(l, f)
#define xenevtchn_close(h) xc_evtchn_close(h)
#define xenevtchn_fd(h) xc_evtchn_fd(h)
#define xenevtchn_pending(h) xc_evtchn_pending(h)
#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)

#define xengnttab_open(l, f) xc_gnttab_open(l, f)
#define xengnttab_close(h) xc_gnttab_close(h)
#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
#define xengnttab_map_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_grant_refs(h, c, d, r, p)
#define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_domain_grant_refs(h, c, d, r, p)

#define xenforeignmemory_open(l, f) xen_xc
#define xenforeignmemory_close(h)
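
/*
 * Pre-4.7.1 libxenctrl has no xenforeignmemory library; emulate its map
 * call with the old xc_map_foreign_* interfaces.  A non-NULL err array
 * selects the bulk variant, which reports per-page mapping errors.
 */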
static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
                                         int prot, size_t pages,
                                         const xen_pfn_t arr[/*pages*/],
                                         int err[/*pages*/])
{
    if (err) {
        return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
    } else {
        return xc_map_foreign_pages(h, dom, prot, arr, pages);
    }
}

#define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE)

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */

#include <xenevtchn.h>
#include <xengnttab.h>
#include <xenforeignmemory.h>

#endif

extern xenforeignmemory_handle *xen_fmem;
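
/*
 * Xen 4.9 added a stable libxendevicemodel.  On older versions the device
 * model operations go through libxenctrl, so the handle is simply an alias
 * for xc_interface.
 */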
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900

typedef xc_interface xendevicemodel_handle;

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */

#undef XC_WANT_COMPAT_DEVICEMODEL_API
#include <xendevicemodel.h>

#endif
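
/*
 * Fallbacks for interfaces that first appeared in the Xen 4.11 libraries:
 * xendevicemodel_relocate_memory(), xendevicemodel_pin_memory_cacheattr()
 * and mappable ioreq-server resources.
 */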
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100

static inline int xendevicemodel_relocate_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint32_t size, uint64_t src_gfn,
    uint64_t dst_gfn)
{
    uint32_t i;
    int rc;

    for (i = 0; i < size; i++) {
        unsigned long idx = src_gfn + i;
        xen_pfn_t gpfn = dst_gfn + i;

        rc = xc_domain_add_to_physmap(xen_xc, domid, XENMAPSPACE_gmfn, idx,
                                      gpfn);
        if (rc) {
            return rc;
        }
    }

    return 0;
}

static inline int xendevicemodel_pin_memory_cacheattr(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t start, uint64_t end,
    uint32_t type)
{
    return xc_domain_pin_memory_cacheattr(xen_xc, domid, start, end, type);
}

typedef void xenforeignmemory_resource_handle;

#define XENMEM_resource_ioreq_server 0

#define XENMEM_resource_ioreq_server_frame_bufioreq 0
#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))
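
/*
 * Resource mapping is not available before Xen 4.11; fail with EOPNOTSUPP
 * so that callers can fall back to mapping the ioreq server pages by GFN.
 */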
static inline xenforeignmemory_resource_handle *xenforeignmemory_map_resource(
    xenforeignmemory_handle *fmem, domid_t domid, unsigned int type,
    unsigned int id, unsigned long frame, unsigned long nr_frames,
    void **paddr, int prot, int flags)
{
    errno = EOPNOTSUPP;
    return NULL;
}

#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100 */
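
/*
 * Xen 4.10 additions: xenforeignmemory_map2(), xentoolcore_restrict_all()
 * and xendevicemodel_shutdown().  XEN_COMPAT_PHYSMAP is defined only when
 * building against these older libraries, so consumers can keep their
 * legacy physmap handling.
 */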
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000

#define XEN_COMPAT_PHYSMAP
static inline void *xenforeignmemory_map2(xenforeignmemory_handle *h,
                                          uint32_t dom, void *addr,
                                          int prot, int flags, size_t pages,
                                          const xen_pfn_t arr[/*pages*/],
                                          int err[/*pages*/])
{
    assert(addr == NULL && flags == 0);
    return xenforeignmemory_map(h, dom, prot, pages, arr, err);
}

static inline int xentoolcore_restrict_all(domid_t domid)
{
    errno = ENOTTY;
    return -1;
}

static inline int xendevicemodel_shutdown(xendevicemodel_handle *dmod,
                                          domid_t domid, unsigned int reason)
{
    errno = ENOTTY;
    return -1;
}

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 41000 */

#include <xentoolcore.h>

#endif
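
/*
 * Before Xen 4.9 there is no libxendevicemodel; forward the device model
 * operations to the old xc_hvm_* wrappers.  xendevicemodel_open() simply
 * hands back the global xen_xc handle.
 */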
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900

static inline xendevicemodel_handle *xendevicemodel_open(
    struct xentoollog_logger *logger, unsigned int open_flags)
{
    return xen_xc;
}

#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500
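
/*
 * The xc_hvm_* ioreq server calls only exist on Xen 4.5 and newer, hence
 * the nested version check.
 */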
static inline int xendevicemodel_create_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
    ioservid_t *id)
{
    return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq, id);
}

static inline int xendevicemodel_get_ioreq_server_info(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
    evtchn_port_t *bufioreq_port)
{
    return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn,
                                        bufioreq_pfn, bufioreq_port);
}

static inline int xendevicemodel_map_io_range_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio,
                                               start, end);
}

static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio,
                                                   start, end);
}

static inline int xendevicemodel_map_pcidev_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment,
                                             bus, device, function);
}

static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment,
                                                 bus, device, function);
}

static inline int xendevicemodel_destroy_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
{
    return xc_hvm_destroy_ioreq_server(dmod, domid, id);
}

static inline int xendevicemodel_set_ioreq_server_state(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
{
    return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
}

#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */

static inline int xendevicemodel_set_pci_intx_level(
    xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
    uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
{
    return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device,
                                     intx, level);
}

static inline int xendevicemodel_set_isa_irq_level(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
    unsigned int level)
{
    return xc_hvm_set_isa_irq_level(dmod, domid, irq, level);
}

static inline int xendevicemodel_set_pci_link_route(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
{
    return xc_hvm_set_pci_link_route(dmod, domid, link, irq);
}

static inline int xendevicemodel_inject_msi(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
    uint32_t msi_data)
{
    return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data);
}

static inline int xendevicemodel_track_dirty_vram(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr, unsigned long *dirty_bitmap)
{
    return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr,
                                   dirty_bitmap);
}

static inline int xendevicemodel_modified_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr)
{
    return xc_hvm_modified_memory(dmod, domid, first_pfn, nr);
}

static inline int xendevicemodel_set_mem_type(
    xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
    uint64_t first_pfn, uint32_t nr)
{
    return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr);
}

#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900 */

extern xendevicemodel_handle *xen_dmod;
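
/*
 * Thin wrappers used by the rest of QEMU; they always go through the
 * (possibly emulated) xendevicemodel handle above.
 */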
static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
                                   uint64_t first_pfn, uint32_t nr)
{
    return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn, nr);
}

static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
                                         uint8_t bus, uint8_t device,
                                         uint8_t intx, unsigned int level)
{
    return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus,
                                             device, intx, level);
}

static inline int xen_set_pci_link_route(domid_t domid, uint8_t link,
                                         uint8_t irq)
{
    return xendevicemodel_set_pci_link_route(xen_dmod, domid, link, irq);
}

static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
                                 uint32_t msi_data)
{
    return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data);
}

static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
                                        unsigned int level)
{
    return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level);
}

static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
                                       uint32_t nr, unsigned long *bitmap)
{
    return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr,
                                           bitmap);
}

static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
                                      uint32_t nr)
{
    return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr);
}
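
/*
 * Restrict QEMU's Xen library handles to the given domain.  With pre-4.10
 * libraries the xentoolcore_restrict_all() stub above always fails with
 * ENOTTY; the outcome is traced either way.
 */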
static inline int xen_restrict(domid_t domid)
{
    int rc;

    rc = xentoolcore_restrict_all(domid);
    trace_xen_domid_restrict(rc ? errno : 0);
    return rc;
}

void destroy_hvm_domain(bool reboot);

/* shutdown/destroy current domain because of an error */
void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
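
/*
 * HVM_PARAM_VMPORT_REGS_PFN is only defined by Xen versions that support
 * the VMware vmport emulation; report -ENOSYS otherwise.
 */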
#ifdef HVM_PARAM_VMPORT_REGS_PFN
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    int rc;
    uint64_t value;

    rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
    if (rc >= 0) {
        *vmport_regs_pfn = (xen_pfn_t) value;
    }
    return rc;
}
#else
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
#endif

/* Xen before 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600

#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
#endif

#endif
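
/*
 * Read the parameters of the default (legacy) ioreq server from the HVM
 * params.  Used directly on Xen before 4.5, and as a fallback whenever a
 * dedicated ioreq server cannot be created.
 */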
static inline int xen_get_default_ioreq_server_info(domid_t dom,
                                                    xen_pfn_t *ioreq_pfn,
                                                    xen_pfn_t *bufioreq_pfn,
                                                    evtchn_port_t
                                                    *bufioreq_evtchn)
{
    unsigned long param;
    int rc;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
        return -1;
    }

    *ioreq_pfn = param;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
        return -1;
    }

    *bufioreq_pfn = param;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }

    *bufioreq_evtchn = param;

    return 0;
}
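
/*
 * Xen before 4.5 has no ioreq server API: the map/unmap helpers below are
 * no-ops and only the default ioreq server exists.
 */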
/* Xen before 4.5 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500

#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

#define IOREQ_TYPE_PCI_CONFIG 2

typedef uint16_t ioservid_t;

static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}

static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}

static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}

static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}

static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}

static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}

static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
}

static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
}

static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                             bufioreq_pfn,
                                             bufioreq_evtchn);
}

static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    return 0;
}
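
/*
 * Xen 4.5 and newer: use a dedicated ioreq server when we can.  If creating
 * one fails, use_default_ioreq_server is set and the per-server map/unmap
 * helpers silently turn into no-ops.
 */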
/* Xen 4.5 */
#else

static bool use_default_ioreq_server;

static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1,
                                                start_addr, end_addr);
}

static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    1, start_addr, end_addr);
}

static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                start_addr, end_addr);
}

static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    0, start_addr, end_addr);
}

static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_pcidev(ioservid, pci_dev_bus_num(pci_dev),
                         PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                              pci_dev_bus_num(pci_dev),
                                              PCI_SLOT(pci_dev->devfn),
                                              PCI_FUNC(pci_dev->devfn));
}

static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_pcidev(ioservid, pci_dev_bus_num(pci_dev),
                           PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                  pci_dev_bus_num(pci_dev),
                                                  PCI_SLOT(pci_dev->devfn),
                                                  PCI_FUNC(pci_dev->devfn));
}

static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
    int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
                                                HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                                                ioservid);

    if (rc == 0) {
        trace_xen_ioreq_server_create(*ioservid);
        return;
    }

    *ioservid = 0;
    use_default_ioreq_server = true;
    trace_xen_default_ioreq_server();
}

static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_ioreq_server_destroy(ioservid);
    xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid);
}

static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    if (use_default_ioreq_server) {
        return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                                 bufioreq_pfn,
                                                 bufioreq_evtchn);
    }

    return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid,
                                                ioreq_pfn, bufioreq_pfn,
                                                bufioreq_evtchn);
}

static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    if (use_default_ioreq_server) {
        return 0;
    }

    trace_xen_ioreq_server_state(ioservid, enable);
    return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid,
                                                 enable);
}

#endif
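
/*
 * xc_domain_create() grew an extra configuration argument in Xen 4.7; hide
 * the difference from the PV domain builder.
 */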
#ifdef CONFIG_XEN_PV_DOMAIN_BUILD
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40700
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid);
}
#else
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid, NULL);
}
#endif
#endif

/* Xen before 4.8 */

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800
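
/*
 * xengnttab_grant_copy() was added in Xen 4.8.  Provide the segment layout
 * plus a stub that fails with -ENOSYS so callers can tell the feature is
 * missing.
 */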
struct xengnttab_grant_copy_segment {
    union xengnttab_copy_ptr {
        void *virt;
        struct {
            uint32_t ref;
            uint16_t offset;
            uint16_t domid;
        } foreign;
    } source, dest;
    uint16_t len;
    uint16_t flags;
    int16_t status;
};

typedef struct xengnttab_grant_copy_segment xengnttab_grant_copy_segment_t;

static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
                                       xengnttab_grant_copy_segment_t *segs)
{
    return -ENOSYS;
}
#endif

#endif /* QEMU_HW_XEN_COMMON_H */