 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "virtio-blk.h"
#include "virtio-net.h"
#include "qemu-error.h"
/* from Linux's linux/virtio_pci.h */

/* A 32-bit r/o bitmask of the features supported by the host */
#define VIRTIO_PCI_HOST_FEATURES        0

/* A 32-bit r/w bitmask of features activated by the guest */
#define VIRTIO_PCI_GUEST_FEATURES       4

/* A 32-bit r/w PFN for the currently selected queue */
#define VIRTIO_PCI_QUEUE_PFN            8

/* A 16-bit r/o queue size for the currently selected queue */
#define VIRTIO_PCI_QUEUE_NUM            12

/* A 16-bit r/w queue selector */
#define VIRTIO_PCI_QUEUE_SEL            14

/* A 16-bit r/w queue notifier */
#define VIRTIO_PCI_QUEUE_NOTIFY         16

/* An 8-bit device status register. */
#define VIRTIO_PCI_STATUS               18

/* An 8-bit r/o interrupt status register.  Reading the value will return the
 * current contents of the ISR and will also clear it.  This is effectively
 * a read-and-acknowledge. */
#define VIRTIO_PCI_ISR                  19

/* MSI-X registers: only enabled if MSI-X is enabled. */
/* A 16-bit vector for configuration changes. */
#define VIRTIO_MSI_CONFIG_VECTOR        20
/* A 16-bit vector for selected queue notifications. */
#define VIRTIO_MSI_QUEUE_VECTOR         22

/* Config space size */
#define VIRTIO_PCI_CONFIG_NOMSI         20
#define VIRTIO_PCI_CONFIG_MSI           24
#define VIRTIO_PCI_REGION_SIZE(dev)     (msix_present(dev) ? \
                                         VIRTIO_PCI_CONFIG_MSI : \
                                         VIRTIO_PCI_CONFIG_NOMSI)

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG(dev)          (msix_enabled(dev) ? \
                                         VIRTIO_PCI_CONFIG_MSI : \
                                         VIRTIO_PCI_CONFIG_NOMSI)
/* Virtio ABI version; if we increment this, we break the guest driver. */
#define VIRTIO_PCI_ABI_VERSION          0

/* How many bits to shift the physical queue address written to QUEUE_PFN.
 * 12 is historical, and due to the x86 page size. */
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT     12
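/*
 * Illustrative sketch, not part of the original file: how a guest driver
 * would program a virtqueue through the legacy register block above.  The
 * guest_outw()/guest_outl()/guest_inw() helpers and 'iobase' are hypothetical
 * stand-ins for the guest's own port I/O primitives; only the offsets and the
 * PFN shift come from the defines in this file.
 */
#if 0
static void example_guest_queue_setup(uint32_t iobase, uint64_t ring_pa)
{
    uint16_t num;

    guest_outw(iobase + VIRTIO_PCI_QUEUE_SEL, 0);        /* select queue 0 */
    num = guest_inw(iobase + VIRTIO_PCI_QUEUE_NUM);      /* read its entry count */
    (void)num;
    /* e.g. ring_pa 0x12345000 >> VIRTIO_PCI_QUEUE_ADDR_SHIFT == 0x12345 */
    guest_outl(iobase + VIRTIO_PCI_QUEUE_PFN,
               (uint32_t)(ring_pa >> VIRTIO_PCI_QUEUE_ADDR_SHIFT));
}
#endif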
/* Flags track per-device state like workarounds for quirks in older guests. */
#define VIRTIO_PCI_FLAG_BUS_MASTER_BUG  (1 << 0)

/* Performance improves when virtqueue kick processing is decoupled from the
 * vcpu thread using ioeventfd for some devices. */
#define VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT 1
#define VIRTIO_PCI_FLAG_USE_IOEVENTFD   (1 << VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT)
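/* When the flag is clear, a guest write to VIRTIO_PCI_QUEUE_NOTIFY is handled
 * synchronously in the vcpu thread via virtio_ioport_write() below.  When it
 * is set (and KVM supports it), kvm_set_ioeventfd_pio_word() turns that write
 * into an eventfd signal instead, and virtio_pci_host_notifier_read() then
 * runs the queue from the I/O thread. */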
/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 */
#define wmb() do { } while (0)
    uint32_t host_features;

    /* Max. number of ports we can have for a virtio-serial device */
    uint32_t max_virtserial_ports;

    bool ioeventfd_disabled;
    bool ioeventfd_started;
static void virtio_pci_notify(void *opaque, uint16_t vector)
    VirtIOPCIProxy *proxy = opaque;
    if (msix_enabled(&proxy->pci_dev))
        msix_notify(&proxy->pci_dev, vector);
    qemu_set_irq(proxy->pci_dev.irq[0], proxy->vdev->isr & 1);
static void virtio_pci_save_config(void *opaque, QEMUFile *f)
    VirtIOPCIProxy *proxy = opaque;
    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, proxy->vdev->config_vector);

static void virtio_pci_save_queue(void *opaque, int n, QEMUFile *f)
    VirtIOPCIProxy *proxy = opaque;
    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, virtio_queue_vector(proxy->vdev, n));

static int virtio_pci_load_config(void *opaque, QEMUFile *f)
    VirtIOPCIProxy *proxy = opaque;
    ret = pci_device_load(&proxy->pci_dev, f);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &proxy->vdev->config_vector);
        proxy->vdev->config_vector = VIRTIO_NO_VECTOR;
    if (proxy->vdev->config_vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, proxy->vdev->config_vector);
    /* If the guest has bus master disabled but the device is already in the
       ready state, we are dealing with a buggy guest OS. */
    if ((proxy->vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
        proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
static int virtio_pci_load_queue(void *opaque, int n, QEMUFile *f)
    VirtIOPCIProxy *proxy = opaque;
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
        vector = VIRTIO_NO_VECTOR;
    virtio_queue_set_vector(proxy->vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vector);
static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
    r = event_notifier_init(notifier, 1);
    r = kvm_set_ioeventfd_pio_word(event_notifier_get_fd(notifier),
                                   proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY,
    event_notifier_cleanup(notifier);
    r = kvm_set_ioeventfd_pio_word(event_notifier_get_fd(notifier),
                                   proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY,
    /* Handle the race condition where the guest kicked and we deassigned
     * before we got around to handling the kick.
     */
    if (event_notifier_test_and_clear(notifier)) {
        virtio_queue_notify_vq(vq);
    event_notifier_cleanup(notifier);
static void virtio_pci_host_notifier_read(void *opaque)
    VirtQueue *vq = opaque;
    EventNotifier *n = virtio_queue_get_host_notifier(vq);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);

static void virtio_pci_set_host_notifier_fd_handler(VirtIOPCIProxy *proxy,
    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
    qemu_set_fd_handler(event_notifier_get_fd(notifier),
                        virtio_pci_host_notifier_read, NULL, vq);
    qemu_set_fd_handler(event_notifier_get_fd(notifier),
static int virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
    if (!(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) ||
        proxy->ioeventfd_disabled ||
        proxy->ioeventfd_started) {
    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(proxy->vdev, n)) {
        r = virtio_pci_set_host_notifier_internal(proxy, n, true);
        virtio_pci_set_host_notifier_fd_handler(proxy, n, true);
    proxy->ioeventfd_started = true;
        if (!virtio_queue_get_num(proxy->vdev, n)) {
        virtio_pci_set_host_notifier_fd_handler(proxy, n, false);
        virtio_pci_set_host_notifier_internal(proxy, n, false);
    proxy->ioeventfd_started = false;
    proxy->ioeventfd_disabled = true;
static int virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
    if (!proxy->ioeventfd_started) {
    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(proxy->vdev, n)) {
        virtio_pci_set_host_notifier_fd_handler(proxy, n, false);
        virtio_pci_set_host_notifier_internal(proxy, n, false);
    proxy->ioeventfd_started = false;

static void virtio_pci_reset(DeviceState *d)
    VirtIOPCIProxy *proxy = container_of(d, VirtIOPCIProxy, pci_dev.qdev);
    virtio_pci_stop_ioeventfd(proxy);
    virtio_reset(proxy->vdev);
    msix_reset(&proxy->pci_dev);
    proxy->flags &= ~VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = proxy->vdev;
    target_phys_addr_t pa;

    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly?  We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            if (vdev->bad_features)
                val = proxy->host_features & vdev->bad_features(vdev);
        if (vdev->set_features)
            vdev->set_features(vdev, val);
        vdev->guest_features = val;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (target_phys_addr_t)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        virtio_pci_stop_ioeventfd(proxy);
        virtio_reset(proxy->vdev);
        msix_unuse_all_vectors(&proxy->pci_dev);
        virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_PCI_QUEUE_MAX)
            vdev->queue_sel = val;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        virtio_queue_notify(vdev, val);
    case VIRTIO_PCI_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        virtio_set_status(vdev, val & 0xFF);
        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        if (vdev->status == 0) {
            virtio_reset(proxy->vdev);
            msix_unuse_all_vectors(&proxy->pci_dev);
        /* Linux before 2.6.34 sets the device as OK without enabling
           the PCI device bus master bit.  In this case we need to disable
           some safety checks. */
        if ((val & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
    case VIRTIO_MSI_CONFIG_VECTOR:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for the guest to discover that an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        vdev->config_vector = val;
    case VIRTIO_MSI_QUEUE_VECTOR:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for the guest to discover that an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        error_report("%s: unexpected address 0x%x value 0x%x",
                     __func__, addr, val);
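/*
 * Illustrative sketch, not part of the original file: the write handler above
 * is driven by a guest initialization sequence along these lines (the port
 * I/O helpers and 'iobase' are hypothetical stand-ins):
 *
 *     features = guest_inl(iobase + VIRTIO_PCI_HOST_FEATURES);
 *     guest_outl(iobase + VIRTIO_PCI_GUEST_FEATURES, features & wanted);
 *     guest_outb(iobase + VIRTIO_PCI_STATUS,
 *                VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER);
 *     ... queue setup via VIRTIO_PCI_QUEUE_SEL / VIRTIO_PCI_QUEUE_PFN ...
 *     guest_outb(iobase + VIRTIO_PCI_STATUS,
 *                VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER |
 *                VIRTIO_CONFIG_S_DRIVER_OK);
 *
 * Writing 0 to VIRTIO_PCI_STATUS resets the device (the vdev->status == 0
 * path above).
 */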
static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
    VirtIODevice *vdev = proxy->vdev;
    uint32_t ret = 0xFFFFFFFF;

    case VIRTIO_PCI_HOST_FEATURES:
        ret = proxy->host_features;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
    case VIRTIO_PCI_STATUS:
        /* reading from the ISR also clears it. */
        qemu_set_irq(proxy->pci_dev.irq[0], 0);
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
static uint32_t virtio_pci_config_readb(void *opaque, uint32_t addr)
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    return virtio_ioport_read(proxy, addr);
    return virtio_config_readb(proxy->vdev, addr);

static uint32_t virtio_pci_config_readw(void *opaque, uint32_t addr)
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    return virtio_ioport_read(proxy, addr);
    return virtio_config_readw(proxy->vdev, addr);

static uint32_t virtio_pci_config_readl(void *opaque, uint32_t addr)
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    return virtio_ioport_read(proxy, addr);
    return virtio_config_readl(proxy->vdev, addr);

static void virtio_pci_config_writeb(void *opaque, uint32_t addr, uint32_t val)
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    virtio_ioport_write(proxy, addr, val);
    virtio_config_writeb(proxy->vdev, addr, val);

static void virtio_pci_config_writew(void *opaque, uint32_t addr, uint32_t val)
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    virtio_ioport_write(proxy, addr, val);
    virtio_config_writew(proxy->vdev, addr, val);

static void virtio_pci_config_writel(void *opaque, uint32_t addr, uint32_t val)
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    virtio_ioport_write(proxy, addr, val);
    virtio_config_writel(proxy->vdev, addr, val);
static void virtio_map(PCIDevice *pci_dev, int region_num,
                       pcibus_t addr, pcibus_t size, int type)
    VirtIOPCIProxy *proxy = container_of(pci_dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = proxy->vdev;
    unsigned config_len = VIRTIO_PCI_REGION_SIZE(pci_dev) + vdev->config_len;

    register_ioport_write(addr, config_len, 1, virtio_pci_config_writeb, proxy);
    register_ioport_write(addr, config_len, 2, virtio_pci_config_writew, proxy);
    register_ioport_write(addr, config_len, 4, virtio_pci_config_writel, proxy);
    register_ioport_read(addr, config_len, 1, virtio_pci_config_readb, proxy);
    register_ioport_read(addr, config_len, 2, virtio_pci_config_readw, proxy);
    register_ioport_read(addr, config_len, 4, virtio_pci_config_readl, proxy);

    if (vdev->config_len)
        vdev->get_config(vdev, vdev->config);
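/* BAR 0 layout registered above: the first VIRTIO_PCI_CONFIG(dev) bytes are
 * the common registers defined at the top of this file (including the two
 * MSI-X vector registers when MSI-X is enabled); the device-specific
 * configuration space follows immediately after, and the read/write handlers
 * above forward accesses in that range to virtio_config_*(). */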
static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    if (PCI_COMMAND == address) {
        if (!(val & PCI_COMMAND_MASTER)) {
            if (!(proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG)) {
                virtio_pci_stop_ioeventfd(proxy);
                virtio_set_status(proxy->vdev,
                                  proxy->vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);

    pci_default_write_config(pci_dev, address, val, len);
    msix_write_config(pci_dev, address, val, len);
static unsigned virtio_pci_get_features(void *opaque)
    VirtIOPCIProxy *proxy = opaque;
    return proxy->host_features;

static void virtio_pci_guest_notifier_read(void *opaque)
    VirtQueue *vq = opaque;
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    if (event_notifier_test_and_clear(n)) {

static int virtio_pci_set_guest_notifier(void *opaque, int n, bool assign)
    VirtIOPCIProxy *proxy = opaque;
    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
    int r = event_notifier_init(notifier, 0);
    qemu_set_fd_handler(event_notifier_get_fd(notifier),
                        virtio_pci_guest_notifier_read, NULL, vq);
    qemu_set_fd_handler(event_notifier_get_fd(notifier),
    event_notifier_cleanup(notifier);

static int virtio_pci_set_guest_notifiers(void *opaque, bool assign)
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = proxy->vdev;

    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
        r = virtio_pci_set_guest_notifier(opaque, n, assign);

    /* We get here on assignment failure.  Recover by undoing for VQs 0 .. n. */
    virtio_pci_set_guest_notifier(opaque, n, !assign);
static int virtio_pci_set_host_notifier(void *opaque, int n, bool assign)
    VirtIOPCIProxy *proxy = opaque;

    /* Stop using ioeventfd for virtqueue kick if the device starts using host
     * notifiers.  This makes it easy to avoid stepping on each other's toes.
     */
    proxy->ioeventfd_disabled = assign;
    virtio_pci_stop_ioeventfd(proxy);
    /* We don't need to start here: the backend currently only stops on a
     * status change away from OK, on reset, vmstop and such.  If we do add
     * code to start here, we need to check vmstate, device state etc. */
    return virtio_pci_set_host_notifier_internal(proxy, n, assign);
static void virtio_pci_vmstate_change(void *opaque, bool running)
    VirtIOPCIProxy *proxy = opaque;
    virtio_pci_start_ioeventfd(proxy);
    virtio_pci_stop_ioeventfd(proxy);

static const VirtIOBindings virtio_pci_bindings = {
    .notify = virtio_pci_notify,
    .save_config = virtio_pci_save_config,
    .load_config = virtio_pci_load_config,
    .save_queue = virtio_pci_save_queue,
    .load_queue = virtio_pci_load_queue,
    .get_features = virtio_pci_get_features,
    .set_host_notifier = virtio_pci_set_host_notifier,
    .set_guest_notifiers = virtio_pci_set_guest_notifiers,
    .vmstate_change = virtio_pci_vmstate_change,
static void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev,
                            uint16_t vendor, uint16_t device,
                            uint16_t class_code, uint8_t pif)
    config = proxy->pci_dev.config;
    pci_config_set_vendor_id(config, vendor);
    pci_config_set_device_id(config, device);
    config[0x08] = VIRTIO_PCI_ABI_VERSION;
    pci_config_set_class(config, class_code);
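    /* Offsets 0x2c-0x2f are the PCI subsystem vendor ID and subsystem device
       ID; the legacy virtio-pci layout uses the subsystem device ID to carry
       the virtio device type (vdev->device_id). */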
    config[0x2c] = vendor & 0xFF;
    config[0x2d] = (vendor >> 8) & 0xFF;
    config[0x2e] = vdev->device_id & 0xFF;
    config[0x2f] = (vdev->device_id >> 8) & 0xFF;

    if (vdev->nvectors && !msix_init(&proxy->pci_dev, vdev->nvectors, 1, 0)) {
        pci_register_bar(&proxy->pci_dev, 1,
                         msix_bar_size(&proxy->pci_dev),
                         PCI_BASE_ADDRESS_SPACE_MEMORY,

    proxy->pci_dev.config_write = virtio_write_config;

    size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev) + vdev->config_len;
    size = 1 << qemu_fls(size);

    pci_register_bar(&proxy->pci_dev, 0, size, PCI_BASE_ADDRESS_SPACE_IO,

    if (!kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;

    virtio_bind_device(vdev, &virtio_pci_bindings, proxy);
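    /* Transport-level feature bits: VIRTIO_F_NOTIFY_ON_EMPTY and
     * VIRTIO_F_BAD_FEATURE are offered for every virtio-pci device; the
     * device-specific get_features() hook below then adds the bits it
     * actually supports before the result is advertised to the guest
     * through VIRTIO_PCI_HOST_FEATURES. */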
    proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;
    proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE;
    proxy->host_features = vdev->get_features(vdev, proxy->host_features);
static int virtio_blk_init_pci(PCIDevice *pci_dev)
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    if (proxy->class_code != PCI_CLASS_STORAGE_SCSI &&
        proxy->class_code != PCI_CLASS_STORAGE_OTHER)
        proxy->class_code = PCI_CLASS_STORAGE_SCSI;

    vdev = virtio_blk_init(&pci_dev->qdev, &proxy->block);
    vdev->nvectors = proxy->nvectors;
    virtio_init_pci(proxy, vdev,
                    PCI_VENDOR_ID_REDHAT_QUMRANET,
                    PCI_DEVICE_ID_VIRTIO_BLOCK,
                    proxy->class_code, 0x00);
    /* make the actual value visible */
    proxy->nvectors = vdev->nvectors;

static int virtio_exit_pci(PCIDevice *pci_dev)
    return msix_uninit(pci_dev);

static int virtio_blk_exit_pci(PCIDevice *pci_dev)
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_pci_stop_ioeventfd(proxy);
    virtio_blk_exit(proxy->vdev);
    blockdev_mark_auto_del(proxy->block.bs);
    return virtio_exit_pci(pci_dev);
static int virtio_serial_init_pci(PCIDevice *pci_dev)
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    if (proxy->class_code != PCI_CLASS_COMMUNICATION_OTHER &&
        proxy->class_code != PCI_CLASS_DISPLAY_OTHER &&    /* qemu 0.10 */
        proxy->class_code != PCI_CLASS_OTHERS)             /* qemu-kvm */
        proxy->class_code = PCI_CLASS_COMMUNICATION_OTHER;

    vdev = virtio_serial_init(&pci_dev->qdev, proxy->max_virtserial_ports);
    vdev->nvectors = proxy->nvectors == DEV_NVECTORS_UNSPECIFIED
                     ? proxy->max_virtserial_ports + 1
    virtio_init_pci(proxy, vdev,
                    PCI_VENDOR_ID_REDHAT_QUMRANET,
                    PCI_DEVICE_ID_VIRTIO_CONSOLE,
                    proxy->class_code, 0x00);
    proxy->nvectors = vdev->nvectors;

static int virtio_serial_exit_pci(PCIDevice *pci_dev)
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_serial_exit(proxy->vdev);
    return virtio_exit_pci(pci_dev);
static int virtio_net_init_pci(PCIDevice *pci_dev)
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    vdev = virtio_net_init(&pci_dev->qdev, &proxy->nic, &proxy->net);
    vdev->nvectors = proxy->nvectors;
    virtio_init_pci(proxy, vdev,
                    PCI_VENDOR_ID_REDHAT_QUMRANET,
                    PCI_DEVICE_ID_VIRTIO_NET,
                    PCI_CLASS_NETWORK_ETHERNET,

    /* make the actual value visible */
    proxy->nvectors = vdev->nvectors;

static int virtio_net_exit_pci(PCIDevice *pci_dev)
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_pci_stop_ioeventfd(proxy);
    virtio_net_exit(proxy->vdev);
    return virtio_exit_pci(pci_dev);
static int virtio_balloon_init_pci(PCIDevice *pci_dev)
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    vdev = virtio_balloon_init(&pci_dev->qdev);
    virtio_init_pci(proxy, vdev,
                    PCI_VENDOR_ID_REDHAT_QUMRANET,
                    PCI_DEVICE_ID_VIRTIO_BALLOON,
                    PCI_CLASS_MEMORY_RAM,

static int virtio_9p_init_pci(PCIDevice *pci_dev)
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    vdev = virtio_9p_init(&pci_dev->qdev, &proxy->fsconf);
    vdev->nvectors = proxy->nvectors;
    virtio_init_pci(proxy, vdev,
                    PCI_VENDOR_ID_REDHAT_QUMRANET,

    /* make the actual value visible */
    proxy->nvectors = vdev->nvectors;
static PCIDeviceInfo virtio_info[] = {
        .qdev.name = "virtio-blk-pci",
        .qdev.alias = "virtio-blk",
        .qdev.size = sizeof(VirtIOPCIProxy),
        .init = virtio_blk_init_pci,
        .exit = virtio_blk_exit_pci,
        .qdev.props = (Property[]) {
            DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
            DEFINE_BLOCK_PROPERTIES(VirtIOPCIProxy, block),
            DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                            VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
            DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
            DEFINE_VIRTIO_BLK_FEATURES(VirtIOPCIProxy, host_features),
            DEFINE_PROP_END_OF_LIST(),
        .qdev.reset = virtio_pci_reset,

        .qdev.name = "virtio-net-pci",
        .qdev.size = sizeof(VirtIOPCIProxy),
        .init = virtio_net_init_pci,
        .exit = virtio_net_exit_pci,
        .romfile = "pxe-virtio.bin",
        .qdev.props = (Property[]) {
            DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                            VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false),
            DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
            DEFINE_VIRTIO_NET_FEATURES(VirtIOPCIProxy, host_features),
            DEFINE_NIC_PROPERTIES(VirtIOPCIProxy, nic),
            DEFINE_PROP_UINT32("x-txtimer", VirtIOPCIProxy,
                               net.txtimer, TX_TIMER_INTERVAL),
            DEFINE_PROP_INT32("x-txburst", VirtIOPCIProxy,
                              net.txburst, TX_BURST),
            DEFINE_PROP_STRING("tx", VirtIOPCIProxy, net.tx),
            DEFINE_PROP_END_OF_LIST(),
        .qdev.reset = virtio_pci_reset,

        .qdev.name = "virtio-serial-pci",
        .qdev.alias = "virtio-serial",
        .qdev.size = sizeof(VirtIOPCIProxy),
        .init = virtio_serial_init_pci,
        .exit = virtio_serial_exit_pci,
        .qdev.props = (Property[]) {
            DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
                               DEV_NVECTORS_UNSPECIFIED),
            DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
            DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
            DEFINE_PROP_UINT32("max_ports", VirtIOPCIProxy, max_virtserial_ports,
            DEFINE_PROP_END_OF_LIST(),
        .qdev.reset = virtio_pci_reset,

        .qdev.name = "virtio-balloon-pci",
        .qdev.size = sizeof(VirtIOPCIProxy),
        .init = virtio_balloon_init_pci,
        .exit = virtio_exit_pci,
        .qdev.props = (Property[]) {
            DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
            DEFINE_PROP_END_OF_LIST(),
        .qdev.reset = virtio_pci_reset,

        .qdev.name = "virtio-9p-pci",
        .qdev.size = sizeof(VirtIOPCIProxy),
        .init = virtio_9p_init_pci,
        .qdev.props = (Property[]) {
            DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
            DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
            DEFINE_PROP_STRING("mount_tag", VirtIOPCIProxy, fsconf.tag),
            DEFINE_PROP_STRING("fsdev", VirtIOPCIProxy, fsconf.fsdev_id),
            DEFINE_PROP_END_OF_LIST(),
static void virtio_pci_register_devices(void)
    pci_qdev_register_many(virtio_info);

device_init(virtio_pci_register_devices)
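/*
 * Illustrative usage, not part of the original file: the qdev names and
 * properties registered above are what -device refers to on the QEMU command
 * line, e.g. (the drive/netdev ids are made up):
 *
 *   -drive file=disk.img,if=none,id=hd0 \
 *   -device virtio-blk-pci,drive=hd0,ioeventfd=on,vectors=2
 *
 *   -netdev tap,id=net0 \
 *   -device virtio-net-pci,netdev=net0,ioeventfd=off
 *
 * "ioeventfd" and "vectors" map to the DEFINE_PROP_BIT/DEFINE_PROP_UINT32
 * entries above; "drive" and "netdev" are assumed to come from
 * DEFINE_BLOCK_PROPERTIES and DEFINE_NIC_PROPERTIES respectively.
 */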