/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU-based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "virtio_pci_common.h"

static bool force_legacy = false;

#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
module_param(force_legacy, bool, 0444);
MODULE_PARM_DESC(force_legacy,
		 "Force legacy mode for transitional virtio 1 devices");
#endif

/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i;

	if (vp_dev->intx_enabled)
		synchronize_irq(vp_dev->pci_dev->irq);

	for (i = 0; i < vp_dev->msix_vectors; ++i)
		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
}
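
/*
 * Note: the version-specific reset implementations call the helper above
 * after writing the device status, so that no vring or config interrupt
 * handler can still be running against a device that is being torn down.
 */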

/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq)
{
	/* we write the queue's selector into the notification register to
	 * signal the other end */
	iowrite16(vq->index, (void __iomem *)vq->priv);
	return true;
}
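
/*
 * The contract assumed above: the version-specific setup_vq() stores the
 * device's notify doorbell address in vq->priv. A simplified sketch of
 * what the legacy transport does:
 *
 *	vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
 */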

/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;

	virtio_config_changed(&vp_dev->vdev);
	return IRQ_HANDLED;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	struct virtio_pci_vq_info *info;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_for_each_entry(info, &vp_dev->virtqueues, node) {
		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	return ret;
}

/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	u8 isr;

	/* reading the ISR has the effect of also clearing it so it's very
	 * important to save off the value. */
	isr = ioread8(vp_dev->isr);

	/* It's definitely not us if the ISR was not high */
	if (!isr)
		return IRQ_NONE;

	/* Configuration change? Tell driver if it wants to know. */
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		vp_config_changed(irq, opaque);

	return vp_vring_interrupt(irq, opaque);
}
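
/*
 * This handler is registered with IRQF_SHARED (see vp_find_vqs_intx()
 * below), so returning IRQ_NONE when the ISR is clear is what allows
 * other devices sharing the same INTx line to be serviced.
 */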

static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
				   bool per_vq_vectors, struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	const char *name = dev_name(&vp_dev->vdev.dev);
	unsigned flags = PCI_IRQ_MSIX;
	unsigned i, v;
	int err = -ENOMEM;

	vp_dev->msix_vectors = nvectors;

	vp_dev->msix_names = kmalloc_array(nvectors,
					   sizeof(*vp_dev->msix_names),
					   GFP_KERNEL);
	if (!vp_dev->msix_names)
		goto error;
	vp_dev->msix_affinity_masks
		= kcalloc(nvectors, sizeof(*vp_dev->msix_affinity_masks),
			  GFP_KERNEL);
	if (!vp_dev->msix_affinity_masks)
		goto error;
	for (i = 0; i < nvectors; ++i)
		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
				       GFP_KERNEL))
			goto error;

	if (desc) {
		flags |= PCI_IRQ_AFFINITY;
		desc->pre_vectors++; /* virtio config vector */
	}

	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
					     nvectors, flags, desc);
	if (err < 0)
		goto error;
	vp_dev->msix_enabled = 1;

	/* Set the vector used for configuration */
	v = vp_dev->msix_used_vectors;
	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
		 "%s-config", name);
	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
			  vp_config_changed, 0, vp_dev->msix_names[v],
			  vp_dev);
	if (err)
		goto error;
	++vp_dev->msix_used_vectors;

	v = vp_dev->config_vector(vp_dev, v);
	/* Verify we had enough resources to assign the vector */
	if (v == VIRTIO_MSI_NO_VECTOR) {
		err = -EBUSY;
		goto error;
	}

	if (!per_vq_vectors) {
		/* Shared vector for all VQs */
		v = vp_dev->msix_used_vectors;
		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
			 "%s-virtqueues", name);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
				  vp_dev);
		if (err)
			goto error;
		++vp_dev->msix_used_vectors;
	}
	return 0;
error:
	return err;
}
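
/*
 * Resulting vector layout, for reference: vector 0 is always the
 * configuration-change interrupt (VP_MSIX_CONFIG_VECTOR). With
 * per_vq_vectors, each virtqueue that has a callback then gets its own
 * vector (1, 2, ...); otherwise every virtqueue shares vector 1
 * (VP_MSIX_VQ_VECTOR).
 */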

static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name, bool ctx, u16 msix_vec)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
	struct virtqueue *vq;
	unsigned long flags;

	/* fill out our structure that represents an active queue */
	if (!info)
		return ERR_PTR(-ENOMEM);

	vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx,
			      msix_vec);
	if (IS_ERR(vq)) {
		kfree(info);
		return vq;
	}

	info->vq = vq;
	if (callback) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_add(&info->node, &vp_dev->virtqueues);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	} else {
		INIT_LIST_HEAD(&info->node);
	}

	vp_dev->vqs[index] = info;
	return vq;
}

static void vp_del_vq(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	vp_dev->del_vq(info);
	kfree(info);
}

/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq, *n;
	int i;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		if (vp_dev->per_vq_vectors) {
			int v = vp_dev->vqs[vq->index]->msix_vector;

			if (v != VIRTIO_MSI_NO_VECTOR) {
				int irq = pci_irq_vector(vp_dev->pci_dev, v);

				irq_set_affinity_hint(irq, NULL);
				free_irq(irq, vq);
			}
		}
		vp_del_vq(vq);
	}
	vp_dev->per_vq_vectors = false;

	if (vp_dev->intx_enabled) {
		free_irq(vp_dev->pci_dev->irq, vp_dev);
		vp_dev->intx_enabled = 0;
	}

	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
		free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);

	for (i = 0; i < vp_dev->msix_vectors; i++)
		if (vp_dev->msix_affinity_masks[i])
			free_cpumask_var(vp_dev->msix_affinity_masks[i]);

	if (vp_dev->msix_enabled) {
		/* Disable the vector used for configuration */
		vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);

		pci_free_irq_vectors(vp_dev->pci_dev);
		vp_dev->msix_enabled = 0;
	}

	vp_dev->msix_vectors = 0;
	vp_dev->msix_used_vectors = 0;
	kfree(vp_dev->msix_names);
	vp_dev->msix_names = NULL;
	kfree(vp_dev->msix_affinity_masks);
	vp_dev->msix_affinity_masks = NULL;
	kfree(vp_dev->vqs);
	vp_dev->vqs = NULL;
}

static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], bool per_vq_vectors,
		const bool *ctx,
		struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u16 msix_vec;
	int i, err, nvectors, allocated_vectors;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	if (per_vq_vectors) {
		/* Best option: one for change interrupt, one per vq. */
		nvectors = 1;
		for (i = 0; i < nvqs; ++i)
			if (callbacks[i])
				++nvectors;
	} else {
		/* Second best: one for change, shared for all vqs. */
		nvectors = 2;
	}

	err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
				      per_vq_vectors ? desc : NULL);
	if (err)
		goto error_find;

	vp_dev->per_vq_vectors = per_vq_vectors;
	allocated_vectors = vp_dev->msix_used_vectors;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		if (!callbacks[i])
			msix_vec = VIRTIO_MSI_NO_VECTOR;
		else if (vp_dev->per_vq_vectors)
			msix_vec = allocated_vectors++;
		else
			msix_vec = VP_MSIX_VQ_VECTOR;
		vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
				     ctx ? ctx[i] : false,
				     msix_vec);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto error_find;
		}

		if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
			continue;

		/* allocate per-vq irq if available and necessary */
		snprintf(vp_dev->msix_names[msix_vec],
			 sizeof *vp_dev->msix_names,
			 "%s-%s",
			 dev_name(&vp_dev->vdev.dev), names[i]);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
				  vring_interrupt, 0,
				  vp_dev->msix_names[msix_vec],
				  vqs[i]);
		if (err)
			goto error_find;
	}
	return 0;

error_find:
	vp_del_vqs(vdev);
	return err;
}

static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], const bool *ctx)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i, err;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
			  dev_name(&vdev->dev), vp_dev);
	if (err)
		goto out_del_vqs;

	vp_dev->intx_enabled = 1;
	vp_dev->per_vq_vectors = false;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}
		vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
				     ctx ? ctx[i] : false,
				     VIRTIO_MSI_NO_VECTOR);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto out_del_vqs;
		}
	}

	return 0;
out_del_vqs:
	vp_del_vqs(vdev);
	return err;
}

/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], const bool *ctx,
		struct irq_affinity *desc)
{
	int err;

	/* Try MSI-X with one vector per queue. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, ctx, desc);
	if (!err)
		return 0;
	/* Fallback: MSI-X with one vector for config, one shared for queues. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc);
	if (!err)
		return 0;
	/* Finally fall back to regular interrupts. */
	return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
}
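
/*
 * Illustrative caller (hypothetical "foo" driver, not part of this file):
 * device drivers reach this path through the virtio_find_vqs() wrapper
 * around config->find_vqs(), roughly:
 *
 *	vq_callback_t *cbs[] = { foo_recv_done, foo_xmit_done };
 *	static const char * const names[] = { "foo-rx", "foo-tx" };
 *	struct virtqueue *vqs[2];
 *
 *	err = virtio_find_vqs(vdev, 2, vqs, cbs, names, NULL);
 */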

const char *vp_bus_name(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return pci_name(vp_dev->pci_dev);
}

/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
	struct virtio_device *vdev = vq->vdev;
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	struct cpumask *mask;
	unsigned int irq;

	if (!vq->callback)
		return -EINVAL;

	if (vp_dev->msix_enabled) {
		mask = vp_dev->msix_affinity_masks[info->msix_vector];
		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
		if (!cpu_mask)
			irq_set_affinity_hint(irq, NULL);
		else {
			cpumask_copy(mask, cpu_mask);
			irq_set_affinity_hint(irq, mask);
		}
	}
	return 0;
}
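
/*
 * For example, virtio_net uses virtqueue_set_affinity() to spread its
 * rx/tx queue interrupts across CPUs; with a shared MSI-X vector or with
 * INTx the request is accepted but has no per-queue effect.
 */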

const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!vp_dev->per_vq_vectors ||
	    vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
		return NULL;

	return pci_irq_get_affinity(vp_dev->pci_dev,
				    vp_dev->vqs[index]->msix_vector);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = virtio_device_freeze(&vp_dev->vdev);

	if (!ret)
		pci_disable_device(pci_dev);
	return ret;
}

static int virtio_pci_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = pci_enable_device(pci_dev);
	if (ret)
		return ret;

	pci_set_master(pci_dev);
	return virtio_device_restore(&vp_dev->vdev);
}

static const struct dev_pm_ops virtio_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
};
#endif
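
/*
 * virtio_device_freeze()/virtio_device_restore() in the virtio core
 * invoke the individual device driver's freeze/restore callbacks and
 * redo feature negotiation on restore; this transport only has to manage
 * the PCI device state around them.
 */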

/* Qumranet donated their vendor ID for devices 0x1000 through 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
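
/*
 * Note: this table matches every device ID under the Qumranet/Red Hat
 * vendor; the version-specific probe routines (virtio_pci_legacy.c,
 * virtio_pci_modern.c) then verify that the ID is in the 0x1000-0x107f
 * range this driver actually owns and leave the rest alone.
 */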

static void virtio_pci_release_dev(struct device *_d)
{
	struct virtio_device *vdev = dev_to_virtio(_d);
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* As struct device is a kobject, it's not safe to
	 * free the memory (including the reference counter itself)
	 * until its release callback. */
	kfree(vp_dev);
}

static int virtio_pci_probe(struct pci_dev *pci_dev,
			    const struct pci_device_id *id)
{
	struct virtio_pci_device *vp_dev, *reg_dev = NULL;
	int rc;

	/* allocate our structure and fill it out */
	vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
	if (!vp_dev)
		return -ENOMEM;

	pci_set_drvdata(pci_dev, vp_dev);
	vp_dev->vdev.dev.parent = &pci_dev->dev;
	vp_dev->vdev.dev.release = virtio_pci_release_dev;
	vp_dev->pci_dev = pci_dev;
	INIT_LIST_HEAD(&vp_dev->virtqueues);
	spin_lock_init(&vp_dev->lock);

	/* enable the device */
	rc = pci_enable_device(pci_dev);
	if (rc)
		goto err_enable_device;

	if (force_legacy) {
		rc = virtio_pci_legacy_probe(vp_dev);
		/* Also try modern mode if we can't map BAR0 (no IO space). */
		if (rc == -ENODEV || rc == -ENOMEM)
			rc = virtio_pci_modern_probe(vp_dev);
		if (rc)
			goto err_probe;
	} else {
		rc = virtio_pci_modern_probe(vp_dev);
		if (rc == -ENODEV)
			rc = virtio_pci_legacy_probe(vp_dev);
		if (rc)
			goto err_probe;
	}

	pci_set_master(pci_dev);

	rc = register_virtio_device(&vp_dev->vdev);
	reg_dev = vp_dev;
	if (rc)
		goto err_register;

	return 0;

err_register:
	if (vp_dev->ioaddr)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);
err_probe:
	pci_disable_device(pci_dev);
err_enable_device:
	if (reg_dev)
		put_device(&vp_dev->vdev.dev);
	else
		kfree(vp_dev);
	return rc;
}

static void virtio_pci_remove(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct device *dev = get_device(&vp_dev->vdev.dev);

	pci_disable_sriov(pci_dev);

	unregister_virtio_device(&vp_dev->vdev);

	if (vp_dev->ioaddr)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);

	pci_disable_device(pci_dev);
	put_device(dev);
}

static int virtio_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct virtio_device *vdev = &vp_dev->vdev;
	int ret;

	if (!(vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_DRIVER_OK))
		return -EBUSY;

	if (!__virtio_test_bit(vdev, VIRTIO_F_SR_IOV))
		return -EINVAL;

	if (pci_vfs_assigned(pci_dev))
		return -EPERM;

	if (num_vfs == 0) {
		pci_disable_sriov(pci_dev);
		return 0;
	}

	ret = pci_enable_sriov(pci_dev, num_vfs);
	if (ret < 0)
		return ret;

	return num_vfs;
}
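
/*
 * Illustrative usage (the device address is made up): VFs are enabled
 * from userspace through the standard PCI sysfs interface, which ends up
 * calling the function above:
 *
 *	echo 4 > /sys/bus/pci/devices/0000:00:04.0/sriov_numvfs
 */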

static struct pci_driver virtio_pci_driver = {
	.name		= "virtio-pci",
	.id_table	= virtio_pci_id_table,
	.probe		= virtio_pci_probe,
	.remove		= virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm	= &virtio_pci_pm_ops,
#endif
	.sriov_configure = virtio_pci_sriov_configure,
};

module_pci_driver(virtio_pci_driver);

MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");