/*
 * PCI Express I/O Virtualization (IOV) support.
 *   Single Root IOV 1.0
 *   Address Translation Service 1.0
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/pci-ats.h>
#include "pci.h"

#define VIRTFN_ID_LEN 16

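/*
 * Routing IDs: per the SR-IOV capability, VF "id" of a PF sits at
 *
 *	PF routing ID + First VF Offset + VF Stride * id
 *
 * The helpers below split that sum into a bus-number delta (upper 8 bits)
 * and a devfn (lower 8 bits).  Illustrative example with made-up values,
 * not taken from any particular device: a PF at 01:00.0 with offset 0x80
 * and stride 2 puts VF 0 at 01:10.0 and VF 64 at 02:00.0.
 */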
static inline u8 virtfn_bus(struct pci_dev *dev, int id)
{
	return dev->bus->number + ((dev->devfn + dev->sriov->offset +
				    dev->sriov->stride * id) >> 8);
}

static inline u8 virtfn_devfn(struct pci_dev *dev, int id)
{
	return (dev->devfn + dev->sriov->offset +
		dev->sriov->stride * id) & 0xff;
}

static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
{
	struct pci_bus *child;

	if (bus->number == busnr)
		return bus;

	child = pci_find_bus(pci_domain_nr(bus), busnr);
	if (child)
		return child;

	child = pci_add_new_bus(bus, NULL, busnr);
	if (!child)
		return NULL;

	pci_bus_insert_busn_res(child, busnr, busnr);

	return child;
}

static void virtfn_remove_bus(struct pci_bus *physbus, struct pci_bus *virtbus)
{
	if (physbus != virtbus && list_empty(&virtbus->devices))
		pci_remove_bus(virtbus);
}

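/*
 * virtfn_add() creates the pci_dev for VF "id": it computes the VF's
 * bus/devfn, carves the VF's BARs out of the PF's IOV resources (each IOV
 * resource spans the per-VF BAR size times total_VFs), registers the new
 * device, and links PF and VF in sysfs ("virtfnN" and "physfn").
 */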
static int virtfn_add(struct pci_dev *dev, int id, int reset)
{
	int i;
	int rc = -ENOMEM;
	u64 size;
	char buf[VIRTFN_ID_LEN];
	struct pci_dev *virtfn;
	struct resource *res;
	struct pci_sriov *iov = dev->sriov;
	struct pci_bus *bus;

	mutex_lock(&iov->dev->sriov->lock);
	bus = virtfn_add_bus(dev->bus, virtfn_bus(dev, id));
	if (!bus)
		goto failed;

	virtfn = pci_alloc_dev(bus);
	if (!virtfn)
		goto failed0;

	virtfn->devfn = virtfn_devfn(dev, id);
	virtfn->vendor = dev->vendor;
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
	pci_setup_device(virtfn);
	virtfn->dev.parent = dev->dev.parent;
	virtfn->physfn = pci_dev_get(dev);
	virtfn->is_virtfn = 1;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = dev->resource + PCI_IOV_RESOURCES + i;
		if (!res->parent)
			continue;
		virtfn->resource[i].name = pci_name(virtfn);
		virtfn->resource[i].flags = res->flags;
		size = resource_size(res);
		do_div(size, iov->total_VFs);
		virtfn->resource[i].start = res->start + size * id;
		virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
		rc = request_resource(res, &virtfn->resource[i]);
		BUG_ON(rc);
	}

	if (reset)
		__pci_reset_function(virtfn);

	pci_device_add(virtfn, virtfn->bus);
	mutex_unlock(&iov->dev->sriov->lock);

	rc = pci_bus_add_device(virtfn);
	sprintf(buf, "virtfn%u", id);
	rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
	if (rc)
		goto failed1;
	rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
	if (rc)
		goto failed2;

	kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);

	return 0;

failed2:
	sysfs_remove_link(&dev->dev.kobj, buf);
failed1:
	pci_dev_put(dev);
	mutex_lock(&iov->dev->sriov->lock);
	pci_stop_and_remove_bus_device(virtfn);
failed0:
	virtfn_remove_bus(dev->bus, bus);
failed:
	mutex_unlock(&iov->dev->sriov->lock);

	return rc;
}

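/*
 * virtfn_remove() undoes virtfn_add(): it looks up the VF's pci_dev,
 * removes the sysfs links, stops and removes the device, and drops the
 * references taken on both the VF and the PF.
 */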
static void virtfn_remove(struct pci_dev *dev, int id, int reset)
{
	char buf[VIRTFN_ID_LEN];
	struct pci_dev *virtfn;
	struct pci_sriov *iov = dev->sriov;

	virtfn = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus),
					     virtfn_bus(dev, id),
					     virtfn_devfn(dev, id));
	if (!virtfn)
		return;

	if (reset) {
		device_release_driver(&virtfn->dev);
		__pci_reset_function(virtfn);
	}

	sprintf(buf, "virtfn%u", id);
	sysfs_remove_link(&dev->dev.kobj, buf);
	/*
	 * pci_stop_dev() could have been called for this virtfn already,
	 * so the directory for the virtfn may have been removed before.
	 * Double check to avoid spurious sysfs warnings.
	 */
	if (virtfn->dev.kobj.sd)
		sysfs_remove_link(&virtfn->dev.kobj, "physfn");

	mutex_lock(&iov->dev->sriov->lock);
	pci_stop_and_remove_bus_device(virtfn);
	virtfn_remove_bus(dev->bus, virtfn->bus);
	mutex_unlock(&iov->dev->sriov->lock);

	/* balance pci_get_domain_bus_and_slot() */
	pci_dev_put(virtfn);
	pci_dev_put(dev);
}

static int sriov_migration(struct pci_dev *dev)
{
	u16 status;
	struct pci_sriov *iov = dev->sriov;

	if (!iov->num_VFs)
		return 0;

	if (!(iov->cap & PCI_SRIOV_CAP_VFM))
		return 0;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_STATUS, &status);
	if (!(status & PCI_SRIOV_STATUS_VFM))
		return 0;

	schedule_work(&iov->mtask);

	return 1;
}

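/*
 * The migration work scans the VF Migration State Array: a VF found in
 * MigrateIn (MI) is acknowledged by writing Available (AV) and, if that
 * state sticks, hot-added with a reset; a VF in MigrateOut (MO) is removed,
 * acknowledged with Unavailable (UA), and re-added without a reset if the
 * device reports it Available again.  Finally the VF Migration Status bit
 * is cleared.
 */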
static void sriov_migration_task(struct work_struct *work)
{
	int i;
	u8 state;
	u16 status;
	struct pci_sriov *iov = container_of(work, struct pci_sriov, mtask);

	for (i = iov->initial_VFs; i < iov->num_VFs; i++) {
		state = readb(iov->mstate + i);
		if (state == PCI_SRIOV_VFM_MI) {
			writeb(PCI_SRIOV_VFM_AV, iov->mstate + i);
			state = readb(iov->mstate + i);
			if (state == PCI_SRIOV_VFM_AV)
				virtfn_add(iov->self, i, 1);
		} else if (state == PCI_SRIOV_VFM_MO) {
			virtfn_remove(iov->self, i, 1);
			writeb(PCI_SRIOV_VFM_UA, iov->mstate + i);
			state = readb(iov->mstate + i);
			if (state == PCI_SRIOV_VFM_AV)
				virtfn_add(iov->self, i, 0);
		}
	}

	pci_read_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, &status);
	status &= ~PCI_SRIOV_STATUS_VFM;
	pci_write_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, status);
}

static int sriov_enable_migration(struct pci_dev *dev, int nr_virtfn)
{
	int bir;
	u32 table;
	resource_size_t pa;
	struct pci_sriov *iov = dev->sriov;

	if (nr_virtfn <= iov->initial_VFs)
		return 0;

	pci_read_config_dword(dev, iov->pos + PCI_SRIOV_VFM, &table);
	bir = PCI_SRIOV_VFM_BIR(table);
	if (bir > PCI_STD_RESOURCE_END)
		return -EIO;

	table = PCI_SRIOV_VFM_OFFSET(table);
	if (table + nr_virtfn > pci_resource_len(dev, bir))
		return -EIO;

	pa = pci_resource_start(dev, bir) + table;
	iov->mstate = ioremap(pa, nr_virtfn);
	if (!iov->mstate)
		return -ENOMEM;

	INIT_WORK(&iov->mtask, sriov_migration_task);

	iov->ctrl |= PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR;
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);

	return 0;
}

static void sriov_disable_migration(struct pci_dev *dev)
{
	struct pci_sriov *iov = dev->sriov;

	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);

	cancel_work_sync(&iov->mtask);
	iounmap(iov->mstate);
}

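/*
 * sriov_enable() implements the enable sequence: validate nr_virtfn against
 * InitialVFs/TotalVFs, check that the IOV BARs and the VF bus numbers are
 * available, program NumVFs, set VF Enable and VF Memory Space Enable, wait
 * 100 ms for the VFs to become ready, and then create the virtfn devices.
 */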
static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
	int rc;
	int i, j;
	int nres;
	u16 offset, stride, initial;
	struct resource *res;
	struct pci_dev *pdev;
	struct pci_sriov *iov = dev->sriov;
	int bars = 0;

	if (!nr_virtfn)
		return 0;

	if (iov->num_VFs)
		return -EINVAL;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
	if (initial > iov->total_VFs ||
	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total_VFs)))
		return -EIO;

	if (nr_virtfn < 0 || nr_virtfn > iov->total_VFs ||
	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
		return -EINVAL;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
	if (!offset || (nr_virtfn > 1 && !stride))
		return -EIO;

	nres = 0;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		bars |= (1 << (i + PCI_IOV_RESOURCES));
		res = dev->resource + PCI_IOV_RESOURCES + i;
		if (res->parent)
			nres++;
	}
	if (nres != iov->nres) {
		dev_err(&dev->dev, "not enough MMIO resources for SR-IOV\n");
		return -ENOMEM;
	}

	iov->offset = offset;
	iov->stride = stride;

	if (virtfn_bus(dev, nr_virtfn - 1) > dev->bus->busn_res.end) {
		dev_err(&dev->dev, "SR-IOV: bus number out of range\n");
		return -ENOMEM;
	}

	if (pci_enable_resources(dev, bars)) {
		dev_err(&dev->dev, "SR-IOV: IOV BARS not allocated\n");
		return -ENOMEM;
	}

	if (iov->link != dev->devfn) {
		pdev = pci_get_slot(dev->bus, iov->link);
		if (!pdev)
			return -ENODEV;

		if (!pdev->is_physfn) {
			pci_dev_put(pdev);
			return -ENODEV;
		}

		rc = sysfs_create_link(&dev->dev.kobj,
					&pdev->dev.kobj, "dep_link");
		pci_dev_put(pdev);
		if (rc)
			return rc;
	}

	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	msleep(100);	/* SR-IOV spec: wait 100 ms after setting VF Enable */
	pci_cfg_access_unlock(dev);

	iov->initial_VFs = initial;
	if (nr_virtfn < initial)
		initial = nr_virtfn;

	for (i = 0; i < initial; i++) {
		rc = virtfn_add(dev, i, 0);
		if (rc)
			goto failed;
	}

	if (iov->cap & PCI_SRIOV_CAP_VFM) {
		rc = sriov_enable_migration(dev, nr_virtfn);
		if (rc)
			goto failed;
	}

	kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
	iov->num_VFs = nr_virtfn;

	return 0;

failed:
	for (j = 0; j < i; j++)
		virtfn_remove(dev, j, 0);

	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, 0);
	ssleep(1);
	pci_cfg_access_unlock(dev);

	if (iov->link != dev->devfn)
		sysfs_remove_link(&dev->dev.kobj, "dep_link");

	return rc;
}

static void sriov_disable(struct pci_dev *dev)
{
	int i;
	struct pci_sriov *iov = dev->sriov;

	if (!iov->num_VFs)
		return;

	if (iov->cap & PCI_SRIOV_CAP_VFM)
		sriov_disable_migration(dev);

	for (i = 0; i < iov->num_VFs; i++)
		virtfn_remove(dev, i, 0);

	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	ssleep(1);
	pci_cfg_access_unlock(dev);

	if (iov->link != dev->devfn)
		sysfs_remove_link(&dev->dev.kobj, "dep_link");

	iov->num_VFs = 0;
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, 0);
}

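/*
 * sriov_init() parses the SR-IOV capability of a candidate PF: it validates
 * offset/stride and the supported page sizes, reads each IOV BAR and scales
 * it by TotalVFs, and allocates the struct pci_sriov bookkeeping that hangs
 * off dev->sriov.
 */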
static int sriov_init(struct pci_dev *dev, int pos)
{
	int i;
	int rc;
	int nres;
	u32 pgsz;
	u16 ctrl, total, offset, stride;
	struct pci_sriov *iov;
	struct resource *res;
	struct pci_dev *pdev;

	if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END &&
	    pci_pcie_type(dev) != PCI_EXP_TYPE_ENDPOINT)
		return -ENODEV;

	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE) {
		pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
		ssleep(1);
	}

	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
	if (!total)
		return 0;

	ctrl = 0;
	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev->is_physfn)
			goto found;

	pdev = NULL;
	if (pci_ari_enabled(dev->bus))
		ctrl |= PCI_SRIOV_CTRL_ARI;

found:
	pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
	if (!offset || (total > 1 && !stride))
		return -EIO;

	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
	i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
	pgsz &= ~((1 << i) - 1);
	if (!pgsz)
		return -EIO;

	pgsz &= ~(pgsz - 1);
	pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);

	nres = 0;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = dev->resource + PCI_IOV_RESOURCES + i;
		/* skip the upper half of a 64-bit BAR */
		i += __pci_read_base(dev, pci_bar_unknown, res,
				     pos + PCI_SRIOV_BAR + i * 4);
		if (!res->flags)
			continue;
		if (resource_size(res) & (PAGE_SIZE - 1)) {
			rc = -EIO;
			goto failed;
		}
		res->end = res->start + resource_size(res) * total - 1;
		nres++;
	}

	iov = kzalloc(sizeof(*iov), GFP_KERNEL);
	if (!iov) {
		rc = -ENOMEM;
		goto failed;
	}

	iov->pos = pos;
	iov->nres = nres;
	iov->ctrl = ctrl;
	iov->total_VFs = total;
	iov->offset = offset;
	iov->stride = stride;
	iov->pgsz = pgsz;
	iov->self = dev;
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
	if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
		iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);

	if (pdev)
		iov->dev = pci_dev_get(pdev);
	else
		iov->dev = dev;

	mutex_init(&iov->lock);

	dev->sriov = iov;
	dev->is_physfn = 1;

	return 0;

failed:
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = dev->resource + PCI_IOV_RESOURCES + i;
		res->flags = 0;
	}

	return rc;
}

static void sriov_release(struct pci_dev *dev)
{
	BUG_ON(dev->sriov->num_VFs);

	if (dev != dev->sriov->dev)
		pci_dev_put(dev->sriov->dev);

	mutex_destroy(&dev->sriov->lock);

	kfree(dev->sriov);
	dev->sriov = NULL;
}

static void sriov_restore_state(struct pci_dev *dev)
{
	int i;
	u16 ctrl;
	struct pci_sriov *iov = dev->sriov;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE)
		return;

	for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++)
		pci_update_resource(dev, i);

	pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, iov->num_VFs);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
		msleep(100);
}

/**
 * pci_iov_init - initialize the IOV capability
 * @dev: the PCI device
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_iov_init(struct pci_dev *dev)
{
	int pos;

	if (!pci_is_pcie(dev))
		return -ENODEV;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (pos)
		return sriov_init(dev, pos);

	return -ENODEV;
}

/**
 * pci_iov_release - release resources used by the IOV capability
 * @dev: the PCI device
 */
void pci_iov_release(struct pci_dev *dev)
{
	if (dev->is_physfn)
		sriov_release(dev);
}

/**
 * pci_iov_resource_bar - get position of the SR-IOV BAR
 * @dev: the PCI device
 * @resno: the resource number
 * @type: the BAR type to be filled in
 *
 * Returns position of the BAR encapsulated in the SR-IOV capability.
 */
int pci_iov_resource_bar(struct pci_dev *dev, int resno,
			 enum pci_bar_type *type)
{
	if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
		return 0;

	BUG_ON(!dev->is_physfn);

	*type = pci_bar_unknown;

	return dev->sriov->pos + PCI_SRIOV_BAR +
		4 * (resno - PCI_IOV_RESOURCES);
}

/**
 * pci_sriov_resource_alignment - get resource alignment for VF BAR
 * @dev: the PCI device
 * @resno: the resource number
 *
 * Returns the alignment of the VF BAR found in the SR-IOV capability.
 * This is not the same as the resource size, which is defined as
 * the VF BAR size multiplied by the number of VFs.  The alignment
 * is just the VF BAR size.
 */
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
{
	struct resource tmp;
	enum pci_bar_type type;
	int reg = pci_iov_resource_bar(dev, resno, &type);

	if (!reg)
		return 0;

	__pci_read_base(dev, type, &tmp, reg);
	return resource_alignment(&tmp);
}

/**
 * pci_restore_iov_state - restore the state of the IOV capability
 * @dev: the PCI device
 */
void pci_restore_iov_state(struct pci_dev *dev)
{
	if (dev->is_physfn)
		sriov_restore_state(dev);
}

/**
 * pci_iov_bus_range - find bus range used by Virtual Function
 * @bus: the PCI bus
 *
 * Returns max number of buses (excluding the current one) used by Virtual
 * Functions.
 */
int pci_iov_bus_range(struct pci_bus *bus)
{
	int max = 0;
	u8 busnr;
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (!dev->is_physfn)
			continue;
		busnr = virtfn_bus(dev, dev->sriov->total_VFs - 1);
		if (busnr > max)
			max = busnr;
	}

	return max ? max - bus->number : 0;
}

/**
 * pci_enable_sriov - enable the SR-IOV capability
 * @dev: the PCI device
 * @nr_virtfn: number of virtual functions to enable
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{
	might_sleep();

	if (!dev->is_physfn)
		return -ENODEV;

	return sriov_enable(dev, nr_virtfn);
}
EXPORT_SYMBOL_GPL(pci_enable_sriov);

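/*
 * Typical call site (an illustrative sketch only; "foo" is a hypothetical
 * PF driver, not part of this file): VFs are usually enabled from the
 * driver's sriov_configure() callback and disabled when zero is requested:
 *
 *	static int foo_sriov_configure(struct pci_dev *pdev, int num_vfs)
 *	{
 *		int rc;
 *
 *		if (num_vfs == 0) {
 *			pci_disable_sriov(pdev);
 *			return 0;
 *		}
 *
 *		rc = pci_enable_sriov(pdev, num_vfs);
 *		return rc ? rc : num_vfs;
 *	}
 */
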
/**
 * pci_disable_sriov - disable the SR-IOV capability
 * @dev: the PCI device
 */
void pci_disable_sriov(struct pci_dev *dev)
{
	might_sleep();

	if (!dev->is_physfn)
		return;

	sriov_disable(dev);
}
EXPORT_SYMBOL_GPL(pci_disable_sriov);

/**
 * pci_sriov_migration - notify SR-IOV core of Virtual Function Migration
 * @dev: the PCI device
 *
 * Returns IRQ_HANDLED if the IRQ is handled, or IRQ_NONE if not.
 *
 * The Physical Function driver is responsible for registering an IRQ
 * handler using the VF Migration Interrupt Message Number, and for calling
 * this function when that interrupt is generated by the hardware.
 */
irqreturn_t pci_sriov_migration(struct pci_dev *dev)
{
	if (!dev->is_physfn)
		return IRQ_NONE;

	return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(pci_sriov_migration);

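/*
 * Illustrative sketch of the usage described above (the foo_* names are
 * hypothetical): a PF driver requests the vector reported as the VF
 * Migration Interrupt Message Number and forwards it here:
 *
 *	static irqreturn_t foo_vfm_irq(int irq, void *data)
 *	{
 *		struct pci_dev *pdev = data;
 *
 *		return pci_sriov_migration(pdev);
 *	}
 *
 *	rc = request_irq(vfm_irq, foo_vfm_irq, 0, "foo-vfm", pdev);
 */
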
/**
 * pci_num_vf - return number of VFs associated with a PF
 * @dev: the PCI device
 *
 * Returns number of VFs, or 0 if SR-IOV is not enabled.
 */
int pci_num_vf(struct pci_dev *dev)
{
	if (!dev->is_physfn)
		return 0;

	return dev->sriov->num_VFs;
}
EXPORT_SYMBOL_GPL(pci_num_vf);

/**
 * pci_vfs_assigned - returns number of VFs that are assigned to a guest
 * @dev: the PCI device
 *
 * Returns number of VFs belonging to this device that are assigned to a
 * guest.  If the device is not a physical function, returns 0.
 */
int pci_vfs_assigned(struct pci_dev *dev)
{
	struct pci_dev *vfdev;
	unsigned int vfs_assigned = 0;
	unsigned short dev_id;

	/* only search if we are a PF */
	if (!dev->is_physfn)
		return 0;

	/*
	 * determine the device ID for the VFs; the vendor ID will be the
	 * same as the PF, so there is no need to check for that one
	 */
	pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_VF_DID, &dev_id);

	/* loop through all the VFs to see if we own any that are assigned */
	vfdev = pci_get_device(dev->vendor, dev_id, NULL);
	while (vfdev) {
		/*
		 * It is considered assigned if it is a virtual function with
		 * our dev as the physical function and the assigned bit is set
		 */
		if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
		    (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
			vfs_assigned++;

		vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
	}

	return vfs_assigned;
}
EXPORT_SYMBOL_GPL(pci_vfs_assigned);

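/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a PF driver can use this to refuse tearing down SR-IOV while VFs are
 * still assigned to guests:
 *
 *	if (pci_vfs_assigned(pdev)) {
 *		dev_warn(&pdev->dev, "VFs are assigned - not disabling\n");
 *		return -EBUSY;
 *	}
 *	pci_disable_sriov(pdev);
 */
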
/**
 * pci_sriov_set_totalvfs -- reduce the TotalVFs available
 * @dev: the PCI PF device
 * @numvfs: number that should be used for TotalVFs supported
 *
 * Should be called from PF driver's probe routine with
 * device's mutex held.
 *
 * Returns 0 if PF is an SR-IOV-capable device and the value of numvfs is
 * valid.  If the device is not a PF, returns -ENOSYS; if numvfs is invalid,
 * returns -EINVAL; if VFs are already enabled, returns -EBUSY.
 */
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{
	if (!dev->is_physfn)
		return -ENOSYS;
	if (numvfs > dev->sriov->total_VFs)
		return -EINVAL;

	/* Shouldn't change if VFs already enabled */
	if (dev->sriov->ctrl & PCI_SRIOV_CTRL_VFE)
		return -EBUSY;

	dev->sriov->driver_max_VFs = numvfs;

	return 0;
}
EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs);

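/*
 * Illustrative sketch (FOO_MAX_VFS is a hypothetical driver limit): a PF
 * driver that supports fewer VFs than the hardware advertises caps the
 * count from its probe routine:
 *
 *	err = pci_sriov_set_totalvfs(pdev, FOO_MAX_VFS);
 *
 * After that, pci_sriov_get_totalvfs() below (and the sysfs
 * "sriov_totalvfs" attribute built on it) reports the reduced limit.
 */
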
/**
 * pci_sriov_get_totalvfs -- get total VFs supported on this device
 * @dev: the PCI PF device
 *
 * For a PCIe device with SR-IOV support, return the PCIe SR-IOV capability
 * value of TotalVFs or the value of driver_max_VFs if the driver reduced
 * it.  Otherwise, returns 0.
 */
int pci_sriov_get_totalvfs(struct pci_dev *dev)
{
	if (!dev->is_physfn)
		return 0;

	if (dev->sriov->driver_max_VFs)
		return dev->sriov->driver_max_VFs;

	return dev->sriov->total_VFs;
}
EXPORT_SYMBOL_GPL(pci_sriov_get_totalvfs);