/**
* pci_enable_pri - Enable PRI capability
- * @ pdev: PCI device structure
+ * @pdev: PCI device structure
+ * @reqs: maximum number of outstanding PRI requests
*
* Returns 0 on success, negative value on error
*/
return pdev->pasid_required;
}
+
+/**
+ * pci_pri_supported - Check if PRI is supported.
+ * @pdev: PCI device structure
+ *
+ * Returns true if PRI capability is present, false otherwise.
+ */
+bool pci_pri_supported(struct pci_dev *pdev)
+{
+ /* VFs share the PF PRI */
+ if (pci_physfn(pdev)->pri_cap)
+ return true;
+ return false;
+}
+EXPORT_SYMBOL_GPL(pci_pri_supported);
#endif /* CONFIG_PCI_PRI */
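For context, a minimal sketch of how a caller (e.g. an IOMMU driver) might use the new helper together with pci_enable_pri(); the function name, the request count of 32, and the error handling below are illustrative assumptions, not part of this patch:

#include <linux/pci.h>
#include <linux/pci-ats.h>

/* Illustrative only: PRI is checked on the PF even when pdev is a VF. */
static int example_setup_pri(struct pci_dev *pdev)
{
	if (!pci_pri_supported(pdev))
		return -EINVAL;

	/* 32 outstanding requests is an arbitrary example value */
	return pci_enable_pri(pdev, 32);
}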
#ifdef CONFIG_PCI_PASID
} else {
node = of_node_get(bus->self->dev.of_node);
if (node && of_property_read_bool(node, "external-facing"))
- bus->self->untrusted = true;
+ bus->self->external_facing = true;
}
bus->dev.of_node = node;
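For reference, a rough sketch of how an external-facing port can propagate distrust to the devices behind it at enumeration time; the function name is hypothetical and the logic only approximates the probe-time handling:

#include <linux/pci.h>

/* Sketch: devices below an untrusted or external-facing bridge are untrusted. */
static void example_mark_untrusted(struct pci_dev *dev)
{
	struct pci_dev *parent = pci_upstream_bridge(dev);

	if (parent && (parent->untrusted || parent->external_facing))
		dev->untrusted = true;
}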
* @busno: bus number associated with the bridge root bus
* @bus_max: maximum number of buses for this bridge
* @resources: list where the range of resources will be added after DT parsing
+ * @ib_resources: list where the range of inbound resources (with addresses
+ * from 'dma-ranges') will be added after DT parsing
* @io_base: pointer to a variable that will contain on return the physical
* address for the start of the I/O range. Can be NULL if the caller doesn't
* expect I/O ranges to be present in the device tree.
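As a rough illustration of what ends up on the @ib_resources list, a host bridge driver could walk the parsed inbound ranges as sketched below; the function name is hypothetical, and entry->offset is the CPU-to-bus address offset for each range:

#include <linux/pci.h>
#include <linux/resource_ext.h>

/* Sketch only: dump the inbound windows parsed from 'dma-ranges'. */
static void example_dump_inbound(struct device *dev, struct list_head *ib_resources)
{
	struct resource_entry *entry;

	resource_list_for_each_entry(entry, ib_resources)
		dev_dbg(dev, "inbound %pR (offset %#llx)\n",
			entry->res, (unsigned long long)entry->offset);
}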
EXPORT_SYMBOL_GPL(of_irq_parse_and_map_pci);
#endif /* CONFIG_OF_IRQ */
-int pci_parse_request_of_pci_ranges(struct device *dev,
- struct list_head *resources,
- struct list_head *ib_resources,
- struct resource **bus_range)
+static int pci_parse_request_of_pci_ranges(struct device *dev,
+ struct pci_host_bridge *bridge)
{
int err, res_valid = 0;
resource_size_t iobase;
struct resource_entry *win, *tmp;
- INIT_LIST_HEAD(resources);
- if (ib_resources)
- INIT_LIST_HEAD(ib_resources);
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, resources,
- ib_resources, &iobase);
+ INIT_LIST_HEAD(&bridge->windows);
+ INIT_LIST_HEAD(&bridge->dma_ranges);
+
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &bridge->windows,
+ &bridge->dma_ranges, &iobase);
if (err)
return err;
- err = devm_request_pci_bus_resources(dev, resources);
+ err = devm_request_pci_bus_resources(dev, &bridge->windows);
if (err)
- goto out_release_res;
+ return err;
- resource_list_for_each_entry_safe(win, tmp, resources) {
+ resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
struct resource *res = win->res;
switch (resource_type(res)) {
case IORESOURCE_MEM:
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
break;
- case IORESOURCE_BUS:
- if (bus_range)
- *bus_range = res;
- break;
}
}
- if (res_valid)
+ if (!res_valid)
+ dev_warn(dev, "non-prefetchable memory resource required\n");
+
+ return 0;
+}
+
+int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge)
+{
+ if (!dev->of_node)
return 0;
- dev_err(dev, "non-prefetchable memory resource required\n");
- err = -EINVAL;
+ bridge->swizzle_irq = pci_common_swizzle;
+ bridge->map_irq = of_irq_parse_and_map_pci;
- out_release_res:
- pci_free_resource_list(resources);
- return err;
+ return pci_parse_request_of_pci_ranges(dev, bridge);
}
-EXPORT_SYMBOL_GPL(pci_parse_request_of_pci_ranges);
#endif /* CONFIG_PCI */
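To illustrate the effect of making pci_parse_request_of_pci_ranges() static: a host controller probe no longer parses DT ranges itself, assuming devm_of_pci_bridge_init() is invoked from the devm host bridge allocation path. The driver names and the empty pci_ops below are hypothetical:

#include <linux/pci.h>
#include <linux/platform_device.h>

static struct pci_ops example_pcie_ops;	/* hypothetical config accessors */

static int example_pcie_probe(struct platform_device *pdev)
{
	struct pci_host_bridge *bridge;

	bridge = devm_pci_alloc_host_bridge(&pdev->dev, 0);
	if (!bridge)
		return -ENOMEM;

	/*
	 * bridge->windows and bridge->dma_ranges are expected to be
	 * populated from 'ranges'/'dma-ranges' by the bridge init above;
	 * no explicit range-parsing call is needed here.
	 */
	bridge->ops = &example_pcie_ops;

	return pci_host_probe(bridge);
}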
* @dev: Device to which the resource belongs
* @res: Resource to be tracked
* @add_size: Additional size to be optionally added to the resource
+ * @min_align: Minimum memory window alignment
*/
static int add_to_list(struct list_head *head, struct pci_dev *dev,
struct resource *res, resource_size_t add_size,
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
- panic("pdev_sort_resources(): kmalloc() failed!\n");
+ panic("%s: kzalloc() failed!\n", __func__);
tmp->res = r;
tmp->dev = dev;