1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Support routines for initializing a PCI subsystem
4  *
5  * Extruded from code written by
6  *      Dave Rusling ([email protected])
7  *      David Mosberger ([email protected])
8  *      David Miller ([email protected])
9  *
10  * Nov 2000, Ivan Kokshaysky <[email protected]>
11  *           PCI-PCI bridges cleanup, sorted resource allocation.
12  * Feb 2002, Ivan Kokshaysky <[email protected]>
13  *           Converted to allocation in 3 passes, which gives
14  *           tighter packing. Prefetchable range support.
15  */
16
17 #include <linux/bitops.h>
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/pci.h>
22 #include <linux/errno.h>
23 #include <linux/ioport.h>
24 #include <linux/cache.h>
25 #include <linux/limits.h>
26 #include <linux/sizes.h>
27 #include <linux/slab.h>
28 #include <linux/acpi.h>
29 #include "pci.h"
30
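/*
 * Global PCI resource-assignment policy flags (e.g. PCI_PROBE_ONLY), set by
 * arch and host-bridge code and tested with pci_has_flag().
 */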
31 unsigned int pci_flags;
32 EXPORT_SYMBOL_GPL(pci_flags);
33
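/*
 * Tracker used by the sizing/assignment passes below: it records a device
 * BAR or bridge window (@res) together with its owning device and the
 * original start/end/flags, plus the optional add_size and minimum
 * alignment requested for it.
 */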
34 struct pci_dev_resource {
35         struct list_head list;
36         struct resource *res;
37         struct pci_dev *dev;
38         resource_size_t start;
39         resource_size_t end;
40         resource_size_t add_size;
41         resource_size_t min_align;
42         unsigned long flags;
43 };
44
45 static void free_list(struct list_head *head)
46 {
47         struct pci_dev_resource *dev_res, *tmp;
48
49         list_for_each_entry_safe(dev_res, tmp, head, list) {
50                 list_del(&dev_res->list);
51                 kfree(dev_res);
52         }
53 }
54
55 /**
56  * add_to_list() - Add a new resource tracker to the list
57  * @head:       Head of the list
58  * @dev:        Device to which the resource belongs
59  * @res:        Resource to be tracked
60  * @add_size:   Additional size to be optionally added to the resource
61  * @min_align:  Minimum memory window alignment
62  */
63 static int add_to_list(struct list_head *head, struct pci_dev *dev,
64                        struct resource *res, resource_size_t add_size,
65                        resource_size_t min_align)
66 {
67         struct pci_dev_resource *tmp;
68
69         tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
70         if (!tmp)
71                 return -ENOMEM;
72
73         tmp->res = res;
74         tmp->dev = dev;
75         tmp->start = res->start;
76         tmp->end = res->end;
77         tmp->flags = res->flags;
78         tmp->add_size = add_size;
79         tmp->min_align = min_align;
80
81         list_add(&tmp->list, head);
82
83         return 0;
84 }
85
86 static void remove_from_list(struct list_head *head, struct resource *res)
87 {
88         struct pci_dev_resource *dev_res, *tmp;
89
90         list_for_each_entry_safe(dev_res, tmp, head, list) {
91                 if (dev_res->res == res) {
92                         list_del(&dev_res->list);
93                         kfree(dev_res);
94                         break;
95                 }
96         }
97 }
98
99 static struct pci_dev_resource *res_to_dev_res(struct list_head *head,
100                                                struct resource *res)
101 {
102         struct pci_dev_resource *dev_res;
103
104         list_for_each_entry(dev_res, head, list) {
105                 if (dev_res->res == res)
106                         return dev_res;
107         }
108
109         return NULL;
110 }
111
112 static resource_size_t get_res_add_size(struct list_head *head,
113                                         struct resource *res)
114 {
115         struct pci_dev_resource *dev_res;
116
117         dev_res = res_to_dev_res(head, res);
118         return dev_res ? dev_res->add_size : 0;
119 }
120
121 static resource_size_t get_res_add_align(struct list_head *head,
122                                          struct resource *res)
123 {
124         struct pci_dev_resource *dev_res;
125
126         dev_res = res_to_dev_res(head, res);
127         return dev_res ? dev_res->min_align : 0;
128 }
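/*
 * Illustrative sketch (not lifted from any one caller) of how the helpers
 * above are typically combined by the sizing code in this file:
 *
 *        LIST_HEAD(realloc_head);
 *
 *        add_to_list(&realloc_head, dev, res, add_size, min_align);
 *        ...
 *        add_size = get_res_add_size(&realloc_head, res);
 *        ...
 *        remove_from_list(&realloc_head, res);
 *        free_list(&realloc_head);
 */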
129
130 /* Sort resources by alignment */
131 static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
132 {
133         struct resource *r;
134         int i;
135
136         pci_dev_for_each_resource(dev, r, i) {
137                 const char *r_name = pci_resource_name(dev, i);
138                 struct pci_dev_resource *dev_res, *tmp;
139                 resource_size_t r_align;
140                 struct list_head *n;
141
142                 if (r->flags & IORESOURCE_PCI_FIXED)
143                         continue;
144
145                 if (!(r->flags) || r->parent)
146                         continue;
147
148                 r_align = pci_resource_alignment(dev, r);
149                 if (!r_align) {
150                         pci_warn(dev, "%s %pR: alignment must not be zero\n",
151                                  r_name, r);
152                         continue;
153                 }
154
155                 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
156                 if (!tmp)
157                         panic("%s: kzalloc() failed!\n", __func__);
158                 tmp->res = r;
159                 tmp->dev = dev;
160
161                 /* Fallback: insert at the tail (smallest alignment, or empty list) */
162                 n = head;
163                 list_for_each_entry(dev_res, head, list) {
164                         resource_size_t align;
165
166                         align = pci_resource_alignment(dev_res->dev,
167                                                          dev_res->res);
168
169                         if (r_align > align) {
170                                 n = &dev_res->list;
171                                 break;
172                         }
173                 }
174                 /* Insert it just before n */
175                 list_add_tail(&tmp->list, n);
176         }
177 }
178
179 static void __dev_sort_resources(struct pci_dev *dev, struct list_head *head)
180 {
181         u16 class = dev->class >> 8;
182
183         /* Don't touch classless devices or host bridges or IOAPICs */
184         if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
185                 return;
186
187         /* Don't touch IOAPIC devices already enabled by firmware */
188         if (class == PCI_CLASS_SYSTEM_PIC) {
189                 u16 command;
190                 pci_read_config_word(dev, PCI_COMMAND, &command);
191                 if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
192                         return;
193         }
194
195         pdev_sort_resources(dev, head);
196 }
197
198 static inline void reset_resource(struct resource *res)
199 {
200         res->start = 0;
201         res->end = 0;
202         res->flags = 0;
203 }
204
205 /**
206  * reassign_resources_sorted() - Satisfy any additional resource requests
207  *
208  * @realloc_head:       Head of the list tracking requests requiring
209  *                      additional resources
210  * @head:               Head of the list tracking requests with allocated
211  *                      resources
212  *
213  * Walk through each element of the realloc_head and try to procure additional
214  * resources for the element, provided the element is in the head list.
215  */
216 static void reassign_resources_sorted(struct list_head *realloc_head,
217                                       struct list_head *head)
218 {
219         struct resource *res;
220         const char *res_name;
221         struct pci_dev_resource *add_res, *tmp;
222         struct pci_dev_resource *dev_res;
223         resource_size_t add_size, align;
224         int idx;
225
226         list_for_each_entry_safe(add_res, tmp, realloc_head, list) {
227                 bool found_match = false;
228
229                 res = add_res->res;
230
231                 /* Skip resource that has been reset */
232                 if (!res->flags)
233                         goto out;
234
235                 /* Skip this resource if not found in head list */
236                 list_for_each_entry(dev_res, head, list) {
237                         if (dev_res->res == res) {
238                                 found_match = true;
239                                 break;
240                         }
241                 }
242                 if (!found_match) /* Just skip */
243                         continue;
244
245                 idx = res - &add_res->dev->resource[0];
246                 res_name = pci_resource_name(add_res->dev, idx);
247                 add_size = add_res->add_size;
248                 align = add_res->min_align;
249                 if (!resource_size(res)) {
250                         resource_set_range(res, align, add_size);
251                         if (pci_assign_resource(add_res->dev, idx))
252                                 reset_resource(res);
253                 } else {
254                         res->flags |= add_res->flags &
255                                  (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
256                         if (pci_reassign_resource(add_res->dev, idx,
257                                                   add_size, align))
258                                 pci_info(add_res->dev, "%s %pR: failed to add %llx\n",
259                                          res_name, res,
260                                          (unsigned long long) add_size);
261                 }
262 out:
263                 list_del(&add_res->list);
264                 kfree(add_res);
265         }
266 }
267
268 /**
269  * assign_requested_resources_sorted() - Satisfy resource requests
270  *
271  * @head:       Head of the list tracking requests for resources
272  * @fail_head:  Head of the list tracking requests that could not be
273  *              allocated
274  *
275  * Satisfy resource requests of each element in the list.  Add requests that
276  * could not be satisfied to the fail_head list.
277  */
278 static void assign_requested_resources_sorted(struct list_head *head,
279                                  struct list_head *fail_head)
280 {
281         struct resource *res;
282         struct pci_dev_resource *dev_res;
283         int idx;
284
285         list_for_each_entry(dev_res, head, list) {
286                 res = dev_res->res;
287                 idx = res - &dev_res->dev->resource[0];
288                 if (resource_size(res) &&
289                     pci_assign_resource(dev_res->dev, idx)) {
290                         if (fail_head) {
291                                 /*
292                                  * If the failed resource is a ROM BAR and
293                                  * it will be enabled later, don't add it
294                                  * to the list.
295                                  */
296                                 if (!((idx == PCI_ROM_RESOURCE) &&
297                                       (!(res->flags & IORESOURCE_ROM_ENABLE))))
298                                         add_to_list(fail_head,
299                                                     dev_res->dev, res,
300                                                     0 /* don't care */,
301                                                     0 /* don't care */);
302                         }
303                         reset_resource(res);
304                 }
305         }
306 }
307
308 static unsigned long pci_fail_res_type_mask(struct list_head *fail_head)
309 {
310         struct pci_dev_resource *fail_res;
311         unsigned long mask = 0;
312
313         /* Check failed type */
314         list_for_each_entry(fail_res, fail_head, list)
315                 mask |= fail_res->flags;
316
317         /*
318          * A failed pref resource also sets IORESOURCE_MEM, since pref can
319          * be allocated from a non-pref range.  All assigned non-pref
320          * sibling resources will then be released according to that bit.
321          */
322         return mask & (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH);
323 }
324
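/*
 * Given the mask of failed resource types from pci_fail_res_type_mask(),
 * decide whether an already-assigned resource must be released so it can
 * be retried together with the failed ones.
 */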
325 static bool pci_need_to_release(unsigned long mask, struct resource *res)
326 {
327         if (res->flags & IORESOURCE_IO)
328                 return !!(mask & IORESOURCE_IO);
329
330         /* Check pref at first */
331         if (res->flags & IORESOURCE_PREFETCH) {
332                 if (mask & IORESOURCE_PREFETCH)
333                         return true;
334                 /* Count pref if its parent is non-pref */
335                 else if ((mask & IORESOURCE_MEM) &&
336                          !(res->parent->flags & IORESOURCE_PREFETCH))
337                         return true;
338                 else
339                         return false;
340         }
341
342         if (res->flags & IORESOURCE_MEM)
343                 return !!(mask & IORESOURCE_MEM);
344
345         return false;   /* Should not get here */
346 }
347
348 static void __assign_resources_sorted(struct list_head *head,
349                                       struct list_head *realloc_head,
350                                       struct list_head *fail_head)
351 {
352         /*
353          * We must not assign only the requested resources first.  They
354          * could end up adjacent, so a later reassign could not grow them
355          * one by one within the parent resource window.
356          *
357          * Instead, try to assign requested + add_size first.  If that
358          * succeeds, we can finish early.  Otherwise fall back to assigning
359          * just the requested size, then reassign add_size where possible.
360          *
361          * Check the three resource types separately when deciding whether
362          * to release an assigned resource after the requested + add_size
363          * attempt:
364          *
365          *      1. If I/O port assignment fails, release the assigned I/O
366          *         ports.
367          *      2. If pref MMIO assignment fails, release the assigned pref
368          *         MMIO.  Also release assigned pref MMIO whose parent is
369          *         non-pref MMIO when non-pref MMIO assignment fails.
370          *      3. If non-pref MMIO or pref MMIO assignment fails, release
371          *         the assigned non-pref MMIO.
372          */
373         LIST_HEAD(save_head);
374         LIST_HEAD(local_fail_head);
375         struct pci_dev_resource *save_res;
376         struct pci_dev_resource *dev_res, *tmp_res, *dev_res2;
377         unsigned long fail_type;
378         resource_size_t add_align, align;
379
380         /* Check if optional add_size is there */
381         if (!realloc_head || list_empty(realloc_head))
382                 goto requested_and_reassign;
383
384         /* Save original start, end, flags etc at first */
385         list_for_each_entry(dev_res, head, list) {
386                 if (add_to_list(&save_head, dev_res->dev, dev_res->res, 0, 0)) {
387                         free_list(&save_head);
388                         goto requested_and_reassign;
389                 }
390         }
391
392         /* Update res in head list with add_size in realloc_head list */
393         list_for_each_entry_safe(dev_res, tmp_res, head, list) {
394                 dev_res->res->end += get_res_add_size(realloc_head,
395                                                         dev_res->res);
396
397                 /*
398                  * There are two kinds of additional resources in the list:
399                  * 1. bridge resource  -- IORESOURCE_STARTALIGN
400                  * 2. SR-IOV resource  -- IORESOURCE_SIZEALIGN
401                  * Here just fix the additional alignment for bridge
402                  */
403                 if (!(dev_res->res->flags & IORESOURCE_STARTALIGN))
404                         continue;
405
406                 add_align = get_res_add_align(realloc_head, dev_res->res);
407
408                 /*
409                  * The "head" list is sorted by alignment so resources with
410                  * bigger alignment will be assigned first.  After we
411                  * change the alignment of a dev_res in "head" list, we
412                  * need to reorder the list by alignment to make it
413                  * consistent.
414                  */
415                 if (add_align > dev_res->res->start) {
416                         resource_size_t r_size = resource_size(dev_res->res);
417
418                         dev_res->res->start = add_align;
419                         dev_res->res->end = add_align + r_size - 1;
420
421                         list_for_each_entry(dev_res2, head, list) {
422                                 align = pci_resource_alignment(dev_res2->dev,
423                                                                dev_res2->res);
424                                 if (add_align > align) {
425                                         list_move_tail(&dev_res->list,
426                                                        &dev_res2->list);
427                                         break;
428                                 }
429                         }
430                 }
431
432         }
433
434         /* Try updated head list with add_size added */
435         assign_requested_resources_sorted(head, &local_fail_head);
436
437         /* All assigned with add_size? */
438         if (list_empty(&local_fail_head)) {
439                 /* Remove head list from realloc_head list */
440                 list_for_each_entry(dev_res, head, list)
441                         remove_from_list(realloc_head, dev_res->res);
442                 free_list(&save_head);
443                 free_list(head);
444                 return;
445         }
446
447         /* Check failed type */
448         fail_type = pci_fail_res_type_mask(&local_fail_head);
449         /* Remove assigned resources that need not be released from the lists */
450         list_for_each_entry_safe(dev_res, tmp_res, head, list)
451                 if (dev_res->res->parent &&
452                     !pci_need_to_release(fail_type, dev_res->res)) {
453                         /* Remove it from realloc_head list */
454                         remove_from_list(realloc_head, dev_res->res);
455                         remove_from_list(&save_head, dev_res->res);
456                         list_del(&dev_res->list);
457                         kfree(dev_res);
458                 }
459
460         free_list(&local_fail_head);
461         /* Release assigned resource */
462         list_for_each_entry(dev_res, head, list)
463                 if (dev_res->res->parent)
464                         release_resource(dev_res->res);
465         /* Restore start/end/flags from saved list */
466         list_for_each_entry(save_res, &save_head, list) {
467                 struct resource *res = save_res->res;
468
469                 res->start = save_res->start;
470                 res->end = save_res->end;
471                 res->flags = save_res->flags;
472         }
473         free_list(&save_head);
474
475 requested_and_reassign:
476         /* Satisfy the must-have resource requests */
477         assign_requested_resources_sorted(head, fail_head);
478
479         /* Try to satisfy any additional optional resource requests */
480         if (realloc_head)
481                 reassign_resources_sorted(realloc_head, head);
482         free_list(head);
483 }
484
485 static void pdev_assign_resources_sorted(struct pci_dev *dev,
486                                          struct list_head *add_head,
487                                          struct list_head *fail_head)
488 {
489         LIST_HEAD(head);
490
491         __dev_sort_resources(dev, &head);
492         __assign_resources_sorted(&head, add_head, fail_head);
493
494 }
495
496 static void pbus_assign_resources_sorted(const struct pci_bus *bus,
497                                          struct list_head *realloc_head,
498                                          struct list_head *fail_head)
499 {
500         struct pci_dev *dev;
501         LIST_HEAD(head);
502
503         list_for_each_entry(dev, &bus->devices, bus_list)
504                 __dev_sort_resources(dev, &head);
505
506         __assign_resources_sorted(&head, realloc_head, fail_head);
507 }
508
509 void pci_setup_cardbus(struct pci_bus *bus)
510 {
511         struct pci_dev *bridge = bus->self;
512         struct resource *res;
513         struct pci_bus_region region;
514
515         pci_info(bridge, "CardBus bridge to %pR\n",
516                  &bus->busn_res);
517
518         res = bus->resource[0];
519         pcibios_resource_to_bus(bridge->bus, &region, res);
520         if (res->flags & IORESOURCE_IO) {
521                 /*
522                  * The IO resource is allocated a range twice as large as it
523                  * would normally need.  This allows us to set both IO regs.
524                  */
525                 pci_info(bridge, "  bridge window %pR\n", res);
526                 pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
527                                         region.start);
528                 pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0,
529                                         region.end);
530         }
531
532         res = bus->resource[1];
533         pcibios_resource_to_bus(bridge->bus, &region, res);
534         if (res->flags & IORESOURCE_IO) {
535                 pci_info(bridge, "  bridge window %pR\n", res);
536                 pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
537                                         region.start);
538                 pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1,
539                                         region.end);
540         }
541
542         res = bus->resource[2];
543         pcibios_resource_to_bus(bridge->bus, &region, res);
544         if (res->flags & IORESOURCE_MEM) {
545                 pci_info(bridge, "  bridge window %pR\n", res);
546                 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
547                                         region.start);
548                 pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0,
549                                         region.end);
550         }
551
552         res = bus->resource[3];
553         pcibios_resource_to_bus(bridge->bus, &region, res);
554         if (res->flags & IORESOURCE_MEM) {
555                 pci_info(bridge, "  bridge window %pR\n", res);
556                 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
557                                         region.start);
558                 pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1,
559                                         region.end);
560         }
561 }
562 EXPORT_SYMBOL(pci_setup_cardbus);
563
564 /*
565  * Initialize bridges with base/limit values we have collected.  PCI-to-PCI
566  * Bridge Architecture Specification rev. 1.1 (1998) requires that if there
567  * are no I/O ports or memory behind the bridge, the corresponding range
568  * must be turned off by writing a base value greater than the limit to
569  * the bridge's base/limit registers.
570  *
571  * Note: care must be taken when updating I/O base/limit registers of
572  * bridges which support 32-bit I/O.  This update requires two config space
573  * writes, so it's quite possible that an I/O window of the bridge will
574  * have some undesirable address (e.g. 0) after the first write.  Ditto
575  * 64-bit prefetchable MMIO.
576  */
577 static void pci_setup_bridge_io(struct pci_dev *bridge)
578 {
579         struct resource *res;
580         const char *res_name;
581         struct pci_bus_region region;
582         unsigned long io_mask;
583         u8 io_base_lo, io_limit_lo;
584         u16 l;
585         u32 io_upper16;
586
587         io_mask = PCI_IO_RANGE_MASK;
588         if (bridge->io_window_1k)
589                 io_mask = PCI_IO_1K_RANGE_MASK;
590
591         /* Set up the top and bottom of the PCI I/O segment for this bus */
592         res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
593         res_name = pci_resource_name(bridge, PCI_BRIDGE_IO_WINDOW);
594         pcibios_resource_to_bus(bridge->bus, &region, res);
595         if (res->flags & IORESOURCE_IO) {
596                 pci_read_config_word(bridge, PCI_IO_BASE, &l);
597                 io_base_lo = (region.start >> 8) & io_mask;
598                 io_limit_lo = (region.end >> 8) & io_mask;
599                 l = ((u16) io_limit_lo << 8) | io_base_lo;
600                 /* Set up upper 16 bits of I/O base/limit */
601                 io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
602                 pci_info(bridge, "  %s %pR\n", res_name, res);
603         } else {
604                 /* Clear upper 16 bits of I/O base/limit */
605                 io_upper16 = 0;
606                 l = 0x00f0;
607         }
608         /* Temporarily disable the I/O range before updating PCI_IO_BASE */
609         pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
610         /* Update lower 16 bits of I/O base/limit */
611         pci_write_config_word(bridge, PCI_IO_BASE, l);
612         /* Update upper 16 bits of I/O base/limit */
613         pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
614 }
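/*
 * Worked example (assuming the standard 4K I/O granularity): for a bridge
 * window of [io 0x1000-0x1fff], io_base_lo = (0x1000 >> 8) & io_mask = 0x10
 * and io_limit_lo = (0x1fff >> 8) & io_mask = 0x10, so l = 0x1010 is written
 * to PCI_IO_BASE/PCI_IO_LIMIT, while io_upper16 = 0 clears the upper 16-bit
 * base/limit registers.
 */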
615
616 static void pci_setup_bridge_mmio(struct pci_dev *bridge)
617 {
618         struct resource *res;
619         const char *res_name;
620         struct pci_bus_region region;
621         u32 l;
622
623         /* Set up the top and bottom of the PCI Memory segment for this bus */
624         res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
625         res_name = pci_resource_name(bridge, PCI_BRIDGE_MEM_WINDOW);
626         pcibios_resource_to_bus(bridge->bus, &region, res);
627         if (res->flags & IORESOURCE_MEM) {
628                 l = (region.start >> 16) & 0xfff0;
629                 l |= region.end & 0xfff00000;
630                 pci_info(bridge, "  %s %pR\n", res_name, res);
631         } else {
632                 l = 0x0000fff0;
633         }
634         pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
635 }
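/*
 * Worked example: a window of [mem 0x80000000-0x805fffff] packs into
 * l = ((0x80000000 >> 16) & 0xfff0) | (0x805fffff & 0xfff00000) = 0x80508000,
 * i.e. PCI_MEMORY_BASE = 0x8000 and PCI_MEMORY_LIMIT = 0x8050.
 */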
636
637 static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
638 {
639         struct resource *res;
640         const char *res_name;
641         struct pci_bus_region region;
642         u32 l, bu, lu;
643
644         /*
645          * Clear out the upper 32 bits of PREF limit.  If
646          * PCI_PREF_BASE_UPPER32 was non-zero, this temporarily disables
647          * PREF range, which is ok.
648          */
649         pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);
650
651         /* Set up PREF base/limit */
652         bu = lu = 0;
653         res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
654         res_name = pci_resource_name(bridge, PCI_BRIDGE_PREF_MEM_WINDOW);
655         pcibios_resource_to_bus(bridge->bus, &region, res);
656         if (res->flags & IORESOURCE_PREFETCH) {
657                 l = (region.start >> 16) & 0xfff0;
658                 l |= region.end & 0xfff00000;
659                 if (res->flags & IORESOURCE_MEM_64) {
660                         bu = upper_32_bits(region.start);
661                         lu = upper_32_bits(region.end);
662                 }
663                 pci_info(bridge, "  %s %pR\n", res_name, res);
664         } else {
665                 l = 0x0000fff0;
666         }
667         pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
668
669         /* Set the upper 32 bits of PREF base & limit */
670         pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
671         pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
672 }
673
674 static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
675 {
676         struct pci_dev *bridge = bus->self;
677
678         pci_info(bridge, "PCI bridge to %pR\n",
679                  &bus->busn_res);
680
681         if (type & IORESOURCE_IO)
682                 pci_setup_bridge_io(bridge);
683
684         if (type & IORESOURCE_MEM)
685                 pci_setup_bridge_mmio(bridge);
686
687         if (type & IORESOURCE_PREFETCH)
688                 pci_setup_bridge_mmio_pref(bridge);
689
690         pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
691 }
692
693 void __weak pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
694 {
695 }
696
697 void pci_setup_bridge(struct pci_bus *bus)
698 {
699         unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
700                                   IORESOURCE_PREFETCH;
701
702         pcibios_setup_bridge(bus, type);
703         __pci_setup_bridge(bus, type);
704 }
705
706
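/*
 * Claim bridge window @i for @bridge.  If the window does not fit inside
 * the upstream resource, try to clip it to the space that is actually
 * available, update the bridge's base/limit registers for that window,
 * and claim the clipped window instead.  Return 0 on success.
 */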
707 int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
708 {
709         if (i < PCI_BRIDGE_RESOURCES || i > PCI_BRIDGE_RESOURCE_END)
710                 return 0;
711
712         if (pci_claim_resource(bridge, i) == 0)
713                 return 0;       /* Claimed the window */
714
715         if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI)
716                 return 0;
717
718         if (!pci_bus_clip_resource(bridge, i))
719                 return -EINVAL; /* Clipping didn't change anything */
720
721         switch (i) {
722         case PCI_BRIDGE_IO_WINDOW:
723                 pci_setup_bridge_io(bridge);
724                 break;
725         case PCI_BRIDGE_MEM_WINDOW:
726                 pci_setup_bridge_mmio(bridge);
727                 break;
728         case PCI_BRIDGE_PREF_MEM_WINDOW:
729                 pci_setup_bridge_mmio_pref(bridge);
730                 break;
731         default:
732                 return -EINVAL;
733         }
734
735         if (pci_claim_resource(bridge, i) == 0)
736                 return 0;       /* Claimed a smaller window */
737
738         return -EINVAL;
739 }
740
741 /*
742  * Check whether the bridge supports optional I/O and prefetchable memory
743  * ranges.  If not, the respective base/limit registers must be read-only
744  * and read as 0.
745  */
746 static void pci_bridge_check_ranges(struct pci_bus *bus)
747 {
748         struct pci_dev *bridge = bus->self;
749         struct resource *b_res;
750
751         b_res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
752         b_res->flags |= IORESOURCE_MEM;
753
754         if (bridge->io_window) {
755                 b_res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
756                 b_res->flags |= IORESOURCE_IO;
757         }
758
759         if (bridge->pref_window) {
760                 b_res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
761                 b_res->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
762                 if (bridge->pref_64_window) {
763                         b_res->flags |= IORESOURCE_MEM_64 |
764                                         PCI_PREF_RANGE_TYPE_64;
765                 }
766         }
767 }
768
769 /*
770  * Helper function for sizing routines.  Assigned resources have non-NULL
771  * parent resource.
772  *
773  * Return first unassigned resource of the correct type.  If there is none,
774  * return first assigned resource of the correct type.  If none of the
775  * above, return NULL.
776  *
777  * Returning an assigned resource of the correct type allows the caller to
778  * distinguish between already assigned and no resource of the correct type.
779  */
780 static struct resource *find_bus_resource_of_type(struct pci_bus *bus,
781                                                   unsigned long type_mask,
782                                                   unsigned long type)
783 {
784         struct resource *r, *r_assigned = NULL;
785
786         pci_bus_for_each_resource(bus, r) {
787                 if (r == &ioport_resource || r == &iomem_resource)
788                         continue;
789                 if (r && (r->flags & type_mask) == type && !r->parent)
790                         return r;
791                 if (r && (r->flags & type_mask) == type && !r_assigned)
792                         r_assigned = r;
793         }
794         return r_assigned;
795 }
796
797 static resource_size_t calculate_iosize(resource_size_t size,
798                                         resource_size_t min_size,
799                                         resource_size_t size1,
800                                         resource_size_t add_size,
801                                         resource_size_t children_add_size,
802                                         resource_size_t old_size,
803                                         resource_size_t align)
804 {
805         if (size < min_size)
806                 size = min_size;
807         if (old_size == 1)
808                 old_size = 0;
809         /*
810          * To be fixed in 2.5: we should have some sort of HAVE_ISA flag in
811          * struct pci_bus.
812          */
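        /*
         * With ISA present, only the first 256 bytes of each 1K I/O block
         * are free of ISA aliases, so expand the request: keep the low 256
         * bytes and give each further 256-byte chunk its own 1K block
         * (i.e. scale the remainder by 4).
         */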
813 #if defined(CONFIG_ISA) || defined(CONFIG_EISA)
814         size = (size & 0xff) + ((size & ~0xffUL) << 2);
815 #endif
816         size = size + size1;
817         if (size < old_size)
818                 size = old_size;
819
820         size = ALIGN(max(size, add_size) + children_add_size, align);
821         return size;
822 }
823
824 static resource_size_t calculate_memsize(resource_size_t size,
825                                          resource_size_t min_size,
826                                          resource_size_t add_size,
827                                          resource_size_t children_add_size,
828                                          resource_size_t old_size,
829                                          resource_size_t align)
830 {
831         if (size < min_size)
832                 size = min_size;
833         if (old_size == 1)
834                 old_size = 0;
835
836         size = max(size, add_size) + children_add_size;
837         return ALIGN(max(size, old_size), align);
838 }
839
840 resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus,
841                                                 unsigned long type)
842 {
843         return 1;
844 }
845
846 #define PCI_P2P_DEFAULT_MEM_ALIGN       0x100000        /* 1MiB */
847 #define PCI_P2P_DEFAULT_IO_ALIGN        0x1000          /* 4KiB */
848 #define PCI_P2P_DEFAULT_IO_ALIGN_1K     0x400           /* 1KiB */
849
850 static resource_size_t window_alignment(struct pci_bus *bus, unsigned long type)
851 {
852         resource_size_t align = 1, arch_align;
853
854         if (type & IORESOURCE_MEM)
855                 align = PCI_P2P_DEFAULT_MEM_ALIGN;
856         else if (type & IORESOURCE_IO) {
857                 /*
858                  * Per spec, I/O windows are 4K-aligned, but some bridges have
859                  * an extension to support 1K alignment.
860                  */
861                 if (bus->self && bus->self->io_window_1k)
862                         align = PCI_P2P_DEFAULT_IO_ALIGN_1K;
863                 else
864                         align = PCI_P2P_DEFAULT_IO_ALIGN;
865         }
866
867         arch_align = pcibios_window_alignment(bus, type);
868         return max(align, arch_align);
869 }
870
871 /**
872  * pbus_size_io() - Size the I/O window of a given bus
873  *
874  * @bus:                The bus
875  * @min_size:           The minimum I/O window that must be allocated
876  * @add_size:           Additional optional I/O window
877  * @realloc_head:       Track the additional I/O window on this list
878  *
879  * Sizing the I/O windows of the PCI-PCI bridge is trivial, since these
880  * windows have 1K or 4K granularity and the I/O ranges of non-bridge PCI
881  * devices are limited to 256 bytes.  We must be careful with the ISA
882  * aliasing though.
883  */
884 static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
885                          resource_size_t add_size,
886                          struct list_head *realloc_head)
887 {
888         struct pci_dev *dev;
889         struct resource *b_res = find_bus_resource_of_type(bus, IORESOURCE_IO,
890                                                            IORESOURCE_IO);
891         resource_size_t size = 0, size0 = 0, size1 = 0;
892         resource_size_t children_add_size = 0;
893         resource_size_t min_align, align;
894
895         if (!b_res)
896                 return;
897
898         /* If resource is already assigned, nothing more to do */
899         if (b_res->parent)
900                 return;
901
902         min_align = window_alignment(bus, IORESOURCE_IO);
903         list_for_each_entry(dev, &bus->devices, bus_list) {
904                 struct resource *r;
905
906                 pci_dev_for_each_resource(dev, r) {
907                         unsigned long r_size;
908
909                         if (r->parent || !(r->flags & IORESOURCE_IO))
910                                 continue;
911                         r_size = resource_size(r);
912
913                         if (r_size < 0x400)
914                                 /* Might be re-aligned for ISA */
915                                 size += r_size;
916                         else
917                                 size1 += r_size;
918
919                         align = pci_resource_alignment(dev, r);
920                         if (align > min_align)
921                                 min_align = align;
922
923                         if (realloc_head)
924                                 children_add_size += get_res_add_size(realloc_head, r);
925                 }
926         }
927
928         size0 = calculate_iosize(size, min_size, size1, 0, 0,
929                         resource_size(b_res), min_align);
930         size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
931                 calculate_iosize(size, min_size, size1, add_size, children_add_size,
932                         resource_size(b_res), min_align);
933         if (!size0 && !size1) {
934                 if (bus->self && (b_res->start || b_res->end))
935                         pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
936                                  b_res, &bus->busn_res);
937                 b_res->flags = 0;
938                 return;
939         }
940
941         resource_set_range(b_res, min_align, size0);
942         b_res->flags |= IORESOURCE_STARTALIGN;
943         if (bus->self && size1 > size0 && realloc_head) {
944                 add_to_list(realloc_head, bus->self, b_res, size1-size0,
945                             min_align);
946                 pci_info(bus->self, "bridge window %pR to %pR add_size %llx\n",
947                          b_res, &bus->busn_res,
948                          (unsigned long long) size1 - size0);
949         }
950 }
951
952 static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
953                                                   int max_order)
954 {
955         resource_size_t align = 0;
956         resource_size_t min_align = 0;
957         int order;
958
959         for (order = 0; order <= max_order; order++) {
960                 resource_size_t align1 = 1;
961
962                 align1 <<= order + __ffs(SZ_1M);
963
964                 if (!align)
965                         min_align = align1;
966                 else if (ALIGN(align + min_align, min_align) < align1)
967                         min_align = align1 >> 1;
968                 align += aligns[order];
969         }
970
971         return min_align;
972 }
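/*
 * Worked example: for one 1 MB BAR and one 4 MB BAR (aligns[0] = 1 MB,
 * aligns[2] = 4 MB, max_order = 2), the result is 2 MB rather than 4 MB:
 * at order 2, ALIGN(1M + 1M, 1M) < 4M holds, so min_align is raised only
 * to align1 >> 1, trading strict alignment for tighter packing.
 */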
973
974 /**
 975  * pbus_upstream_space_available - Check that no upstream resource limits the allocation
976  * @bus:        The bus
977  * @mask:       Mask the resource flag, then compare it with type
978  * @type:       The type of resource from bridge
979  * @size:       The size required from the bridge window
980  * @align:      Required alignment for the resource
981  *
982  * Checks that @size can fit inside the upstream bridge resources that are
983  * already assigned.
984  *
985  * Return: %true if enough space is available on all assigned upstream
986  * resources.
987  */
988 static bool pbus_upstream_space_available(struct pci_bus *bus, unsigned long mask,
989                                           unsigned long type, resource_size_t size,
990                                           resource_size_t align)
991 {
992         struct resource_constraint constraint = {
993                 .max = RESOURCE_SIZE_MAX,
994                 .align = align,
995         };
996         struct pci_bus *downstream = bus;
997         struct resource *r;
998
999         while ((bus = bus->parent)) {
1000                 if (pci_is_root_bus(bus))
1001                         break;
1002
1003                 pci_bus_for_each_resource(bus, r) {
1004                         if (!r || !r->parent || (r->flags & mask) != type)
1005                                 continue;
1006
1007                         if (resource_size(r) >= size) {
1008                                 struct resource gap = {};
1009
1010                                 if (find_resource_space(r, &gap, size, &constraint) == 0) {
1011                                         gap.flags = type;
1012                                         pci_dbg(bus->self,
1013                                                 "Assigned bridge window %pR to %pR free space at %pR\n",
1014                                                 r, &bus->busn_res, &gap);
1015                                         return true;
1016                                 }
1017                         }
1018
1019                         if (bus->self) {
1020                                 pci_info(bus->self,
1021                                          "Assigned bridge window %pR to %pR cannot fit 0x%llx required for %s bridging to %pR\n",
1022                                          r, &bus->busn_res,
1023                                          (unsigned long long)size,
1024                                          pci_name(downstream->self),
1025                                          &downstream->busn_res);
1026                         }
1027
1028                         return false;
1029                 }
1030         }
1031
1032         return true;
1033 }
1034
1035 /**
1036  * pbus_size_mem() - Size the memory window of a given bus
1037  *
1038  * @bus:                The bus
1039  * @mask:               Mask the resource flag, then compare it with type
1040  * @type:               The type of free resource from bridge
1041  * @type2:              Second match type
1042  * @type3:              Third match type
1043  * @min_size:           The minimum memory window that must be allocated
1044  * @add_size:           Additional optional memory window
1045  * @realloc_head:       Track the additional memory window on this list
1046  *
1047  * Calculate the size of the bus and minimal alignment which guarantees
1048  * that all child resources fit in this size.
1049  *
1050  * Return -ENOSPC if there's no available bus resource of the desired
1051  * type.  Otherwise, set the bus resource start/end to indicate the
1052  * required size, add things to realloc_head (if supplied), and return 0.
1053  */
1054 static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
1055                          unsigned long type, unsigned long type2,
1056                          unsigned long type3, resource_size_t min_size,
1057                          resource_size_t add_size,
1058                          struct list_head *realloc_head)
1059 {
1060         struct pci_dev *dev;
1061         resource_size_t min_align, win_align, align, size, size0, size1;
1062         resource_size_t aligns[24]; /* Alignments from 1MB to 8TB */
1063         int order, max_order;
1064         struct resource *b_res = find_bus_resource_of_type(bus,
1065                                         mask | IORESOURCE_PREFETCH, type);
1066         resource_size_t children_add_size = 0;
1067         resource_size_t children_add_align = 0;
1068         resource_size_t add_align = 0;
1069
1070         if (!b_res)
1071                 return -ENOSPC;
1072
1073         /* If resource is already assigned, nothing more to do */
1074         if (b_res->parent)
1075                 return 0;
1076
1077         memset(aligns, 0, sizeof(aligns));
1078         max_order = 0;
1079         size = 0;
1080
1081         list_for_each_entry(dev, &bus->devices, bus_list) {
1082                 struct resource *r;
1083                 int i;
1084
1085                 pci_dev_for_each_resource(dev, r, i) {
1086                         const char *r_name = pci_resource_name(dev, i);
1087                         resource_size_t r_size;
1088
1089                         if (r->parent || (r->flags & IORESOURCE_PCI_FIXED) ||
1090                             ((r->flags & mask) != type &&
1091                              (r->flags & mask) != type2 &&
1092                              (r->flags & mask) != type3))
1093                                 continue;
1094                         r_size = resource_size(r);
1095 #ifdef CONFIG_PCI_IOV
1096                         /* Put SR-IOV requested resources on the optional list */
1097                         if (realloc_head && i >= PCI_IOV_RESOURCES &&
1098                                         i <= PCI_IOV_RESOURCE_END) {
1099                                 add_align = max(pci_resource_alignment(dev, r), add_align);
1100                                 r->end = r->start - 1;
1101                                 add_to_list(realloc_head, dev, r, r_size, 0 /* Don't care */);
1102                                 children_add_size += r_size;
1103                                 continue;
1104                         }
1105 #endif
1106                         /*
1107                          * aligns[0] is for 1MB (since bridge memory
1108                          * windows are always at least 1MB aligned), so
1109                          * keep "order" from being negative for smaller
1110                          * resources.
1111                          */
1112                         align = pci_resource_alignment(dev, r);
1113                         order = __ffs(align) - __ffs(SZ_1M);
1114                         if (order < 0)
1115                                 order = 0;
1116                         if (order >= ARRAY_SIZE(aligns)) {
1117                                 pci_warn(dev, "%s %pR: disabling; bad alignment %#llx\n",
1118                                          r_name, r, (unsigned long long) align);
1119                                 r->flags = 0;
1120                                 continue;
1121                         }
1122                         size += max(r_size, align);
1123                         /*
1124                          * Exclude ranges with size > align from calculation of
1125                          * the alignment.
1126                          */
1127                         if (r_size <= align)
1128                                 aligns[order] += align;
1129                         if (order > max_order)
1130                                 max_order = order;
1131
1132                         if (realloc_head) {
1133                                 children_add_size += get_res_add_size(realloc_head, r);
1134                                 children_add_align = get_res_add_align(realloc_head, r);
1135                                 add_align = max(add_align, children_add_align);
1136                         }
1137                 }
1138         }
1139
1140         win_align = window_alignment(bus, b_res->flags);
1141         min_align = calculate_mem_align(aligns, max_order);
1142         min_align = max(min_align, win_align);
1143         size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), min_align);
1144         add_align = max(min_align, add_align);
1145
1146         if (bus->self && size0 &&
1147             !pbus_upstream_space_available(bus, mask | IORESOURCE_PREFETCH, type,
1148                                            size0, add_align)) {
1149                 min_align = 1ULL << (max_order + __ffs(SZ_1M));
1150                 min_align = max(min_align, win_align);
1151                 size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), win_align);
1152                 add_align = win_align;
1153                 pci_info(bus->self, "bridge window %pR to %pR requires relaxed alignment rules\n",
1154                          b_res, &bus->busn_res);
1155         }
1156
1157         size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
1158                 calculate_memsize(size, min_size, add_size, children_add_size,
1159                                 resource_size(b_res), add_align);
1160         if (!size0 && !size1) {
1161                 if (bus->self && (b_res->start || b_res->end))
1162                         pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
1163                                  b_res, &bus->busn_res);
1164                 b_res->flags = 0;
1165                 return 0;
1166         }
1167         b_res->start = min_align;
1168         b_res->end = size0 + min_align - 1;
1169         b_res->flags |= IORESOURCE_STARTALIGN;
1170         if (bus->self && size1 > size0 && realloc_head) {
1171                 add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align);
1172                 pci_info(bus->self, "bridge window %pR to %pR add_size %llx add_align %llx\n",
1173                            b_res, &bus->busn_res,
1174                            (unsigned long long) (size1 - size0),
1175                            (unsigned long long) add_align);
1176         }
1177         return 0;
1178 }
1179
1180 unsigned long pci_cardbus_resource_alignment(struct resource *res)
1181 {
1182         if (res->flags & IORESOURCE_IO)
1183                 return pci_cardbus_io_size;
1184         if (res->flags & IORESOURCE_MEM)
1185                 return pci_cardbus_mem_size;
1186         return 0;
1187 }
1188
1189 static void pci_bus_size_cardbus(struct pci_bus *bus,
1190                                  struct list_head *realloc_head)
1191 {
1192         struct pci_dev *bridge = bus->self;
1193         struct resource *b_res;
1194         resource_size_t b_res_3_size = pci_cardbus_mem_size * 2;
1195         u16 ctrl;
1196
1197         b_res = &bridge->resource[PCI_CB_BRIDGE_IO_0_WINDOW];
1198         if (b_res->parent)
1199                 goto handle_b_res_1;
1200         /*
1201          * Reserve some resources for CardBus.  We reserve a fixed amount
1202          * of bus space for CardBus bridges.
1203          */
1204         resource_set_range(b_res, pci_cardbus_io_size, pci_cardbus_io_size);
1205         b_res->flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
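        /*
         * With a realloc list, record the window as optional instead:
         * shrink the mandatory request to zero size and track the full
         * amount as add_size on realloc_head.
         */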
1206         if (realloc_head) {
1207                 b_res->end -= pci_cardbus_io_size;
1208                 add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size,
1209                             pci_cardbus_io_size);
1210         }
1211
1212 handle_b_res_1:
1213         b_res = &bridge->resource[PCI_CB_BRIDGE_IO_1_WINDOW];
1214         if (b_res->parent)
1215                 goto handle_b_res_2;
1216         resource_set_range(b_res, pci_cardbus_io_size, pci_cardbus_io_size);
1217         b_res->flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
1218         if (realloc_head) {
1219                 b_res->end -= pci_cardbus_io_size;
1220                 add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size,
1221                             pci_cardbus_io_size);
1222         }
1223
1224 handle_b_res_2:
1225         /* MEM1 must not be pref MMIO */
1226         pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
1227         if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM1) {
1228                 ctrl &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM1;
1229                 pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
1230                 pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
1231         }
1232
1233         /* Check whether prefetchable memory is supported by this bridge. */
1234         pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
1235         if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) {
1236                 ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0;
1237                 pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
1238                 pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
1239         }
1240
1241         b_res = &bridge->resource[PCI_CB_BRIDGE_MEM_0_WINDOW];
1242         if (b_res->parent)
1243                 goto handle_b_res_3;
1244         /*
1245          * If we have prefetchable memory support, allocate two regions.
1246          * Otherwise, allocate one region of twice the size.
1247          */
1248         if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
1249                 resource_set_range(b_res, pci_cardbus_mem_size,
1250                                    pci_cardbus_mem_size);
1251                 b_res->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH |
1252                                     IORESOURCE_STARTALIGN;
1253                 if (realloc_head) {
1254                         b_res->end -= pci_cardbus_mem_size;
1255                         add_to_list(realloc_head, bridge, b_res,
1256                                     pci_cardbus_mem_size, pci_cardbus_mem_size);
1257                 }
1258
1259                 /* Reduce that to half */
1260                 b_res_3_size = pci_cardbus_mem_size;
1261         }
1262
1263 handle_b_res_3:
1264         b_res = &bridge->resource[PCI_CB_BRIDGE_MEM_1_WINDOW];
1265         if (b_res->parent)
1266                 goto handle_done;
1267         resource_set_range(b_res, pci_cardbus_mem_size, b_res_3_size);
1268         b_res->flags |= IORESOURCE_MEM | IORESOURCE_STARTALIGN;
1269         if (realloc_head) {
1270                 b_res->end -= b_res_3_size;
1271                 add_to_list(realloc_head, bridge, b_res, b_res_3_size,
1272                             pci_cardbus_mem_size);
1273         }
1274
1275 handle_done:
1276         ;
1277 }
1278
1279 void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
1280 {
1281         struct pci_dev *dev;
1282         unsigned long mask, prefmask, type2 = 0, type3 = 0;
1283         resource_size_t additional_io_size = 0, additional_mmio_size = 0,
1284                         additional_mmio_pref_size = 0;
1285         struct resource *pref;
1286         struct pci_host_bridge *host;
1287         int hdr_type, ret;
1288
1289         list_for_each_entry(dev, &bus->devices, bus_list) {
1290                 struct pci_bus *b = dev->subordinate;
1291                 if (!b)
1292                         continue;
1293
1294                 switch (dev->hdr_type) {
1295                 case PCI_HEADER_TYPE_CARDBUS:
1296                         pci_bus_size_cardbus(b, realloc_head);
1297                         break;
1298
1299                 case PCI_HEADER_TYPE_BRIDGE:
1300                 default:
1301                         __pci_bus_size_bridges(b, realloc_head);
1302                         break;
1303                 }
1304         }
1305
1306         /* The root bus? */
1307         if (pci_is_root_bus(bus)) {
1308                 host = to_pci_host_bridge(bus->bridge);
1309                 if (!host->size_windows)
1310                         return;
1311                 pci_bus_for_each_resource(bus, pref)
1312                         if (pref && (pref->flags & IORESOURCE_PREFETCH))
1313                                 break;
1314                 hdr_type = -1;  /* Intentionally invalid - not a PCI device. */
1315         } else {
1316                 pref = &bus->self->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
1317                 hdr_type = bus->self->hdr_type;
1318         }
1319
1320         switch (hdr_type) {
1321         case PCI_HEADER_TYPE_CARDBUS:
1322                 /* Don't size CardBuses yet */
1323                 break;
1324
1325         case PCI_HEADER_TYPE_BRIDGE:
1326                 pci_bridge_check_ranges(bus);
1327                 if (bus->self->is_hotplug_bridge) {
1328                         additional_io_size  = pci_hotplug_io_size;
1329                         additional_mmio_size = pci_hotplug_mmio_size;
1330                         additional_mmio_pref_size = pci_hotplug_mmio_pref_size;
1331                 }
1332                 fallthrough;
1333         default:
1334                 pbus_size_io(bus, realloc_head ? 0 : additional_io_size,
1335                              additional_io_size, realloc_head);
1336
1337                 /*
1338                  * If there's a 64-bit prefetchable MMIO window, compute
1339                  * the size required to put all 64-bit prefetchable
1340                  * resources in it.
1341                  */
1342                 mask = IORESOURCE_MEM;
1343                 prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
1344                 if (pref && (pref->flags & IORESOURCE_MEM_64)) {
1345                         prefmask |= IORESOURCE_MEM_64;
1346                         ret = pbus_size_mem(bus, prefmask, prefmask,
1347                                 prefmask, prefmask,
1348                                 realloc_head ? 0 : additional_mmio_pref_size,
1349                                 additional_mmio_pref_size, realloc_head);
1350
1351                         /*
1352                          * If successful, all non-prefetchable resources
1353                          * and any 32-bit prefetchable resources will go in
1354                          * the non-prefetchable window.
1355                          */
1356                         if (ret == 0) {
1357                                 mask = prefmask;
1358                                 type2 = prefmask & ~IORESOURCE_MEM_64;
1359                                 type3 = prefmask & ~IORESOURCE_PREFETCH;
1360                         }
1361                 }
1362
1363                 /*
1364                  * If there is no 64-bit prefetchable window, compute the
1365                  * size required to put all prefetchable resources in the
1366                  * 32-bit prefetchable window (if there is one).
1367                  */
1368                 if (!type2) {
1369                         prefmask &= ~IORESOURCE_MEM_64;
1370                         ret = pbus_size_mem(bus, prefmask, prefmask,
1371                                 prefmask, prefmask,
1372                                 realloc_head ? 0 : additional_mmio_pref_size,
1373                                 additional_mmio_pref_size, realloc_head);
1374
1375                         /*
1376                          * If successful, only non-prefetchable resources
1377                          * will go in the non-prefetchable window.
1378                          */
1379                         if (ret == 0)
1380                                 mask = prefmask;
1381                         else
1382                                 additional_mmio_size += additional_mmio_pref_size;
1383
1384                         type2 = type3 = IORESOURCE_MEM;
1385                 }
1386
1387                 /*
1388                  * Compute the size required to put everything else in the
1389                  * non-prefetchable window. This includes:
1390                  *
1391                  *   - all non-prefetchable resources
1392                  *   - 32-bit prefetchable resources if there's a 64-bit
1393                  *     prefetchable window or no prefetchable window at all
1394                  *   - 64-bit prefetchable resources if there's no prefetchable
1395                  *     window at all
1396                  *
1397                  * Note that the strategy in __pci_assign_resource() must match
1398                  * that used here. Specifically, we cannot put a 32-bit
1399                  * prefetchable resource in a 64-bit prefetchable window.
1400                  */
1401                 pbus_size_mem(bus, mask, IORESOURCE_MEM, type2, type3,
1402                               realloc_head ? 0 : additional_mmio_size,
1403                               additional_mmio_size, realloc_head);
1404                 break;
1405         }
1406 }
1407
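     /*
      * Size all bridge windows below @bus without a realloc tracking list,
      * so optional sizes are treated as required.
      */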
1408 void pci_bus_size_bridges(struct pci_bus *bus)
1409 {
1410         __pci_bus_size_bridges(bus, NULL);
1411 }
1412 EXPORT_SYMBOL(pci_bus_size_bridges);
1413
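     /*
      * Try to request the fixed resource @r from a window of bus @b that has
      * a matching type and fully contains it.
      */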
1414 static void assign_fixed_resource_on_bus(struct pci_bus *b, struct resource *r)
1415 {
1416         struct resource *parent_r;
1417         unsigned long mask = IORESOURCE_IO | IORESOURCE_MEM |
1418                              IORESOURCE_PREFETCH;
1419
1420         pci_bus_for_each_resource(b, parent_r) {
1421                 if (!parent_r)
1422                         continue;
1423
1424                 if ((r->flags & mask) == (parent_r->flags & mask) &&
1425                     resource_contains(parent_r, r))
1426                         request_resource(parent_r, r);
1427         }
1428 }
1429
1430 /*
1431  * Try to assign any resources marked as IORESOURCE_PCI_FIXED, as they are
1432  * skipped by pbus_assign_resources_sorted().
1433  */
1434 static void pdev_assign_fixed_resources(struct pci_dev *dev)
1435 {
1436         struct resource *r;
1437
1438         pci_dev_for_each_resource(dev, r) {
1439                 struct pci_bus *b;
1440
1441                 if (r->parent || !(r->flags & IORESOURCE_PCI_FIXED) ||
1442                     !(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
1443                         continue;
1444
1445                 b = dev->bus;
1446                 while (b && !r->parent) {
1447                         assign_fixed_resource_on_bus(b, r);
1448                         b = b->parent;
1449                 }
1450         }
1451 }
1452
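     /*
      * Assign resources to all devices on @bus and, recursively, on its
      * subordinate buses, then program the PCI-PCI and CardBus bridge windows
      * into the hardware.  Failing resources are recorded on @fail_head when
      * it is provided.
      */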
1453 void __pci_bus_assign_resources(const struct pci_bus *bus,
1454                                 struct list_head *realloc_head,
1455                                 struct list_head *fail_head)
1456 {
1457         struct pci_bus *b;
1458         struct pci_dev *dev;
1459
1460         pbus_assign_resources_sorted(bus, realloc_head, fail_head);
1461
1462         list_for_each_entry(dev, &bus->devices, bus_list) {
1463                 pdev_assign_fixed_resources(dev);
1464
1465                 b = dev->subordinate;
1466                 if (!b)
1467                         continue;
1468
1469                 __pci_bus_assign_resources(b, realloc_head, fail_head);
1470
1471                 switch (dev->hdr_type) {
1472                 case PCI_HEADER_TYPE_BRIDGE:
1473                         if (!pci_is_enabled(dev))
1474                                 pci_setup_bridge(b);
1475                         break;
1476
1477                 case PCI_HEADER_TYPE_CARDBUS:
1478                         pci_setup_cardbus(b);
1479                         break;
1480
1481                 default:
1482                         pci_info(dev, "not setting up bridge for bus %04x:%02x\n",
1483                                  pci_domain_nr(b), b->number);
1484                         break;
1485                 }
1486         }
1487 }
1488
1489 void pci_bus_assign_resources(const struct pci_bus *bus)
1490 {
1491         __pci_bus_assign_resources(bus, NULL, NULL);
1492 }
1493 EXPORT_SYMBOL(pci_bus_assign_resources);
1494
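     /*
      * Claim the device BARs (resources below PCI_BRIDGE_RESOURCES) at the
      * addresses they are already programmed with.
      */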
1495 static void pci_claim_device_resources(struct pci_dev *dev)
1496 {
1497         int i;
1498
1499         for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
1500                 struct resource *r = &dev->resource[i];
1501
1502                 if (!r->flags || r->parent)
1503                         continue;
1504
1505                 pci_claim_resource(dev, i);
1506         }
1507 }
1508
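     /*
      * Claim the bridge windows of @dev at the addresses they are already
      * programmed with.
      */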
1509 static void pci_claim_bridge_resources(struct pci_dev *dev)
1510 {
1511         int i;
1512
1513         for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
1514                 struct resource *r = &dev->resource[i];
1515
1516                 if (!r->flags || r->parent)
1517                         continue;
1518
1519                 pci_claim_bridge_resource(dev, i);
1520         }
1521 }
1522
1523 static void pci_bus_allocate_dev_resources(struct pci_bus *b)
1524 {
1525         struct pci_dev *dev;
1526         struct pci_bus *child;
1527
1528         list_for_each_entry(dev, &b->devices, bus_list) {
1529                 pci_claim_device_resources(dev);
1530
1531                 child = dev->subordinate;
1532                 if (child)
1533                         pci_bus_allocate_dev_resources(child);
1534         }
1535 }
1536
1537 static void pci_bus_allocate_resources(struct pci_bus *b)
1538 {
1539         struct pci_bus *child;
1540
1541         /*
1542          * Carry out a depth-first search on the PCI bus tree to allocate
1543          * bridge apertures.  Read the programmed bridge bases and
1544          * recursively claim the respective bridge resources.
1545          */
1546         if (b->self) {
1547                 pci_read_bridge_bases(b);
1548                 pci_claim_bridge_resources(b->self);
1549         }
1550
1551         list_for_each_entry(child, &b->children, node)
1552                 pci_bus_allocate_resources(child);
1553 }
1554
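     /*
      * Claim all bridge windows and device BARs below @b at the addresses
      * already programmed into the hardware, without reassigning anything.
      */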
1555 void pci_bus_claim_resources(struct pci_bus *b)
1556 {
1557         pci_bus_allocate_resources(b);
1558         pci_bus_allocate_dev_resources(b);
1559 }
1560 EXPORT_SYMBOL(pci_bus_claim_resources);
1561
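     /*
      * Assign the windows of @bridge itself, then assign resources on its
      * secondary bus and program the PCI-PCI or CardBus bridge accordingly.
      */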
1562 static void __pci_bridge_assign_resources(const struct pci_dev *bridge,
1563                                           struct list_head *add_head,
1564                                           struct list_head *fail_head)
1565 {
1566         struct pci_bus *b;
1567
1568         pdev_assign_resources_sorted((struct pci_dev *)bridge,
1569                                          add_head, fail_head);
1570
1571         b = bridge->subordinate;
1572         if (!b)
1573                 return;
1574
1575         __pci_bus_assign_resources(b, add_head, fail_head);
1576
1577         switch (bridge->class >> 8) {
1578         case PCI_CLASS_BRIDGE_PCI:
1579                 pci_setup_bridge(b);
1580                 break;
1581
1582         case PCI_CLASS_BRIDGE_CARDBUS:
1583                 pci_setup_cardbus(b);
1584                 break;
1585
1586         default:
1587                 pci_info(bridge, "not setting up bridge for bus %04x:%02x\n",
1588                          pci_domain_nr(b), b->number);
1589                 break;
1590         }
1591 }
1592
1593 #define PCI_RES_TYPE_MASK \
1594         (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH |\
1595          IORESOURCE_MEM_64)
1596
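     /*
      * Release one bridge window of the given @type so that a larger window
      * can be allocated later.  The resource keeps its size (rebased to start
      * at 0) and the released window is written back to the bridge registers.
      */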
1597 static void pci_bridge_release_resources(struct pci_bus *bus,
1598                                          unsigned long type)
1599 {
1600         struct pci_dev *dev = bus->self;
1601         struct resource *r;
1602         unsigned int old_flags;
1603         struct resource *b_res;
1604         int idx = 1;
1605
1606         b_res = &dev->resource[PCI_BRIDGE_RESOURCES];
1607
1608         /*
1609          * 1. If IO port assignment fails, release bridge IO port.
1610          * 2. If non-pref MMIO assignment fails, release bridge non-pref MMIO.
1611          * 3. If 64-bit pref MMIO assignment fails, and bridge pref is 64-bit,
1612          *    release bridge pref MMIO.
1613          * 4. If pref MMIO assignment fails, and bridge pref is 32-bit,
1614          *    release bridge pref MMIO.
1615          * 5. If pref MMIO assignment fails, and bridge pref is not
1616          *    assigned, release bridge non-pref MMIO.
1617          */
1618         if (type & IORESOURCE_IO)
1619                 idx = 0;
1620         else if (!(type & IORESOURCE_PREFETCH))
1621                 idx = 1;
1622         else if ((type & IORESOURCE_MEM_64) &&
1623                  (b_res[2].flags & IORESOURCE_MEM_64))
1624                 idx = 2;
1625         else if (!(b_res[2].flags & IORESOURCE_MEM_64) &&
1626                  (b_res[2].flags & IORESOURCE_PREFETCH))
1627                 idx = 2;
1628         else
1629                 idx = 1;
1630
1631         r = &b_res[idx];
1632
1633         if (!r->parent)
1634                 return;
1635
1636         /* If there are children, release them all */
1637         release_child_resources(r);
1638         if (!release_resource(r)) {
1639                 type = old_flags = r->flags & PCI_RES_TYPE_MASK;
1640                 pci_info(dev, "resource %d %pR released\n",
1641                          PCI_BRIDGE_RESOURCES + idx, r);
1642                 /* Keep the old size */
1643                 r->end = resource_size(r) - 1;
1644                 r->start = 0;
1645                 r->flags = 0;
1646
1647                 /* Avoid touching the windows without PREF */
1648                 if (type & IORESOURCE_PREFETCH)
1649                         type = IORESOURCE_PREFETCH;
1650                 __pci_setup_bridge(bus, type);
1651                 /* Restore flags for the next child resource under the same bridge */
1652                 r->flags = old_flags;
1653         }
1654 }
1655
1656 enum release_type {
1657         leaf_only,
1658         whole_subtree,
1659 };
1660
1661 /*
1662  * Try to release PCI bridge resources from leaf bridge, so we can allocate
1663  * a larger window later.
1664  */
1665 static void pci_bus_release_bridge_resources(struct pci_bus *bus,
1666                                              unsigned long type,
1667                                              enum release_type rel_type)
1668 {
1669         struct pci_dev *dev;
1670         bool is_leaf_bridge = true;
1671
1672         list_for_each_entry(dev, &bus->devices, bus_list) {
1673                 struct pci_bus *b = dev->subordinate;
1674                 if (!b)
1675                         continue;
1676
1677                 is_leaf_bridge = false;
1678
1679                 if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
1680                         continue;
1681
1682                 if (rel_type == whole_subtree)
1683                         pci_bus_release_bridge_resources(b, type,
1684                                                  whole_subtree);
1685         }
1686
1687         if (pci_is_root_bus(bus))
1688                 return;
1689
1690         if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI)
1691                 return;
1692
1693         if ((rel_type == whole_subtree) || is_leaf_bridge)
1694                 pci_bridge_release_resources(bus, type);
1695 }
1696
1697 static void pci_bus_dump_res(struct pci_bus *bus)
1698 {
1699         struct resource *res;
1700         int i;
1701
1702         pci_bus_for_each_resource(bus, res, i) {
1703                 if (!res || !res->end || !res->flags)
1704                         continue;
1705
1706                 dev_info(&bus->dev, "resource %d %pR\n", i, res);
1707         }
1708 }
1709
1710 static void pci_bus_dump_resources(struct pci_bus *bus)
1711 {
1712         struct pci_bus *b;
1713         struct pci_dev *dev;
1714
1716         pci_bus_dump_res(bus);
1717
1718         list_for_each_entry(dev, &bus->devices, bus_list) {
1719                 b = dev->subordinate;
1720                 if (!b)
1721                         continue;
1722
1723                 pci_bus_dump_resources(b);
1724         }
1725 }
1726
1727 static int pci_bus_get_depth(struct pci_bus *bus)
1728 {
1729         int depth = 0;
1730         struct pci_bus *child_bus;
1731
1732         list_for_each_entry(child_bus, &bus->children, node) {
1733                 int ret;
1734
1735                 ret = pci_bus_get_depth(child_bus);
1736                 if (ret + 1 > depth)
1737                         depth = ret + 1;
1738         }
1739
1740         return depth;
1741 }
1742
1743 /*
1744  * -1: undefined, will auto detect later
1745  *  0: disabled by user
1746  *  1: disabled by auto detect
1747  *  2: enabled by user
1748  *  3: enabled by auto detect
1749  */
1750 enum enable_type {
1751         undefined = -1,
1752         user_disabled,
1753         auto_disabled,
1754         user_enabled,
1755         auto_enabled,
1756 };
1757
1758 static enum enable_type pci_realloc_enable = undefined;
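     /* Parse the argument of the "pci=realloc" boot option ("off" or "on"). */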
1759 void __init pci_realloc_get_opt(char *str)
1760 {
1761         if (!strncmp(str, "off", 3))
1762                 pci_realloc_enable = user_disabled;
1763         else if (!strncmp(str, "on", 2))
1764                 pci_realloc_enable = user_enabled;
1765 }
1766 static bool pci_realloc_enabled(enum enable_type enable)
1767 {
1768         return enable >= user_enabled;
1769 }
1770
1771 #if defined(CONFIG_PCI_IOV) && defined(CONFIG_PCI_REALLOC_ENABLE_AUTO)
1772 static int iov_resources_unassigned(struct pci_dev *dev, void *data)
1773 {
1774         int i;
1775         bool *unassigned = data;
1776
1777         for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
1778                 struct resource *r = &dev->resource[i + PCI_IOV_RESOURCES];
1779                 struct pci_bus_region region;
1780
1781                 /* Not assigned or rejected by kernel? */
1782                 if (!r->flags)
1783                         continue;
1784
1785                 pcibios_resource_to_bus(dev->bus, &region, r);
1786                 if (!region.start) {
1787                         *unassigned = true;
1788                         return 1; /* Return early from pci_walk_bus() */
1789                 }
1790         }
1791
1792         return 0;
1793 }
1794
1795 static enum enable_type pci_realloc_detect(struct pci_bus *bus,
1796                                            enum enable_type enable_local)
1797 {
1798         bool unassigned = false;
1799         struct pci_host_bridge *host;
1800
1801         if (enable_local != undefined)
1802                 return enable_local;
1803
1804         host = pci_find_host_bridge(bus);
1805         if (host->preserve_config)
1806                 return auto_disabled;
1807
1808         pci_walk_bus(bus, iov_resources_unassigned, &unassigned);
1809         if (unassigned)
1810                 return auto_enabled;
1811
1812         return enable_local;
1813 }
1814 #else
1815 static enum enable_type pci_realloc_detect(struct pci_bus *bus,
1816                                            enum enable_type enable_local)
1817 {
1818         return enable_local;
1819 }
1820 #endif
1821
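     /*
      * Resize a not-yet-assigned bridge window resource to @new_size and
      * remove it from @add_list, since the optional extra size is now
      * accounted for.
      */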
1822 static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res,
1823                                  struct list_head *add_list,
1824                                  resource_size_t new_size)
1825 {
1826         resource_size_t add_size, size = resource_size(res);
1827
1828         if (res->parent)
1829                 return;
1830
1831         if (!new_size)
1832                 return;
1833
1834         if (new_size > size) {
1835                 add_size = new_size - size;
1836                 pci_dbg(bridge, "bridge window %pR extended by %pa\n", res,
1837                         &add_size);
1838         } else if (new_size < size) {
1839                 add_size = size - new_size;
1840                 pci_dbg(bridge, "bridge window %pR shrunken by %pa\n", res,
1841                         &add_size);
1842         } else {
1843                 return;
1844         }
1845
1846         resource_set_size(res, new_size);
1847
1848         /* If the resource is part of the add_list, remove it now */
1849         if (add_list)
1850                 remove_from_list(add_list, res);
1851 }
1852
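     /*
      * Consume the space needed by @res (its size plus any alignment padding)
      * from the available window @avail by advancing avail->start.
      */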
1853 static void remove_dev_resource(struct resource *avail, struct pci_dev *dev,
1854                                 struct resource *res)
1855 {
1856         resource_size_t size, align, tmp;
1857
1858         size = resource_size(res);
1859         if (!size)
1860                 return;
1861
1862         align = pci_resource_alignment(dev, res);
1863         align = align ? ALIGN(avail->start, align) - avail->start : 0;
1864         tmp = align + size;
1865         avail->start = min(avail->start + tmp, avail->end + 1);
1866 }
1867
1868 static void remove_dev_resources(struct pci_dev *dev, struct resource *io,
1869                                  struct resource *mmio,
1870                                  struct resource *mmio_pref)
1871 {
1872         struct resource *res;
1873
1874         pci_dev_for_each_resource(dev, res) {
1875                 if (resource_type(res) == IORESOURCE_IO) {
1876                         remove_dev_resource(io, dev, res);
1877                 } else if (resource_type(res) == IORESOURCE_MEM) {
1878
1879                         /*
1880                          * Make sure prefetchable memory is reduced from
1881                          * the correct resource. Specifically, we put 32-bit
1882                          * prefetchable memory in the non-prefetchable window
1883                          * if there is a 64-bit prefetchable window.
1884                          *
1885                          * See comments in __pci_bus_size_bridges() for
1886                          * more information.
1887                          */
1888                         if ((res->flags & IORESOURCE_PREFETCH) &&
1889                             ((res->flags & IORESOURCE_MEM_64) ==
1890                              (mmio_pref->flags & IORESOURCE_MEM_64)))
1891                                 remove_dev_resource(mmio_pref, dev, res);
1892                         else
1893                                 remove_dev_resource(mmio, dev, res);
1894                 }
1895         }
1896 }
1897
1898 #define ALIGN_DOWN_IF_NONZERO(addr, align) \
1899                         ((align) ? ALIGN_DOWN((addr), (align)) : (addr))
1900
1901 /*
1902  * io, mmio and mmio_pref contain the total amount of bridge window space
1903  * available. This includes the minimal space needed to cover all the
1904  * existing devices on the bus and the possible extra space that can be
1905  * shared with the bridges.
1906  */
1907 static void pci_bus_distribute_available_resources(struct pci_bus *bus,
1908                                             struct list_head *add_list,
1909                                             struct resource io,
1910                                             struct resource mmio,
1911                                             struct resource mmio_pref)
1912 {
1913         unsigned int normal_bridges = 0, hotplug_bridges = 0;
1914         struct resource *io_res, *mmio_res, *mmio_pref_res;
1915         struct pci_dev *dev, *bridge = bus->self;
1916         resource_size_t io_per_b, mmio_per_b, mmio_pref_per_b, align;
1917
1918         io_res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
1919         mmio_res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
1920         mmio_pref_res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
1921
1922         /*
1923          * The alignment of this bridge has not been accounted for yet, so it
1924          * must be applied now, before its bridge windows are extended.
1925          */
1926         align = pci_resource_alignment(bridge, io_res);
1927         if (!io_res->parent && align)
1928                 io.start = min(ALIGN(io.start, align), io.end + 1);
1929
1930         align = pci_resource_alignment(bridge, mmio_res);
1931         if (!mmio_res->parent && align)
1932                 mmio.start = min(ALIGN(mmio.start, align), mmio.end + 1);
1933
1934         align = pci_resource_alignment(bridge, mmio_pref_res);
1935         if (!mmio_pref_res->parent && align)
1936                 mmio_pref.start = min(ALIGN(mmio_pref.start, align),
1937                         mmio_pref.end + 1);
1938
1939         /*
1940          * Now that we have adjusted for alignment, update the bridge window
1941          * resources to fill as much remaining resource space as possible.
1942          */
1943         adjust_bridge_window(bridge, io_res, add_list, resource_size(&io));
1944         adjust_bridge_window(bridge, mmio_res, add_list, resource_size(&mmio));
1945         adjust_bridge_window(bridge, mmio_pref_res, add_list,
1946                              resource_size(&mmio_pref));
1947
1948         /*
1949          * Calculate how many hotplug bridges and normal bridges there
1950          * are on this bus.  We will distribute the additional available
1951          * resources between hotplug bridges.
1952          */
1953         for_each_pci_bridge(dev, bus) {
1954                 if (dev->is_hotplug_bridge)
1955                         hotplug_bridges++;
1956                 else
1957                         normal_bridges++;
1958         }
1959
1960         if (!(hotplug_bridges + normal_bridges))
1961                 return;
1962
1963         /*
1964          * Calculate the amount of space we can forward from "bus" to any
1965          * downstream buses, i.e., the space left over after assigning the
1966          * BARs and windows on "bus".
1967          */
1968         list_for_each_entry(dev, &bus->devices, bus_list) {
1969                 if (!dev->is_virtfn)
1970                         remove_dev_resources(dev, &io, &mmio, &mmio_pref);
1971         }
1972
1973         /*
1974          * If there is at least one hotplug bridge on this bus it gets all
1975          * the extra resource space that was left after the reductions
1976          * above.
1977          *
1978          * If there are no hotplug bridges the extra resource space is
1979          * split between non-hotplug bridges. This is to allow possible
1980          * hotplug bridges below them to get the extra space as well.
1981          */
1982         if (hotplug_bridges) {
1983                 io_per_b = div64_ul(resource_size(&io), hotplug_bridges);
1984                 mmio_per_b = div64_ul(resource_size(&mmio), hotplug_bridges);
1985                 mmio_pref_per_b = div64_ul(resource_size(&mmio_pref),
1986                                            hotplug_bridges);
1987         } else {
1988                 io_per_b = div64_ul(resource_size(&io), normal_bridges);
1989                 mmio_per_b = div64_ul(resource_size(&mmio), normal_bridges);
1990                 mmio_pref_per_b = div64_ul(resource_size(&mmio_pref),
1991                                            normal_bridges);
1992         }
1993
1994         for_each_pci_bridge(dev, bus) {
1995                 struct resource *res;
1996                 struct pci_bus *b;
1997
1998                 b = dev->subordinate;
1999                 if (!b)
2000                         continue;
2001                 if (hotplug_bridges && !dev->is_hotplug_bridge)
2002                         continue;
2003
2004                 res = &dev->resource[PCI_BRIDGE_IO_WINDOW];
2005
2006                 /*
2007                  * Make sure the split resource space is properly aligned
2008                  * for bridge windows (align it down to avoid going above
2009                  * what is available).
2010                  */
2011                 align = pci_resource_alignment(dev, res);
2012                 resource_set_size(&io, ALIGN_DOWN_IF_NONZERO(io_per_b, align));
2013
2014                 /*
2015                  * Each *_per_b value holds the extra resource space that can
2016                  * be added for each bridge, but the minimum required space is
2017                  * already reserved as well, so adjust the window's start down
2018                  * accordingly to cover the whole space.
2019                  */
2020                 io.start -= resource_size(res);
2021
2022                 res = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
2023                 align = pci_resource_alignment(dev, res);
2024                 resource_set_size(&mmio,
2025                                   ALIGN_DOWN_IF_NONZERO(mmio_per_b, align));
2026                 mmio.start -= resource_size(res);
2027
2028                 res = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
2029                 align = pci_resource_alignment(dev, res);
2030                 resource_set_size(&mmio_pref,
2031                                   ALIGN_DOWN_IF_NONZERO(mmio_pref_per_b, align));
2032                 mmio_pref.start -= resource_size(res);
2033
2034                 pci_bus_distribute_available_resources(b, add_list, io, mmio,
2035                                                        mmio_pref);
2036
2037                 io.start += io.end + 1;
2038                 mmio.start += mmio.end + 1;
2039                 mmio_pref.start += mmio_pref.end + 1;
2040         }
2041 }
2042
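     /*
      * Distribute whatever space is left in a hotplug bridge's windows to the
      * bridges on its secondary bus (preferring hotplug bridges) so the
      * hierarchy can be extended later.
      */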
2043 static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
2044                                                       struct list_head *add_list)
2045 {
2046         struct resource available_io, available_mmio, available_mmio_pref;
2047
2048         if (!bridge->is_hotplug_bridge)
2049                 return;
2050
2051         pci_dbg(bridge, "distributing available resources\n");
2052
2053         /* Take the initial extra resources from the hotplug port */
2054         available_io = bridge->resource[PCI_BRIDGE_IO_WINDOW];
2055         available_mmio = bridge->resource[PCI_BRIDGE_MEM_WINDOW];
2056         available_mmio_pref = bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
2057
2058         pci_bus_distribute_available_resources(bridge->subordinate,
2059                                                add_list, available_io,
2060                                                available_mmio,
2061                                                available_mmio_pref);
2062 }
2063
2064 static bool pci_bridge_resources_not_assigned(struct pci_dev *dev)
2065 {
2066         const struct resource *r;
2067
2068         /*
2069          * If the child device's resources are not yet assigned it means we
2070          * are configuring them (not the boot firmware), so we should be
2071          * able to extend the upstream bridge resources in the same way we
2072          * do with the normal hotplug case.
2073          */
2074         r = &dev->resource[PCI_BRIDGE_IO_WINDOW];
2075         if (r->flags && !(r->flags & IORESOURCE_STARTALIGN))
2076                 return false;
2077         r = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
2078         if (r->flags && !(r->flags & IORESOURCE_STARTALIGN))
2079                 return false;
2080         r = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
2081         if (r->flags && !(r->flags & IORESOURCE_STARTALIGN))
2082                 return false;
2083
2084         return true;
2085 }
2086
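     /*
      * Walk down from the root bus and, when a bridge's windows were not
      * assigned by the boot firmware, distribute the spare space of its
      * upstream bridge downwards.
      */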
2087 static void
2088 pci_root_bus_distribute_available_resources(struct pci_bus *bus,
2089                                             struct list_head *add_list)
2090 {
2091         struct pci_dev *dev, *bridge = bus->self;
2092
2093         for_each_pci_bridge(dev, bus) {
2094                 struct pci_bus *b;
2095
2096                 b = dev->subordinate;
2097                 if (!b)
2098                         continue;
2099
2100                 /*
2101                  * We need to check "bridge" here too because it is NULL
2102                  * in the case of the root bus.
2103                  */
2104                 if (bridge && pci_bridge_resources_not_assigned(dev))
2105                         pci_bridge_distribute_available_resources(bridge,
2106                                                                   add_list);
2107                 else
2108                         pci_root_bus_distribute_available_resources(b, add_list);
2109         }
2110 }
2111
2112 /*
2113  * The first try will not touch PCI bridge resources.
2114  * The second and later tries will release too-small leaf bridge resources.
2115  * Retrying stops at the maximum bus depth if no good assignment can be found.
2116  */
2117 void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
2118 {
2119         LIST_HEAD(realloc_head);
2120         /* List of resources that want additional resources */
2121         struct list_head *add_list = NULL;
2122         int tried_times = 0;
2123         enum release_type rel_type = leaf_only;
2124         LIST_HEAD(fail_head);
2125         struct pci_dev_resource *fail_res;
2126         int pci_try_num = 1;
2127         enum enable_type enable_local;
2128
2129         /* Only realloc if the user asked for it or auto-detection enables it */
2130         enable_local = pci_realloc_detect(bus, pci_realloc_enable);
2131         if (pci_realloc_enabled(enable_local)) {
2132                 int max_depth = pci_bus_get_depth(bus);
2133
2134                 pci_try_num = max_depth + 1;
2135                 dev_info(&bus->dev, "max bus depth: %d pci_try_num: %d\n",
2136                          max_depth, pci_try_num);
2137         }
2138
2139 again:
2140         /*
2141          * Only the last try uses add_list; earlier tries treat "good to have"
2142          * sizes as "must have", so that parent bridge resources can be reallocated.
2143          */
2144         if (tried_times + 1 == pci_try_num)
2145                 add_list = &realloc_head;
2146         /*
2147          * Depth first, calculate sizes and alignments of all subordinate buses.
2148          */
2149         __pci_bus_size_bridges(bus, add_list);
2150
2151         pci_root_bus_distribute_available_resources(bus, add_list);
2152
2153         /* Depth last, allocate resources and update the hardware. */
2154         __pci_bus_assign_resources(bus, add_list, &fail_head);
2155         if (add_list)
2156                 BUG_ON(!list_empty(add_list));
2157         tried_times++;
2158
2159         /* Did any device fail to get its resources? */
2160         if (list_empty(&fail_head))
2161                 goto dump;
2162
2163         if (tried_times >= pci_try_num) {
2164                 if (enable_local == undefined)
2165                         dev_info(&bus->dev, "Some PCI device resources are unassigned, try booting with pci=realloc\n");
2166                 else if (enable_local == auto_enabled)
2167                         dev_info(&bus->dev, "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
2168
2169                 free_list(&fail_head);
2170                 goto dump;
2171         }
2172
2173         dev_info(&bus->dev, "try %d to assign unassigned resources\n",
2174                  tried_times + 1);
2175
2176         /* The third and later tries do not check whether the bridge is a leaf */
2177         if ((tried_times + 1) > 2)
2178                 rel_type = whole_subtree;
2179
2180         /*
2181          * Try to release leaf bridge resources that aren't big enough to
2182          * contain the child device resources under that bridge.
2183          */
2184         list_for_each_entry(fail_res, &fail_head, list)
2185                 pci_bus_release_bridge_resources(fail_res->dev->bus,
2186                                                  fail_res->flags & PCI_RES_TYPE_MASK,
2187                                                  rel_type);
2188
2189         /* Restore size and flags */
2190         list_for_each_entry(fail_res, &fail_head, list) {
2191                 struct resource *res = fail_res->res;
2192                 int idx;
2193
2194                 res->start = fail_res->start;
2195                 res->end = fail_res->end;
2196                 res->flags = fail_res->flags;
2197
2198                 if (pci_is_bridge(fail_res->dev)) {
2199                         idx = res - &fail_res->dev->resource[0];
2200                         if (idx >= PCI_BRIDGE_RESOURCES &&
2201                             idx <= PCI_BRIDGE_RESOURCE_END)
2202                                 res->flags = 0;
2203                 }
2204         }
2205         free_list(&fail_head);
2206
2207         goto again;
2208
2209 dump:
2210         /* Dump the resources on the buses */
2211         pci_bus_dump_resources(bus);
2212 }
2213
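     /*
      * Assign any still-unassigned resources on every root bus, then call
      * acpi_ioapic_add() for root bridges that have an ACPI companion.
      */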
2214 void pci_assign_unassigned_resources(void)
2215 {
2216         struct pci_bus *root_bus;
2217
2218         list_for_each_entry(root_bus, &pci_root_buses, node) {
2219                 pci_assign_unassigned_root_bus_resources(root_bus);
2220
2221                 /* If the root bridge has an ACPI companion, add its I/O APICs */
2222                 if (ACPI_HANDLE(root_bus->bridge))
2223                         acpi_ioapic_add(ACPI_HANDLE(root_bus->bridge));
2224         }
2225 }
2226
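     /*
      * Size and assign any unassigned resources below @bridge, distributing
      * leftover window space to hotplug bridges underneath, then re-enable
      * the bridge and bus mastering.
      */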
2227 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
2228 {
2229         struct pci_bus *parent = bridge->subordinate;
2230         /* List of resources that want additional resources */
2231         LIST_HEAD(add_list);
2232
2233         int tried_times = 0;
2234         LIST_HEAD(fail_head);
2235         struct pci_dev_resource *fail_res;
2236         int retval;
2237
2238 again:
2239         __pci_bus_size_bridges(parent, &add_list);
2240
2241         /*
2242          * Distribute remaining resources (if any) equally between hotplug
2243          * bridges below.  This makes it possible to extend the hierarchy
2244          * later without running out of resources.
2245          */
2246         pci_bridge_distribute_available_resources(bridge, &add_list);
2247
2248         __pci_bridge_assign_resources(bridge, &add_list, &fail_head);
2249         BUG_ON(!list_empty(&add_list));
2250         tried_times++;
2251
2252         if (list_empty(&fail_head))
2253                 goto enable_all;
2254
2255         if (tried_times >= 2) {
2256                 /* Still failing; no need to try any further */
2257                 free_list(&fail_head);
2258                 goto enable_all;
2259         }
2260
2261         printk(KERN_DEBUG "PCI: try %d to assign unassigned resources\n",
2262                          tried_times + 1);
2263
2264         /*
2265          * Try to release leaf bridge's resources that aren't big enough
2266          * to contain child device resources.
2267          */
2268         list_for_each_entry(fail_res, &fail_head, list)
2269                 pci_bus_release_bridge_resources(fail_res->dev->bus,
2270                                                  fail_res->flags & PCI_RES_TYPE_MASK,
2271                                                  whole_subtree);
2272
2273         /* Restore size and flags */
2274         list_for_each_entry(fail_res, &fail_head, list) {
2275                 struct resource *res = fail_res->res;
2276                 int idx;
2277
2278                 res->start = fail_res->start;
2279                 res->end = fail_res->end;
2280                 res->flags = fail_res->flags;
2281
2282                 if (pci_is_bridge(fail_res->dev)) {
2283                         idx = res - &fail_res->dev->resource[0];
2284                         if (idx >= PCI_BRIDGE_RESOURCES &&
2285                             idx <= PCI_BRIDGE_RESOURCE_END)
2286                                 res->flags = 0;
2287                 }
2288         }
2289         free_list(&fail_head);
2290
2291         goto again;
2292
2293 enable_all:
2294         retval = pci_reenable_device(bridge);
2295         if (retval)
2296                 pci_err(bridge, "Error reenabling bridge (%d)\n", retval);
2297         pci_set_master(bridge);
2298 }
2299 EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
2300
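     /*
      * Release the bridge windows matching @type from @bridge up towards the
      * root, then resize and reassign them.  On failure the previous
      * configuration is restored.  Returns 0 on success or a negative errno.
      */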
2301 int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
2302 {
2303         struct pci_dev_resource *dev_res;
2304         struct pci_dev *next;
2305         LIST_HEAD(saved);
2306         LIST_HEAD(added);
2307         LIST_HEAD(failed);
2308         unsigned int i;
2309         int ret;
2310
2311         down_read(&pci_bus_sem);
2312
2313         /* Walk up to the root bus, releasing bridge windows when possible */
2314         next = bridge;
2315         do {
2316                 bridge = next;
2317                 for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCE_END;
2318                      i++) {
2319                         struct resource *res = &bridge->resource[i];
2320                         const char *res_name = pci_resource_name(bridge, i);
2321
2322                         if ((res->flags ^ type) & PCI_RES_TYPE_MASK)
2323                                 continue;
2324
2325                         /* Ignore BARs which are still in use */
2326                         if (res->child)
2327                                 continue;
2328
2329                         ret = add_to_list(&saved, bridge, res, 0, 0);
2330                         if (ret)
2331                                 goto cleanup;
2332
2333                         pci_info(bridge, "%s %pR: releasing\n", res_name, res);
2334
2335                         if (res->parent)
2336                                 release_resource(res);
2337                         res->start = 0;
2338                         res->end = 0;
2339                         break;
2340                 }
2341                 if (i == PCI_BRIDGE_RESOURCE_END)
2342                         break;
2343
2344                 next = bridge->bus ? bridge->bus->self : NULL;
2345         } while (next);
2346
2347         if (list_empty(&saved)) {
2348                 up_read(&pci_bus_sem);
2349                 return -ENOENT;
2350         }
2351
2352         __pci_bus_size_bridges(bridge->subordinate, &added);
2353         __pci_bridge_assign_resources(bridge, &added, &failed);
2354         BUG_ON(!list_empty(&added));
2355
2356         if (!list_empty(&failed)) {
2357                 ret = -ENOSPC;
2358                 goto cleanup;
2359         }
2360
2361         list_for_each_entry(dev_res, &saved, list) {
2362                 /* Skip the bridge we just assigned resources for */
2363                 if (bridge == dev_res->dev)
2364                         continue;
2365
2366                 bridge = dev_res->dev;
2367                 pci_setup_bridge(bridge->subordinate);
2368         }
2369
2370         free_list(&saved);
2371         up_read(&pci_bus_sem);
2372         return 0;
2373
2374 cleanup:
2375         /* Restore size and flags */
2376         list_for_each_entry(dev_res, &failed, list) {
2377                 struct resource *res = dev_res->res;
2378
2379                 res->start = dev_res->start;
2380                 res->end = dev_res->end;
2381                 res->flags = dev_res->flags;
2382         }
2383         free_list(&failed);
2384
2385         /* Revert to the old configuration */
2386         list_for_each_entry(dev_res, &saved, list) {
2387                 struct resource *res = dev_res->res;
2388
2389                 bridge = dev_res->dev;
2390                 i = res - bridge->resource;
2391
2392                 res->start = dev_res->start;
2393                 res->end = dev_res->end;
2394                 res->flags = dev_res->flags;
2395
2396                 pci_claim_resource(bridge, i);
2397                 pci_setup_bridge(bridge->subordinate);
2398         }
2399         free_list(&saved);
2400         up_read(&pci_bus_sem);
2401
2402         return ret;
2403 }
2404
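     /*
      * Size the bridge windows below @bus and assign the resources that are
      * still unassigned; typically used after devices have been hot-added to
      * an already-scanned bus.
      */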
2405 void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
2406 {
2407         struct pci_dev *dev;
2408         /* List of resources that want additional resources */
2409         LIST_HEAD(add_list);
2410
2411         down_read(&pci_bus_sem);
2412         for_each_pci_bridge(dev, bus)
2413                 if (pci_has_subordinate(dev))
2414                         __pci_bus_size_bridges(dev->subordinate, &add_list);
2415         up_read(&pci_bus_sem);
2416         __pci_bus_assign_resources(bus, &add_list, NULL);
2417         BUG_ON(!list_empty(&add_list));
2418 }
2419 EXPORT_SYMBOL_GPL(pci_assign_unassigned_bus_resources);