Diffstat (limited to 'drivers/pci/setup-bus.c')
-rw-r--r--  drivers/pci/setup-bus.c | 514
1 file changed, 462 insertions(+), 52 deletions(-)
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index c48cd377b3f5..4fe36d2e1049 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -27,37 +27,91 @@
 #include <linux/slab.h>
 #include "pci.h"
 
-static void pbus_assign_resources_sorted(const struct pci_bus *bus)
-{
-        struct pci_dev *dev;
+struct resource_list_x {
+        struct resource_list_x *next;
         struct resource *res;
-        struct resource_list head, *list, *tmp;
-        int idx;
+        struct pci_dev *dev;
+        resource_size_t start;
+        resource_size_t end;
+        unsigned long flags;
+};
 
-        head.next = NULL;
-        list_for_each_entry(dev, &bus->devices, bus_list) {
-                u16 class = dev->class >> 8;
+static void add_to_failed_list(struct resource_list_x *head,
+                                 struct pci_dev *dev, struct resource *res)
+{
+        struct resource_list_x *list = head;
+        struct resource_list_x *ln = list->next;
+        struct resource_list_x *tmp;
 
-                /* Don't touch classless devices or host bridges or ioapics. */
-                if (class == PCI_CLASS_NOT_DEFINED ||
-                    class == PCI_CLASS_BRIDGE_HOST)
-                        continue;
+        tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
+        if (!tmp) {
+                pr_warning("add_to_failed_list: kmalloc() failed!\n");
+                return;
+        }
 
-                /* Don't touch ioapic devices already enabled by firmware */
-                if (class == PCI_CLASS_SYSTEM_PIC) {
-                        u16 command;
-                        pci_read_config_word(dev, PCI_COMMAND, &command);
-                        if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
-                                continue;
-                }
+        tmp->next = ln;
+        tmp->res = res;
+        tmp->dev = dev;
+        tmp->start = res->start;
+        tmp->end = res->end;
+        tmp->flags = res->flags;
+        list->next = tmp;
+}
+
+static void free_failed_list(struct resource_list_x *head)
+{
+        struct resource_list_x *list, *tmp;
 
-                pdev_sort_resources(dev, &head);
+        for (list = head->next; list;) {
+                tmp = list;
+                list = list->next;
+                kfree(tmp);
         }
 
-        for (list = head.next; list;) {
+        head->next = NULL;
+}
+
+static void __dev_sort_resources(struct pci_dev *dev,
+                                 struct resource_list *head)
+{
+        u16 class = dev->class >> 8;
+
+        /* Don't touch classless devices or host bridges or ioapics. */
+        if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
+                return;
+
+        /* Don't touch ioapic devices already enabled by firmware */
+        if (class == PCI_CLASS_SYSTEM_PIC) {
+                u16 command;
+                pci_read_config_word(dev, PCI_COMMAND, &command);
+                if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
+                        return;
+        }
+
+        pdev_sort_resources(dev, head);
+}
+
+static void __assign_resources_sorted(struct resource_list *head,
+                                 struct resource_list_x *fail_head)
+{
+        struct resource *res;
+        struct resource_list *list, *tmp;
+        int idx;
+
+        for (list = head->next; list;) {
                 res = list->res;
                 idx = res - &list->dev->resource[0];
+
                 if (pci_assign_resource(list->dev, idx)) {
+                        if (fail_head && !pci_is_root_bus(list->dev->bus)) {
+                                /*
+                                 * if the failed res is for ROM BAR, and it will
+                                 * be enabled later, don't add it to the list
+                                 */
+                                if (!((idx == PCI_ROM_RESOURCE) &&
+                                      (!(res->flags & IORESOURCE_ROM_ENABLE))))
+                                        add_to_failed_list(fail_head, list->dev, res);
+                        }
                         res->start = 0;
                         res->end = 0;
                         res->flags = 0;
@@ -68,6 +122,30 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus)
         }
 }
 
+static void pdev_assign_resources_sorted(struct pci_dev *dev,
+                                 struct resource_list_x *fail_head)
+{
+        struct resource_list head;
+
+        head.next = NULL;
+        __dev_sort_resources(dev, &head);
+        __assign_resources_sorted(&head, fail_head);
+
+}
+
+static void pbus_assign_resources_sorted(const struct pci_bus *bus,
+                                         struct resource_list_x *fail_head)
+{
+        struct pci_dev *dev;
+        struct resource_list head;
+
+        head.next = NULL;
+        list_for_each_entry(dev, &bus->devices, bus_list)
+                __dev_sort_resources(dev, &head);
+
+        __assign_resources_sorted(&head, fail_head);
+}
+
 void pci_setup_cardbus(struct pci_bus *bus)
 {
         struct pci_dev *bridge = bus->self;
@@ -134,18 +212,12 @@ EXPORT_SYMBOL(pci_setup_cardbus);
    config space writes, so it's quite possible that an I/O window of
    the bridge will have some undesirable address (e.g. 0) after the
    first write. Ditto 64-bit prefetchable MMIO. */
-static void pci_setup_bridge(struct pci_bus *bus)
+static void pci_setup_bridge_io(struct pci_bus *bus)
 {
         struct pci_dev *bridge = bus->self;
         struct resource *res;
         struct pci_bus_region region;
-        u32 l, bu, lu, io_upper16;
-
-        if (pci_is_enabled(bridge))
-                return;
-
-        dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n",
-                 bus->secondary, bus->subordinate);
+        u32 l, io_upper16;
 
         /* Set up the top and bottom of the PCI I/O segment for this bus. */
         res = bus->resource[0];
@@ -158,8 +230,7 @@ static void pci_setup_bridge(struct pci_bus *bus)
                 /* Set up upper 16 bits of I/O base/limit. */
                 io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
                 dev_info(&bridge->dev, "  bridge window %pR\n", res);
-        }
-        else {
+        } else {
                 /* Clear upper 16 bits of I/O base/limit. */
                 io_upper16 = 0;
                 l = 0x00f0;
@@ -171,21 +242,35 @@ static void pci_setup_bridge(struct pci_bus *bus)
         pci_write_config_dword(bridge, PCI_IO_BASE, l);
         /* Update upper 16 bits of I/O base/limit. */
         pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
+}
+
+static void pci_setup_bridge_mmio(struct pci_bus *bus)
+{
+        struct pci_dev *bridge = bus->self;
+        struct resource *res;
+        struct pci_bus_region region;
+        u32 l;
 
-        /* Set up the top and bottom of the PCI Memory segment
-           for this bus. */
+        /* Set up the top and bottom of the PCI Memory segment for this bus. */
         res = bus->resource[1];
         pcibios_resource_to_bus(bridge, &region, res);
         if (res->flags & IORESOURCE_MEM) {
                 l = (region.start >> 16) & 0xfff0;
                 l |= region.end & 0xfff00000;
                 dev_info(&bridge->dev, "  bridge window %pR\n", res);
-        }
-        else {
+        } else {
                 l = 0x0000fff0;
                 dev_info(&bridge->dev, "  bridge window [mem disabled]\n");
         }
         pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
+}
+
+static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
+{
+        struct pci_dev *bridge = bus->self;
+        struct resource *res;
+        struct pci_bus_region region;
+        u32 l, bu, lu;
 
         /* Clear out the upper 32 bits of PREF limit.
            If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily
@@ -204,8 +289,7 @@ static void pci_setup_bridge(struct pci_bus *bus)
                         lu = upper_32_bits(region.end);
                 }
                 dev_info(&bridge->dev, "  bridge window %pR\n", res);
-        }
-        else {
+        } else {
                 l = 0x0000fff0;
                 dev_info(&bridge->dev, "  bridge window [mem pref disabled]\n");
         }
@@ -214,10 +298,35 @@ static void pci_setup_bridge(struct pci_bus *bus)
         /* Set the upper 32 bits of PREF base & limit. */
         pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
         pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
+}
+
+static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
+{
+        struct pci_dev *bridge = bus->self;
+
+        dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n",
+                 bus->secondary, bus->subordinate);
+
+        if (type & IORESOURCE_IO)
+                pci_setup_bridge_io(bus);
+
+        if (type & IORESOURCE_MEM)
+                pci_setup_bridge_mmio(bus);
+
+        if (type & IORESOURCE_PREFETCH)
+                pci_setup_bridge_mmio_pref(bus);
 
         pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
 }
 
+static void pci_setup_bridge(struct pci_bus *bus)
+{
+        unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
+                                  IORESOURCE_PREFETCH;
+
+        __pci_setup_bridge(bus, type);
+}
+
 /* Check whether the bridge supports optional I/O and
    prefetchable memory ranges. If not, the respective
    base/limit registers must be read-only and read as 0. */
@@ -253,8 +362,11 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
         }
         if (pmem) {
                 b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
-                if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64)
+                if ((pmem & PCI_PREF_RANGE_TYPE_MASK) ==
+                    PCI_PREF_RANGE_TYPE_64) {
                         b_res[2].flags |= IORESOURCE_MEM_64;
+                        b_res[2].flags |= PCI_PREF_RANGE_TYPE_64;
+                }
         }
 
         /* double check if bridge does support 64 bit pref */
@@ -283,8 +395,7 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned lon
         unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
                                   IORESOURCE_PREFETCH;
 
-        for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
-                r = bus->resource[i];
+        pci_bus_for_each_resource(bus, r, i) {
                 if (r == &ioport_resource || r == &iomem_resource)
                         continue;
                 if (r && (r->flags & type_mask) == type && !r->parent)
@@ -301,7 +412,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size)
 {
         struct pci_dev *dev;
         struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
-        unsigned long size = 0, size1 = 0;
+        unsigned long size = 0, size1 = 0, old_size;
 
         if (!b_res)
                 return;
@@ -326,12 +437,17 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size)
         }
         if (size < min_size)
                 size = min_size;
+        old_size = resource_size(b_res);
+        if (old_size == 1)
+                old_size = 0;
         /* To be fixed in 2.5: we should have sort of HAVE_ISA
            flag in the struct pci_bus. */
 #if defined(CONFIG_ISA) || defined(CONFIG_EISA)
         size = (size & 0xff) + ((size & ~0xffUL) << 2);
 #endif
         size = ALIGN(size + size1, 4096);
+        if (size < old_size)
+                size = old_size;
         if (!size) {
                 if (b_res->start || b_res->end)
                         dev_info(&bus->self->dev, "disabling bridge window "
@@ -352,7 +468,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
                          unsigned long type, resource_size_t min_size)
 {
         struct pci_dev *dev;
-        resource_size_t min_align, align, size;
+        resource_size_t min_align, align, size, old_size;
         resource_size_t aligns[12];     /* Alignments from 1Mb to 2Gb */
         int order, max_order;
         struct resource *b_res = find_free_bus_resource(bus, type);
@@ -402,6 +518,11 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
         }
         if (size < min_size)
                 size = min_size;
+        old_size = resource_size(b_res);
+        if (old_size == 1)
+                old_size = 0;
+        if (size < old_size)
+                size = old_size;
 
         align = 0;
         min_align = 0;
@@ -538,23 +659,25 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
 }
 EXPORT_SYMBOL(pci_bus_size_bridges);
 
-void __ref pci_bus_assign_resources(const struct pci_bus *bus)
+static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
+                                         struct resource_list_x *fail_head)
 {
         struct pci_bus *b;
         struct pci_dev *dev;
 
-        pbus_assign_resources_sorted(bus);
+        pbus_assign_resources_sorted(bus, fail_head);
 
         list_for_each_entry(dev, &bus->devices, bus_list) {
                 b = dev->subordinate;
                 if (!b)
                         continue;
 
-                pci_bus_assign_resources(b);
+                __pci_bus_assign_resources(b, fail_head);
 
                 switch (dev->class >> 8) {
                 case PCI_CLASS_BRIDGE_PCI:
-                        pci_setup_bridge(b);
+                        if (!pci_is_enabled(dev))
+                                pci_setup_bridge(b);
                         break;
 
                 case PCI_CLASS_BRIDGE_CARDBUS:
@@ -568,15 +691,130 @@ void __ref pci_bus_assign_resources(const struct pci_bus *bus)
                 }
         }
 }
+
+void __ref pci_bus_assign_resources(const struct pci_bus *bus)
+{
+        __pci_bus_assign_resources(bus, NULL);
+}
 EXPORT_SYMBOL(pci_bus_assign_resources);
 
+static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge,
+                                         struct resource_list_x *fail_head)
+{
+        struct pci_bus *b;
+
+        pdev_assign_resources_sorted((struct pci_dev *)bridge, fail_head);
+
+        b = bridge->subordinate;
+        if (!b)
+                return;
+
+        __pci_bus_assign_resources(b, fail_head);
+
+        switch (bridge->class >> 8) {
+        case PCI_CLASS_BRIDGE_PCI:
+                pci_setup_bridge(b);
+                break;
+
+        case PCI_CLASS_BRIDGE_CARDBUS:
+                pci_setup_cardbus(b);
+                break;
+
+        default:
+                dev_info(&bridge->dev, "not setting up bridge for bus "
+                         "%04x:%02x\n", pci_domain_nr(b), b->number);
+                break;
+        }
+}
+static void pci_bridge_release_resources(struct pci_bus *bus,
+                                          unsigned long type)
+{
+        int idx;
+        bool changed = false;
+        struct pci_dev *dev;
+        struct resource *r;
+        unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
+                                  IORESOURCE_PREFETCH;
+
+        dev = bus->self;
+        for (idx = PCI_BRIDGE_RESOURCES; idx <= PCI_BRIDGE_RESOURCE_END;
+             idx++) {
+                r = &dev->resource[idx];
+                if ((r->flags & type_mask) != type)
+                        continue;
+                if (!r->parent)
+                        continue;
+                /*
+                 * if there are children under that, we should release them
+                 * all
+                 */
+                release_child_resources(r);
+                if (!release_resource(r)) {
+                        dev_printk(KERN_DEBUG, &dev->dev,
+                                 "resource %d %pR released\n", idx, r);
+                        /* keep the old size */
+                        r->end = resource_size(r) - 1;
+                        r->start = 0;
+                        r->flags = 0;
+                        changed = true;
+                }
+        }
+
+        if (changed) {
+                /* avoiding touch the one without PREF */
+                if (type & IORESOURCE_PREFETCH)
+                        type = IORESOURCE_PREFETCH;
+                __pci_setup_bridge(bus, type);
+        }
+}
+
+enum release_type {
+        leaf_only,
+        whole_subtree,
+};
+/*
+ * try to release pci bridge resources that is from leaf bridge,
+ * so we can allocate big new one later
+ */
+static void __ref pci_bus_release_bridge_resources(struct pci_bus *bus,
+                                                   unsigned long type,
+                                                   enum release_type rel_type)
+{
+        struct pci_dev *dev;
+        bool is_leaf_bridge = true;
+
+        list_for_each_entry(dev, &bus->devices, bus_list) {
+                struct pci_bus *b = dev->subordinate;
+                if (!b)
+                        continue;
+
+                is_leaf_bridge = false;
+
+                if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
+                        continue;
+
+                if (rel_type == whole_subtree)
+                        pci_bus_release_bridge_resources(b, type,
+                                                 whole_subtree);
+        }
+
+        if (pci_is_root_bus(bus))
+                return;
+
+        if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI)
+                return;
+
+        if ((rel_type == whole_subtree) || is_leaf_bridge)
+                pci_bridge_release_resources(bus, type);
+}
+
 static void pci_bus_dump_res(struct pci_bus *bus)
 {
-        int i;
+        struct resource *res;
+        int i;
 
-        for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
-                struct resource *res = bus->resource[i];
-                if (!res || !res->end)
+        pci_bus_for_each_resource(bus, res, i) {
+                if (!res || !res->end || !res->flags)
                         continue;
 
                 dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res);
@@ -600,11 +838,65 @@ static void pci_bus_dump_resources(struct pci_bus *bus)
         }
 }
 
+static int __init pci_bus_get_depth(struct pci_bus *bus)
+{
+        int depth = 0;
+        struct pci_dev *dev;
+
+        list_for_each_entry(dev, &bus->devices, bus_list) {
+                int ret;
+                struct pci_bus *b = dev->subordinate;
+                if (!b)
+                        continue;
+
+                ret = pci_bus_get_depth(b);
+                if (ret + 1 > depth)
+                        depth = ret + 1;
+        }
+
+        return depth;
+}
+static int __init pci_get_max_depth(void)
+{
+        int depth = 0;
+        struct pci_bus *bus;
+
+        list_for_each_entry(bus, &pci_root_buses, node) {
+                int ret;
+
+                ret = pci_bus_get_depth(bus);
+                if (ret > depth)
+                        depth = ret;
+        }
+
+        return depth;
+}
+
+/*
+ * first try will not touch pci bridge res
+ * second and later try will clear small leaf bridge res
+ * will stop till to the max deepth if can not find good one
+ */
 void __init
 pci_assign_unassigned_resources(void)
 {
         struct pci_bus *bus;
+        int tried_times = 0;
+        enum release_type rel_type = leaf_only;
+        struct resource_list_x head, *list;
+        unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
+                                  IORESOURCE_PREFETCH;
+        unsigned long failed_type;
+        int max_depth = pci_get_max_depth();
+        int pci_try_num;
+
+        head.next = NULL;
+
+        pci_try_num = max_depth + 1;
+        printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n",
+                 max_depth, pci_try_num);
 
+again:
         /* Depth first, calculate sizes and alignments of all
            subordinate buses. */
         list_for_each_entry(bus, &pci_root_buses, node) {
@@ -612,12 +904,130 @@ pci_assign_unassigned_resources(void)
         }
         /* Depth last, allocate resources and update the hardware. */
         list_for_each_entry(bus, &pci_root_buses, node) {
-                pci_bus_assign_resources(bus);
-                pci_enable_bridges(bus);
+                __pci_bus_assign_resources(bus, &head);
         }
+        tried_times++;
+
+        /* any device complain? */
+        if (!head.next)
+                goto enable_and_dump;
+        failed_type = 0;
+        for (list = head.next; list;) {
+                failed_type |= list->flags;
+                list = list->next;
+        }
+        /*
+         * io port are tight, don't try extra
+         * or if reach the limit, don't want to try more
+         */
+        failed_type &= type_mask;
+        if ((failed_type == IORESOURCE_IO) || (tried_times >= pci_try_num)) {
+                free_failed_list(&head);
+                goto enable_and_dump;
+        }
+
+        printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
+                         tried_times + 1);
+
+        /* third times and later will not check if it is leaf */
+        if ((tried_times + 1) > 2)
+                rel_type = whole_subtree;
+
+        /*
+         * Try to release leaf bridge's resources that doesn't fit resource of
+         * child device under that bridge
+         */
+        for (list = head.next; list;) {
+                bus = list->dev->bus;
+                pci_bus_release_bridge_resources(bus, list->flags & type_mask,
+                                                  rel_type);
+                list = list->next;
+        }
+        /* restore size and flags */
+        for (list = head.next; list;) {
+                struct resource *res = list->res;
+
+                res->start = list->start;
+                res->end = list->end;
+                res->flags = list->flags;
+                if (list->dev->subordinate)
+                        res->flags = 0;
+
+                list = list->next;
+        }
+        free_failed_list(&head);
+
+        goto again;
+
+enable_and_dump:
+        /* Depth last, update the hardware. */
+        list_for_each_entry(bus, &pci_root_buses, node)
+                pci_enable_bridges(bus);
 
         /* dump the resource on buses */
         list_for_each_entry(bus, &pci_root_buses, node) {
                 pci_bus_dump_resources(bus);
         }
 }
+
+void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
+{
+        struct pci_bus *parent = bridge->subordinate;
+        int tried_times = 0;
+        struct resource_list_x head, *list;
+        int retval;
+        unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
+                                  IORESOURCE_PREFETCH;
+
+        head.next = NULL;
+
+again:
+        pci_bus_size_bridges(parent);
+        __pci_bridge_assign_resources(bridge, &head);
+        retval = pci_reenable_device(bridge);
+        pci_set_master(bridge);
+        pci_enable_bridges(parent);
+
+        tried_times++;
+
+        if (!head.next)
+                return;
+
+        if (tried_times >= 2) {
+                /* still fail, don't need to try more */
+                free_failed_list(&head);
+                return;
+        }
+
+        printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
+                         tried_times + 1);
+
+        /*
+         * Try to release leaf bridge's resources that doesn't fit resource of
+         * child device under that bridge
+         */
+        for (list = head.next; list;) {
+                struct pci_bus *bus = list->dev->bus;
+                unsigned long flags = list->flags;
+
+                pci_bus_release_bridge_resources(bus, flags & type_mask,
+                                                  whole_subtree);
+                list = list->next;
+        }
+        /* restore size and flags */
+        for (list = head.next; list;) {
+                struct resource *res = list->res;
+
+                res->start = list->start;
+                res->end = list->end;
+                res->flags = list->flags;
+                if (list->dev->subordinate)
+                        res->flags = 0;
+
+                list = list->next;
+        }
+        free_failed_list(&head);
+
+        goto again;
+}
+EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
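
For context (not part of the diff above): the newly exported pci_assign_unassigned_bridge_resources() sizes and assigns the windows of one bridge and everything behind it, which is the shape of work a PCI hotplug driver needs after rescanning a slot. The sketch below is a hypothetical caller, not code from this commit; the function name example_enable_slot and the exact ordering are assumptions, while the three PCI core calls are existing interfaces.

/*
 * Illustrative only -- hypothetical hotplug enable path using the
 * export added by this change.
 */
static void example_enable_slot(struct pci_dev *bridge)
{
        struct pci_bus *bus = bridge->subordinate;

        /* discover the newly inserted devices behind the bridge */
        pci_scan_child_bus(bus);

        /* size the bridge windows, assign BARs, retry once on failure */
        pci_assign_unassigned_bridge_resources(bridge);

        /* make the new devices visible to drivers */
        pci_bus_add_devices(bus);
}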