author     Gavin Shan <shangw@linux.vnet.ibm.com>              2012-08-19 23:49:21 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2012-09-17 02:35:16 -0400
commit     b9ae38aeca2fddbabe2942bee431873a05d21d74 (patch)
tree       b6e6edd7aa2f3a79e1b4d3c46a734a2925e719c0 /arch/powerpc
parent     c40a4210a4b55284a71ed52721d9894b5bdb1953 (diff)
powerpc/powernv: Remove unused functions
These functions are no longer needed; this patch removes them.
Signed-off-by: Gavin Shan <shangw@linux.vnet.ibm.com>
Reviewed-by: Ram Pai <linuxram@us.ibm.com>
Reviewed-by: Richard Yang <weiyang@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/platforms/powernv/pci-ioda.c | 441 ----------------------
1 file changed, 0 insertions(+), 441 deletions(-)
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index d65f84c87fbe..471aa3ccd9fd 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -34,14 +34,6 @@
 #include "powernv.h"
 #include "pci.h"
 
-struct resource_wrap {
-	struct list_head link;
-	resource_size_t size;
-	resource_size_t align;
-	struct pci_dev *dev;	/* Set if it's a device */
-	struct pci_bus *bus;	/* Set if it's a bridge */
-};
-
 static int __pe_printk(const char *level, const struct pnv_ioda_pe *pe,
 		       struct va_format *vaf)
 {
@@ -77,273 +69,6 @@ define_pe_printk_level(pe_err, KERN_ERR);
 define_pe_printk_level(pe_warn, KERN_WARNING);
 define_pe_printk_level(pe_info, KERN_INFO);
 
-
-/* Calculate resource usage & alignment requirement of a single
- * device. This will also assign all resources within the device
- * for a given type starting at 0 for the biggest one and then
- * assigning in decreasing order of size.
- */
-static void __devinit pnv_ioda_calc_dev(struct pci_dev *dev, unsigned int flags,
-					resource_size_t *size,
-					resource_size_t *align)
-{
-	resource_size_t start;
-	struct resource *r;
-	int i;
-
-	pr_devel(" -> CDR %s\n", pci_name(dev));
-
-	*size = *align = 0;
-
-	/* Clear the resources out and mark them all unset */
-	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
-		r = &dev->resource[i];
-		if (!(r->flags & flags))
-			continue;
-		if (r->start) {
-			r->end -= r->start;
-			r->start = 0;
-		}
-		r->flags |= IORESOURCE_UNSET;
-	}
-
-	/* We currently keep all memory resources together, we
-	 * will handle prefetch & 64-bit separately in the future
-	 * but for now we stick everybody in M32
-	 */
-	start = 0;
-	for (;;) {
-		resource_size_t max_size = 0;
-		int max_no = -1;
-
-		/* Find next biggest resource */
-		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
-			r = &dev->resource[i];
-			if (!(r->flags & IORESOURCE_UNSET) ||
-			    !(r->flags & flags))
-				continue;
-			if (resource_size(r) > max_size) {
-				max_size = resource_size(r);
-				max_no = i;
-			}
-		}
-		if (max_no < 0)
-			break;
-		r = &dev->resource[max_no];
-		if (max_size > *align)
-			*align = max_size;
-		*size += max_size;
-		r->start = start;
-		start += max_size;
-		r->end = r->start + max_size - 1;
-		r->flags &= ~IORESOURCE_UNSET;
-		pr_devel(" -> R%d %016llx..%016llx\n",
-			 max_no, r->start, r->end);
-	}
-	pr_devel(" <- CDR %s size=%llx align=%llx\n",
-		 pci_name(dev), *size, *align);
-}
-
-/* Allocate a resource "wrap" for a given device or bridge and
- * insert it at the right position in the sorted list
- */
-static void __devinit pnv_ioda_add_wrap(struct list_head *list,
-					struct pci_bus *bus,
-					struct pci_dev *dev,
-					resource_size_t size,
-					resource_size_t align)
-{
-	struct resource_wrap *w1, *w = kzalloc(sizeof(*w), GFP_KERNEL);
-
-	w->size = size;
-	w->align = align;
-	w->dev = dev;
-	w->bus = bus;
-
-	list_for_each_entry(w1, list, link) {
-		if (w1->align < align) {
-			list_add_tail(&w->link, &w1->link);
-			return;
-		}
-	}
-	list_add_tail(&w->link, list);
-}
-
-/* Offset device resources of a given type */
-static void __devinit pnv_ioda_offset_dev(struct pci_dev *dev,
-					  unsigned int flags,
-					  resource_size_t offset)
-{
-	struct resource *r;
-	int i;
-
-	pr_devel(" -> ODR %s [%x] +%016llx\n", pci_name(dev), flags, offset);
-
-	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
-		r = &dev->resource[i];
-		if (r->flags & flags) {
-			dev->resource[i].start += offset;
-			dev->resource[i].end += offset;
-		}
-	}
-
-	pr_devel(" <- ODR %s [%x] +%016llx\n", pci_name(dev), flags, offset);
-}
-
-/* Offset bus resources (& all children) of a given type */
-static void __devinit pnv_ioda_offset_bus(struct pci_bus *bus,
-					  unsigned int flags,
-					  resource_size_t offset)
-{
-	struct resource *r;
-	struct pci_dev *dev;
-	struct pci_bus *cbus;
-	int i;
-
-	pr_devel(" -> OBR %s [%x] +%016llx\n",
-		 bus->self ? pci_name(bus->self) : "root", flags, offset);
-
-	pci_bus_for_each_resource(bus, r, i) {
-		if (r && (r->flags & flags)) {
-			r->start += offset;
-			r->end += offset;
-		}
-	}
-	list_for_each_entry(dev, &bus->devices, bus_list)
-		pnv_ioda_offset_dev(dev, flags, offset);
-	list_for_each_entry(cbus, &bus->children, node)
-		pnv_ioda_offset_bus(cbus, flags, offset);
-
-	pr_devel(" <- OBR %s [%x]\n",
-		 bus->self ? pci_name(bus->self) : "root", flags);
-}
-
-/* This is the guts of our IODA resource allocation. This is called
- * recursively for each bus in the system. It calculates all the
- * necessary size and requirements for children and assign them
- * resources such that:
- *
- *   - Each function fits in it's own contiguous set of IO/M32
- *     segment
- *
- *   - All segments behind a P2P bridge are contiguous and obey
- *     alignment constraints of those bridges
- */
-static void __devinit pnv_ioda_calc_bus(struct pci_bus *bus, unsigned int flags,
-					resource_size_t *size,
-					resource_size_t *align)
-{
-	struct pci_controller *hose = pci_bus_to_host(bus);
-	struct pnv_phb *phb = hose->private_data;
-	resource_size_t dev_size, dev_align, start;
-	resource_size_t min_align, min_balign;
-	struct pci_dev *cdev;
-	struct pci_bus *cbus;
-	struct list_head head;
-	struct resource_wrap *w;
-	unsigned int bres;
-
-	*size = *align = 0;
-
-	pr_devel("-> CBR %s [%x]\n",
-		 bus->self ? pci_name(bus->self) : "root", flags);
-
-	/* Calculate alignment requirements based on the type
-	 * of resource we are working on
-	 */
-	if (flags & IORESOURCE_IO) {
-		bres = 0;
-		min_align = phb->ioda.io_segsize;
-		min_balign = 0x1000;
-	} else {
-		bres = 1;
-		min_align = phb->ioda.m32_segsize;
-		min_balign = 0x100000;
-	}
-
-	/* Gather all our children resources ordered by alignment */
-	INIT_LIST_HEAD(&head);
-
-	/*  - Busses */
-	list_for_each_entry(cbus, &bus->children, node) {
-		pnv_ioda_calc_bus(cbus, flags, &dev_size, &dev_align);
-		pnv_ioda_add_wrap(&head, cbus, NULL, dev_size, dev_align);
-	}
-
-	/*  - Devices */
-	list_for_each_entry(cdev, &bus->devices, bus_list) {
-		pnv_ioda_calc_dev(cdev, flags, &dev_size, &dev_align);
-		/* Align them to segment size */
-		if (dev_align < min_align)
-			dev_align = min_align;
-		pnv_ioda_add_wrap(&head, NULL, cdev, dev_size, dev_align);
-	}
-	if (list_empty(&head))
-		goto empty;
-
-	/* Now we can do two things: assign offsets to them within that
-	 * level and get our total alignment & size requirements. The
-	 * assignment algorithm is going to be uber-trivial for now, we
-	 * can try to be smarter later at filling out holes.
-	 */
-	if (bus->self) {
-		/* No offset for downstream bridges */
-		start = 0;
-	} else {
-		/* Offset from the root */
-		if (flags & IORESOURCE_IO)
-			/* Don't hand out IO 0 */
-			start = hose->io_resource.start + 0x1000;
-		else
-			start = hose->mem_resources[0].start;
-	}
-	while(!list_empty(&head)) {
-		w = list_first_entry(&head, struct resource_wrap, link);
-		list_del(&w->link);
-		if (w->size) {
-			if (start) {
-				start = ALIGN(start, w->align);
-				if (w->dev)
-					pnv_ioda_offset_dev(w->dev,flags,start);
-				else if (w->bus)
-					pnv_ioda_offset_bus(w->bus,flags,start);
-			}
-			if (w->align > *align)
-				*align = w->align;
-		}
-		start += w->size;
-		kfree(w);
-	}
-	*size = start;
-
-	/* Align and setup bridge resources */
-	*align = max_t(resource_size_t, *align,
-		       max_t(resource_size_t, min_align, min_balign));
-	*size = ALIGN(*size,
-		      max_t(resource_size_t, min_align, min_balign));
- empty:
-	/* Only setup P2P's, not the PHB itself */
-	if (bus->self) {
-		struct resource *res = bus->resource[bres];
-
-		if (WARN_ON(res == NULL))
-			return;
-
-		/*
-		 * FIXME: We should probably export and call
-		 * pci_bridge_check_ranges() to properly re-initialize
-		 * the PCI portion of the flags here, and to detect
-		 * what the bridge actually supports.
-		 */
-		res->start = 0;
-		res->flags = (*size) ? flags : 0;
-		res->end = (*size) ? (*size - 1) : 0;
-	}
-
-	pr_devel("<- CBR %s [%x] *size=%016llx *align=%016llx\n",
-		 bus->self ? pci_name(bus->self) : "root", flags,*size,*align);
-}
-
 static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev)
 {
 	struct device_node *np;
@@ -354,172 +79,6 @@ static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev)
 	return PCI_DN(np);
 }
 
-static void __devinit pnv_ioda_setup_pe_segments(struct pci_dev *dev)
-{
-	struct pci_controller *hose = pci_bus_to_host(dev->bus);
-	struct pnv_phb *phb = hose->private_data;
-	struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
-	unsigned int pe, i;
-	resource_size_t pos;
-	struct resource io_res;
-	struct resource m32_res;
-	struct pci_bus_region region;
-	int rc;
-
-	/* Anything not referenced in the device-tree gets PE#0 */
-	pe = pdn ? pdn->pe_number : 0;
-
-	/* Calculate the device min/max */
-	io_res.start = m32_res.start = (resource_size_t)-1;
-	io_res.end = m32_res.end = 0;
-	io_res.flags = IORESOURCE_IO;
-	m32_res.flags = IORESOURCE_MEM;
-
-	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
-		struct resource *r = NULL;
-		if (dev->resource[i].flags & IORESOURCE_IO)
-			r = &io_res;
-		if (dev->resource[i].flags & IORESOURCE_MEM)
-			r = &m32_res;
-		if (!r)
-			continue;
-		if (dev->resource[i].start < r->start)
-			r->start = dev->resource[i].start;
-		if (dev->resource[i].end > r->end)
-			r->end = dev->resource[i].end;
-	}
-
-	/* Setup IO segments */
-	if (io_res.start < io_res.end) {
-		pcibios_resource_to_bus(dev, &region, &io_res);
-		pos = region.start;
-		i = pos / phb->ioda.io_segsize;
-		while(i < phb->ioda.total_pe && pos <= region.end) {
-			if (phb->ioda.io_segmap[i]) {
-				pr_err("%s: Trying to use IO seg #%d which is"
-				       " already used by PE# %d\n",
-				       pci_name(dev), i,
-				       phb->ioda.io_segmap[i]);
-				/* XXX DO SOMETHING TO DISABLE DEVICE ? */
-				break;
-			}
-			phb->ioda.io_segmap[i] = pe;
-			rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe,
-							 OPAL_IO_WINDOW_TYPE,
-							 0, i);
-			if (rc != OPAL_SUCCESS) {
-				pr_err("%s: OPAL error %d setting up mapping"
-				       " for IO seg# %d\n",
-				       pci_name(dev), rc, i);
-				/* XXX DO SOMETHING TO DISABLE DEVICE ? */
-				break;
-			}
-			pos += phb->ioda.io_segsize;
-			i++;
-		};
-	}
-
-	/* Setup M32 segments */
-	if (m32_res.start < m32_res.end) {
-		pcibios_resource_to_bus(dev, &region, &m32_res);
-		pos = region.start;
-		i = pos / phb->ioda.m32_segsize;
-		while(i < phb->ioda.total_pe && pos <= region.end) {
-			if (phb->ioda.m32_segmap[i]) {
-				pr_err("%s: Trying to use M32 seg #%d which is"
-				       " already used by PE# %d\n",
-				       pci_name(dev), i,
-				       phb->ioda.m32_segmap[i]);
-				/* XXX DO SOMETHING TO DISABLE DEVICE ? */
-				break;
-			}
-			phb->ioda.m32_segmap[i] = pe;
-			rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe,
-							 OPAL_M32_WINDOW_TYPE,
-							 0, i);
-			if (rc != OPAL_SUCCESS) {
-				pr_err("%s: OPAL error %d setting up mapping"
-				       " for M32 seg# %d\n",
-				       pci_name(dev), rc, i);
-				/* XXX DO SOMETHING TO DISABLE DEVICE ? */
-				break;
-			}
-			pos += phb->ioda.m32_segsize;
-			i++;
-		}
-	}
-}
-
-/* Check if a resource still fits in the total IO or M32 range
- * for a given PHB
- */
-static int __devinit pnv_ioda_resource_fit(struct pci_controller *hose,
-					   struct resource *r)
-{
-	struct resource *bounds;
-
-	if (r->flags & IORESOURCE_IO)
-		bounds = &hose->io_resource;
-	else if (r->flags & IORESOURCE_MEM)
-		bounds = &hose->mem_resources[0];
-	else
-		return 1;
-
-	if (r->start >= bounds->start && r->end <= bounds->end)
-		return 1;
-	r->flags = 0;
-	return 0;
-}
-
-static void __devinit pnv_ioda_update_resources(struct pci_bus *bus)
-{
-	struct pci_controller *hose = pci_bus_to_host(bus);
-	struct pci_bus *cbus;
-	struct pci_dev *cdev;
-	unsigned int i;
-
-	/* We used to clear all device enables here. However it looks like
-	 * clearing MEM enable causes Obsidian (IPR SCS) to go bonkers,
-	 * and shoot fatal errors to the PHB which in turns fences itself
-	 * and we can't recover from that ... yet. So for now, let's leave
-	 * the enables as-is and hope for the best.
-	 */
-
-	/* Check if bus resources fit in our IO or M32 range */
-	for (i = 0; bus->self && (i < 2); i++) {
-		struct resource *r = bus->resource[i];
-		if (r && !pnv_ioda_resource_fit(hose, r))
-			pr_err("%s: Bus %d resource %d disabled, no room\n",
-			       pci_name(bus->self), bus->number, i);
-	}
-
-	/* Update self if it's not a PHB */
-	if (bus->self)
-		pci_setup_bridge(bus);
-
-	/* Update child devices */
-	list_for_each_entry(cdev, &bus->devices, bus_list) {
-		/* Check if resource fits, if not, disabled it */
-		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
-			struct resource *r = &cdev->resource[i];
-			if (!pnv_ioda_resource_fit(hose, r))
-				pr_err("%s: Resource %d disabled, no room\n",
-				       pci_name(cdev), i);
-		}
-
-		/* Assign segments */
-		pnv_ioda_setup_pe_segments(cdev);
-
-		/* Update HW BARs */
-		for (i = 0; i <= PCI_ROM_RESOURCE; i++)
-			pci_update_resource(cdev, i);
-	}
-
-	/* Update child busses */
-	list_for_each_entry(cbus, &bus->children, node)
-		pnv_ioda_update_resources(cbus);
-}
-
 static int __devinit pnv_ioda_alloc_pe(struct pnv_phb *phb)
 {
 	unsigned long pe;