-rw-r--r-- | arch/microblaze/pci/pci-common.c | 1640
-rw-r--r-- | arch/microblaze/pci/pci_32.c     |  430
2 files changed, 2070 insertions, 0 deletions
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
new file mode 100644
index 000000000000..f03f8be2740a
--- /dev/null
+++ b/arch/microblaze/pci/pci-common.c
@@ -0,0 +1,1640 @@
1 | /* | ||
2 | * Contains common pci routines for ALL ppc platforms | ||
3 | * (based on pci_32.c and pci_64.c) | ||
4 | * | ||
5 | * Port for PPC64 David Engebretsen, IBM Corp. | ||
6 | * Contains common pci routines for ppc64 platform, pSeries and iSeries brands. | ||
7 | * | ||
8 | * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM | ||
9 | * Rework, based on alpha PCI code. | ||
10 | * | ||
11 | * Common pmac/prep/chrp pci routines. -- Cort | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or | ||
14 | * modify it under the terms of the GNU General Public License | ||
15 | * as published by the Free Software Foundation; either version | ||
16 | * 2 of the License, or (at your option) any later version. | ||
17 | */ | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/pci.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/bootmem.h> | ||
24 | #include <linux/mm.h> | ||
25 | #include <linux/list.h> | ||
26 | #include <linux/syscalls.h> | ||
27 | #include <linux/irq.h> | ||
28 | #include <linux/vmalloc.h> | ||
29 | |||
30 | #include <asm/processor.h> | ||
31 | #include <asm/io.h> | ||
32 | #include <asm/prom.h> | ||
33 | #include <asm/pci-bridge.h> | ||
34 | #include <asm/byteorder.h> | ||
35 | |||
36 | static DEFINE_SPINLOCK(hose_spinlock); | ||
37 | LIST_HEAD(hose_list); | ||
38 | |||
39 | /* XXX kill that some day ... */ | ||
40 | static int global_phb_number; /* Global phb counter */ | ||
41 | |||
42 | /* ISA Memory physical address */ | ||
43 | resource_size_t isa_mem_base; | ||
44 | |||
45 | /* Default PCI flags are 0 on ppc32, modified at boot on ppc64 */ | ||
46 | unsigned int pci_flags; | ||
47 | |||
48 | static struct dma_map_ops *pci_dma_ops = &dma_direct_ops; | ||
49 | |||
50 | void set_pci_dma_ops(struct dma_map_ops *dma_ops) | ||
51 | { | ||
52 | pci_dma_ops = dma_ops; | ||
53 | } | ||
54 | |||
55 | struct dma_map_ops *get_pci_dma_ops(void) | ||
56 | { | ||
57 | return pci_dma_ops; | ||
58 | } | ||
59 | EXPORT_SYMBOL(get_pci_dma_ops); | ||
60 | |||
61 | int pci_set_dma_mask(struct pci_dev *dev, u64 mask) | ||
62 | { | ||
63 | return dma_set_mask(&dev->dev, mask); | ||
64 | } | ||
65 | |||
66 | int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) | ||
67 | { | ||
68 | int rc; | ||
69 | |||
70 | rc = dma_set_mask(&dev->dev, mask); | ||
71 | dev->dev.coherent_dma_mask = dev->dma_mask; | ||
72 | |||
73 | return rc; | ||
74 | } | ||
75 | |||
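As an illustrative sketch (not part of this commit): the two mask helpers above simply funnel into the generic dma_set_mask() handling while keeping the coherent mask in sync, so a PCI driver's probe routine would typically call them as below. The driver name "foo" and the 32-bit mask are assumptions made only for the example.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* Both calls land in dma_set_mask() via the wrappers above */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		pci_disable_device(pdev);

	return err;
}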
76 | struct pci_controller *pcibios_alloc_controller(struct device_node *dev) | ||
77 | { | ||
78 | struct pci_controller *phb; | ||
79 | |||
80 | phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL); | ||
81 | if (!phb) | ||
82 | return NULL; | ||
83 | spin_lock(&hose_spinlock); | ||
84 | phb->global_number = global_phb_number++; | ||
85 | list_add_tail(&phb->list_node, &hose_list); | ||
86 | spin_unlock(&hose_spinlock); | ||
87 | phb->dn = dev; | ||
88 | phb->is_dynamic = mem_init_done; | ||
89 | return phb; | ||
90 | } | ||
91 | |||
92 | void pcibios_free_controller(struct pci_controller *phb) | ||
93 | { | ||
94 | spin_lock(&hose_spinlock); | ||
95 | list_del(&phb->list_node); | ||
96 | spin_unlock(&hose_spinlock); | ||
97 | |||
98 | if (phb->is_dynamic) | ||
99 | kfree(phb); | ||
100 | } | ||
101 | |||
102 | static resource_size_t pcibios_io_size(const struct pci_controller *hose) | ||
103 | { | ||
104 | return hose->io_resource.end - hose->io_resource.start + 1; | ||
105 | } | ||
106 | |||
107 | int pcibios_vaddr_is_ioport(void __iomem *address) | ||
108 | { | ||
109 | int ret = 0; | ||
110 | struct pci_controller *hose; | ||
111 | resource_size_t size; | ||
112 | |||
113 | spin_lock(&hose_spinlock); | ||
114 | list_for_each_entry(hose, &hose_list, list_node) { | ||
115 | size = pcibios_io_size(hose); | ||
116 | if (address >= hose->io_base_virt && | ||
117 | address < (hose->io_base_virt + size)) { | ||
118 | ret = 1; | ||
119 | break; | ||
120 | } | ||
121 | } | ||
122 | spin_unlock(&hose_spinlock); | ||
123 | return ret; | ||
124 | } | ||
125 | |||
126 | unsigned long pci_address_to_pio(phys_addr_t address) | ||
127 | { | ||
128 | struct pci_controller *hose; | ||
129 | resource_size_t size; | ||
130 | unsigned long ret = ~0; | ||
131 | |||
132 | spin_lock(&hose_spinlock); | ||
133 | list_for_each_entry(hose, &hose_list, list_node) { | ||
134 | size = pcibios_io_size(hose); | ||
135 | if (address >= hose->io_base_phys && | ||
136 | address < (hose->io_base_phys + size)) { | ||
137 | unsigned long base = | ||
138 | (unsigned long)hose->io_base_virt - _IO_BASE; | ||
139 | ret = base + (address - hose->io_base_phys); | ||
140 | break; | ||
141 | } | ||
142 | } | ||
143 | spin_unlock(&hose_spinlock); | ||
144 | |||
145 | return ret; | ||
146 | } | ||
147 | EXPORT_SYMBOL_GPL(pci_address_to_pio); | ||
148 | |||
149 | /* | ||
150 | * Return the domain number for this bus. | ||
151 | */ | ||
152 | int pci_domain_nr(struct pci_bus *bus) | ||
153 | { | ||
154 | struct pci_controller *hose = pci_bus_to_host(bus); | ||
155 | |||
156 | return hose->global_number; | ||
157 | } | ||
158 | EXPORT_SYMBOL(pci_domain_nr); | ||
159 | |||
160 | /* This routine is meant to be used early during boot, when the | ||
161 | * PCI bus numbers have not yet been assigned, and you need to | ||
162 | * issue PCI config cycles to an OF device. | ||
163 | * It could also be used to "fix" RTAS config cycles if you want | ||
164 | * to set pci_assign_all_buses to 1 and still use RTAS for PCI | ||
165 | * config cycles. | ||
166 | */ | ||
167 | struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node) | ||
168 | { | ||
169 | while (node) { | ||
170 | struct pci_controller *hose, *tmp; | ||
171 | list_for_each_entry_safe(hose, tmp, &hose_list, list_node) | ||
172 | if (hose->dn == node) | ||
173 | return hose; | ||
174 | node = node->parent; | ||
175 | } | ||
176 | return NULL; | ||
177 | } | ||
178 | |||
179 | static ssize_t pci_show_devspec(struct device *dev, | ||
180 | struct device_attribute *attr, char *buf) | ||
181 | { | ||
182 | struct pci_dev *pdev; | ||
183 | struct device_node *np; | ||
184 | |||
185 | pdev = to_pci_dev(dev); | ||
186 | np = pci_device_to_OF_node(pdev); | ||
187 | if (np == NULL || np->full_name == NULL) | ||
188 | return 0; | ||
189 | return sprintf(buf, "%s", np->full_name); | ||
190 | } | ||
191 | static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL); | ||
192 | |||
193 | /* Add sysfs properties */ | ||
194 | int pcibios_add_platform_entries(struct pci_dev *pdev) | ||
195 | { | ||
196 | return device_create_file(&pdev->dev, &dev_attr_devspec); | ||
197 | } | ||
198 | |||
199 | char __devinit *pcibios_setup(char *str) | ||
200 | { | ||
201 | return str; | ||
202 | } | ||
203 | |||
204 | /* | ||
205 | * Reads the interrupt pin to determine if the interrupt is used by the card. | ||
206 | * If the interrupt is used, then gets the interrupt line from | ||
207 | * Open Firmware and sets it in the pci_dev and pci_config line. | ||
208 | */ | ||
209 | int pci_read_irq_line(struct pci_dev *pci_dev) | ||
210 | { | ||
211 | struct of_irq oirq; | ||
212 | unsigned int virq; | ||
213 | |||
214 | /* The current device-tree that iSeries generates from the HV | ||
215 | * PCI information doesn't contain proper interrupt routing, | ||
216 | * and all the fallback would do is print out crap, so we | ||
217 | * don't attempt to resolve the interrupts here at all, some | ||
218 | * iSeries specific fixup does it. | ||
219 | * | ||
220 | * In the long run, we will hopefully fix the generated device-tree | ||
221 | * instead. | ||
222 | */ | ||
223 | pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev)); | ||
224 | |||
225 | #ifdef DEBUG | ||
226 | memset(&oirq, 0xff, sizeof(oirq)); | ||
227 | #endif | ||
228 | /* Try to get a mapping from the device-tree */ | ||
229 | if (of_irq_map_pci(pci_dev, &oirq)) { | ||
230 | u8 line, pin; | ||
231 | |||
232 | /* If that fails, let's fall back to what is in the config | ||
233 | * space and map that through the default controller. We | ||
234 | * also set the type to level low since that's what PCI | ||
235 | * interrupts are. If your platform does differently, then | ||
236 | * either provide a proper interrupt tree or don't use this | ||
237 | * function. | ||
238 | */ | ||
239 | if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin)) | ||
240 | return -1; | ||
241 | if (pin == 0) | ||
242 | return -1; | ||
243 | if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) || | ||
244 | line == 0xff || line == 0) { | ||
245 | return -1; | ||
246 | } | ||
247 | pr_debug(" No map ! Using line %d (pin %d) from PCI config\n", | ||
248 | line, pin); | ||
249 | |||
250 | virq = irq_create_mapping(NULL, line); | ||
251 | if (virq != NO_IRQ) | ||
252 | set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); | ||
253 | } else { | ||
254 | pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n", | ||
255 | oirq.size, oirq.specifier[0], oirq.specifier[1], | ||
256 | oirq.controller ? oirq.controller->full_name : | ||
257 | "<default>"); | ||
258 | |||
259 | virq = irq_create_of_mapping(oirq.controller, oirq.specifier, | ||
260 | oirq.size); | ||
261 | } | ||
262 | if (virq == NO_IRQ) { | ||
263 | pr_debug(" Failed to map !\n"); | ||
264 | return -1; | ||
265 | } | ||
266 | |||
267 | pr_debug(" Mapped to linux irq %d\n", virq); | ||
268 | |||
269 | pci_dev->irq = virq; | ||
270 | |||
271 | return 0; | ||
272 | } | ||
273 | EXPORT_SYMBOL(pci_read_irq_line); | ||
274 | |||
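A minimal sketch, not part of this commit: once pci_read_irq_line() has resolved the interrupt and stored the Linux IRQ number in pdev->irq (it is called for every device in pcibios_setup_bus_devices() further down), a driver just requests that number. The handler and device names are invented for the example.

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	/* acknowledge and handle the device interrupt here */
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct pci_dev *pdev, void *priv)
{
	/* PCI INTx lines can be shared, hence IRQF_SHARED */
	return request_irq(pdev->irq, foo_interrupt, IRQF_SHARED,
			   "foo", priv);
}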
275 | /* | ||
276 | * Platform support for /proc/bus/pci/X/Y mmap()s, | ||
277 | * modelled on the sparc64 implementation by Dave Miller. | ||
278 | * -- paulus. | ||
279 | */ | ||
280 | |||
281 | /* | ||
282 | * Adjust vm_pgoff of VMA such that it is the physical page offset | ||
283 | * corresponding to the 32-bit pci bus offset for DEV requested by the user. | ||
284 | * | ||
285 | * Basically, the user finds the base address for the device they wish | ||
286 | * to mmap. They read the 32-bit value from the config space base register, | ||
287 | * add whatever PAGE_SIZE multiple offset they wish, and feed this into the | ||
288 | * offset parameter of mmap on /proc/bus/pci/XXX for that device. | ||
289 | * | ||
290 | * Returns negative error code on failure, zero on success. | ||
291 | */ | ||
292 | static struct resource *__pci_mmap_make_offset(struct pci_dev *dev, | ||
293 | resource_size_t *offset, | ||
294 | enum pci_mmap_state mmap_state) | ||
295 | { | ||
296 | struct pci_controller *hose = pci_bus_to_host(dev->bus); | ||
297 | unsigned long io_offset = 0; | ||
298 | int i, res_bit; | ||
299 | |||
300 | if (hose == NULL) | ||
301 | return NULL; /* should never happen */ | ||
302 | |||
303 | /* If memory, add on the PCI bridge address offset */ | ||
304 | if (mmap_state == pci_mmap_mem) { | ||
305 | #if 0 /* See comment in pci_resource_to_user() for why this is disabled */ | ||
306 | *offset += hose->pci_mem_offset; | ||
307 | #endif | ||
308 | res_bit = IORESOURCE_MEM; | ||
309 | } else { | ||
310 | io_offset = (unsigned long)hose->io_base_virt - _IO_BASE; | ||
311 | *offset += io_offset; | ||
312 | res_bit = IORESOURCE_IO; | ||
313 | } | ||
314 | |||
315 | /* | ||
316 | * Check that the offset requested corresponds to one of the | ||
317 | * resources of the device. | ||
318 | */ | ||
319 | for (i = 0; i <= PCI_ROM_RESOURCE; i++) { | ||
320 | struct resource *rp = &dev->resource[i]; | ||
321 | int flags = rp->flags; | ||
322 | |||
323 | /* treat ROM as memory (should be already) */ | ||
324 | if (i == PCI_ROM_RESOURCE) | ||
325 | flags |= IORESOURCE_MEM; | ||
326 | |||
327 | /* Active and same type? */ | ||
328 | if ((flags & res_bit) == 0) | ||
329 | continue; | ||
330 | |||
331 | /* In the range of this resource? */ | ||
332 | if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end) | ||
333 | continue; | ||
334 | |||
335 | /* found it! construct the final physical address */ | ||
336 | if (mmap_state == pci_mmap_io) | ||
337 | *offset += hose->io_base_phys - io_offset; | ||
338 | return rp; | ||
339 | } | ||
340 | |||
341 | return NULL; | ||
342 | } | ||
343 | |||
344 | /* | ||
345 | * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci | ||
346 | * device mapping. | ||
347 | */ | ||
348 | static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, | ||
349 | pgprot_t protection, | ||
350 | enum pci_mmap_state mmap_state, | ||
351 | int write_combine) | ||
352 | { | ||
353 | pgprot_t prot = protection; | ||
354 | |||
355 | /* Write combine is always 0 on non-memory space mappings. On | ||
356 | * memory space, if the user didn't pass 1, we check for a | ||
357 | * "prefetchable" resource. This is a bit hackish, but we use | ||
358 | * this to work around the inability of /sysfs to provide a write | ||
359 | * combine bit. | ||
360 | */ | ||
361 | if (mmap_state != pci_mmap_mem) | ||
362 | write_combine = 0; | ||
363 | else if (write_combine == 0) { | ||
364 | if (rp->flags & IORESOURCE_PREFETCH) | ||
365 | write_combine = 1; | ||
366 | } | ||
367 | |||
368 | return pgprot_noncached(prot); | ||
369 | } | ||
370 | |||
371 | /* | ||
372 | * This one is used by /dev/mem and fbdev, which have no clue about the | ||
373 | * PCI device; it tries to find the PCI device first and then applies | ||
374 | * the same prot logic as the routine above. | ||
375 | */ | ||
376 | pgprot_t pci_phys_mem_access_prot(struct file *file, | ||
377 | unsigned long pfn, | ||
378 | unsigned long size, | ||
379 | pgprot_t prot) | ||
380 | { | ||
381 | struct pci_dev *pdev = NULL; | ||
382 | struct resource *found = NULL; | ||
383 | resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT; | ||
384 | int i; | ||
385 | |||
386 | if (page_is_ram(pfn)) | ||
387 | return prot; | ||
388 | |||
389 | prot = pgprot_noncached(prot); | ||
390 | for_each_pci_dev(pdev) { | ||
391 | for (i = 0; i <= PCI_ROM_RESOURCE; i++) { | ||
392 | struct resource *rp = &pdev->resource[i]; | ||
393 | int flags = rp->flags; | ||
394 | |||
395 | /* Active and same type? */ | ||
396 | if ((flags & IORESOURCE_MEM) == 0) | ||
397 | continue; | ||
398 | /* In the range of this resource? */ | ||
399 | if (offset < (rp->start & PAGE_MASK) || | ||
400 | offset > rp->end) | ||
401 | continue; | ||
402 | found = rp; | ||
403 | break; | ||
404 | } | ||
405 | if (found) | ||
406 | break; | ||
407 | } | ||
408 | if (found) { | ||
409 | if (found->flags & IORESOURCE_PREFETCH) | ||
410 | prot = pgprot_noncached_wc(prot); | ||
411 | pci_dev_put(pdev); | ||
412 | } | ||
413 | |||
414 | pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n", | ||
415 | (unsigned long long)offset, pgprot_val(prot)); | ||
416 | |||
417 | return prot; | ||
418 | } | ||
419 | |||
420 | /* | ||
421 | * Perform the actual remap of the pages for a PCI device mapping, as | ||
422 | * appropriate for this architecture. The region in the process to map | ||
423 | * is described by vm_start and vm_end members of VMA, the base physical | ||
424 | * address is found in vm_pgoff. | ||
425 | * The pci device structure is provided so that architectures may make mapping | ||
426 | * decisions on a per-device or per-bus basis. | ||
427 | * | ||
428 | * Returns a negative error code on failure, zero on success. | ||
429 | */ | ||
430 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | ||
431 | enum pci_mmap_state mmap_state, int write_combine) | ||
432 | { | ||
433 | resource_size_t offset = | ||
434 | ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT; | ||
435 | struct resource *rp; | ||
436 | int ret; | ||
437 | |||
438 | rp = __pci_mmap_make_offset(dev, &offset, mmap_state); | ||
439 | if (rp == NULL) | ||
440 | return -EINVAL; | ||
441 | |||
442 | vma->vm_pgoff = offset >> PAGE_SHIFT; | ||
443 | vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp, | ||
444 | vma->vm_page_prot, | ||
445 | mmap_state, write_combine); | ||
446 | |||
447 | ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | ||
448 | vma->vm_end - vma->vm_start, vma->vm_page_prot); | ||
449 | |||
450 | return ret; | ||
451 | } | ||
452 | |||
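For context, a hedged user-space sketch (not part of this commit) of the /proc/bus/pci flow that __pci_mmap_make_offset() expects: read a BAR from config space and pass it as the mmap offset on the device's proc file. The bus/device path and BAR value are invented for the example; PCIIOC_MMAP_IS_MEM is used to select memory space rather than the default I/O space.

#include <fcntl.h>
#include <stddef.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/pci.h>	/* PCIIOC_MMAP_IS_MEM */

int main(void)
{
	off_t bar0 = 0x40000000;	/* 32-bit BAR value read from config space */
	int fd = open("/proc/bus/pci/00/10.0", O_RDWR);

	if (fd < 0)
		return 1;
	/* ask for memory space instead of the default I/O space */
	ioctl(fd, PCIIOC_MMAP_IS_MEM);

	void *regs = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
			  fd, bar0);
	if (regs == MAP_FAILED) {
		close(fd);
		return 1;
	}
	/* ... access device registers through "regs" ... */
	munmap(regs, 4096);
	close(fd);
	return 0;
}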
453 | /* This provides legacy IO read access on a bus */ | ||
454 | int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size) | ||
455 | { | ||
456 | unsigned long offset; | ||
457 | struct pci_controller *hose = pci_bus_to_host(bus); | ||
458 | struct resource *rp = &hose->io_resource; | ||
459 | void __iomem *addr; | ||
460 | |||
461 | /* Check if port can be supported by that bus. We only check | ||
462 | * the ranges of the PHB though, not the bus itself as the rules | ||
463 | * for forwarding legacy cycles down bridges are not our problem | ||
464 | * here. So if the host bridge supports it, we do it. | ||
465 | */ | ||
466 | offset = (unsigned long)hose->io_base_virt - _IO_BASE; | ||
467 | offset += port; | ||
468 | |||
469 | if (!(rp->flags & IORESOURCE_IO)) | ||
470 | return -ENXIO; | ||
471 | if (offset < rp->start || (offset + size) > rp->end) | ||
472 | return -ENXIO; | ||
473 | addr = hose->io_base_virt + port; | ||
474 | |||
475 | switch (size) { | ||
476 | case 1: | ||
477 | *((u8 *)val) = in_8(addr); | ||
478 | return 1; | ||
479 | case 2: | ||
480 | if (port & 1) | ||
481 | return -EINVAL; | ||
482 | *((u16 *)val) = in_le16(addr); | ||
483 | return 2; | ||
484 | case 4: | ||
485 | if (port & 3) | ||
486 | return -EINVAL; | ||
487 | *((u32 *)val) = in_le32(addr); | ||
488 | return 4; | ||
489 | } | ||
490 | return -EINVAL; | ||
491 | } | ||
492 | |||
493 | /* This provides legacy IO write access on a bus */ | ||
494 | int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size) | ||
495 | { | ||
496 | unsigned long offset; | ||
497 | struct pci_controller *hose = pci_bus_to_host(bus); | ||
498 | struct resource *rp = &hose->io_resource; | ||
499 | void __iomem *addr; | ||
500 | |||
501 | /* Check if port can be supported by that bus. We only check | ||
502 | * the ranges of the PHB though, not the bus itself as the rules | ||
503 | * for forwarding legacy cycles down bridges are not our problem | ||
504 | * here. So if the host bridge supports it, we do it. | ||
505 | */ | ||
506 | offset = (unsigned long)hose->io_base_virt - _IO_BASE; | ||
507 | offset += port; | ||
508 | |||
509 | if (!(rp->flags & IORESOURCE_IO)) | ||
510 | return -ENXIO; | ||
511 | if (offset < rp->start || (offset + size) > rp->end) | ||
512 | return -ENXIO; | ||
513 | addr = hose->io_base_virt + port; | ||
514 | |||
515 | /* WARNING: The generic code is idiotic. It gets passed a pointer | ||
516 | * to what can be a 1, 2 or 4 byte quantity and always reads that | ||
517 | * as a u32, which means that we have to correct the location of | ||
518 | * the data read within those 32 bits for size 1 and 2 | ||
519 | */ | ||
520 | switch (size) { | ||
521 | case 1: | ||
522 | out_8(addr, val >> 24); | ||
523 | return 1; | ||
524 | case 2: | ||
525 | if (port & 1) | ||
526 | return -EINVAL; | ||
527 | out_le16(addr, val >> 16); | ||
528 | return 2; | ||
529 | case 4: | ||
530 | if (port & 3) | ||
531 | return -EINVAL; | ||
532 | out_le32(addr, val); | ||
533 | return 4; | ||
534 | } | ||
535 | return -EINVAL; | ||
536 | } | ||
537 | |||
538 | /* This provides legacy IO or memory mmap access on a bus */ | ||
539 | int pci_mmap_legacy_page_range(struct pci_bus *bus, | ||
540 | struct vm_area_struct *vma, | ||
541 | enum pci_mmap_state mmap_state) | ||
542 | { | ||
543 | struct pci_controller *hose = pci_bus_to_host(bus); | ||
544 | resource_size_t offset = | ||
545 | ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT; | ||
546 | resource_size_t size = vma->vm_end - vma->vm_start; | ||
547 | struct resource *rp; | ||
548 | |||
549 | pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n", | ||
550 | pci_domain_nr(bus), bus->number, | ||
551 | mmap_state == pci_mmap_mem ? "MEM" : "IO", | ||
552 | (unsigned long long)offset, | ||
553 | (unsigned long long)(offset + size - 1)); | ||
554 | |||
555 | if (mmap_state == pci_mmap_mem) { | ||
556 | /* Hack alert ! | ||
557 | * | ||
558 | * Because X is lame and can fail starting if it gets an error | ||
559 | * trying to mmap legacy_mem (instead of just moving on without | ||
560 | * legacy memory access) we fake it here by giving it anonymous | ||
561 | * memory, effectively behaving just like /dev/zero | ||
562 | */ | ||
563 | if ((offset + size) > hose->isa_mem_size) { | ||
564 | printk(KERN_DEBUG | ||
565 | "Process %s (pid:%d) mapped non-existing PCI" | ||
566 | "legacy memory for 0%04x:%02x\n", | ||
567 | current->comm, current->pid, pci_domain_nr(bus), | ||
568 | bus->number); | ||
569 | if (vma->vm_flags & VM_SHARED) | ||
570 | return shmem_zero_setup(vma); | ||
571 | return 0; | ||
572 | } | ||
573 | offset += hose->isa_mem_phys; | ||
574 | } else { | ||
575 | unsigned long io_offset = (unsigned long)hose->io_base_virt - | ||
576 | _IO_BASE; | ||
577 | unsigned long roffset = offset + io_offset; | ||
578 | rp = &hose->io_resource; | ||
579 | if (!(rp->flags & IORESOURCE_IO)) | ||
580 | return -ENXIO; | ||
581 | if (roffset < rp->start || (roffset + size) > rp->end) | ||
582 | return -ENXIO; | ||
583 | offset += hose->io_base_phys; | ||
584 | } | ||
585 | pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset); | ||
586 | |||
587 | vma->vm_pgoff = offset >> PAGE_SHIFT; | ||
588 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
589 | return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | ||
590 | vma->vm_end - vma->vm_start, | ||
591 | vma->vm_page_prot); | ||
592 | } | ||
593 | |||
594 | void pci_resource_to_user(const struct pci_dev *dev, int bar, | ||
595 | const struct resource *rsrc, | ||
596 | resource_size_t *start, resource_size_t *end) | ||
597 | { | ||
598 | struct pci_controller *hose = pci_bus_to_host(dev->bus); | ||
599 | resource_size_t offset = 0; | ||
600 | |||
601 | if (hose == NULL) | ||
602 | return; | ||
603 | |||
604 | if (rsrc->flags & IORESOURCE_IO) | ||
605 | offset = (unsigned long)hose->io_base_virt - _IO_BASE; | ||
606 | |||
607 | /* We pass a fully fixed up address to userland for MMIO instead of | ||
608 | * a BAR value because X is lame and expects to be able to use that | ||
609 | * to pass to /dev/mem ! | ||
610 | * | ||
611 | * That means that we'll have potentially 64 bits values where some | ||
612 | * userland apps only expect 32 (like X itself since it thinks only | ||
613 | * Sparc has 64 bits MMIO) but if we don't do that, we break it on | ||
614 | * 32 bits CHRPs :-( | ||
615 | * | ||
616 | * Hopefully, the sysfs interface is immune to that gunk. Once X | ||
617 | * has been fixed (and the fix spread enough), we can re-enable the | ||
618 | * 2 lines below and pass down a BAR value to userland. In that case | ||
619 | * we'll also have to re-enable the matching code in | ||
620 | * __pci_mmap_make_offset(). | ||
621 | * | ||
622 | * BenH. | ||
623 | */ | ||
624 | #if 0 | ||
625 | else if (rsrc->flags & IORESOURCE_MEM) | ||
626 | offset = hose->pci_mem_offset; | ||
627 | #endif | ||
628 | |||
629 | *start = rsrc->start - offset; | ||
630 | *end = rsrc->end - offset; | ||
631 | } | ||
632 | |||
633 | /** | ||
634 | * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree | ||
635 | * @hose: newly allocated pci_controller to be setup | ||
636 | * @dev: device node of the host bridge | ||
637 | * @primary: set if primary bus (32 bits only, soon to be deprecated) | ||
638 | * | ||
639 | * This function will parse the "ranges" property of a PCI host bridge device | ||
640 | * node and setup the resource mapping of a pci controller based on its | ||
641 | * content. | ||
642 | * | ||
643 | * Life would be boring if it wasn't for a few issues that we have to deal | ||
644 | * with here: | ||
645 | * | ||
646 | * - We can only cope with one IO space range and up to 3 Memory space | ||
647 | * ranges. However, some machines (thanks Apple !) tend to split their | ||
648 | * space into lots of small contiguous ranges. So we have to coalesce. | ||
649 | * | ||
650 | * - We can only cope with all memory ranges having the same offset | ||
651 | * between CPU addresses and PCI addresses. Unfortunately, some bridges | ||
652 | * are setup for a large 1:1 mapping along with a small "window" which | ||
653 | * maps PCI address 0 to some arbitrary high address of the CPU space in | ||
654 | * order to give access to the ISA memory hole. | ||
655 | * The way out of here that I've chosen for now is to always set the | ||
656 | * offset based on the first resource found, then override it if we | ||
657 | * have a different offset and the previous was set by an ISA hole. | ||
658 | * | ||
659 | * - Some busses have IO space not starting at 0, which causes trouble with | ||
660 | * the way we do our IO resource renumbering. The code somewhat deals with | ||
661 | * it for 64 bits but I would expect problems on 32 bits. | ||
662 | * | ||
663 | * - Some 32 bits platforms such as 4xx can have physical space larger than | ||
664 | * 32 bits so we need to use 64 bits values for the parsing | ||
665 | */ | ||
666 | void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose, | ||
667 | struct device_node *dev, | ||
668 | int primary) | ||
669 | { | ||
670 | const u32 *ranges; | ||
671 | int rlen; | ||
672 | int pna = of_n_addr_cells(dev); | ||
673 | int np = pna + 5; | ||
674 | int memno = 0, isa_hole = -1; | ||
675 | u32 pci_space; | ||
676 | unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size; | ||
677 | unsigned long long isa_mb = 0; | ||
678 | struct resource *res; | ||
679 | |||
680 | printk(KERN_INFO "PCI host bridge %s %s ranges:\n", | ||
681 | dev->full_name, primary ? "(primary)" : ""); | ||
682 | |||
683 | /* Get ranges property */ | ||
684 | ranges = of_get_property(dev, "ranges", &rlen); | ||
685 | if (ranges == NULL) | ||
686 | return; | ||
687 | |||
688 | /* Parse it */ | ||
689 | pr_debug("Parsing ranges property...\n"); | ||
690 | while ((rlen -= np * 4) >= 0) { | ||
691 | /* Read next ranges element */ | ||
692 | pci_space = ranges[0]; | ||
693 | pci_addr = of_read_number(ranges + 1, 2); | ||
694 | cpu_addr = of_translate_address(dev, ranges + 3); | ||
695 | size = of_read_number(ranges + pna + 3, 2); | ||
696 | |||
697 | pr_debug("pci_space: 0x%08x pci_addr:0x%016llx " | ||
698 | "cpu_addr:0x%016llx size:0x%016llx\n", | ||
699 | pci_space, pci_addr, cpu_addr, size); | ||
700 | |||
701 | ranges += np; | ||
702 | |||
703 | /* If we failed translation or got a zero-sized region | ||
704 | * (some FW tries to feed us nonsensical zero-sized regions | ||
705 | * such as power3 which look like some kind of attempt | ||
706 | * at exposing the VGA memory hole) | ||
707 | */ | ||
708 | if (cpu_addr == OF_BAD_ADDR || size == 0) | ||
709 | continue; | ||
710 | |||
711 | /* Now consume following elements while they are contiguous */ | ||
712 | for (; rlen >= np * sizeof(u32); | ||
713 | ranges += np, rlen -= np * 4) { | ||
714 | if (ranges[0] != pci_space) | ||
715 | break; | ||
716 | pci_next = of_read_number(ranges + 1, 2); | ||
717 | cpu_next = of_translate_address(dev, ranges + 3); | ||
718 | if (pci_next != pci_addr + size || | ||
719 | cpu_next != cpu_addr + size) | ||
720 | break; | ||
721 | size += of_read_number(ranges + pna + 3, 2); | ||
722 | } | ||
723 | |||
724 | /* Act based on address space type */ | ||
725 | res = NULL; | ||
726 | switch ((pci_space >> 24) & 0x3) { | ||
727 | case 1: /* PCI IO space */ | ||
728 | printk(KERN_INFO | ||
729 | " IO 0x%016llx..0x%016llx -> 0x%016llx\n", | ||
730 | cpu_addr, cpu_addr + size - 1, pci_addr); | ||
731 | |||
732 | /* We support only one IO range */ | ||
733 | if (hose->pci_io_size) { | ||
734 | printk(KERN_INFO | ||
735 | " \\--> Skipped (too many) !\n"); | ||
736 | continue; | ||
737 | } | ||
738 | /* On 32 bits, limit I/O space to 16MB */ | ||
739 | if (size > 0x01000000) | ||
740 | size = 0x01000000; | ||
741 | |||
742 | /* 32 bits needs to map IOs here */ | ||
743 | hose->io_base_virt = ioremap(cpu_addr, size); | ||
744 | |||
745 | /* Expect trouble if pci_addr is not 0 */ | ||
746 | if (primary) | ||
747 | isa_io_base = | ||
748 | (unsigned long)hose->io_base_virt; | ||
749 | /* pci_io_size and io_base_phys always represent IO | ||
750 | * space starting at 0 so we factor in pci_addr | ||
751 | */ | ||
752 | hose->pci_io_size = pci_addr + size; | ||
753 | hose->io_base_phys = cpu_addr - pci_addr; | ||
754 | |||
755 | /* Build resource */ | ||
756 | res = &hose->io_resource; | ||
757 | res->flags = IORESOURCE_IO; | ||
758 | res->start = pci_addr; | ||
759 | break; | ||
760 | case 2: /* PCI Memory space */ | ||
761 | case 3: /* PCI 64 bits Memory space */ | ||
762 | printk(KERN_INFO | ||
763 | " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n", | ||
764 | cpu_addr, cpu_addr + size - 1, pci_addr, | ||
765 | (pci_space & 0x40000000) ? "Prefetch" : ""); | ||
766 | |||
767 | /* We support only 3 memory ranges */ | ||
768 | if (memno >= 3) { | ||
769 | printk(KERN_INFO | ||
770 | " \\--> Skipped (too many) !\n"); | ||
771 | continue; | ||
772 | } | ||
773 | /* Handles ISA memory hole space here */ | ||
774 | if (pci_addr == 0) { | ||
775 | isa_mb = cpu_addr; | ||
776 | isa_hole = memno; | ||
777 | if (primary || isa_mem_base == 0) | ||
778 | isa_mem_base = cpu_addr; | ||
779 | hose->isa_mem_phys = cpu_addr; | ||
780 | hose->isa_mem_size = size; | ||
781 | } | ||
782 | |||
783 | /* We get the PCI/Mem offset from the first range or | ||
784 | * the current one if the offset came from an ISA | ||
785 | * hole. If they don't match, bugger. | ||
786 | */ | ||
787 | if (memno == 0 || | ||
788 | (isa_hole >= 0 && pci_addr != 0 && | ||
789 | hose->pci_mem_offset == isa_mb)) | ||
790 | hose->pci_mem_offset = cpu_addr - pci_addr; | ||
791 | else if (pci_addr != 0 && | ||
792 | hose->pci_mem_offset != cpu_addr - pci_addr) { | ||
793 | printk(KERN_INFO | ||
794 | " \\--> Skipped (offset mismatch) !\n"); | ||
795 | continue; | ||
796 | } | ||
797 | |||
798 | /* Build resource */ | ||
799 | res = &hose->mem_resources[memno++]; | ||
800 | res->flags = IORESOURCE_MEM; | ||
801 | if (pci_space & 0x40000000) | ||
802 | res->flags |= IORESOURCE_PREFETCH; | ||
803 | res->start = cpu_addr; | ||
804 | break; | ||
805 | } | ||
806 | if (res != NULL) { | ||
807 | res->name = dev->full_name; | ||
808 | res->end = res->start + size - 1; | ||
809 | res->parent = NULL; | ||
810 | res->sibling = NULL; | ||
811 | res->child = NULL; | ||
812 | } | ||
813 | } | ||
814 | |||
815 | /* If there's an ISA hole and the pci_mem_offset is -not- matching | ||
816 | * the ISA hole offset, then we need to remove the ISA hole from | ||
817 | * the resource list for that bridge | ||
818 | */ | ||
819 | if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) { | ||
820 | unsigned int next = isa_hole + 1; | ||
821 | printk(KERN_INFO " Removing ISA hole at 0x%016llx\n", isa_mb); | ||
822 | if (next < memno) | ||
823 | memmove(&hose->mem_resources[isa_hole], | ||
824 | &hose->mem_resources[next], | ||
825 | sizeof(struct resource) * (memno - next)); | ||
826 | hose->mem_resources[--memno].flags = 0; | ||
827 | } | ||
828 | } | ||
829 | |||
830 | /* Decide whether to display the domain number in /proc */ | ||
831 | int pci_proc_domain(struct pci_bus *bus) | ||
832 | { | ||
833 | struct pci_controller *hose = pci_bus_to_host(bus); | ||
834 | |||
835 | if (!(pci_flags & PCI_ENABLE_PROC_DOMAINS)) | ||
836 | return 0; | ||
837 | if (pci_flags & PCI_COMPAT_DOMAIN_0) | ||
838 | return hose->global_number != 0; | ||
839 | return 1; | ||
840 | } | ||
841 | |||
842 | void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, | ||
843 | struct resource *res) | ||
844 | { | ||
845 | resource_size_t offset = 0, mask = (resource_size_t)-1; | ||
846 | struct pci_controller *hose = pci_bus_to_host(dev->bus); | ||
847 | |||
848 | if (!hose) | ||
849 | return; | ||
850 | if (res->flags & IORESOURCE_IO) { | ||
851 | offset = (unsigned long)hose->io_base_virt - _IO_BASE; | ||
852 | mask = 0xffffffffu; | ||
853 | } else if (res->flags & IORESOURCE_MEM) | ||
854 | offset = hose->pci_mem_offset; | ||
855 | |||
856 | region->start = (res->start - offset) & mask; | ||
857 | region->end = (res->end - offset) & mask; | ||
858 | } | ||
859 | EXPORT_SYMBOL(pcibios_resource_to_bus); | ||
860 | |||
861 | void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, | ||
862 | struct pci_bus_region *region) | ||
863 | { | ||
864 | resource_size_t offset = 0, mask = (resource_size_t)-1; | ||
865 | struct pci_controller *hose = pci_bus_to_host(dev->bus); | ||
866 | |||
867 | if (!hose) | ||
868 | return; | ||
869 | if (res->flags & IORESOURCE_IO) { | ||
870 | offset = (unsigned long)hose->io_base_virt - _IO_BASE; | ||
871 | mask = 0xffffffffu; | ||
872 | } else if (res->flags & IORESOURCE_MEM) | ||
873 | offset = hose->pci_mem_offset; | ||
874 | res->start = (region->start + offset) & mask; | ||
875 | res->end = (region->end + offset) & mask; | ||
876 | } | ||
877 | EXPORT_SYMBOL(pcibios_bus_to_resource); | ||
878 | |||
879 | /* Fixup a bus resource into a linux resource */ | ||
880 | static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev) | ||
881 | { | ||
882 | struct pci_controller *hose = pci_bus_to_host(dev->bus); | ||
883 | resource_size_t offset = 0, mask = (resource_size_t)-1; | ||
884 | |||
885 | if (res->flags & IORESOURCE_IO) { | ||
886 | offset = (unsigned long)hose->io_base_virt - _IO_BASE; | ||
887 | mask = 0xffffffffu; | ||
888 | } else if (res->flags & IORESOURCE_MEM) | ||
889 | offset = hose->pci_mem_offset; | ||
890 | |||
891 | res->start = (res->start + offset) & mask; | ||
892 | res->end = (res->end + offset) & mask; | ||
893 | } | ||
894 | |||
895 | /* This header fixup will do the resource fixup for all devices as they are | ||
896 | * probed, but not for bridge ranges | ||
897 | */ | ||
898 | static void __devinit pcibios_fixup_resources(struct pci_dev *dev) | ||
899 | { | ||
900 | struct pci_controller *hose = pci_bus_to_host(dev->bus); | ||
901 | int i; | ||
902 | |||
903 | if (!hose) { | ||
904 | printk(KERN_ERR "No host bridge for PCI dev %s !\n", | ||
905 | pci_name(dev)); | ||
906 | return; | ||
907 | } | ||
908 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | ||
909 | struct resource *res = dev->resource + i; | ||
910 | if (!res->flags) | ||
911 | continue; | ||
912 | /* On platforms that have PCI_PROBE_ONLY set, we don't | ||
913 | * consider 0 as an unassigned BAR value. It's technically | ||
914 | * a valid value, but linux doesn't like it... so when we can | ||
915 | * re-assign things, we do so, but if we can't, we keep it | ||
916 | * around and hope for the best... | ||
917 | */ | ||
918 | if (res->start == 0 && !(pci_flags & PCI_PROBE_ONLY)) { | ||
919 | pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]" \ | ||
920 | "is unassigned\n", | ||
921 | pci_name(dev), i, | ||
922 | (unsigned long long)res->start, | ||
923 | (unsigned long long)res->end, | ||
924 | (unsigned int)res->flags); | ||
925 | res->end -= res->start; | ||
926 | res->start = 0; | ||
927 | res->flags |= IORESOURCE_UNSET; | ||
928 | continue; | ||
929 | } | ||
930 | |||
931 | pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] fixup...\n", | ||
932 | pci_name(dev), i, | ||
933 | (unsigned long long)res->start, | ||
934 | (unsigned long long)res->end, | ||
935 | (unsigned int)res->flags); | ||
936 | |||
937 | fixup_resource(res, dev); | ||
938 | |||
939 | pr_debug("PCI:%s %016llx-%016llx\n", | ||
940 | pci_name(dev), | ||
941 | (unsigned long long)res->start, | ||
942 | (unsigned long long)res->end); | ||
943 | } | ||
944 | } | ||
945 | DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources); | ||
946 | |||
947 | /* This function tries to figure out if a bridge resource has been initialized | ||
948 | * by the firmware or not. It doesn't have to be absolutely bullet proof, but | ||
949 | * things go more smoothly when it gets it right. It should cover cases such | ||
950 | * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges | ||
951 | */ | ||
952 | static int __devinit pcibios_uninitialized_bridge_resource(struct pci_bus *bus, | ||
953 | struct resource *res) | ||
954 | { | ||
955 | struct pci_controller *hose = pci_bus_to_host(bus); | ||
956 | struct pci_dev *dev = bus->self; | ||
957 | resource_size_t offset; | ||
958 | u16 command; | ||
959 | int i; | ||
960 | |||
961 | /* We don't do anything if PCI_PROBE_ONLY is set */ | ||
962 | if (pci_flags & PCI_PROBE_ONLY) | ||
963 | return 0; | ||
964 | |||
965 | /* Job is a bit different between memory and IO */ | ||
966 | if (res->flags & IORESOURCE_MEM) { | ||
967 | /* If the BAR is non-0 (res != pci_mem_offset) then it's | ||
968 | * probably been initialized by somebody | ||
969 | */ | ||
970 | if (res->start != hose->pci_mem_offset) | ||
971 | return 0; | ||
972 | |||
973 | /* The BAR is 0, let's check if memory decoding is enabled on | ||
974 | * the bridge. If not, we consider it unassigned | ||
975 | */ | ||
976 | pci_read_config_word(dev, PCI_COMMAND, &command); | ||
977 | if ((command & PCI_COMMAND_MEMORY) == 0) | ||
978 | return 1; | ||
979 | |||
980 | /* Memory decoding is enabled and the BAR is 0. If any of | ||
981 | * the bridge resources covers that starting address (0) then | ||
982 | * it's good enough for us for memory. | ||
983 | */ | ||
984 | for (i = 0; i < 3; i++) { | ||
985 | if ((hose->mem_resources[i].flags & IORESOURCE_MEM) && | ||
986 | hose->mem_resources[i].start == hose->pci_mem_offset) | ||
987 | return 0; | ||
988 | } | ||
989 | |||
990 | /* Well, it starts at 0 and we know it will collide so we may as | ||
991 | * well consider it as unassigned. That covers the Apple case. | ||
992 | */ | ||
993 | return 1; | ||
994 | } else { | ||
995 | /* If the BAR is non-0, then we consider it assigned */ | ||
996 | offset = (unsigned long)hose->io_base_virt - _IO_BASE; | ||
997 | if (((res->start - offset) & 0xfffffffful) != 0) | ||
998 | return 0; | ||
999 | |||
1000 | /* Here, we are a bit different than memory as typically IO | ||
1001 | * space starting at low addresses -is- valid. What we do | ||
1002 | * instead is that we consider as unassigned anything that | ||
1003 | * doesn't have IO enabled in the PCI command register, | ||
1004 | * and that's it. | ||
1005 | */ | ||
1006 | pci_read_config_word(dev, PCI_COMMAND, &command); | ||
1007 | if (command & PCI_COMMAND_IO) | ||
1008 | return 0; | ||
1009 | |||
1010 | /* It's starting at 0 and IO is disabled in the bridge, consider | ||
1011 | * it unassigned | ||
1012 | */ | ||
1013 | return 1; | ||
1014 | } | ||
1015 | } | ||
1016 | |||
1017 | /* Fixup resources of a PCI<->PCI bridge */ | ||
1018 | static void __devinit pcibios_fixup_bridge(struct pci_bus *bus) | ||
1019 | { | ||
1020 | struct resource *res; | ||
1021 | int i; | ||
1022 | |||
1023 | struct pci_dev *dev = bus->self; | ||
1024 | |||
1025 | for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) { | ||
1026 | res = bus->resource[i]; | ||
1027 | if (!res) | ||
1028 | continue; | ||
1029 | if (!res->flags) | ||
1030 | continue; | ||
1031 | if (i >= 3 && bus->self->transparent) | ||
1032 | continue; | ||
1033 | |||
1034 | pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n", | ||
1035 | pci_name(dev), i, | ||
1036 | (unsigned long long)res->start, | ||
1037 | (unsigned long long)res->end, | ||
1038 | (unsigned int)res->flags); | ||
1039 | |||
1040 | /* Perform fixup */ | ||
1041 | fixup_resource(res, dev); | ||
1042 | |||
1043 | /* Try to detect uninitialized P2P bridge resources, | ||
1044 | * and clear them out so they get re-assigned later | ||
1045 | */ | ||
1046 | if (pcibios_uninitialized_bridge_resource(bus, res)) { | ||
1047 | res->flags = 0; | ||
1048 | pr_debug("PCI:%s (unassigned)\n", | ||
1049 | pci_name(dev)); | ||
1050 | } else { | ||
1051 | pr_debug("PCI:%s %016llx-%016llx\n", | ||
1052 | pci_name(dev), | ||
1053 | (unsigned long long)res->start, | ||
1054 | (unsigned long long)res->end); | ||
1055 | } | ||
1056 | } | ||
1057 | } | ||
1058 | |||
1059 | void __devinit pcibios_setup_bus_self(struct pci_bus *bus) | ||
1060 | { | ||
1061 | /* Fix up the bus resources for P2P bridges */ | ||
1062 | if (bus->self != NULL) | ||
1063 | pcibios_fixup_bridge(bus); | ||
1064 | } | ||
1065 | |||
1066 | void __devinit pcibios_setup_bus_devices(struct pci_bus *bus) | ||
1067 | { | ||
1068 | struct pci_dev *dev; | ||
1069 | |||
1070 | pr_debug("PCI: Fixup bus devices %d (%s)\n", | ||
1071 | bus->number, bus->self ? pci_name(bus->self) : "PHB"); | ||
1072 | |||
1073 | list_for_each_entry(dev, &bus->devices, bus_list) { | ||
1074 | struct dev_archdata *sd = &dev->dev.archdata; | ||
1075 | |||
1076 | /* Setup OF node pointer in archdata */ | ||
1077 | sd->of_node = pci_device_to_OF_node(dev); | ||
1078 | |||
1079 | /* Fixup NUMA node as it may not be setup yet by the generic | ||
1080 | * code and is needed by the DMA init | ||
1081 | */ | ||
1082 | set_dev_node(&dev->dev, pcibus_to_node(dev->bus)); | ||
1083 | |||
1084 | /* Hook up default DMA ops */ | ||
1085 | sd->dma_ops = pci_dma_ops; | ||
1086 | sd->dma_data = (void *)PCI_DRAM_OFFSET; | ||
1087 | |||
1088 | /* Read default IRQs and fixup if necessary */ | ||
1089 | pci_read_irq_line(dev); | ||
1090 | } | ||
1091 | } | ||
1092 | |||
1093 | void __devinit pcibios_fixup_bus(struct pci_bus *bus) | ||
1094 | { | ||
1095 | /* When called from the generic PCI probe, read PCI<->PCI bridge | ||
1096 | * bases. This is -not- called when generating the PCI tree from | ||
1097 | * the OF device-tree. | ||
1098 | */ | ||
1099 | if (bus->self != NULL) | ||
1100 | pci_read_bridge_bases(bus); | ||
1101 | |||
1102 | /* Now fixup the bus itself */ | ||
1103 | pcibios_setup_bus_self(bus); | ||
1104 | |||
1105 | /* Now fixup devices on that bus */ | ||
1106 | pcibios_setup_bus_devices(bus); | ||
1107 | } | ||
1108 | EXPORT_SYMBOL(pcibios_fixup_bus); | ||
1109 | |||
1110 | static int skip_isa_ioresource_align(struct pci_dev *dev) | ||
1111 | { | ||
1112 | if ((pci_flags & PCI_CAN_SKIP_ISA_ALIGN) && | ||
1113 | !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA)) | ||
1114 | return 1; | ||
1115 | return 0; | ||
1116 | } | ||
1117 | |||
1118 | /* | ||
1119 | * We need to avoid collisions with `mirrored' VGA ports | ||
1120 | * and other strange ISA hardware, so we always want the | ||
1121 | * addresses to be allocated in the 0x000-0x0ff region | ||
1122 | * modulo 0x400. | ||
1123 | * | ||
1124 | * Why? Because some silly external IO cards only decode | ||
1125 | * the low 10 bits of the IO address. The 0x00-0xff region | ||
1126 | * is reserved for motherboard devices that decode all 16 | ||
1127 | * bits, so it's ok to allocate at, say, 0x2800-0x28ff, | ||
1128 | * but we want to try to avoid allocating at 0x2900-0x2bff | ||
1129 | * which might be mirrored at 0x0100-0x03ff. | ||
1130 | */ | ||
1131 | void pcibios_align_resource(void *data, struct resource *res, | ||
1132 | resource_size_t size, resource_size_t align) | ||
1133 | { | ||
1134 | struct pci_dev *dev = data; | ||
1135 | |||
1136 | if (res->flags & IORESOURCE_IO) { | ||
1137 | resource_size_t start = res->start; | ||
1138 | |||
1139 | if (skip_isa_ioresource_align(dev)) | ||
1140 | return; | ||
1141 | if (start & 0x300) { | ||
1142 | start = (start + 0x3ff) & ~0x3ff; | ||
1143 | res->start = start; | ||
1144 | } | ||
1145 | } | ||
1146 | } | ||
1147 | EXPORT_SYMBOL(pcibios_align_resource); | ||
1148 | |||
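A worked illustration of the alignment rule above (the numbers are invented): an I/O request starting at 0x2a10 has bits within 0x300 set, so it gets bumped to (0x2a10 + 0x3ff) & ~0x3ff = 0x2c00, which lands in the 0x000-0x0ff region modulo 0x400 and therefore avoids the mirrored 0x100-0x3ff ISA window described in the comment.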
1149 | /* | ||
1150 | * Reparent resource children of parent that conflict with res | ||
1151 | * under res, and make res replace those children. | ||
1152 | */ | ||
1153 | static int __init reparent_resources(struct resource *parent, | ||
1154 | struct resource *res) | ||
1155 | { | ||
1156 | struct resource *p, **pp; | ||
1157 | struct resource **firstpp = NULL; | ||
1158 | |||
1159 | for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) { | ||
1160 | if (p->end < res->start) | ||
1161 | continue; | ||
1162 | if (res->end < p->start) | ||
1163 | break; | ||
1164 | if (p->start < res->start || p->end > res->end) | ||
1165 | return -1; /* not completely contained */ | ||
1166 | if (firstpp == NULL) | ||
1167 | firstpp = pp; | ||
1168 | } | ||
1169 | if (firstpp == NULL) | ||
1170 | return -1; /* didn't find any conflicting entries? */ | ||
1171 | res->parent = parent; | ||
1172 | res->child = *firstpp; | ||
1173 | res->sibling = *pp; | ||
1174 | *firstpp = res; | ||
1175 | *pp = NULL; | ||
1176 | for (p = res->child; p != NULL; p = p->sibling) { | ||
1177 | p->parent = res; | ||
1178 | pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n", | ||
1179 | p->name, | ||
1180 | (unsigned long long)p->start, | ||
1181 | (unsigned long long)p->end, res->name); | ||
1182 | } | ||
1183 | return 0; | ||
1184 | } | ||
1185 | |||
1186 | /* | ||
1187 | * Handle resources of PCI devices. If the world were perfect, we could | ||
1188 | * just allocate all the resource regions and do nothing more. It isn't. | ||
1189 | * On the other hand, we cannot just re-allocate all devices, as it would | ||
1190 | * require us to know lots of host bridge internals. So we attempt to | ||
1191 | * keep as much of the original configuration as possible, but tweak it | ||
1192 | * when it's found to be wrong. | ||
1193 | * | ||
1194 | * Known BIOS problems we have to work around: | ||
1195 | * - I/O or memory regions not configured | ||
1196 | * - regions configured, but not enabled in the command register | ||
1197 | * - bogus I/O addresses above 64K used | ||
1198 | * - expansion ROMs left enabled (this may sound harmless, but given | ||
1199 | * the fact the PCI specs explicitly allow address decoders to be | ||
1200 | * shared between expansion ROMs and other resource regions, it's | ||
1201 | * at least dangerous) | ||
1202 | * | ||
1203 | * Our solution: | ||
1204 | * (1) Allocate resources for all buses behind PCI-to-PCI bridges. | ||
1205 | * This gives us fixed barriers on where we can allocate. | ||
1206 | * (2) Allocate resources for all enabled devices. If there is | ||
1207 | * a collision, just mark the resource as unallocated. Also | ||
1208 | * disable expansion ROMs during this step. | ||
1209 | * (3) Try to allocate resources for disabled devices. If the | ||
1210 | * resources were assigned correctly, everything goes well, | ||
1211 | * if they weren't, they won't disturb allocation of other | ||
1212 | * resources. | ||
1213 | * (4) Assign new addresses to resources which were either | ||
1214 | * not configured at all or misconfigured. If explicitly | ||
1215 | * requested by the user, configure expansion ROM address | ||
1216 | * as well. | ||
1217 | */ | ||
1218 | |||
1219 | void pcibios_allocate_bus_resources(struct pci_bus *bus) | ||
1220 | { | ||
1221 | struct pci_bus *b; | ||
1222 | int i; | ||
1223 | struct resource *res, *pr; | ||
1224 | |||
1225 | pr_debug("PCI: Allocating bus resources for %04x:%02x...\n", | ||
1226 | pci_domain_nr(bus), bus->number); | ||
1227 | |||
1228 | for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) { | ||
1229 | res = bus->resource[i]; | ||
1230 | if (!res || !res->flags | ||
1231 | || res->start > res->end || res->parent) | ||
1232 | continue; | ||
1233 | if (bus->parent == NULL) | ||
1234 | pr = (res->flags & IORESOURCE_IO) ? | ||
1235 | &ioport_resource : &iomem_resource; | ||
1236 | else { | ||
1237 | /* Don't bother with non-root busses when | ||
1238 | * re-assigning all resources. We clear the | ||
1239 | * resource flags as if they were colliding | ||
1240 | * and as such ensure proper re-allocation | ||
1241 | * later. | ||
1242 | */ | ||
1243 | if (pci_flags & PCI_REASSIGN_ALL_RSRC) | ||
1244 | goto clear_resource; | ||
1245 | pr = pci_find_parent_resource(bus->self, res); | ||
1246 | if (pr == res) { | ||
1247 | /* this happens when the generic PCI | ||
1248 | * code (wrongly) decides that this | ||
1249 | * bridge is transparent -- paulus | ||
1250 | */ | ||
1251 | continue; | ||
1252 | } | ||
1253 | } | ||
1254 | |||
1255 | pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx " | ||
1256 | "[0x%x], parent %p (%s)\n", | ||
1257 | bus->self ? pci_name(bus->self) : "PHB", | ||
1258 | bus->number, i, | ||
1259 | (unsigned long long)res->start, | ||
1260 | (unsigned long long)res->end, | ||
1261 | (unsigned int)res->flags, | ||
1262 | pr, (pr && pr->name) ? pr->name : "nil"); | ||
1263 | |||
1264 | if (pr && !(pr->flags & IORESOURCE_UNSET)) { | ||
1265 | if (request_resource(pr, res) == 0) | ||
1266 | continue; | ||
1267 | /* | ||
1268 | * Must be a conflict with an existing entry. | ||
1269 | * Move that entry (or entries) under the | ||
1270 | * bridge resource and try again. | ||
1271 | */ | ||
1272 | if (reparent_resources(pr, res) == 0) | ||
1273 | continue; | ||
1274 | } | ||
1275 | printk(KERN_WARNING "PCI: Cannot allocate resource region " | ||
1276 | "%d of PCI bridge %d, will remap\n", i, bus->number); | ||
1277 | clear_resource: | ||
1278 | res->flags = 0; | ||
1279 | } | ||
1280 | |||
1281 | list_for_each_entry(b, &bus->children, node) | ||
1282 | pcibios_allocate_bus_resources(b); | ||
1283 | } | ||
1284 | |||
1285 | static inline void __devinit alloc_resource(struct pci_dev *dev, int idx) | ||
1286 | { | ||
1287 | struct resource *pr, *r = &dev->resource[idx]; | ||
1288 | |||
1289 | pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n", | ||
1290 | pci_name(dev), idx, | ||
1291 | (unsigned long long)r->start, | ||
1292 | (unsigned long long)r->end, | ||
1293 | (unsigned int)r->flags); | ||
1294 | |||
1295 | pr = pci_find_parent_resource(dev, r); | ||
1296 | if (!pr || (pr->flags & IORESOURCE_UNSET) || | ||
1297 | request_resource(pr, r) < 0) { | ||
1298 | printk(KERN_WARNING "PCI: Cannot allocate resource region %d" | ||
1299 | " of device %s, will remap\n", idx, pci_name(dev)); | ||
1300 | if (pr) | ||
1301 | pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n", | ||
1302 | pr, | ||
1303 | (unsigned long long)pr->start, | ||
1304 | (unsigned long long)pr->end, | ||
1305 | (unsigned int)pr->flags); | ||
1306 | /* We'll assign a new address later */ | ||
1307 | r->flags |= IORESOURCE_UNSET; | ||
1308 | r->end -= r->start; | ||
1309 | r->start = 0; | ||
1310 | } | ||
1311 | } | ||
1312 | |||
1313 | static void __init pcibios_allocate_resources(int pass) | ||
1314 | { | ||
1315 | struct pci_dev *dev = NULL; | ||
1316 | int idx, disabled; | ||
1317 | u16 command; | ||
1318 | struct resource *r; | ||
1319 | |||
1320 | for_each_pci_dev(dev) { | ||
1321 | pci_read_config_word(dev, PCI_COMMAND, &command); | ||
1322 | for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { | ||
1323 | r = &dev->resource[idx]; | ||
1324 | if (r->parent) /* Already allocated */ | ||
1325 | continue; | ||
1326 | if (!r->flags || (r->flags & IORESOURCE_UNSET)) | ||
1327 | continue; /* Not assigned at all */ | ||
1328 | /* We only allocate ROMs on pass 1 just in case they | ||
1329 | * have been screwed up by firmware | ||
1330 | */ | ||
1331 | if (idx == PCI_ROM_RESOURCE) | ||
1332 | disabled = 1; | ||
1333 | if (r->flags & IORESOURCE_IO) | ||
1334 | disabled = !(command & PCI_COMMAND_IO); | ||
1335 | else | ||
1336 | disabled = !(command & PCI_COMMAND_MEMORY); | ||
1337 | if (pass == disabled) | ||
1338 | alloc_resource(dev, idx); | ||
1339 | } | ||
1340 | if (pass) | ||
1341 | continue; | ||
1342 | r = &dev->resource[PCI_ROM_RESOURCE]; | ||
1343 | if (r->flags) { | ||
1344 | /* Turn the ROM off, leave the resource region, | ||
1345 | * but keep it unregistered. | ||
1346 | */ | ||
1347 | u32 reg; | ||
1348 | pci_read_config_dword(dev, dev->rom_base_reg, ®); | ||
1349 | if (reg & PCI_ROM_ADDRESS_ENABLE) { | ||
1350 | pr_debug("PCI: Switching off ROM of %s\n", | ||
1351 | pci_name(dev)); | ||
1352 | r->flags &= ~IORESOURCE_ROM_ENABLE; | ||
1353 | pci_write_config_dword(dev, dev->rom_base_reg, | ||
1354 | reg & ~PCI_ROM_ADDRESS_ENABLE); | ||
1355 | } | ||
1356 | } | ||
1357 | } | ||
1358 | } | ||
1359 | |||
1360 | static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus) | ||
1361 | { | ||
1362 | struct pci_controller *hose = pci_bus_to_host(bus); | ||
1363 | resource_size_t offset; | ||
1364 | struct resource *res, *pres; | ||
1365 | int i; | ||
1366 | |||
1367 | pr_debug("Reserving legacy ranges for domain %04x\n", | ||
1368 | pci_domain_nr(bus)); | ||
1369 | |||
1370 | /* Check for IO */ | ||
1371 | if (!(hose->io_resource.flags & IORESOURCE_IO)) | ||
1372 | goto no_io; | ||
1373 | offset = (unsigned long)hose->io_base_virt - _IO_BASE; | ||
1374 | res = kzalloc(sizeof(struct resource), GFP_KERNEL); | ||
1375 | BUG_ON(res == NULL); | ||
1376 | res->name = "Legacy IO"; | ||
1377 | res->flags = IORESOURCE_IO; | ||
1378 | res->start = offset; | ||
1379 | res->end = (offset + 0xfff) & 0xfffffffful; | ||
1380 | pr_debug("Candidate legacy IO: %pR\n", res); | ||
1381 | if (request_resource(&hose->io_resource, res)) { | ||
1382 | printk(KERN_DEBUG | ||
1383 | "PCI %04x:%02x Cannot reserve Legacy IO %pR\n", | ||
1384 | pci_domain_nr(bus), bus->number, res); | ||
1385 | kfree(res); | ||
1386 | } | ||
1387 | |||
1388 | no_io: | ||
1389 | /* Check for memory */ | ||
1390 | offset = hose->pci_mem_offset; | ||
1391 | pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset); | ||
1392 | for (i = 0; i < 3; i++) { | ||
1393 | pres = &hose->mem_resources[i]; | ||
1394 | if (!(pres->flags & IORESOURCE_MEM)) | ||
1395 | continue; | ||
1396 | pr_debug("hose mem res: %pR\n", pres); | ||
1397 | if ((pres->start - offset) <= 0xa0000 && | ||
1398 | (pres->end - offset) >= 0xbffff) | ||
1399 | break; | ||
1400 | } | ||
1401 | if (i >= 3) | ||
1402 | return; | ||
1403 | res = kzalloc(sizeof(struct resource), GFP_KERNEL); | ||
1404 | BUG_ON(res == NULL); | ||
1405 | res->name = "Legacy VGA memory"; | ||
1406 | res->flags = IORESOURCE_MEM; | ||
1407 | res->start = 0xa0000 + offset; | ||
1408 | res->end = 0xbffff + offset; | ||
1409 | pr_debug("Candidate VGA memory: %pR\n", res); | ||
1410 | if (request_resource(pres, res)) { | ||
1411 | printk(KERN_DEBUG | ||
1412 | "PCI %04x:%02x Cannot reserve VGA memory %pR\n", | ||
1413 | pci_domain_nr(bus), bus->number, res); | ||
1414 | kfree(res); | ||
1415 | } | ||
1416 | } | ||
1417 | |||
1418 | void __init pcibios_resource_survey(void) | ||
1419 | { | ||
1420 | struct pci_bus *b; | ||
1421 | |||
1422 | /* Allocate and assign resources. If we re-assign everything, then | ||
1423 | * we skip the allocate phase | ||
1424 | */ | ||
1425 | list_for_each_entry(b, &pci_root_buses, node) | ||
1426 | pcibios_allocate_bus_resources(b); | ||
1427 | |||
1428 | if (!(pci_flags & PCI_REASSIGN_ALL_RSRC)) { | ||
1429 | pcibios_allocate_resources(0); | ||
1430 | pcibios_allocate_resources(1); | ||
1431 | } | ||
1432 | |||
1433 | /* Before we start assigning unassigned resources, we try to reserve | ||
1434 | * the low IO area and the VGA memory area if they intersect the | ||
1435 | * bus available resources to avoid allocating things on top of them | ||
1436 | */ | ||
1437 | if (!(pci_flags & PCI_PROBE_ONLY)) { | ||
1438 | list_for_each_entry(b, &pci_root_buses, node) | ||
1439 | pcibios_reserve_legacy_regions(b); | ||
1440 | } | ||
1441 | |||
1442 | /* Now, if the platform didn't decide to blindly trust the firmware, | ||
1443 | * we proceed to assigning things that were left unassigned | ||
1444 | */ | ||
1445 | if (!(pci_flags & PCI_PROBE_ONLY)) { | ||
1446 | pr_debug("PCI: Assigning unassigned resources...\n"); | ||
1447 | pci_assign_unassigned_resources(); | ||
1448 | } | ||
1449 | } | ||
1450 | |||
1451 | #ifdef CONFIG_HOTPLUG | ||
1452 | |||
1453 | /* This is used by the PCI hotplug driver to allocate resources | ||
1454 | * of newly plugged busses. We can try to consolidate with the | ||
1455 | * rest of the code later, for now, keep it as-is as our main | ||
1456 | * resource allocation function doesn't deal with sub-trees yet. | ||
1457 | */ | ||
1458 | void __devinit pcibios_claim_one_bus(struct pci_bus *bus) | ||
1459 | { | ||
1460 | struct pci_dev *dev; | ||
1461 | struct pci_bus *child_bus; | ||
1462 | |||
1463 | list_for_each_entry(dev, &bus->devices, bus_list) { | ||
1464 | int i; | ||
1465 | |||
1466 | for (i = 0; i < PCI_NUM_RESOURCES; i++) { | ||
1467 | struct resource *r = &dev->resource[i]; | ||
1468 | |||
1469 | if (r->parent || !r->start || !r->flags) | ||
1470 | continue; | ||
1471 | |||
1472 | pr_debug("PCI: Claiming %s: " | ||
1473 | "Resource %d: %016llx..%016llx [%x]\n", | ||
1474 | pci_name(dev), i, | ||
1475 | (unsigned long long)r->start, | ||
1476 | (unsigned long long)r->end, | ||
1477 | (unsigned int)r->flags); | ||
1478 | |||
1479 | pci_claim_resource(dev, i); | ||
1480 | } | ||
1481 | } | ||
1482 | |||
1483 | list_for_each_entry(child_bus, &bus->children, node) | ||
1484 | pcibios_claim_one_bus(child_bus); | ||
1485 | } | ||
1486 | EXPORT_SYMBOL_GPL(pcibios_claim_one_bus); | ||
1487 | |||
1488 | |||
1489 | /* pcibios_finish_adding_to_bus | ||
1490 | * | ||
1491 | * This is to be called by the hotplug code after devices have been | ||
1492 | * added to a bus; this includes calling it for a PHB that is just | ||
1493 | * being added. | ||
1494 | */ | ||
1495 | void pcibios_finish_adding_to_bus(struct pci_bus *bus) | ||
1496 | { | ||
1497 | pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n", | ||
1498 | pci_domain_nr(bus), bus->number); | ||
1499 | |||
1500 | /* Allocate bus and devices resources */ | ||
1501 | pcibios_allocate_bus_resources(bus); | ||
1502 | pcibios_claim_one_bus(bus); | ||
1503 | |||
1504 | /* Add new devices to global lists. Register in proc, sysfs. */ | ||
1505 | pci_bus_add_devices(bus); | ||
1506 | |||
1507 | /* Fixup EEH */ | ||
1508 | eeh_add_device_tree_late(bus); | ||
1509 | } | ||
1510 | EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus); | ||
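/*
 * Illustrative hotplug sequence (hypothetical caller, sketch only): after the
 * new devices have been discovered, the helper above does the resource and
 * registration work in one call:
 *
 *	pci_scan_child_bus(bus);
 *	pcibios_finish_adding_to_bus(bus);
 */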
1511 | |||
1512 | #endif /* CONFIG_HOTPLUG */ | ||
1513 | |||
1514 | int pcibios_enable_device(struct pci_dev *dev, int mask) | ||
1515 | { | ||
1516 | return pci_enable_resources(dev, mask); | ||
1517 | } | ||
1518 | |||
1519 | void __devinit pcibios_setup_phb_resources(struct pci_controller *hose) | ||
1520 | { | ||
1521 | struct pci_bus *bus = hose->bus; | ||
1522 | struct resource *res; | ||
1523 | int i; | ||
1524 | |||
1525 | /* Hookup PHB IO resource */ | ||
1526 | bus->resource[0] = res = &hose->io_resource; | ||
1527 | |||
1528 | if (!res->flags) { | ||
1529 | printk(KERN_WARNING "PCI: I/O resource not set for host" | ||
1530 | " bridge %s (domain %d)\n", | ||
1531 | hose->dn->full_name, hose->global_number); | ||
1532 | /* Workaround for lack of IO resource only on 32-bit */ | ||
1533 | res->start = (unsigned long)hose->io_base_virt - isa_io_base; | ||
1534 | res->end = res->start + IO_SPACE_LIMIT; | ||
1535 | res->flags = IORESOURCE_IO; | ||
1536 | } | ||
1537 | |||
1538 | pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n", | ||
1539 | (unsigned long long)res->start, | ||
1540 | (unsigned long long)res->end, | ||
1541 | (unsigned long)res->flags); | ||
1542 | |||
1543 | /* Hookup PHB Memory resources */ | ||
1544 | for (i = 0; i < 3; ++i) { | ||
1545 | res = &hose->mem_resources[i]; | ||
1546 | if (!res->flags) { | ||
1547 | if (i > 0) | ||
1548 | continue; | ||
1549 | printk(KERN_ERR "PCI: Memory resource 0 not set for " | ||
1550 | "host bridge %s (domain %d)\n", | ||
1551 | hose->dn->full_name, hose->global_number); | ||
1552 | |||
1553 | /* Workaround for lack of MEM resource only on 32-bit */ | ||
1554 | res->start = hose->pci_mem_offset; | ||
1555 | res->end = (resource_size_t)-1LL; | ||
1556 | res->flags = IORESOURCE_MEM; | ||
1557 | |||
1558 | } | ||
1559 | bus->resource[i+1] = res; | ||
1560 | |||
1561 | pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n", | ||
1562 | i, (unsigned long long)res->start, | ||
1563 | (unsigned long long)res->end, | ||
1564 | (unsigned long)res->flags); | ||
1565 | } | ||
1566 | |||
1567 | pr_debug("PCI: PHB MEM offset = %016llx\n", | ||
1568 | (unsigned long long)hose->pci_mem_offset); | ||
1569 | pr_debug("PCI: PHB IO offset = %08lx\n", | ||
1570 | (unsigned long)hose->io_base_virt - _IO_BASE); | ||
1571 | } | ||
1572 | |||
1573 | /* | ||
1574 | * Null PCI config access functions, for the case when we can't | ||
1575 | * find a hose. | ||
1576 | */ | ||
1577 | #define NULL_PCI_OP(rw, size, type) \ | ||
1578 | static int \ | ||
1579 | null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \ | ||
1580 | { \ | ||
1581 | return PCIBIOS_DEVICE_NOT_FOUND; \ | ||
1582 | } | ||
1583 | |||
1584 | static int | ||
1585 | null_read_config(struct pci_bus *bus, unsigned int devfn, int offset, | ||
1586 | int len, u32 *val) | ||
1587 | { | ||
1588 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
1589 | } | ||
1590 | |||
1591 | static int | ||
1592 | null_write_config(struct pci_bus *bus, unsigned int devfn, int offset, | ||
1593 | int len, u32 val) | ||
1594 | { | ||
1595 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
1596 | } | ||
1597 | |||
1598 | static struct pci_ops null_pci_ops = { | ||
1599 | .read = null_read_config, | ||
1600 | .write = null_write_config, | ||
1601 | }; | ||
1602 | |||
1603 | /* | ||
1604 | * These functions are used early on, before PCI scanning is done | ||
1605 | * and before all of the pci_dev and pci_bus structures have been created. | ||
1606 | */ | ||
1607 | static struct pci_bus * | ||
1608 | fake_pci_bus(struct pci_controller *hose, int busnr) | ||
1609 | { | ||
1610 | static struct pci_bus bus; | ||
1611 | |||
1612 | if (!hose) | ||
1613 | printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr); | ||
1614 | |||
1615 | bus.number = busnr; | ||
1616 | bus.sysdata = hose; | ||
1617 | bus.ops = hose ? hose->ops : &null_pci_ops; | ||
1618 | return &bus; | ||
1619 | } | ||
1620 | |||
1621 | #define EARLY_PCI_OP(rw, size, type) \ | ||
1622 | int early_##rw##_config_##size(struct pci_controller *hose, int bus, \ | ||
1623 | int devfn, int offset, type value) \ | ||
1624 | { \ | ||
1625 | return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \ | ||
1626 | devfn, offset, value); \ | ||
1627 | } | ||
1628 | |||
1629 | EARLY_PCI_OP(read, byte, u8 *) | ||
1630 | EARLY_PCI_OP(read, word, u16 *) | ||
1631 | EARLY_PCI_OP(read, dword, u32 *) | ||
1632 | EARLY_PCI_OP(write, byte, u8) | ||
1633 | EARLY_PCI_OP(write, word, u16) | ||
1634 | EARLY_PCI_OP(write, dword, u32) | ||
1635 | |||
1636 | int early_find_capability(struct pci_controller *hose, int bus, int devfn, | ||
1637 | int cap) | ||
1638 | { | ||
1639 | return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap); | ||
1640 | } | ||
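/*
 * Illustrative early config access (hypothetical board code, sketch only):
 * before the PCI tree is scanned, a platform can reach config space through
 * the hose directly, e.g. to read the vendor/device ID at bus 0, devfn 0:
 *
 *	u32 id;
 *	early_read_config_dword(hose, 0, 0, PCI_VENDOR_ID, &id);
 */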
diff --git a/arch/microblaze/pci/pci_32.c b/arch/microblaze/pci/pci_32.c new file mode 100644 index 000000000000..7e0c94f501cc --- /dev/null +++ b/arch/microblaze/pci/pci_32.c | |||
@@ -0,0 +1,430 @@ | |||
1 | /* | ||
2 | * Common pmac/prep/chrp pci routines. -- Cort | ||
3 | */ | ||
4 | |||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/pci.h> | ||
7 | #include <linux/delay.h> | ||
8 | #include <linux/string.h> | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/capability.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/errno.h> | ||
13 | #include <linux/bootmem.h> | ||
14 | #include <linux/irq.h> | ||
15 | #include <linux/list.h> | ||
16 | #include <linux/of.h> | ||
17 | |||
18 | #include <asm/processor.h> | ||
19 | #include <asm/io.h> | ||
20 | #include <asm/prom.h> | ||
21 | #include <asm/sections.h> | ||
22 | #include <asm/pci-bridge.h> | ||
23 | #include <asm/byteorder.h> | ||
24 | #include <asm/uaccess.h> | ||
25 | |||
26 | #undef DEBUG | ||
27 | |||
28 | unsigned long isa_io_base; | ||
29 | unsigned long pci_dram_offset; | ||
30 | int pcibios_assign_bus_offset = 1; | ||
31 | |||
32 | static u8 *pci_to_OF_bus_map; | ||
33 | |||
34 | /* By default, we don't re-assign bus numbers. We do this only on | ||
35 | * some pmacs | ||
36 | */ | ||
37 | static int pci_assign_all_buses; | ||
38 | |||
39 | static int pci_bus_count; | ||
40 | |||
41 | /* | ||
42 | * Functions below are used on OpenFirmware machines. | ||
43 | */ | ||
44 | static void | ||
45 | make_one_node_map(struct device_node *node, u8 pci_bus) | ||
46 | { | ||
47 | const int *bus_range; | ||
48 | int len; | ||
49 | |||
50 | if (pci_bus >= pci_bus_count) | ||
51 | return; | ||
52 | bus_range = of_get_property(node, "bus-range", &len); | ||
53 | if (bus_range == NULL || len < 2 * sizeof(int)) { | ||
54 | printk(KERN_WARNING "Can't get bus-range for %s, " | ||
55 | "assuming it starts at 0\n", node->full_name); | ||
56 | pci_to_OF_bus_map[pci_bus] = 0; | ||
57 | } else | ||
58 | pci_to_OF_bus_map[pci_bus] = bus_range[0]; | ||
59 | |||
60 | for_each_child_of_node(node, node) { | ||
61 | struct pci_dev *dev; | ||
62 | const unsigned int *class_code, *reg; | ||
63 | |||
64 | class_code = of_get_property(node, "class-code", NULL); | ||
65 | if (!class_code || | ||
66 | ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && | ||
67 | (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) | ||
68 | continue; | ||
69 | reg = of_get_property(node, "reg", NULL); | ||
70 | if (!reg) | ||
71 | continue; | ||
72 | dev = pci_get_bus_and_slot(pci_bus, ((reg[0] >> 8) & 0xff)); | ||
73 | if (!dev || !dev->subordinate) { | ||
74 | pci_dev_put(dev); | ||
75 | continue; | ||
76 | } | ||
77 | make_one_node_map(node, dev->subordinate->number); | ||
78 | pci_dev_put(dev); | ||
79 | } | ||
80 | } | ||
81 | |||
82 | void | ||
83 | pcibios_make_OF_bus_map(void) | ||
84 | { | ||
85 | int i; | ||
86 | struct pci_controller *hose, *tmp; | ||
87 | struct property *map_prop; | ||
88 | struct device_node *dn; | ||
89 | |||
90 | pci_to_OF_bus_map = kmalloc(pci_bus_count, GFP_KERNEL); | ||
91 | if (!pci_to_OF_bus_map) { | ||
92 | printk(KERN_ERR "Can't allocate OF bus map!\n"); | ||
93 | return; | ||
94 | } | ||
95 | |||
96 | /* We fill the bus map with invalid values; that helps | ||
97 | * debugging. | ||
98 | */ | ||
99 | for (i = 0; i < pci_bus_count; i++) | ||
100 | pci_to_OF_bus_map[i] = 0xff; | ||
101 | |||
102 | /* For each hose, we begin searching bridges */ | ||
103 | list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { | ||
104 | struct device_node *node = hose->dn; | ||
105 | |||
106 | if (!node) | ||
107 | continue; | ||
108 | make_one_node_map(node, hose->first_busno); | ||
109 | } | ||
110 | dn = of_find_node_by_path("/"); | ||
111 | map_prop = of_find_property(dn, "pci-OF-bus-map", NULL); | ||
112 | if (map_prop) { | ||
113 | BUG_ON(pci_bus_count > map_prop->length); | ||
114 | memcpy(map_prop->value, pci_to_OF_bus_map, pci_bus_count); | ||
115 | } | ||
116 | of_node_put(dn); | ||
117 | #ifdef DEBUG | ||
118 | printk(KERN_INFO "PCI->OF bus map:\n"); | ||
119 | for (i = 0; i < pci_bus_count; i++) { | ||
120 | if (pci_to_OF_bus_map[i] == 0xff) | ||
121 | continue; | ||
122 | printk(KERN_INFO "%d -> %d\n", i, pci_to_OF_bus_map[i]); | ||
123 | } | ||
124 | #endif | ||
125 | } | ||
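/*
 * Illustrative lookup (sketch only): once the map is built, translating a
 * kernel bus number back to the firmware's numbering is a plain array read,
 * with 0xff still marking buses the scan above could not match:
 *
 *	u8 of_bus = pci_to_OF_bus_map[dev->bus->number];
 */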
126 | |||
127 | typedef int (*pci_OF_scan_iterator)(struct device_node *node, void *data); | ||
128 | |||
129 | static struct device_node *scan_OF_pci_childs(struct device_node *parent, | ||
130 | pci_OF_scan_iterator filter, void *data) | ||
131 | { | ||
132 | struct device_node *node; | ||
133 | struct device_node *sub_node; | ||
134 | |||
135 | for_each_child_of_node(parent, node) { | ||
136 | const unsigned int *class_code; | ||
137 | |||
138 | if (filter(node, data)) { | ||
139 | of_node_put(node); | ||
140 | return node; | ||
141 | } | ||
142 | |||
143 | /* For PCI<->PCI bridges or CardBus bridges, we go down. | ||
144 | * Note: some OFs create a parent node "multifunc-device" as | ||
145 | * a fake root for all functions of a multi-function device; | ||
146 | * we go down those as well. | ||
147 | */ | ||
148 | class_code = of_get_property(node, "class-code", NULL); | ||
149 | if ((!class_code || | ||
150 | ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && | ||
151 | (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) && | ||
152 | strcmp(node->name, "multifunc-device")) | ||
153 | continue; | ||
154 | sub_node = scan_OF_pci_childs(node, filter, data); | ||
155 | if (sub_node) { | ||
156 | of_node_put(node); | ||
157 | return sub_node; | ||
158 | } | ||
159 | } | ||
160 | return NULL; | ||
161 | } | ||
162 | |||
163 | static struct device_node *scan_OF_for_pci_dev(struct device_node *parent, | ||
164 | unsigned int devfn) | ||
165 | { | ||
166 | struct device_node *np, *cnp; | ||
167 | const u32 *reg; | ||
168 | unsigned int psize; | ||
169 | |||
170 | for_each_child_of_node(parent, np) { | ||
171 | reg = of_get_property(np, "reg", &psize); | ||
172 | if (reg && psize >= 4 && ((reg[0] >> 8) & 0xff) == devfn) | ||
173 | return np; | ||
174 | |||
175 | /* Note: some OFs create a parent node "multifunc-device" as | ||
176 | * a fake root for all functions of a multi-function device; | ||
177 | * we go down those as well. */ | ||
178 | if (!strcmp(np->name, "multifunc-device")) { | ||
179 | cnp = scan_OF_for_pci_dev(np, devfn); | ||
180 | if (cnp) | ||
181 | return cnp; | ||
182 | } | ||
183 | } | ||
184 | return NULL; | ||
185 | } | ||
186 | |||
187 | |||
188 | static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus) | ||
189 | { | ||
190 | struct device_node *parent, *np; | ||
191 | |||
192 | /* Are we a root bus ? */ | ||
193 | if (bus->self == NULL || bus->parent == NULL) { | ||
194 | struct pci_controller *hose = pci_bus_to_host(bus); | ||
195 | if (hose == NULL) | ||
196 | return NULL; | ||
197 | return of_node_get(hose->dn); | ||
198 | } | ||
199 | |||
200 | /* not a root bus, we need to get our parent */ | ||
201 | parent = scan_OF_for_pci_bus(bus->parent); | ||
202 | if (parent == NULL) | ||
203 | return NULL; | ||
204 | |||
205 | /* now iterate for children for a match */ | ||
206 | np = scan_OF_for_pci_dev(parent, bus->self->devfn); | ||
207 | of_node_put(parent); | ||
208 | |||
209 | return np; | ||
210 | } | ||
211 | |||
212 | /* | ||
213 | * Scans the OF tree for a device node matching a PCI device | ||
214 | */ | ||
215 | struct device_node * | ||
216 | pci_busdev_to_OF_node(struct pci_bus *bus, int devfn) | ||
217 | { | ||
218 | struct device_node *parent, *np; | ||
219 | |||
220 | pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn); | ||
221 | parent = scan_OF_for_pci_bus(bus); | ||
222 | if (parent == NULL) | ||
223 | return NULL; | ||
224 | pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>"); | ||
225 | np = scan_OF_for_pci_dev(parent, devfn); | ||
226 | of_node_put(parent); | ||
227 | pr_debug(" result is %s\n", np ? np->full_name : "<NULL>"); | ||
228 | |||
229 | /* XXX Most callers don't release the returned node, | ||
230 | * mostly because ppc64 doesn't increase the refcount; | ||
231 | * we need to fix that. | ||
232 | */ | ||
233 | return np; | ||
234 | } | ||
235 | EXPORT_SYMBOL(pci_busdev_to_OF_node); | ||
236 | |||
237 | struct device_node* | ||
238 | pci_device_to_OF_node(struct pci_dev *dev) | ||
239 | { | ||
240 | return pci_busdev_to_OF_node(dev->bus, dev->devfn); | ||
241 | } | ||
242 | EXPORT_SYMBOL(pci_device_to_OF_node); | ||
243 | |||
244 | static int | ||
245 | find_OF_pci_device_filter(struct device_node *node, void *data) | ||
246 | { | ||
247 | return ((void *)node == data); | ||
248 | } | ||
249 | |||
250 | /* | ||
251 | * Returns the PCI bus number and devfn matching a given OF node | ||
252 | */ | ||
253 | int | ||
254 | pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn) | ||
255 | { | ||
256 | const unsigned int *reg; | ||
257 | struct pci_controller *hose; | ||
258 | struct pci_dev *dev = NULL; | ||
259 | |||
260 | /* Make sure it's really a PCI device */ | ||
261 | hose = pci_find_hose_for_OF_device(node); | ||
262 | if (!hose || !hose->dn) | ||
263 | return -ENODEV; | ||
264 | if (!scan_OF_pci_childs(hose->dn, | ||
265 | find_OF_pci_device_filter, (void *)node)) | ||
266 | return -ENODEV; | ||
267 | reg = of_get_property(node, "reg", NULL); | ||
268 | if (!reg) | ||
269 | return -ENODEV; | ||
270 | *bus = (reg[0] >> 16) & 0xff; | ||
271 | *devfn = ((reg[0] >> 8) & 0xff); | ||
272 | |||
273 | /* OK, here we need a tweak. If we have already renumbered | ||
274 | * all busses, we can't rely on the OF bus number any more. | ||
275 | * The pci_to_OF_bus_map is not enough, as several PCI busses | ||
276 | * may match the same OF bus number. | ||
277 | */ | ||
278 | if (!pci_to_OF_bus_map) | ||
279 | return 0; | ||
280 | |||
281 | for_each_pci_dev(dev) | ||
282 | if (pci_to_OF_bus_map[dev->bus->number] == *bus && | ||
283 | dev->devfn == *devfn) { | ||
284 | *bus = dev->bus->number; | ||
285 | pci_dev_put(dev); | ||
286 | return 0; | ||
287 | } | ||
288 | |||
289 | return -ENODEV; | ||
290 | } | ||
291 | EXPORT_SYMBOL(pci_device_from_OF_node); | ||
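/*
 * The first "reg" cell read above is a standard OF PCI config address; the
 * decode is just a bit-field extraction (sketch of the phys.hi layout used):
 *
 *	bus   = (reg[0] >> 16) & 0xff;
 *	devfn = (reg[0] >>  8) & 0xff;
 */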
292 | |||
293 | /* We create the "pci-OF-bus-map" property now so it appears in the | ||
294 | * /proc device tree | ||
295 | */ | ||
296 | void __init | ||
297 | pci_create_OF_bus_map(void) | ||
298 | { | ||
299 | struct property *of_prop; | ||
300 | struct device_node *dn; | ||
301 | |||
302 | of_prop = (struct property *) alloc_bootmem(sizeof(struct property) + \ | ||
303 | 256); | ||
304 | if (!of_prop) | ||
305 | return; | ||
306 | dn = of_find_node_by_path("/"); | ||
307 | if (dn) { | ||
308 | memset(of_prop, -1, sizeof(struct property) + 256); | ||
309 | of_prop->name = "pci-OF-bus-map"; | ||
310 | of_prop->length = 256; | ||
311 | of_prop->value = &of_prop[1]; | ||
312 | prom_add_property(dn, of_prop); | ||
313 | of_node_put(dn); | ||
314 | } | ||
315 | } | ||
316 | |||
317 | static void __devinit pcibios_scan_phb(struct pci_controller *hose) | ||
318 | { | ||
319 | struct pci_bus *bus; | ||
320 | struct device_node *node = hose->dn; | ||
321 | unsigned long io_offset; | ||
322 | struct resource *res = &hose->io_resource; | ||
323 | |||
324 | pr_debug("PCI: Scanning PHB %s\n", | ||
325 | node ? node->full_name : "<NO NAME>"); | ||
326 | |||
327 | /* Create an empty bus for the toplevel */ | ||
328 | bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose); | ||
329 | if (bus == NULL) { | ||
330 | printk(KERN_ERR "Failed to create bus for PCI domain %04x\n", | ||
331 | hose->global_number); | ||
332 | return; | ||
333 | } | ||
334 | bus->secondary = hose->first_busno; | ||
335 | hose->bus = bus; | ||
336 | |||
337 | /* Fixup IO space offset */ | ||
338 | io_offset = (unsigned long)hose->io_base_virt - isa_io_base; | ||
339 | res->start = (res->start + io_offset) & 0xffffffffu; | ||
340 | res->end = (res->end + io_offset) & 0xffffffffu; | ||
341 | |||
342 | /* Wire up PHB bus resources */ | ||
343 | pcibios_setup_phb_resources(hose); | ||
344 | |||
345 | /* Scan children */ | ||
346 | hose->last_busno = bus->subordinate = pci_scan_child_bus(bus); | ||
347 | } | ||
348 | |||
349 | static int __init pcibios_init(void) | ||
350 | { | ||
351 | struct pci_controller *hose, *tmp; | ||
352 | int next_busno = 0; | ||
353 | |||
354 | printk(KERN_INFO "PCI: Probing PCI hardware\n"); | ||
355 | |||
356 | if (pci_flags & PCI_REASSIGN_ALL_BUS) { | ||
357 | printk(KERN_INFO "setting pci_assign_all_buses\n"); | ||
358 | pci_assign_all_buses = 1; | ||
359 | } | ||
360 | |||
361 | /* Scan all of the recorded PCI controllers. */ | ||
362 | list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { | ||
363 | if (pci_assign_all_buses) | ||
364 | hose->first_busno = next_busno; | ||
365 | hose->last_busno = 0xff; | ||
366 | pcibios_scan_phb(hose); | ||
367 | printk(KERN_INFO "calling pci_bus_add_devices()\n"); | ||
368 | pci_bus_add_devices(hose->bus); | ||
369 | if (pci_assign_all_buses || next_busno <= hose->last_busno) | ||
370 | next_busno = hose->last_busno + \ | ||
371 | pcibios_assign_bus_offset; | ||
372 | } | ||
373 | pci_bus_count = next_busno; | ||
374 | |||
375 | /* OpenFirmware based machines need a map of OF bus | ||
376 | * numbers vs. kernel bus numbers since we may have to | ||
377 | * remap them. | ||
378 | */ | ||
379 | if (pci_assign_all_buses) | ||
380 | pcibios_make_OF_bus_map(); | ||
381 | |||
382 | /* Call common code to handle resource allocation */ | ||
383 | pcibios_resource_survey(); | ||
384 | |||
385 | return 0; | ||
386 | } | ||
387 | |||
388 | subsys_initcall(pcibios_init); | ||
389 | |||
390 | static struct pci_controller* | ||
391 | pci_bus_to_hose(int bus) | ||
392 | { | ||
393 | struct pci_controller *hose, *tmp; | ||
394 | |||
395 | list_for_each_entry_safe(hose, tmp, &hose_list, list_node) | ||
396 | if (bus >= hose->first_busno && bus <= hose->last_busno) | ||
397 | return hose; | ||
398 | return NULL; | ||
399 | } | ||
400 | |||
401 | /* Provide information on locations of various I/O regions in physical | ||
402 | * memory. Do this on a per-card basis so that we choose the right | ||
403 | * root bridge. | ||
404 | * Note that the returned I/O or memory base is a physical address. | ||
405 | */ | ||
406 | |||
407 | long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn) | ||
408 | { | ||
409 | struct pci_controller *hose; | ||
410 | long result = -EOPNOTSUPP; | ||
411 | |||
412 | hose = pci_bus_to_hose(bus); | ||
413 | if (!hose) | ||
414 | return -ENODEV; | ||
415 | |||
416 | switch (which) { | ||
417 | case IOBASE_BRIDGE_NUMBER: | ||
418 | return (long)hose->first_busno; | ||
419 | case IOBASE_MEMORY: | ||
420 | return (long)hose->pci_mem_offset; | ||
421 | case IOBASE_IO: | ||
422 | return (long)hose->io_base_phys; | ||
423 | case IOBASE_ISA_IO: | ||
424 | return (long)isa_io_base; | ||
425 | case IOBASE_ISA_MEM: | ||
426 | return (long)isa_mem_base; | ||
427 | } | ||
428 | |||
429 | return result; | ||
430 | } | ||
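/*
 * Illustrative userspace use (sketch only, assuming the syscall is wired up
 * as __NR_pciconfig_iobase on this architecture):
 *
 *	long io_phys = syscall(__NR_pciconfig_iobase, IOBASE_IO, bus, devfn);
 *
 * returns the physical base of the hose's I/O window, or -ENODEV when no
 * host bridge covers the given bus number.
 */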