diff options
author | Paul Mackerras <paulus@samba.org> | 2005-11-14 01:30:17 -0500 |
---|---|---|
committer | Paul Mackerras <paulus@samba.org> | 2005-11-14 01:30:17 -0500 |
commit | 7568cb4ef6c507164b65b01f972a3bd026898ae1 (patch) | |
tree | ba608f4c84d8765d5a1491c345f6dc265b5ec4ea /arch/powerpc/kernel/pci_64.c | |
parent | c55377ee73f6efeb373ae06f6e918d87660b4852 (diff) |
powerpc: Move most remaining ppc64 files over to arch/powerpc
Also deletes files in arch/ppc64 that are no longer used now that
we don't compile with ARCH=ppc64 any more.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/kernel/pci_64.c')
-rw-r--r-- | arch/powerpc/kernel/pci_64.c | 1319 |
1 file changed, 1319 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c new file mode 100644 index 000000000000..3cef1b8f57f0 --- /dev/null +++ b/arch/powerpc/kernel/pci_64.c | |||
@@ -0,0 +1,1319 @@ | |||
1 | /* | ||
2 | * Port for PPC64 David Engebretsen, IBM Corp. | ||
3 | * Contains common pci routines for ppc64 platform, pSeries and iSeries brands. | ||
4 | * | ||
5 | * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM | ||
6 | * Rework, based on alpha PCI code. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * as published by the Free Software Foundation; either version | ||
11 | * 2 of the License, or (at your option) any later version. | ||
12 | */ | ||
13 | |||
14 | #undef DEBUG | ||
15 | |||
16 | #include <linux/config.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/pci.h> | ||
19 | #include <linux/string.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/bootmem.h> | ||
22 | #include <linux/mm.h> | ||
23 | #include <linux/list.h> | ||
24 | #include <linux/syscalls.h> | ||
25 | |||
26 | #include <asm/processor.h> | ||
27 | #include <asm/io.h> | ||
28 | #include <asm/prom.h> | ||
29 | #include <asm/pci-bridge.h> | ||
30 | #include <asm/byteorder.h> | ||
31 | #include <asm/irq.h> | ||
32 | #include <asm/machdep.h> | ||
33 | #include <asm/udbg.h> | ||
34 | #include <asm/ppc-pci.h> | ||
35 | |||
36 | #ifdef DEBUG | ||
37 | #define DBG(fmt...) udbg_printf(fmt) | ||
38 | #else | ||
39 | #define DBG(fmt...) | ||
40 | #endif | ||
41 | |||
/* When set, only claim the resources firmware already assigned;
 * never reassign them (see pcibios_init below). */
unsigned long pci_probe_only = 1;
/* When set, pcibios_assign_all_busses() tells the generic PCI code
 * to renumber all buses during the probe. */
unsigned long pci_assign_all_buses = 0;

/*
 * legal IO pages under MAX_ISA_PORT. This is to ensure we don't touch
 * devices we don't have access to.
 */
unsigned long io_page_mask;

EXPORT_SYMBOL(io_page_mask);

#ifdef CONFIG_PPC_MULTIPLATFORM
/* Forward declarations; definitions later in this file. */
static void fixup_resource(struct resource *res, struct pci_dev *dev);
static void do_bus_setup(struct pci_bus *bus);
#endif
57 | |||
/* Hook for the generic PCI code: nonzero means renumber all buses
 * during the probe (simply exposes pci_assign_all_buses above). */
unsigned int pcibios_assign_all_busses(void)
{
	return pci_assign_all_buses;
}
62 | |||
/* pci_io_base -- the base address from which io bars are offsets.
 * This is the lowest I/O base address (so bar values are always positive),
 * and it *must* be the start of ISA space if an ISA bus exists because
 * ISA drivers use hard coded offsets. If no ISA bus exists a dummy
 * page is mapped and isa_io_limit prevents access to it.
 */
unsigned long isa_io_base;	/* NULL if no ISA bus */
EXPORT_SYMBOL(isa_io_base);
unsigned long pci_io_base;
EXPORT_SYMBOL(pci_io_base);

/* iSeries platform PCI init; called from pcibios_init() below when
 * CONFIG_PPC_ISERIES is set. */
void iSeries_pcibios_init(void);

/* List of every PCI host bridge (struct pci_controller) in the
 * system; entries are added by pci_setup_pci_controller(). */
LIST_HEAD(hose_list);

/* DMA operations used by the PCI layer; filled in by platform code. */
struct dma_mapping_ops pci_dma_ops;
EXPORT_SYMBOL(pci_dma_ops);

int global_phb_number;		/* Global phb counter */

/* Cached ISA bridge dev. */
struct pci_dev *ppc64_isabridge_dev = NULL;
85 | |||
86 | static void fixup_broken_pcnet32(struct pci_dev* dev) | ||
87 | { | ||
88 | if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) { | ||
89 | dev->vendor = PCI_VENDOR_ID_AMD; | ||
90 | pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD); | ||
91 | } | ||
92 | } | ||
93 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32); | ||
94 | |||
95 | void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, | ||
96 | struct resource *res) | ||
97 | { | ||
98 | unsigned long offset = 0; | ||
99 | struct pci_controller *hose = pci_bus_to_host(dev->bus); | ||
100 | |||
101 | if (!hose) | ||
102 | return; | ||
103 | |||
104 | if (res->flags & IORESOURCE_IO) | ||
105 | offset = (unsigned long)hose->io_base_virt - pci_io_base; | ||
106 | |||
107 | if (res->flags & IORESOURCE_MEM) | ||
108 | offset = hose->pci_mem_offset; | ||
109 | |||
110 | region->start = res->start - offset; | ||
111 | region->end = res->end - offset; | ||
112 | } | ||
113 | |||
114 | void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, | ||
115 | struct pci_bus_region *region) | ||
116 | { | ||
117 | unsigned long offset = 0; | ||
118 | struct pci_controller *hose = pci_bus_to_host(dev->bus); | ||
119 | |||
120 | if (!hose) | ||
121 | return; | ||
122 | |||
123 | if (res->flags & IORESOURCE_IO) | ||
124 | offset = (unsigned long)hose->io_base_virt - pci_io_base; | ||
125 | |||
126 | if (res->flags & IORESOURCE_MEM) | ||
127 | offset = hose->pci_mem_offset; | ||
128 | |||
129 | res->start = region->start + offset; | ||
130 | res->end = region->end + offset; | ||
131 | } | ||
132 | |||
133 | #ifdef CONFIG_HOTPLUG | ||
134 | EXPORT_SYMBOL(pcibios_resource_to_bus); | ||
135 | EXPORT_SYMBOL(pcibios_bus_to_resource); | ||
136 | #endif | ||
137 | |||
138 | /* | ||
139 | * We need to avoid collisions with `mirrored' VGA ports | ||
140 | * and other strange ISA hardware, so we always want the | ||
141 | * addresses to be allocated in the 0x000-0x0ff region | ||
142 | * modulo 0x400. | ||
143 | * | ||
144 | * Why? Because some silly external IO cards only decode | ||
145 | * the low 10 bits of the IO address. The 0x00-0xff region | ||
146 | * is reserved for motherboard devices that decode all 16 | ||
147 | * bits, so it's ok to allocate at, say, 0x2800-0x28ff, | ||
148 | * but we want to try to avoid allocating at 0x2900-0x2bff | ||
 149 |  * which might be mirrored at 0x0100-0x03ff. |||
150 | */ | ||
151 | void pcibios_align_resource(void *data, struct resource *res, | ||
152 | unsigned long size, unsigned long align) | ||
153 | { | ||
154 | struct pci_dev *dev = data; | ||
155 | struct pci_controller *hose = pci_bus_to_host(dev->bus); | ||
156 | unsigned long start = res->start; | ||
157 | unsigned long alignto; | ||
158 | |||
159 | if (res->flags & IORESOURCE_IO) { | ||
160 | unsigned long offset = (unsigned long)hose->io_base_virt - | ||
161 | pci_io_base; | ||
162 | /* Make sure we start at our min on all hoses */ | ||
163 | if (start - offset < PCIBIOS_MIN_IO) | ||
164 | start = PCIBIOS_MIN_IO + offset; | ||
165 | |||
166 | /* | ||
167 | * Put everything into 0x00-0xff region modulo 0x400 | ||
168 | */ | ||
169 | if (start & 0x300) | ||
170 | start = (start + 0x3ff) & ~0x3ff; | ||
171 | |||
172 | } else if (res->flags & IORESOURCE_MEM) { | ||
173 | /* Make sure we start at our min on all hoses */ | ||
174 | if (start - hose->pci_mem_offset < PCIBIOS_MIN_MEM) | ||
175 | start = PCIBIOS_MIN_MEM + hose->pci_mem_offset; | ||
176 | |||
177 | /* Align to multiple of size of minimum base. */ | ||
178 | alignto = max(0x1000UL, align); | ||
179 | start = ALIGN(start, alignto); | ||
180 | } | ||
181 | |||
182 | res->start = start; | ||
183 | } | ||
184 | |||
185 | static DEFINE_SPINLOCK(hose_spinlock); | ||
186 | |||
187 | /* | ||
188 | * pci_controller(phb) initialized common variables. | ||
189 | */ | ||
190 | void __devinit pci_setup_pci_controller(struct pci_controller *hose) | ||
191 | { | ||
192 | memset(hose, 0, sizeof(struct pci_controller)); | ||
193 | |||
194 | spin_lock(&hose_spinlock); | ||
195 | hose->global_number = global_phb_number++; | ||
196 | list_add_tail(&hose->list_node, &hose_list); | ||
197 | spin_unlock(&hose_spinlock); | ||
198 | } | ||
199 | |||
/* Claim (insert into the kernel resource tree) every firmware-assigned
 * resource of every device on bus B, then recurse into its child buses.
 * Resources that are already claimed (r->parent set), have no start
 * address, or have no flags are skipped.
 */
static void __init pcibios_claim_one_bus(struct pci_bus *b)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &b->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			if (r->parent || !r->start || !r->flags)
				continue;
			pci_claim_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &b->children, node)
		pcibios_claim_one_bus(child_bus);
}
220 | |||
221 | #ifndef CONFIG_PPC_ISERIES | ||
222 | static void __init pcibios_claim_of_setup(void) | ||
223 | { | ||
224 | struct pci_bus *b; | ||
225 | |||
226 | list_for_each_entry(b, &pci_root_buses, node) | ||
227 | pcibios_claim_one_bus(b); | ||
228 | } | ||
229 | #endif | ||
230 | |||
231 | #ifdef CONFIG_PPC_MULTIPLATFORM | ||
232 | static u32 get_int_prop(struct device_node *np, const char *name, u32 def) | ||
233 | { | ||
234 | u32 *prop; | ||
235 | int len; | ||
236 | |||
237 | prop = (u32 *) get_property(np, name, &len); | ||
238 | if (prop && len >= 4) | ||
239 | return *prop; | ||
240 | return def; | ||
241 | } | ||
242 | |||
243 | static unsigned int pci_parse_of_flags(u32 addr0) | ||
244 | { | ||
245 | unsigned int flags = 0; | ||
246 | |||
247 | if (addr0 & 0x02000000) { | ||
248 | flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY; | ||
249 | flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64; | ||
250 | flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M; | ||
251 | if (addr0 & 0x40000000) | ||
252 | flags |= IORESOURCE_PREFETCH | ||
253 | | PCI_BASE_ADDRESS_MEM_PREFETCH; | ||
254 | } else if (addr0 & 0x01000000) | ||
255 | flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO; | ||
256 | return flags; | ||
257 | } | ||
258 | |||
259 | #define GET_64BIT(prop, i) ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1]) | ||
260 | |||
/* Fill in DEV's resources from NODE's "assigned-addresses" property.
 * Each entry is 5 cells (20 bytes): phys.hi, 64-bit address, 64-bit
 * size.  The low byte of phys.hi selects the config register (BAR or
 * ROM) and thus which resource slot to fill.
 */
static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
{
	u64 base, size;
	unsigned int flags;
	struct resource *res;
	u32 *addrs, i;
	int proplen;

	addrs = (u32 *) get_property(node, "assigned-addresses", &proplen);
	if (!addrs)
		return;
	for (; proplen >= 20; proplen -= 20, addrs += 5) {
		flags = pci_parse_of_flags(addrs[0]);
		if (!flags)
			continue;	/* neither memory nor I/O space */
		base = GET_64BIT(addrs, 1);
		size = GET_64BIT(addrs, 3);
		if (!size)
			continue;	/* empty range */
		/* Config register number selects the resource slot */
		i = addrs[0] & 0xff;
		if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
		} else if (i == dev->rom_base_reg) {
			res = &dev->resource[PCI_ROM_RESOURCE];
			flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
		} else {
			printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
			continue;
		}
		res->start = base;
		res->end = base + size - 1;
		res->flags = flags;
		res->name = pci_name(dev);
		/* apply the hose's address translation */
		fixup_resource(res, dev);
	}
}
297 | |||
/* Build a struct pci_dev for the device-tree node NODE at slot/function
 * DEVFN on BUS, entirely from device-tree properties (no config-space
 * probing except writing the interrupt line).  Registers the device
 * with the PCI core before returning it, or returns NULL on allocation
 * failure.
 */
struct pci_dev *of_create_pci_dev(struct device_node *node,
				 struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	const char *type;

	dev = kmalloc(sizeof(struct pci_dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	type = get_property(node, "device_type", NULL);
	if (type == NULL)
		type = "";

	memset(dev, 0, sizeof(struct pci_dev));
	dev->bus = bus;
	dev->sysdata = node;
	dev->dev.parent = bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->devfn = devfn;
	dev->multifunction = 0;		/* maybe a lie? */

	/* Identity comes from device-tree properties, with fallbacks */
	dev->vendor = get_int_prop(node, "vendor-id", 0xffff);
	dev->device = get_int_prop(node, "device-id", 0xffff);
	dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0);
	dev->subsystem_device = get_int_prop(node, "subsystem-id", 0);

	dev->cfg_size = 256; /*pci_cfg_space_size(dev);*/

	sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
		dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
	dev->class = get_int_prop(node, "class-code", 0);

	dev->current_state = 4;		/* unknown power state */

	/* Header type (and hence ROM register / IRQ handling) follows
	 * the node's device_type string. */
	if (!strcmp(type, "pci")) {
		/* a PCI-PCI bridge */
		dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
		dev->rom_base_reg = PCI_ROM_ADDRESS1;
	} else if (!strcmp(type, "cardbus")) {
		dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
	} else {
		dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
		dev->rom_base_reg = PCI_ROM_ADDRESS;
		dev->irq = NO_IRQ;
		if (node->n_intrs > 0) {
			dev->irq = node->intrs[0].line;
			pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
					      dev->irq);
		}
	}

	/* Fill in the BAR/ROM resources from "assigned-addresses" */
	pci_parse_of_addrs(node, dev);

	/* Hand the fully-constructed device to the PCI core */
	pci_device_add(dev, bus);

	/* XXX pci_scan_msi_device(dev); */

	return dev;
}
EXPORT_SYMBOL(of_create_pci_dev);
358 | |||
359 | void __devinit of_scan_bus(struct device_node *node, | ||
360 | struct pci_bus *bus) | ||
361 | { | ||
362 | struct device_node *child = NULL; | ||
363 | u32 *reg; | ||
364 | int reglen, devfn; | ||
365 | struct pci_dev *dev; | ||
366 | |||
367 | while ((child = of_get_next_child(node, child)) != NULL) { | ||
368 | reg = (u32 *) get_property(child, "reg", ®len); | ||
369 | if (reg == NULL || reglen < 20) | ||
370 | continue; | ||
371 | devfn = (reg[0] >> 8) & 0xff; | ||
372 | /* create a new pci_dev for this device */ | ||
373 | dev = of_create_pci_dev(child, bus, devfn); | ||
374 | if (!dev) | ||
375 | continue; | ||
376 | if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || | ||
377 | dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) | ||
378 | of_scan_pci_bridge(child, dev); | ||
379 | } | ||
380 | |||
381 | do_bus_setup(bus); | ||
382 | } | ||
383 | EXPORT_SYMBOL(of_scan_bus); | ||
384 | |||
/* Create and populate the secondary bus behind PCI-PCI bridge DEV,
 * described by device-tree node NODE.  Parses "bus-range" for the
 * bus numbers and "ranges" for the bridge windows, then scans the new
 * bus either from the device tree or by normal config-space probing,
 * as the platform's pci_probe_mode hook dictates.
 */
void __devinit of_scan_pci_bridge(struct device_node *node,
				 struct pci_dev *dev)
{
	struct pci_bus *bus;
	u32 *busrange, *ranges;
	int len, i, mode;
	struct resource *res;
	unsigned int flags;
	u64 size;

	/* parse bus-range property */
	busrange = (u32 *) get_property(node, "bus-range", &len);
	if (busrange == NULL || len != 8) {
		printk(KERN_ERR "Can't get bus-range for PCI-PCI bridge %s\n",
		       node->full_name);
		return;
	}
	ranges = (u32 *) get_property(node, "ranges", &len);
	if (ranges == NULL) {
		printk(KERN_ERR "Can't get ranges for PCI-PCI bridge %s\n",
		       node->full_name);
		return;
	}

	bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
	if (!bus) {
		printk(KERN_ERR "Failed to create pci bus for %s\n",
		       node->full_name);
		return;
	}

	bus->primary = dev->bus->number;
	bus->subordinate = busrange[1];
	bus->bridge_ctl = 0;
	bus->sysdata = node;

	/* parse ranges property */
	/* PCI #address-cells == 3 and #size-cells == 2 always */
	/* Point the bus resources at the bridge's window resources,
	 * clearing the flags so unused windows stay inactive. */
	res = &dev->resource[PCI_BRIDGE_RESOURCES];
	for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
		res->flags = 0;
		bus->resource[i] = res;
		++res;
	}
	/* Slot 0 is reserved for the (single) I/O window; memory
	 * windows fill slots 1 and up. */
	i = 1;
	for (; len >= 32; len -= 32, ranges += 8) {
		flags = pci_parse_of_flags(ranges[0]);
		size = GET_64BIT(ranges, 6);
		if (flags == 0 || size == 0)
			continue;
		if (flags & IORESOURCE_IO) {
			res = bus->resource[0];
			if (res->flags) {
				printk(KERN_ERR "PCI: ignoring extra I/O range"
				       " for bridge %s\n", node->full_name);
				continue;
			}
		} else {
			if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
				printk(KERN_ERR "PCI: too many memory ranges"
				       " for bridge %s\n", node->full_name);
				continue;
			}
			res = bus->resource[i];
			++i;
		}
		res->start = GET_64BIT(ranges, 1);
		res->end = res->start + size - 1;
		res->flags = flags;
		/* apply the hose's address translation */
		fixup_resource(res, dev);
	}
	sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
		bus->number);

	/* Let the platform choose how to scan the new bus */
	mode = PCI_PROBE_NORMAL;
	if (ppc_md.pci_probe_mode)
		mode = ppc_md.pci_probe_mode(bus);
	if (mode == PCI_PROBE_DEVTREE)
		of_scan_bus(node, bus);
	else if (mode == PCI_PROBE_NORMAL)
		pci_scan_child_bus(bus);
}
EXPORT_SYMBOL(of_scan_pci_bridge);
468 | #endif /* CONFIG_PPC_MULTIPLATFORM */ | ||
469 | |||
/* Create and scan the root PCI bus for host bridge HOSE: wire the
 * hose's I/O and memory resources into the new bus, request them from
 * the global resource trees, then scan the bus via the device tree or
 * config space as the platform dictates, and register all devices.
 */
void __devinit scan_phb(struct pci_controller *hose)
{
	struct pci_bus *bus;
	struct device_node *node = hose->arch_data;
	int i, mode;
	struct resource *res;

	bus = pci_create_bus(NULL, hose->first_busno, hose->ops, node);
	if (bus == NULL) {
		printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
		       hose->global_number);
		return;
	}
	bus->secondary = hose->first_busno;
	hose->bus = bus;

	/* Resource 0 is the hose's I/O window */
	bus->resource[0] = res = &hose->io_resource;
	if (res->flags && request_resource(&ioport_resource, res))
		printk(KERN_ERR "Failed to request PCI IO region "
		       "on PCI domain %04x\n", hose->global_number);

	/* Resources 1..3 are the hose's memory windows */
	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		bus->resource[i+1] = res;
		if (res->flags && request_resource(&iomem_resource, res))
			printk(KERN_ERR "Failed to request PCI memory region "
			       "on PCI domain %04x\n", hose->global_number);
	}

	mode = PCI_PROBE_NORMAL;
#ifdef CONFIG_PPC_MULTIPLATFORM
	if (ppc_md.pci_probe_mode)
		mode = ppc_md.pci_probe_mode(bus);
	if (mode == PCI_PROBE_DEVTREE) {
		bus->subordinate = hose->last_busno;
		of_scan_bus(node, bus);
	}
#endif /* CONFIG_PPC_MULTIPLATFORM */
	if (mode == PCI_PROBE_NORMAL)
		hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
	pci_bus_add_devices(bus);
}
512 | |||
/* Arch PCI initialization, run as a subsys initcall: scans every
 * recorded host bridge, claims or assigns resources, runs the machine
 * fixup hook, caches the ISA bridge and maps PHB I/O space.
 */
static int __init pcibios_init(void)
{
	struct pci_controller *hose, *tmp;

	/* For now, override phys_mem_access_prot. If we need it,
	 * later, we may move that initialization to each ppc_md
	 */
	ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;

#ifdef CONFIG_PPC_ISERIES
	iSeries_pcibios_init();
#endif

	printk("PCI: Probing PCI hardware\n");

	/* Scan all of the recorded PCI controllers. */
	/* _safe variant: scan_phb may modify hose_list */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
		scan_phb(hose);

#ifndef CONFIG_PPC_ISERIES
	if (pci_probe_only)
		pcibios_claim_of_setup();
	else
		/* FIXME: `else' will be removed when
		   pci_assign_unassigned_resources() is able to work
		   correctly with [partially] allocated PCI tree. */
		pci_assign_unassigned_resources();
#endif /* !CONFIG_PPC_ISERIES */

	/* Call machine dependent final fixup */
	if (ppc_md.pcibios_fixup)
		ppc_md.pcibios_fixup();

	/* Cache the location of the ISA bridge (if we have one) */
	ppc64_isabridge_dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (ppc64_isabridge_dev != NULL)
		printk("ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));

#ifdef CONFIG_PPC_MULTIPLATFORM
	/* map in PCI I/O space */
	phbs_remap_io();
#endif

	printk("PCI: Probing PCI hardware done\n");

	return 0;
}

subsys_initcall(pcibios_init);
562 | |||
/* Handle "pci=" kernel command-line options.  No options are consumed
 * here; the whole string is handed back to the generic PCI code. */
char __init *pcibios_setup(char *str)
{
	return str;
}
567 | |||
568 | int pcibios_enable_device(struct pci_dev *dev, int mask) | ||
569 | { | ||
570 | u16 cmd, oldcmd; | ||
571 | int i; | ||
572 | |||
573 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | ||
574 | oldcmd = cmd; | ||
575 | |||
576 | for (i = 0; i < PCI_NUM_RESOURCES; i++) { | ||
577 | struct resource *res = &dev->resource[i]; | ||
578 | |||
579 | /* Only set up the requested stuff */ | ||
580 | if (!(mask & (1<<i))) | ||
581 | continue; | ||
582 | |||
583 | if (res->flags & IORESOURCE_IO) | ||
584 | cmd |= PCI_COMMAND_IO; | ||
585 | if (res->flags & IORESOURCE_MEM) | ||
586 | cmd |= PCI_COMMAND_MEMORY; | ||
587 | } | ||
588 | |||
589 | if (cmd != oldcmd) { | ||
590 | printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n", | ||
591 | pci_name(dev), cmd); | ||
592 | /* Enable the appropriate bits in the PCI command register. */ | ||
593 | pci_write_config_word(dev, PCI_COMMAND, cmd); | ||
594 | } | ||
595 | return 0; | ||
596 | } | ||
597 | |||
/*
 * Return the domain number for this bus.
 */
int pci_domain_nr(struct pci_bus *bus)
{
#ifdef CONFIG_PPC_ISERIES
	/* iSeries has a single PCI domain */
	return 0;
#else
	struct pci_controller *hose = pci_bus_to_host(bus);

	/* One domain per host bridge; number assigned at PHB setup */
	return hose->global_number;
#endif
}

EXPORT_SYMBOL(pci_domain_nr);
613 | |||
/* Decide whether to display the domain number in /proc */
int pci_proc_domain(struct pci_bus *bus)
{
#ifdef CONFIG_PPC_ISERIES
	return 0;
#else
	struct pci_controller *hose = pci_bus_to_host(bus);
	/* Nonzero buid => show the domain.  NOTE(review): this returns
	 * the buid itself truncated to int, not a boolean — presumably
	 * callers only test for zero/nonzero; verify. */
	return hose->buid;
#endif
}
624 | |||
625 | /* | ||
626 | * Platform support for /proc/bus/pci/X/Y mmap()s, | ||
627 | * modelled on the sparc64 implementation by Dave Miller. | ||
628 | * -- paulus. | ||
629 | */ | ||
630 | |||
631 | /* | ||
632 | * Adjust vm_pgoff of VMA such that it is the physical page offset | ||
633 | * corresponding to the 32-bit pci bus offset for DEV requested by the user. | ||
634 | * | ||
635 | * Basically, the user finds the base address for his device which he wishes | ||
636 | * to mmap. They read the 32-bit value from the config space base register, | ||
637 | * add whatever PAGE_SIZE multiple offset they wish, and feed this into the | ||
638 | * offset parameter of mmap on /proc/bus/pci/XXX for that device. | ||
639 | * | ||
640 | * Returns negative error code on failure, zero on success. | ||
641 | */ | ||
/* Validate and translate the user-supplied mmap offset (*OFFSET, a
 * 32-bit bus address) for DEV.  On success, *OFFSET is rewritten to
 * the CPU physical address and the matching resource is returned;
 * returns NULL if the offset falls in none of the device's resources
 * of the requested (I/O vs memory) type.
 */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       unsigned long *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long io_offset = 0;
	int i, res_bit;

	if (hose == 0)
		return NULL;		/* should never happen */

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
		*offset += hose->pci_mem_offset;
		res_bit = IORESOURCE_MEM;
	} else {
		io_offset = (unsigned long)hose->io_base_virt - pci_io_base;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;

		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - io_offset;
		return rp;
	}

	return NULL;
}
691 | |||
692 | /* | ||
693 | * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci | ||
694 | * device mapping. | ||
695 | */ | ||
696 | static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, | ||
697 | pgprot_t protection, | ||
698 | enum pci_mmap_state mmap_state, | ||
699 | int write_combine) | ||
700 | { | ||
701 | unsigned long prot = pgprot_val(protection); | ||
702 | |||
703 | /* Write combine is always 0 on non-memory space mappings. On | ||
704 | * memory space, if the user didn't pass 1, we check for a | ||
705 | * "prefetchable" resource. This is a bit hackish, but we use | ||
706 | * this to workaround the inability of /sysfs to provide a write | ||
707 | * combine bit | ||
708 | */ | ||
709 | if (mmap_state != pci_mmap_mem) | ||
710 | write_combine = 0; | ||
711 | else if (write_combine == 0) { | ||
712 | if (rp->flags & IORESOURCE_PREFETCH) | ||
713 | write_combine = 1; | ||
714 | } | ||
715 | |||
716 | /* XXX would be nice to have a way to ask for write-through */ | ||
717 | prot |= _PAGE_NO_CACHE; | ||
718 | if (write_combine) | ||
719 | prot &= ~_PAGE_GUARDED; | ||
720 | else | ||
721 | prot |= _PAGE_GUARDED; | ||
722 | |||
723 | printk("PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start, | ||
724 | prot); | ||
725 | |||
726 | return __pgprot(prot); | ||
727 | } | ||
728 | |||
729 | /* | ||
730 | * This one is used by /dev/mem and fbdev who have no clue about the | ||
731 | * PCI device, it tries to find the PCI device first and calls the | ||
732 | * above routine | ||
733 | */ | ||
/* Page protection for a physical-address mapping (used by /dev/mem and
 * fbdev via ppc_md.phys_mem_access_prot).  RAM pages keep the caller's
 * protection; everything else becomes uncached+guarded, except that
 * the guard bit is dropped if the page lies inside a prefetchable
 * memory resource of some PCI device.
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				 unsigned long pfn,
				 unsigned long size,
				 pgprot_t protection)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	unsigned long prot = pgprot_val(protection);
	unsigned long offset = pfn << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return __pgprot(prot);

	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;

	/* Search every PCI device for a memory resource containing
	 * the target page. */
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot &= ~_PAGE_GUARDED;
		/* breaking out of for_each_pci_dev left us holding a
		 * reference on pdev; drop it */
		pci_dev_put(pdev);
	}

	DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);

	return __pgprot(prot);
}
778 | |||
779 | |||
780 | /* | ||
781 | * Perform the actual remap of the pages for a PCI device mapping, as | ||
782 | * appropriate for this architecture. The region in the process to map | ||
783 | * is described by vm_start and vm_end members of VMA, the base physical | ||
784 | * address is found in vm_pgoff. | ||
785 | * The pci device structure is provided so that architectures may make mapping | ||
786 | * decisions on a per-device or per-bus basis. | ||
787 | * | ||
788 | * Returns a negative error code on failure, zero on success. | ||
789 | */ | ||
/* Perform the actual remap of the pages for a PCI device mapping, as
 * appropriate for this architecture.  The region to map is described
 * by vm_start/vm_end of VMA; the base physical address is in
 * vm_pgoff.  Returns a negative error code on failure, zero on
 * success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state,
			int write_combine)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct resource *rp;
	int ret;

	/* Validate the offset against DEV's resources and translate it
	 * to a CPU physical address */
	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
	if (rp == NULL)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
						  vma->vm_page_prot,
						  mmap_state, write_combine);

	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);

	return ret;
}
813 | |||
814 | #ifdef CONFIG_PPC_MULTIPLATFORM | ||
815 | static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf) | ||
816 | { | ||
817 | struct pci_dev *pdev; | ||
818 | struct device_node *np; | ||
819 | |||
820 | pdev = to_pci_dev (dev); | ||
821 | np = pci_device_to_OF_node(pdev); | ||
822 | if (np == NULL || np->full_name == NULL) | ||
823 | return 0; | ||
824 | return sprintf(buf, "%s", np->full_name); | ||
825 | } | ||
826 | static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL); | ||
827 | #endif /* CONFIG_PPC_MULTIPLATFORM */ | ||
828 | |||
/* Add platform-specific sysfs attributes for PDEV; on multiplatform
 * kernels this is the "devspec" (device-tree path) attribute. */
void pcibios_add_platform_entries(struct pci_dev *pdev)
{
#ifdef CONFIG_PPC_MULTIPLATFORM
	device_create_file(&pdev->dev, &dev_attr_devspec);
#endif /* CONFIG_PPC_MULTIPLATFORM */
}
835 | |||
836 | #ifdef CONFIG_PPC_MULTIPLATFORM | ||
837 | |||
838 | #define ISA_SPACE_MASK 0x1 | ||
839 | #define ISA_SPACE_IO 0x1 | ||
840 | |||
841 | static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node, | ||
842 | unsigned long phb_io_base_phys, | ||
843 | void __iomem * phb_io_base_virt) | ||
844 | { | ||
845 | struct isa_range *range; | ||
846 | unsigned long pci_addr; | ||
847 | unsigned int isa_addr; | ||
848 | unsigned int size; | ||
849 | int rlen = 0; | ||
850 | |||
851 | range = (struct isa_range *) get_property(isa_node, "ranges", &rlen); | ||
852 | if (range == NULL || (rlen < sizeof(struct isa_range))) { | ||
853 | printk(KERN_ERR "no ISA ranges or unexpected isa range size," | ||
854 | "mapping 64k\n"); | ||
855 | __ioremap_explicit(phb_io_base_phys, | ||
856 | (unsigned long)phb_io_base_virt, | ||
857 | 0x10000, _PAGE_NO_CACHE | _PAGE_GUARDED); | ||
858 | return; | ||
859 | } | ||
860 | |||
861 | /* From "ISA Binding to 1275" | ||
862 | * The ranges property is laid out as an array of elements, | ||
863 | * each of which comprises: | ||
864 | * cells 0 - 1: an ISA address | ||
865 | * cells 2 - 4: a PCI address | ||
866 | * (size depending on dev->n_addr_cells) | ||
867 | * cell 5: the size of the range | ||
868 | */ | ||
869 | if ((range->isa_addr.a_hi && ISA_SPACE_MASK) == ISA_SPACE_IO) { | ||
870 | isa_addr = range->isa_addr.a_lo; | ||
871 | pci_addr = (unsigned long) range->pci_addr.a_mid << 32 | | ||
872 | range->pci_addr.a_lo; | ||
873 | |||
874 | /* Assume these are both zero */ | ||
875 | if ((pci_addr != 0) || (isa_addr != 0)) { | ||
876 | printk(KERN_ERR "unexpected isa to pci mapping: %s\n", | ||
877 | __FUNCTION__); | ||
878 | return; | ||
879 | } | ||
880 | |||
881 | size = PAGE_ALIGN(range->size); | ||
882 | |||
883 | __ioremap_explicit(phb_io_base_phys, | ||
884 | (unsigned long) phb_io_base_virt, | ||
885 | size, _PAGE_NO_CACHE | _PAGE_GUARDED); | ||
886 | } | ||
887 | } | ||
888 | |||
/*
 * Parse the "ranges" property of a PCI host bridge node and fill in the
 * host controller's IO and memory resources accordingly.
 *
 * @hose: controller to populate (io_resource, mem_resources[0..2],
 *        io_base_phys, pci_io_size, pci_mem_offset)
 * @dev:  the bridge's device-tree node
 * @prim: primary-bridge flag (currently unused in this function body)
 */
void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
					    struct device_node *dev, int prim)
{
	unsigned int *ranges, pci_space;
	unsigned long size;
	int rlen = 0;
	int memno = 0;
	struct resource *res;
	int np, na = prom_n_addr_cells(dev);
	unsigned long pci_addr, cpu_phys_addr;

	/* Each ranges element: 3 PCI address cells + na CPU address
	 * cells + 2 size cells */
	np = na + 5;

	/* From "PCI Binding to 1275"
	 * The ranges property is laid out as an array of elements,
	 * each of which comprises:
	 *   cells 0 - 2:	a PCI address
	 *   cells 3 or 3+4:	a CPU physical address
	 *			(size depending on dev->n_addr_cells)
	 *   cells 4+5 or 5+6:	the size of the range
	 */
	rlen = 0;
	hose->io_base_phys = 0;
	ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
	/* Consume one element per iteration; rlen tracks remaining bytes */
	while ((rlen -= np * sizeof(unsigned int)) >= 0) {
		res = NULL;
		pci_space = ranges[0];
		pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];

		cpu_phys_addr = ranges[3];
		if (na >= 2)
			cpu_phys_addr = (cpu_phys_addr << 32) | ranges[4];

		size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
		ranges += np;
		if (size == 0)
			continue;

		/* Now consume following elements while they are contiguous */
		while (rlen >= np * sizeof(unsigned int)) {
			unsigned long addr, phys;

			if (ranges[0] != pci_space)
				break;
			addr = ((unsigned long)ranges[1] << 32) | ranges[2];
			phys = ranges[3];
			if (na >= 2)
				phys = (phys << 32) | ranges[4];
			/* Merge only if both PCI and CPU sides continue
			 * exactly where the previous element ended */
			if (addr != pci_addr + size ||
			    phys != cpu_phys_addr + size)
				break;

			size += ((unsigned long)ranges[na+3] << 32)
				| ranges[na+4];
			ranges += np;
			rlen -= np * sizeof(unsigned int);
		}

		/* Bits 24-25 of the first PCI address cell select the
		 * address space: 1 = IO, 2 = 32-bit memory */
		switch ((pci_space >> 24) & 0x3) {
		case 1:		/* I/O space */
			hose->io_base_phys = cpu_phys_addr;
			hose->pci_io_size = size;

			res = &hose->io_resource;
			res->flags = IORESOURCE_IO;
			/* IO resource starts at the PCI (bus) address;
			 * it is rebased to virtual in pci_setup_phb_io* */
			res->start = pci_addr;
			DBG("phb%d: IO 0x%lx -> 0x%lx\n", hose->global_number,
				    res->start, res->start + size - 1);
			break;
		case 2:		/* memory space */
			/* Use the first free slot of the three mem windows */
			memno = 0;
			while (memno < 3 && hose->mem_resources[memno].flags)
				++memno;

			/* The first memory range defines the bus->CPU offset */
			if (memno == 0)
				hose->pci_mem_offset = cpu_phys_addr - pci_addr;
			if (memno < 3) {
				res = &hose->mem_resources[memno];
				res->flags = IORESOURCE_MEM;
				res->start = cpu_phys_addr;
				DBG("phb%d: MEM 0x%lx -> 0x%lx\n", hose->global_number,
					    res->start, res->start + size - 1);
			}
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->end = res->start + size - 1;
			res->parent = NULL;
			res->sibling = NULL;
			res->child = NULL;
		}
	}
}
983 | |||
/*
 * Boot-time setup of a PHB's IO space: carve a slice out of the
 * reserved PHB IO virtual area and rebase the hose's IO resource
 * from PCI addresses to kernel virtual addresses.
 *
 * For the primary bridge this also anchors the global pci_io_base
 * and, if an ISA bridge exists, maps its legacy IO window and opens
 * the IO page mask.
 */
void __init pci_setup_phb_io(struct pci_controller *hose, int primary)
{
	unsigned long size = hose->pci_io_size;
	unsigned long io_virt_offset;
	struct resource *res;
	struct device_node *isa_dn;

	hose->io_base_virt = reserve_phb_iospace(size);
	DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n",
		hose->global_number, hose->io_base_phys,
		(unsigned long) hose->io_base_virt);

	if (primary) {
		/* Must be set before the offset computation below */
		pci_io_base = (unsigned long)hose->io_base_virt;
		isa_dn = of_find_node_by_type(NULL, "isa");
		if (isa_dn) {
			isa_io_base = pci_io_base;
			pci_process_ISA_OF_ranges(isa_dn, hose->io_base_phys,
						hose->io_base_virt);
			of_node_put(isa_dn);
			/* Allow all IO */
			io_page_mask = -1;
		}
	}

	/* Shift the IO resource from bus-relative to virtual addresses */
	io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base;
	res = &hose->io_resource;
	res->start += io_virt_offset;
	res->end += io_virt_offset;
}
1014 | |||
1015 | void __devinit pci_setup_phb_io_dynamic(struct pci_controller *hose, | ||
1016 | int primary) | ||
1017 | { | ||
1018 | unsigned long size = hose->pci_io_size; | ||
1019 | unsigned long io_virt_offset; | ||
1020 | struct resource *res; | ||
1021 | |||
1022 | hose->io_base_virt = __ioremap(hose->io_base_phys, size, | ||
1023 | _PAGE_NO_CACHE | _PAGE_GUARDED); | ||
1024 | DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n", | ||
1025 | hose->global_number, hose->io_base_phys, | ||
1026 | (unsigned long) hose->io_base_virt); | ||
1027 | |||
1028 | if (primary) | ||
1029 | pci_io_base = (unsigned long)hose->io_base_virt; | ||
1030 | |||
1031 | io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base; | ||
1032 | res = &hose->io_resource; | ||
1033 | res->start += io_virt_offset; | ||
1034 | res->end += io_virt_offset; | ||
1035 | } | ||
1036 | |||
1037 | |||
1038 | static int get_bus_io_range(struct pci_bus *bus, unsigned long *start_phys, | ||
1039 | unsigned long *start_virt, unsigned long *size) | ||
1040 | { | ||
1041 | struct pci_controller *hose = pci_bus_to_host(bus); | ||
1042 | struct pci_bus_region region; | ||
1043 | struct resource *res; | ||
1044 | |||
1045 | if (bus->self) { | ||
1046 | res = bus->resource[0]; | ||
1047 | pcibios_resource_to_bus(bus->self, ®ion, res); | ||
1048 | *start_phys = hose->io_base_phys + region.start; | ||
1049 | *start_virt = (unsigned long) hose->io_base_virt + | ||
1050 | region.start; | ||
1051 | if (region.end > region.start) | ||
1052 | *size = region.end - region.start + 1; | ||
1053 | else { | ||
1054 | printk("%s(): unexpected region 0x%lx->0x%lx\n", | ||
1055 | __FUNCTION__, region.start, region.end); | ||
1056 | return 1; | ||
1057 | } | ||
1058 | |||
1059 | } else { | ||
1060 | /* Root Bus */ | ||
1061 | res = &hose->io_resource; | ||
1062 | *start_phys = hose->io_base_phys; | ||
1063 | *start_virt = (unsigned long) hose->io_base_virt; | ||
1064 | if (res->end > res->start) | ||
1065 | *size = res->end - res->start + 1; | ||
1066 | else { | ||
1067 | printk("%s(): unexpected region 0x%lx->0x%lx\n", | ||
1068 | __FUNCTION__, res->start, res->end); | ||
1069 | return 1; | ||
1070 | } | ||
1071 | } | ||
1072 | |||
1073 | return 0; | ||
1074 | } | ||
1075 | |||
1076 | int unmap_bus_range(struct pci_bus *bus) | ||
1077 | { | ||
1078 | unsigned long start_phys; | ||
1079 | unsigned long start_virt; | ||
1080 | unsigned long size; | ||
1081 | |||
1082 | if (!bus) { | ||
1083 | printk(KERN_ERR "%s() expected bus\n", __FUNCTION__); | ||
1084 | return 1; | ||
1085 | } | ||
1086 | |||
1087 | if (get_bus_io_range(bus, &start_phys, &start_virt, &size)) | ||
1088 | return 1; | ||
1089 | if (iounmap_explicit((void __iomem *) start_virt, size)) | ||
1090 | return 1; | ||
1091 | |||
1092 | return 0; | ||
1093 | } | ||
1094 | EXPORT_SYMBOL(unmap_bus_range); | ||
1095 | |||
1096 | int remap_bus_range(struct pci_bus *bus) | ||
1097 | { | ||
1098 | unsigned long start_phys; | ||
1099 | unsigned long start_virt; | ||
1100 | unsigned long size; | ||
1101 | |||
1102 | if (!bus) { | ||
1103 | printk(KERN_ERR "%s() expected bus\n", __FUNCTION__); | ||
1104 | return 1; | ||
1105 | } | ||
1106 | |||
1107 | |||
1108 | if (get_bus_io_range(bus, &start_phys, &start_virt, &size)) | ||
1109 | return 1; | ||
1110 | printk("mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt, size); | ||
1111 | if (__ioremap_explicit(start_phys, start_virt, size, | ||
1112 | _PAGE_NO_CACHE | _PAGE_GUARDED)) | ||
1113 | return 1; | ||
1114 | |||
1115 | return 0; | ||
1116 | } | ||
1117 | EXPORT_SYMBOL(remap_bus_range); | ||
1118 | |||
1119 | void phbs_remap_io(void) | ||
1120 | { | ||
1121 | struct pci_controller *hose, *tmp; | ||
1122 | |||
1123 | list_for_each_entry_safe(hose, tmp, &hose_list, list_node) | ||
1124 | remap_bus_range(hose->bus); | ||
1125 | } | ||
1126 | |||
1127 | /* | ||
1128 | * ppc64 can have multifunction devices that do not respond to function 0. | ||
1129 | * In this case we must scan all functions. | ||
1130 | * XXX this can go now, we use the OF device tree in all the | ||
1131 | * cases that caused problems. -- paulus | ||
1132 | */ | ||
/* Returning 0 tells the generic scan code that function 0 probing is
 * sufficient; see the comment above — the OF device tree now covers the
 * multifunction oddballs this hook once existed for. */
int pcibios_scan_all_fns(struct pci_bus *bus, int devfn)
{
	return 0;
}
1137 | |||
/*
 * Translate one device resource from bus addresses to CPU addresses:
 * IO resources are shifted by the hose's virtual IO offset, memory
 * resources by the hose's pci_mem_offset.  IO resources falling in the
 * legacy ISA port range additionally open bits in io_page_mask.
 */
static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long start, end, mask, offset;

	if (res->flags & IORESOURCE_IO) {
		offset = (unsigned long)hose->io_base_virt - pci_io_base;

		/* Note: these also update res->start/res->end in place */
		start = res->start += offset;
		end = res->end += offset;

		/* Need to allow IO access to pages that are in the
		   ISA range */
		if (start < MAX_ISA_PORT) {
			if (end > MAX_ISA_PORT)
				end = MAX_ISA_PORT;

			start >>= PAGE_SHIFT;
			end >>= PAGE_SHIFT;

			/* get the range of pages for the map */
			/* Builds a mask with bits [start..end] set.
			 * NOTE(review): the shifts use a plain int `1`;
			 * this assumes MAX_ISA_PORT >> PAGE_SHIFT stays
			 * below the int bit width — confirm against the
			 * MAX_ISA_PORT definition. */
			mask = ((1 << (end+1)) - 1) ^ ((1 << start) - 1);
			io_page_mask |= mask;
		}
	} else if (res->flags & IORESOURCE_MEM) {
		res->start += hose->pci_mem_offset;
		res->end += hose->pci_mem_offset;
	}
}
1167 | |||
1168 | void __devinit pcibios_fixup_device_resources(struct pci_dev *dev, | ||
1169 | struct pci_bus *bus) | ||
1170 | { | ||
1171 | /* Update device resources. */ | ||
1172 | int i; | ||
1173 | |||
1174 | for (i = 0; i < PCI_NUM_RESOURCES; i++) | ||
1175 | if (dev->resource[i].flags) | ||
1176 | fixup_resource(&dev->resource[i], dev); | ||
1177 | } | ||
1178 | EXPORT_SYMBOL(pcibios_fixup_device_resources); | ||
1179 | |||
1180 | static void __devinit do_bus_setup(struct pci_bus *bus) | ||
1181 | { | ||
1182 | struct pci_dev *dev; | ||
1183 | |||
1184 | ppc_md.iommu_bus_setup(bus); | ||
1185 | |||
1186 | list_for_each_entry(dev, &bus->devices, bus_list) | ||
1187 | ppc_md.iommu_dev_setup(dev); | ||
1188 | |||
1189 | if (ppc_md.irq_bus_setup) | ||
1190 | ppc_md.irq_bus_setup(bus); | ||
1191 | } | ||
1192 | |||
/*
 * Per-bus fixup called by the generic PCI scan code.  In pci_probe_only
 * mode (firmware-assigned resources), bridge windows and device BARs
 * are read back and translated to CPU addresses rather than reassigned.
 */
void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_dev *dev = bus->self;

	if (dev && pci_probe_only &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		/* This is a subordinate bridge */

		/* Read the bridge windows before translating them */
		pci_read_bridge_bases(bus);
		pcibios_fixup_device_resources(dev, bus);
	}

	/* iommu/irq setup happens regardless of probe mode */
	do_bus_setup(bus);

	if (!pci_probe_only)
		return;

	/* Translate BARs of all non-bridge devices on this bus;
	 * bridges were handled above when their secondary bus was
	 * fixed up */
	list_for_each_entry(dev, &bus->devices, bus_list)
		if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
			pcibios_fixup_device_resources(dev, bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);
1215 | |||
1216 | /* | ||
1217 | * Reads the interrupt pin to determine if interrupt is use by card. | ||
1218 | * If the interrupt is used, then gets the interrupt line from the | ||
1219 | * openfirmware and sets it in the pci_dev and pci_config line. | ||
1220 | */ | ||
1221 | int pci_read_irq_line(struct pci_dev *pci_dev) | ||
1222 | { | ||
1223 | u8 intpin; | ||
1224 | struct device_node *node; | ||
1225 | |||
1226 | pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &intpin); | ||
1227 | if (intpin == 0) | ||
1228 | return 0; | ||
1229 | |||
1230 | node = pci_device_to_OF_node(pci_dev); | ||
1231 | if (node == NULL) | ||
1232 | return -1; | ||
1233 | |||
1234 | if (node->n_intrs == 0) | ||
1235 | return -1; | ||
1236 | |||
1237 | pci_dev->irq = node->intrs[0].line; | ||
1238 | |||
1239 | pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, pci_dev->irq); | ||
1240 | |||
1241 | return 0; | ||
1242 | } | ||
1243 | EXPORT_SYMBOL(pci_read_irq_line); | ||
1244 | |||
1245 | void pci_resource_to_user(const struct pci_dev *dev, int bar, | ||
1246 | const struct resource *rsrc, | ||
1247 | u64 *start, u64 *end) | ||
1248 | { | ||
1249 | struct pci_controller *hose = pci_bus_to_host(dev->bus); | ||
1250 | unsigned long offset = 0; | ||
1251 | |||
1252 | if (hose == NULL) | ||
1253 | return; | ||
1254 | |||
1255 | if (rsrc->flags & IORESOURCE_IO) | ||
1256 | offset = pci_io_base - (unsigned long)hose->io_base_virt + | ||
1257 | hose->io_base_phys; | ||
1258 | |||
1259 | *start = rsrc->start + offset; | ||
1260 | *end = rsrc->end + offset; | ||
1261 | } | ||
1262 | |||
1263 | #endif /* CONFIG_PPC_MULTIPLATFORM */ | ||
1264 | |||
1265 | |||
1266 | #define IOBASE_BRIDGE_NUMBER 0 | ||
1267 | #define IOBASE_MEMORY 1 | ||
1268 | #define IOBASE_IO 2 | ||
1269 | #define IOBASE_ISA_IO 3 | ||
1270 | #define IOBASE_ISA_MEM 4 | ||
1271 | |||
/*
 * sys_pciconfig_iobase - legacy syscall used by userspace (e.g. X
 * servers) to discover PCI base addresses for a given bus.
 * @which selects one of the IOBASE_* queries above; @in_devfn is
 * unused here.  Returns the requested base, -ENODEV when no root bus
 * covers @in_bus, -EINVAL/-EOPNOTSUPP for unsupported queries.
 */
long sys_pciconfig_iobase(long which, unsigned long in_bus,
			  unsigned long in_devfn)
{
	struct pci_controller* hose;
	struct list_head *ln;
	struct pci_bus *bus = NULL;
	struct device_node *hose_node;

	/* Argh ! Please forgive me for that hack, but that's the
	 * simplest way to get existing XFree to not lockup on some
	 * G5 machines... So when something asks for bus 0 io base
	 * (bus 0 is HT root), we return the AGP one instead.
	 */
	if (machine_is_compatible("MacRISC4"))
		if (in_bus == 0)
			in_bus = 0xf0;

	/* That syscall isn't quite compatible with PCI domains, but it's
	 * used on pre-domains setup. We return the first match
	 */

	/* NOTE(review): `in_bus < bus->number + bus->subordinate` looks
	 * suspicious — `subordinate` is normally the highest bus number
	 * behind the bridge, suggesting `in_bus <= bus->subordinate` was
	 * intended.  Verify against the struct pci_bus definition before
	 * changing. */
	for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
		bus = pci_bus_b(ln);
		if (in_bus >= bus->number && in_bus < (bus->number + bus->subordinate))
			break;
		bus = NULL;
	}
	if (bus == NULL || bus->sysdata == NULL)
		return -ENODEV;

	hose_node = (struct device_node *)bus->sysdata;
	hose = PCI_DN(hose_node)->phb;

	switch (which) {
	case IOBASE_BRIDGE_NUMBER:
		return (long)hose->first_busno;
	case IOBASE_MEMORY:
		return (long)hose->pci_mem_offset;
	case IOBASE_IO:
		return (long)hose->io_base_phys;
	case IOBASE_ISA_IO:
		return (long)isa_io_base;
	case IOBASE_ISA_MEM:
		return -EINVAL;
	}

	return -EOPNOTSUPP;
}