-rw-r--r--  arch/powerpc/kernel/Makefile                    2
-rw-r--r--  arch/powerpc/kernel/isa-bridge.c              271
-rw-r--r--  arch/powerpc/kernel/of_platform.c               8
-rw-r--r--  arch/powerpc/kernel/pci_64.c                  358
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c                  7
-rw-r--r--  arch/powerpc/mm/Makefile                        3
-rw-r--r--  arch/powerpc/mm/imalloc.c                     314
-rw-r--r--  arch/powerpc/mm/mmu_decl.h                     12
-rw-r--r--  arch/powerpc/mm/pgtable_64.c                  204
-rw-r--r--  arch/powerpc/mm/tlb_64.c                       56
-rw-r--r--  arch/powerpc/platforms/cell/io-workarounds.c    2
-rw-r--r--  arch/powerpc/platforms/iseries/pci.c            5
-rw-r--r--  arch/powerpc/platforms/maple/pci.c             35
-rw-r--r--  arch/powerpc/platforms/pasemi/pci.c            20
-rw-r--r--  arch/powerpc/platforms/powermac/pci.c          32
-rw-r--r--  arch/powerpc/platforms/pseries/pci_dlpar.c      2
-rw-r--r--  arch/powerpc/platforms/pseries/pseries.h        2
-rw-r--r--  drivers/pci/hotplug/rpadlpar_core.c             6
-rw-r--r--  include/asm-powerpc/floppy.h                    6
-rw-r--r--  include/asm-powerpc/io.h                       19
-rw-r--r--  include/asm-powerpc/pci-bridge.h                6
-rw-r--r--  include/asm-powerpc/pci.h                       4
-rw-r--r--  include/asm-powerpc/pgtable-ppc64.h            21
-rw-r--r--  include/asm-powerpc/ppc-pci.h                   6
-rw-r--r--  include/asm-powerpc/tlbflush.h                  5
25 files changed, 557 insertions, 849 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 3e779f07f21b..08ce7de7c768 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -65,7 +65,7 @@ obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o
65module-$(CONFIG_PPC64) += module_64.o 65module-$(CONFIG_PPC64) += module_64.o
66obj-$(CONFIG_MODULES) += $(module-y) 66obj-$(CONFIG_MODULES) += $(module-y)
67 67
68pci64-$(CONFIG_PPC64) += pci_64.o pci_dn.o 68pci64-$(CONFIG_PPC64) += pci_64.o pci_dn.o isa-bridge.o
69pci32-$(CONFIG_PPC32) := pci_32.o 69pci32-$(CONFIG_PPC32) := pci_32.o
70obj-$(CONFIG_PCI) += $(pci64-y) $(pci32-y) 70obj-$(CONFIG_PCI) += $(pci64-y) $(pci32-y)
71obj-$(CONFIG_PCI_MSI) += msi.o 71obj-$(CONFIG_PCI_MSI) += msi.o
diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c
new file mode 100644
index 000000000000..f0f49d1be3d5
--- /dev/null
+++ b/arch/powerpc/kernel/isa-bridge.c
@@ -0,0 +1,271 @@
1/*
2 * Routines for tracking a legacy ISA bridge
3 *
4 * Copyright 2007 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
5 *
6 * Some bits and pieces moved over from pci_64.c
7 *
8 * Copyright 2003 Anton Blanchard <anton@au.ibm.com>, IBM Corp.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#define DEBUG
17
18#include <linux/kernel.h>
19#include <linux/pci.h>
20#include <linux/string.h>
21#include <linux/init.h>
22#include <linux/mm.h>
23#include <linux/notifier.h>
24
25#include <asm/processor.h>
26#include <asm/io.h>
27#include <asm/prom.h>
28#include <asm/pci-bridge.h>
29#include <asm/machdep.h>
30#include <asm/ppc-pci.h>
31#include <asm/firmware.h>
32
33unsigned long isa_io_base; /* NULL if no ISA bus */
34EXPORT_SYMBOL(isa_io_base);
35
36/* Cached ISA bridge dev. */
37static struct device_node *isa_bridge_devnode;
38struct pci_dev *isa_bridge_pcidev;
39EXPORT_SYMBOL_GPL(isa_bridge_pcidev);
40
41#define ISA_SPACE_MASK 0x1
42#define ISA_SPACE_IO 0x1
43
44static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
45 unsigned long phb_io_base_phys)
46{
47 /* We should get some saner parsing here and remove these structs */
48 struct pci_address {
49 u32 a_hi;
50 u32 a_mid;
51 u32 a_lo;
52 };
53
54 struct isa_address {
55 u32 a_hi;
56 u32 a_lo;
57 };
58
59 struct isa_range {
60 struct isa_address isa_addr;
61 struct pci_address pci_addr;
62 unsigned int size;
63 };
64
65 const struct isa_range *range;
66 unsigned long pci_addr;
67 unsigned int isa_addr;
68 unsigned int size;
69 int rlen = 0;
70
71 range = of_get_property(isa_node, "ranges", &rlen);
72 if (range == NULL || (rlen < sizeof(struct isa_range)))
73 goto inval_range;
74
75 /* From "ISA Binding to 1275"
76 * The ranges property is laid out as an array of elements,
77 * each of which comprises:
78 * cells 0 - 1: an ISA address
79 * cells 2 - 4: a PCI address
80 * (size depending on dev->n_addr_cells)
81 * cell 5: the size of the range
82 */
83 if ((range->isa_addr.a_hi & ISA_SPACE_MASK) != ISA_SPACE_IO) {
84 range++;
85 rlen -= sizeof(struct isa_range);
86 if (rlen < sizeof(struct isa_range))
87 goto inval_range;
88 }
89 if ((range->isa_addr.a_hi & ISA_SPACE_MASK) != ISA_SPACE_IO)
90 goto inval_range;
91
92 isa_addr = range->isa_addr.a_lo;
93 pci_addr = (unsigned long) range->pci_addr.a_mid << 32 |
94 range->pci_addr.a_lo;
95
96 /* Assume these are both zero. Note: We could fix that and
97 * do a proper parsing instead ... oh well, that will do for
98 * now as nobody uses fancy mappings for ISA bridges
99 */
100 if ((pci_addr != 0) || (isa_addr != 0)) {
101 printk(KERN_ERR "unexpected isa to pci mapping: %s\n",
102 __FUNCTION__);
103 return;
104 }
105
106 /* Align size and make sure it's cropped to 64K */
107 size = PAGE_ALIGN(range->size);
108 if (size > 0x10000)
109 size = 0x10000;
110
114 __ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
115 size, _PAGE_NO_CACHE|_PAGE_GUARDED);
116 return;
117
118inval_range:
119 printk(KERN_ERR "no ISA IO ranges or unexpected isa range, "
120 "mapping 64k\n");
121 __ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
122 0x10000, _PAGE_NO_CACHE|_PAGE_GUARDED);
123}
124
125
126/**
127 * isa_bridge_find_early - Find and map the ISA IO space early before
128 * main PCI discovery. This is optionally called by
129 * the arch code when adding PCI PHBs to get early
130 * access to ISA IO ports
131 */
132void __init isa_bridge_find_early(struct pci_controller *hose)
133{
134 struct device_node *np, *parent = NULL, *tmp;
135
136 /* If we already have an ISA bridge, bail off */
137 if (isa_bridge_devnode != NULL)
138 return;
139
140 /* For each "isa" node in the system. Note : we do a search by
141 * type and not by name. It might be better to do by name but that's
142 * what the code used to do and I don't want to break too much at
143 * once. We can look into changing that separately
144 */
145 for_each_node_by_type(np, "isa") {
146 /* Look for our hose being a parent */
147 for (parent = of_get_parent(np); parent;) {
148 if (parent == hose->arch_data) {
149 of_node_put(parent);
150 break;
151 }
152 tmp = parent;
153 parent = of_get_parent(parent);
154 of_node_put(tmp);
155 }
156 if (parent != NULL)
157 break;
158 }
159 if (np == NULL)
160 return;
161 isa_bridge_devnode = np;
162
163 /* Now parse the "ranges" property and setup the ISA mapping */
164 pci_process_ISA_OF_ranges(np, hose->io_base_phys);
165
166 /* Set the global ISA io base to indicate we have an ISA bridge */
167 isa_io_base = ISA_IO_BASE;
168
169 pr_debug("ISA bridge (early) is %s\n", np->full_name);
170}
171
172/**
173 * isa_bridge_find_late - Find and map the ISA IO space upon discovery of
174 * a new ISA bridge
175 */
176static void __devinit isa_bridge_find_late(struct pci_dev *pdev,
177 struct device_node *devnode)
178{
179 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
180
181 /* Store ISA device node and PCI device */
182 isa_bridge_devnode = of_node_get(devnode);
183 isa_bridge_pcidev = pdev;
184
185 /* Now parse the "ranges" property and setup the ISA mapping */
186 pci_process_ISA_OF_ranges(devnode, hose->io_base_phys);
187
188 /* Set the global ISA io base to indicate we have an ISA bridge */
189 isa_io_base = ISA_IO_BASE;
190
191 pr_debug("ISA bridge (late) is %s on %s\n",
192 devnode->full_name, pci_name(pdev));
193}
194
195/**
196 * isa_bridge_remove - Remove/unmap an ISA bridge
197 */
198static void isa_bridge_remove(void)
199{
200 pr_debug("ISA bridge removed !\n");
201
202 /* Clear the global ISA io base to indicate that we have no more
203 * ISA bridge. Note that drivers don't quite handle that, though
204 * we should probably do something about it. But do we ever really
205 * have ISA bridges being removed on machines using legacy devices ?
206 */
207 isa_io_base = ISA_IO_BASE;
208
209 /* Clear references to the bridge */
210 of_node_put(isa_bridge_devnode);
211 isa_bridge_devnode = NULL;
212 isa_bridge_pcidev = NULL;
213
214 /* Unmap the ISA area */
215 __iounmap_at((void *)ISA_IO_BASE, 0x10000);
216}
217
218/**
219 * isa_bridge_notify - Get notified of PCI devices addition/removal
220 */
221static int __devinit isa_bridge_notify(struct notifier_block *nb,
222 unsigned long action, void *data)
223{
224 struct device *dev = data;
225 struct pci_dev *pdev = to_pci_dev(dev);
226 struct device_node *devnode = pci_device_to_OF_node(pdev);
227
228 switch(action) {
229 case BUS_NOTIFY_ADD_DEVICE:
230 /* Check if we have an early ISA device, without PCI dev */
231 if (isa_bridge_devnode && isa_bridge_devnode == devnode &&
232 !isa_bridge_pcidev) {
233 pr_debug("ISA bridge PCI attached: %s\n",
234 pci_name(pdev));
235 isa_bridge_pcidev = pdev;
236 }
237
238 /* Check if we have no ISA device, and this happens to be one,
239 * register it as such if it has an OF device
240 */
241 if (!isa_bridge_devnode && devnode && devnode->type &&
242 !strcmp(devnode->type, "isa"))
243 isa_bridge_find_late(pdev, devnode);
244
245 return 0;
246 case BUS_NOTIFY_DEL_DEVICE:
247 /* Check if this is our existing ISA device */
248 if (pdev == isa_bridge_pcidev ||
249 (devnode && devnode == isa_bridge_devnode))
250 isa_bridge_remove();
251 return 0;
252 }
253 return 0;
254}
255
256static struct notifier_block isa_bridge_notifier = {
257 .notifier_call = isa_bridge_notify
258};
259
260/**
261 * isa_bridge_init - register to be notified of ISA bridge addition/removal
262 *
263 */
264static int __init isa_bridge_init(void)
265{
266 if (firmware_has_feature(FW_FEATURE_ISERIES))
267 return 0;
268 bus_register_notifier(&pci_bus_type, &isa_bridge_notifier);
269 return 0;
270}
271arch_initcall(isa_bridge_init);
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index d454f61c9c7c..9536ed7f247c 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -427,14 +427,6 @@ static int __devinit of_pci_phb_probe(struct of_device *dev,
427 /* Process "ranges" property */ 427 /* Process "ranges" property */
428 pci_process_bridge_OF_ranges(phb, dev->node, 0); 428 pci_process_bridge_OF_ranges(phb, dev->node, 0);
429 429
430 /* Setup IO space. We use the non-dynamic version of that code here,
431 * which doesn't quite support unplugging. Next kernel release will
432 * have a better fix for this.
433 * Note also that we don't do ISA, this will also be fixed with a
434 * more massive rework.
435 */
436 pci_setup_phb_io(phb, pci_io_base == 0);
437
438 /* Init pci_dn data structures */ 430 /* Init pci_dn data structures */
439 pci_devs_phb_init_dynamic(phb); 431 pci_devs_phb_init_dynamic(phb);
440 432
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 249cca27a9b8..6ae67ebfab4d 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -11,7 +11,7 @@
11 * 2 of the License, or (at your option) any later version. 11 * 2 of the License, or (at your option) any later version.
12 */ 12 */
13 13
14#undef DEBUG 14#define DEBUG
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/pci.h> 17#include <linux/pci.h>
@@ -22,6 +22,7 @@
22#include <linux/list.h> 22#include <linux/list.h>
23#include <linux/syscalls.h> 23#include <linux/syscalls.h>
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <linux/vmalloc.h>
25 26
26#include <asm/processor.h> 27#include <asm/processor.h>
27#include <asm/io.h> 28#include <asm/io.h>
@@ -41,35 +42,26 @@
41 42
42unsigned long pci_probe_only = 1; 43unsigned long pci_probe_only = 1;
43int pci_assign_all_buses = 0; 44int pci_assign_all_buses = 0;
44static int pci_initial_scan_done;
45 45
46static void fixup_resource(struct resource *res, struct pci_dev *dev); 46static void fixup_resource(struct resource *res, struct pci_dev *dev);
47static void do_bus_setup(struct pci_bus *bus); 47static void do_bus_setup(struct pci_bus *bus);
48static void phbs_remap_io(void);
49 48
50/* pci_io_base -- the base address from which io bars are offsets. 49/* pci_io_base -- the base address from which io bars are offsets.
51 * This is the lowest I/O base address (so bar values are always positive), 50 * This is the lowest I/O base address (so bar values are always positive),
52 * and it *must* be the start of ISA space if an ISA bus exists because 51 * and it *must* be the start of ISA space if an ISA bus exists because
53 * ISA drivers use hard coded offsets. If no ISA bus exists a dummy 52 * ISA drivers use hard coded offsets. If no ISA bus exists nothing
54 * page is mapped and isa_io_limit prevents access to it. 53 * is mapped on the first 64K of IO space
55 */ 54 */
56unsigned long isa_io_base; /* NULL if no ISA bus */ 55unsigned long pci_io_base = ISA_IO_BASE;
57EXPORT_SYMBOL(isa_io_base);
58unsigned long pci_io_base;
59EXPORT_SYMBOL(pci_io_base); 56EXPORT_SYMBOL(pci_io_base);
60 57
61void iSeries_pcibios_init(void);
62
63LIST_HEAD(hose_list); 58LIST_HEAD(hose_list);
64 59
65static struct dma_mapping_ops *pci_dma_ops; 60static struct dma_mapping_ops *pci_dma_ops;
66 61
62/* XXX kill that some day ... */
67int global_phb_number; /* Global phb counter */ 63int global_phb_number; /* Global phb counter */
68 64
69/* Cached ISA bridge dev. */
70struct pci_dev *ppc64_isabridge_dev = NULL;
71EXPORT_SYMBOL_GPL(ppc64_isabridge_dev);
72
73void set_pci_dma_ops(struct dma_mapping_ops *dma_ops) 65void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
74{ 66{
75 pci_dma_ops = dma_ops; 67 pci_dma_ops = dma_ops;
@@ -100,7 +92,7 @@ void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region
100 return; 92 return;
101 93
102 if (res->flags & IORESOURCE_IO) 94 if (res->flags & IORESOURCE_IO)
103 offset = (unsigned long)hose->io_base_virt - pci_io_base; 95 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
104 96
105 if (res->flags & IORESOURCE_MEM) 97 if (res->flags & IORESOURCE_MEM)
106 offset = hose->pci_mem_offset; 98 offset = hose->pci_mem_offset;
@@ -119,7 +111,7 @@ void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
119 return; 111 return;
120 112
121 if (res->flags & IORESOURCE_IO) 113 if (res->flags & IORESOURCE_IO)
122 offset = (unsigned long)hose->io_base_virt - pci_io_base; 114 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
123 115
124 if (res->flags & IORESOURCE_MEM) 116 if (res->flags & IORESOURCE_MEM)
125 offset = hose->pci_mem_offset; 117 offset = hose->pci_mem_offset;
@@ -156,7 +148,7 @@ void pcibios_align_resource(void *data, struct resource *res,
156 148
157 if (res->flags & IORESOURCE_IO) { 149 if (res->flags & IORESOURCE_IO) {
158 unsigned long offset = (unsigned long)hose->io_base_virt - 150 unsigned long offset = (unsigned long)hose->io_base_virt -
159 pci_io_base; 151 _IO_BASE;
160 /* Make sure we start at our min on all hoses */ 152 /* Make sure we start at our min on all hoses */
161 if (start - offset < PCIBIOS_MIN_IO) 153 if (start - offset < PCIBIOS_MIN_IO)
162 start = PCIBIOS_MIN_IO + offset; 154 start = PCIBIOS_MIN_IO + offset;
@@ -535,10 +527,16 @@ void __devinit scan_phb(struct pci_controller *hose)
535 bus->secondary = hose->first_busno; 527 bus->secondary = hose->first_busno;
536 hose->bus = bus; 528 hose->bus = bus;
537 529
530 if (!firmware_has_feature(FW_FEATURE_ISERIES))
531 pcibios_map_io_space(bus);
532
538 bus->resource[0] = res = &hose->io_resource; 533 bus->resource[0] = res = &hose->io_resource;
539 if (res->flags && request_resource(&ioport_resource, res)) 534 if (res->flags && request_resource(&ioport_resource, res)) {
540 printk(KERN_ERR "Failed to request PCI IO region " 535 printk(KERN_ERR "Failed to request PCI IO region "
541 "on PCI domain %04x\n", hose->global_number); 536 "on PCI domain %04x\n", hose->global_number);
537 DBG("res->start = 0x%016lx, res->end = 0x%016lx\n",
538 res->start, res->end);
539 }
542 540
543 for (i = 0; i < 3; ++i) { 541 for (i = 0; i < 3; ++i) {
544 res = &hose->mem_resources[i]; 542 res = &hose->mem_resources[i];
@@ -596,17 +594,6 @@ static int __init pcibios_init(void)
596 if (ppc_md.pcibios_fixup) 594 if (ppc_md.pcibios_fixup)
597 ppc_md.pcibios_fixup(); 595 ppc_md.pcibios_fixup();
598 596
599 /* Cache the location of the ISA bridge (if we have one) */
600 ppc64_isabridge_dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
601 if (ppc64_isabridge_dev != NULL)
602 printk(KERN_DEBUG "ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
603
604 if (!firmware_has_feature(FW_FEATURE_ISERIES))
605 /* map in PCI I/O space */
606 phbs_remap_io();
607
608 pci_initial_scan_done = 1;
609
610 printk(KERN_DEBUG "PCI: Probing PCI hardware done\n"); 597 printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");
611 598
612 return 0; 599 return 0;
@@ -711,7 +698,7 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
711#endif 698#endif
712 res_bit = IORESOURCE_MEM; 699 res_bit = IORESOURCE_MEM;
713 } else { 700 } else {
714 io_offset = (unsigned long)hose->io_base_virt - pci_io_base; 701 io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
715 *offset += io_offset; 702 *offset += io_offset;
716 res_bit = IORESOURCE_IO; 703 res_bit = IORESOURCE_IO;
717 } 704 }
@@ -881,76 +868,6 @@ void pcibios_add_platform_entries(struct pci_dev *pdev)
881 device_create_file(&pdev->dev, &dev_attr_devspec); 868 device_create_file(&pdev->dev, &dev_attr_devspec);
882} 869}
883 870
884#define ISA_SPACE_MASK 0x1
885#define ISA_SPACE_IO 0x1
886
887static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
888 unsigned long phb_io_base_phys,
889 void __iomem * phb_io_base_virt)
890{
891 /* Remove these asap */
892
893 struct pci_address {
894 u32 a_hi;
895 u32 a_mid;
896 u32 a_lo;
897 };
898
899 struct isa_address {
900 u32 a_hi;
901 u32 a_lo;
902 };
903
904 struct isa_range {
905 struct isa_address isa_addr;
906 struct pci_address pci_addr;
907 unsigned int size;
908 };
909
910 const struct isa_range *range;
911 unsigned long pci_addr;
912 unsigned int isa_addr;
913 unsigned int size;
914 int rlen = 0;
915
916 range = of_get_property(isa_node, "ranges", &rlen);
917 if (range == NULL || (rlen < sizeof(struct isa_range))) {
918 printk(KERN_ERR "no ISA ranges or unexpected isa range size,"
919 "mapping 64k\n");
920 __ioremap_explicit(phb_io_base_phys,
921 (unsigned long)phb_io_base_virt,
922 0x10000, _PAGE_NO_CACHE | _PAGE_GUARDED);
923 return;
924 }
925
926 /* From "ISA Binding to 1275"
927 * The ranges property is laid out as an array of elements,
928 * each of which comprises:
929 * cells 0 - 1: an ISA address
930 * cells 2 - 4: a PCI address
931 * (size depending on dev->n_addr_cells)
932 * cell 5: the size of the range
933 */
934 if ((range->isa_addr.a_hi && ISA_SPACE_MASK) == ISA_SPACE_IO) {
935 isa_addr = range->isa_addr.a_lo;
936 pci_addr = (unsigned long) range->pci_addr.a_mid << 32 |
937 range->pci_addr.a_lo;
938
939 /* Assume these are both zero */
940 if ((pci_addr != 0) || (isa_addr != 0)) {
941 printk(KERN_ERR "unexpected isa to pci mapping: %s\n",
942 __FUNCTION__);
943 return;
944 }
945
946 size = PAGE_ALIGN(range->size);
947
948 __ioremap_explicit(phb_io_base_phys,
949 (unsigned long) phb_io_base_virt,
950 size, _PAGE_NO_CACHE | _PAGE_GUARDED);
951 }
952}
953
954void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose, 871void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
955 struct device_node *dev, int prim) 872 struct device_node *dev, int prim)
956{ 873{
@@ -1045,155 +962,122 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
1045 } 962 }
1046} 963}
1047 964
1048void __devinit pci_setup_phb_io(struct pci_controller *hose, int primary) 965#ifdef CONFIG_HOTPLUG
966
967int pcibios_unmap_io_space(struct pci_bus *bus)
1049{ 968{
1050 unsigned long size = hose->pci_io_size; 969 struct pci_controller *hose;
1051 unsigned long io_virt_offset;
1052 struct resource *res;
1053 struct device_node *isa_dn;
1054 970
1055 if (size == 0) 971 WARN_ON(bus == NULL);
1056 return;
1057 972
1058 hose->io_base_virt = reserve_phb_iospace(size); 973 /* If this is not a PHB, we only flush the hash table over
1059 DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n", 974 * the area mapped by this bridge. We don't play with the PTE
1060 hose->global_number, hose->io_base_phys, 975 * mappings since we might have to deal with sub-page alignments
1061 (unsigned long) hose->io_base_virt); 976 * so flushing the hash table is the only sane way to make sure
1062 977 * that no hash entries are covering that removed bridge area
1063 if (primary) { 978 * while still allowing other busses overlapping those pages
1064 pci_io_base = (unsigned long)hose->io_base_virt; 979 */
1065 isa_dn = of_find_node_by_type(NULL, "isa"); 980 if (bus->self) {
1066 if (isa_dn) { 981 struct resource *res = bus->resource[0];
1067 isa_io_base = pci_io_base;
1068 pci_process_ISA_OF_ranges(isa_dn, hose->io_base_phys,
1069 hose->io_base_virt);
1070 of_node_put(isa_dn);
1071 }
1072 }
1073 982
1074 io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base; 983 DBG("IO unmapping for PCI-PCI bridge %s\n",
1075 res = &hose->io_resource; 984 pci_name(bus->self));
1076 res->start += io_virt_offset;
1077 res->end += io_virt_offset;
1078 985
1079 /* If this is called after the initial PCI scan, then we need to 986 __flush_hash_table_range(&init_mm, res->start + _IO_BASE,
1080 * proceed to IO mappings now 987 res->end - res->start + 1);
1081 */ 988 return 0;
1082 if (pci_initial_scan_done) 989 }
1083 __ioremap_explicit(hose->io_base_phys,
1084 (unsigned long)hose->io_base_virt,
1085 hose->pci_io_size,
1086 _PAGE_NO_CACHE | _PAGE_GUARDED);
1087}
1088 990
1089void __devinit pci_setup_phb_io_dynamic(struct pci_controller *hose, 991 /* Get the host bridge */
1090 int primary) 992 hose = pci_bus_to_host(bus);
1091{
1092 unsigned long size = hose->pci_io_size;
1093 unsigned long io_virt_offset;
1094 struct resource *res;
1095 993
1096 if (size == 0) 994 /* Check if we have IOs allocated */
1097 return; 995 if (hose->io_base_alloc == 0)
996 return 0;
1098 997
1099 hose->io_base_virt = __ioremap(hose->io_base_phys, size, 998 DBG("IO unmapping for PHB %s\n",
1100 _PAGE_NO_CACHE | _PAGE_GUARDED); 999 ((struct device_node *)hose->arch_data)->full_name);
1101 DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n", 1000 DBG(" alloc=0x%p\n", hose->io_base_alloc);
1102 hose->global_number, hose->io_base_phys,
1103 (unsigned long) hose->io_base_virt);
1104 1001
1105 if (primary) 1002 /* This is a PHB, we fully unmap the IO area */
1106 pci_io_base = (unsigned long)hose->io_base_virt; 1003 vunmap(hose->io_base_alloc);
1107 1004
1108 io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base; 1005 return 0;
1109 res = &hose->io_resource;
1110 res->start += io_virt_offset;
1111 res->end += io_virt_offset;
1112} 1006}
1007EXPORT_SYMBOL_GPL(pcibios_unmap_io_space);
1113 1008
1009#endif /* CONFIG_HOTPLUG */
1114 1010
1115static int get_bus_io_range(struct pci_bus *bus, unsigned long *start_phys, 1011int __devinit pcibios_map_io_space(struct pci_bus *bus)
1116 unsigned long *start_virt, unsigned long *size)
1117{ 1012{
1118 struct pci_controller *hose = pci_bus_to_host(bus); 1013 struct vm_struct *area;
1119 struct resource *res; 1014 unsigned long phys_page;
1120 1015 unsigned long size_page;
1121 if (bus->self) 1016 unsigned long io_virt_offset;
1122 res = bus->resource[0]; 1017 struct pci_controller *hose;
1123 else
1124 /* Root Bus */
1125 res = &hose->io_resource;
1126
1127 if (res->end == 0 && res->start == 0)
1128 return 1;
1129 1018
1130 *start_virt = pci_io_base + res->start; 1019 WARN_ON(bus == NULL);
1131 *start_phys = *start_virt + hose->io_base_phys
1132 - (unsigned long) hose->io_base_virt;
1133 1020
1134 if (res->end > res->start) 1021 /* If this is not a PHB, nothing to do, page tables still exist and
1135 *size = res->end - res->start + 1; 1022 * thus HPTEs will be faulted in when needed
1136 else { 1023 */
1137 printk("%s(): unexpected region 0x%lx->0x%lx\n", 1024 if (bus->self) {
1138 __FUNCTION__, res->start, res->end); 1025 DBG("IO mapping for PCI-PCI bridge %s\n",
1139 return 1; 1026 pci_name(bus->self));
1027 DBG(" virt=0x%016lx...0x%016lx\n",
1028 bus->resource[0]->start + _IO_BASE,
1029 bus->resource[0]->end + _IO_BASE);
1030 return 0;
1140 } 1031 }
1141 1032
1142 return 0; 1033 /* Get the host bridge */
1143} 1034 hose = pci_bus_to_host(bus);
1144 1035 phys_page = _ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE);
1145int unmap_bus_range(struct pci_bus *bus) 1036 size_page = _ALIGN_UP(hose->pci_io_size, PAGE_SIZE);
1146{
1147 unsigned long start_phys;
1148 unsigned long start_virt;
1149 unsigned long size;
1150
1151 if (!bus) {
1152 printk(KERN_ERR "%s() expected bus\n", __FUNCTION__);
1153 return 1;
1154 }
1155
1156 if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
1157 return 1;
1158 if (__iounmap_explicit((void __iomem *) start_virt, size))
1159 return 1;
1160 1037
1161 return 0; 1038 /* Make sure IO area address is clear */
1162} 1039 hose->io_base_alloc = NULL;
1163EXPORT_SYMBOL(unmap_bus_range);
1164 1040
1165int remap_bus_range(struct pci_bus *bus) 1041 /* If there's no IO to map on that bus, get away too */
1166{ 1042 if (hose->pci_io_size == 0 || hose->io_base_phys == 0)
1167 unsigned long start_phys; 1043 return 0;
1168 unsigned long start_virt;
1169 unsigned long size;
1170 1044
1171 if (!bus) { 1045 /* Let's allocate some IO space for that guy. We don't pass
1172 printk(KERN_ERR "%s() expected bus\n", __FUNCTION__); 1046 * VM_IOREMAP because we don't care about alignment tricks that
1173 return 1; 1047 * the core does in that case. Maybe we should due to stupid card
1174 } 1048 * with incomplete address decoding but I'd rather not deal with
1175 1049 * those outside of the reserved 64K legacy region.
1176 1050 */
1177 if (get_bus_io_range(bus, &start_phys, &start_virt, &size)) 1051 area = __get_vm_area(size_page, 0, PHB_IO_BASE, PHB_IO_END);
1178 return 1; 1052 if (area == NULL)
1179 if (start_phys == 0) 1053 return -ENOMEM;
1180 return 1; 1054 hose->io_base_alloc = area->addr;
1181 printk(KERN_DEBUG "mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt, size); 1055 hose->io_base_virt = (void __iomem *)(area->addr +
1182 if (__ioremap_explicit(start_phys, start_virt, size, 1056 hose->io_base_phys - phys_page);
1183 _PAGE_NO_CACHE | _PAGE_GUARDED)) 1057
1184 return 1; 1058 DBG("IO mapping for PHB %s\n",
1059 ((struct device_node *)hose->arch_data)->full_name);
1060 DBG(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
1061 hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
1062 DBG(" size=0x%016lx (alloc=0x%016lx)\n",
1063 hose->pci_io_size, size_page);
1064
1065 /* Establish the mapping */
1066 if (__ioremap_at(phys_page, area->addr, size_page,
1067 _PAGE_NO_CACHE | _PAGE_GUARDED) == NULL)
1068 return -ENOMEM;
1069
1070 /* Fixup hose IO resource */
1071 io_virt_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
1072 hose->io_resource.start += io_virt_offset;
1073 hose->io_resource.end += io_virt_offset;
1074
1075 DBG(" hose->io_resource=0x%016lx...0x%016lx\n",
1076 hose->io_resource.start, hose->io_resource.end);
1185 1077
1186 return 0; 1078 return 0;
1187} 1079}
1188EXPORT_SYMBOL(remap_bus_range); 1080EXPORT_SYMBOL_GPL(pcibios_map_io_space);
1189
1190static void phbs_remap_io(void)
1191{
1192 struct pci_controller *hose, *tmp;
1193
1194 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1195 remap_bus_range(hose->bus);
1196}
1197 1081
1198static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev) 1082static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
1199{ 1083{
@@ -1201,8 +1085,7 @@ static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
1201 unsigned long offset; 1085 unsigned long offset;
1202 1086
1203 if (res->flags & IORESOURCE_IO) { 1087 if (res->flags & IORESOURCE_IO) {
1204 offset = (unsigned long)hose->io_base_virt - pci_io_base; 1088 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
1205
1206 res->start += offset; 1089 res->start += offset;
1207 res->end += offset; 1090 res->end += offset;
1208 } else if (res->flags & IORESOURCE_MEM) { 1091 } else if (res->flags & IORESOURCE_MEM) {
@@ -1217,9 +1100,20 @@ void __devinit pcibios_fixup_device_resources(struct pci_dev *dev,
1217 /* Update device resources. */ 1100 /* Update device resources. */
1218 int i; 1101 int i;
1219 1102
1220 for (i = 0; i < PCI_NUM_RESOURCES; i++) 1103 DBG("%s: Fixup resources:\n", pci_name(dev));
1221 if (dev->resource[i].flags) 1104 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1222 fixup_resource(&dev->resource[i], dev); 1105 struct resource *res = &dev->resource[i];
1106 if (!res->flags)
1107 continue;
1108
1109 DBG(" 0x%02x < %08lx:0x%016lx...0x%016lx\n",
1110 i, res->flags, res->start, res->end);
1111
1112 fixup_resource(res, dev);
1113
1114 DBG(" > %08lx:0x%016lx...0x%016lx\n",
1115 res->flags, res->start, res->end);
1116 }
1223} 1117}
1224EXPORT_SYMBOL(pcibios_fixup_device_resources); 1118EXPORT_SYMBOL(pcibios_fixup_device_resources);
1225 1119
@@ -1360,7 +1254,7 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
1360 return; 1254 return;
1361 1255
1362 if (rsrc->flags & IORESOURCE_IO) 1256 if (rsrc->flags & IORESOURCE_IO)
1363 offset = (unsigned long)hose->io_base_virt - pci_io_base; 1257 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
1364 1258
1365 /* We pass a fully fixed up address to userland for MMIO instead of 1259 /* We pass a fully fixed up address to userland for MMIO instead of
1366 * a BAR value because X is lame and expects to be able to use that 1260 * a BAR value because X is lame and expects to be able to use that
@@ -1410,7 +1304,7 @@ unsigned long pci_address_to_pio(phys_addr_t address)
1410 if (address >= hose->io_base_phys && 1304 if (address >= hose->io_base_phys &&
1411 address < (hose->io_base_phys + hose->pci_io_size)) { 1305 address < (hose->io_base_phys + hose->pci_io_size)) {
1412 unsigned long base = 1306 unsigned long base =
1413 (unsigned long)hose->io_base_virt - pci_io_base; 1307 (unsigned long)hose->io_base_virt - _IO_BASE;
1414 return base + (address - hose->io_base_phys); 1308 return base + (address - hose->io_base_phys);
1415 } 1309 }
1416 } 1310 }
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index f2286822be09..a5de6211b97a 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -278,10 +278,8 @@ void __init find_and_init_phbs(void)
278{ 278{
279 struct device_node *node; 279 struct device_node *node;
280 struct pci_controller *phb; 280 struct pci_controller *phb;
281 unsigned int index;
282 struct device_node *root = of_find_node_by_path("/"); 281 struct device_node *root = of_find_node_by_path("/");
283 282
284 index = 0;
285 for (node = of_get_next_child(root, NULL); 283 for (node = of_get_next_child(root, NULL);
286 node != NULL; 284 node != NULL;
287 node = of_get_next_child(root, node)) { 285 node = of_get_next_child(root, node)) {
@@ -295,8 +293,7 @@ void __init find_and_init_phbs(void)
295 continue; 293 continue;
296 rtas_setup_phb(phb); 294 rtas_setup_phb(phb);
297 pci_process_bridge_OF_ranges(phb, node, 0); 295 pci_process_bridge_OF_ranges(phb, node, 0);
298 pci_setup_phb_io(phb, index == 0); 296 isa_bridge_find_early(phb);
299 index++;
300 } 297 }
301 298
302 of_node_put(root); 299 of_node_put(root);
@@ -335,7 +332,7 @@ int pcibios_remove_root_bus(struct pci_controller *phb)
335 return 1; 332 return 1;
336 } 333 }
337 334
338 rc = unmap_bus_range(b); 335 rc = pcibios_unmap_io_space(b);
339 if (rc) { 336 if (rc) {
340 printk(KERN_ERR "%s: failed to unmap IO on bus %s\n", 337 printk(KERN_ERR "%s: failed to unmap IO on bus %s\n",
341 __FUNCTION__, b->name); 338 __FUNCTION__, b->name);
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 4f839c6a9768..7e4d27ad3dee 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -11,8 +11,7 @@ obj-$(CONFIG_PPC32) += init_32.o pgtable_32.o mmu_context_32.o
11hash-$(CONFIG_PPC_NATIVE) := hash_native_64.o 11hash-$(CONFIG_PPC_NATIVE) := hash_native_64.o
12obj-$(CONFIG_PPC64) += init_64.o pgtable_64.o mmu_context_64.o \ 12obj-$(CONFIG_PPC64) += init_64.o pgtable_64.o mmu_context_64.o \
13 hash_utils_64.o hash_low_64.o tlb_64.o \ 13 hash_utils_64.o hash_low_64.o tlb_64.o \
14 slb_low.o slb.o stab.o mmap.o imalloc.o \ 14 slb_low.o slb.o stab.o mmap.o $(hash-y)
15 $(hash-y)
16obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o hash_low_32.o tlb_32.o 15obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o hash_low_32.o tlb_32.o
17obj-$(CONFIG_40x) += 4xx_mmu.o 16obj-$(CONFIG_40x) += 4xx_mmu.o
18obj-$(CONFIG_44x) += 44x_mmu.o 17obj-$(CONFIG_44x) += 44x_mmu.o
diff --git a/arch/powerpc/mm/imalloc.c b/arch/powerpc/mm/imalloc.c
deleted file mode 100644
index 9eddf37303d7..000000000000
--- a/arch/powerpc/mm/imalloc.c
+++ /dev/null
@@ -1,314 +0,0 @@
1/*
2 * c 2001 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/slab.h>
11#include <linux/vmalloc.h>
12
13#include <asm/uaccess.h>
14#include <asm/pgalloc.h>
15#include <asm/pgtable.h>
16#include <linux/mutex.h>
17#include <asm/cacheflush.h>
18
19#include "mmu_decl.h"
20
21static DEFINE_MUTEX(imlist_mutex);
22struct vm_struct * imlist = NULL;
23
24static int get_free_im_addr(unsigned long size, unsigned long *im_addr)
25{
26 unsigned long addr;
27 struct vm_struct **p, *tmp;
28
29 addr = ioremap_bot;
30 for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
31 if (size + addr < (unsigned long) tmp->addr)
32 break;
33 if ((unsigned long)tmp->addr >= ioremap_bot)
34 addr = tmp->size + (unsigned long) tmp->addr;
35 if (addr >= IMALLOC_END-size)
36 return 1;
37 }
38 *im_addr = addr;
39
40 return 0;
41}
42
43/* Return whether the region described by v_addr and size is a subset
44 * of the region described by parent
45 */
46static inline int im_region_is_subset(unsigned long v_addr, unsigned long size,
47 struct vm_struct *parent)
48{
49 return (int) (v_addr >= (unsigned long) parent->addr &&
50 v_addr < (unsigned long) parent->addr + parent->size &&
51 size < parent->size);
52}
53
54/* Return whether the region described by v_addr and size is a superset
55 * of the region described by child
56 */
57static int im_region_is_superset(unsigned long v_addr, unsigned long size,
58 struct vm_struct *child)
59{
60 struct vm_struct parent;
61
62 parent.addr = (void *) v_addr;
63 parent.size = size;
64
65 return im_region_is_subset((unsigned long) child->addr, child->size,
66 &parent);
67}
68
69/* Return whether the region described by v_addr and size overlaps
70 * the region described by vm. Overlapping regions meet the
71 * following conditions:
72 * 1) The regions share some part of the address space
73 * 2) The regions aren't identical
74 * 3) Neither region is a subset of the other
75 */
76static int im_region_overlaps(unsigned long v_addr, unsigned long size,
77 struct vm_struct *vm)
78{
79 if (im_region_is_superset(v_addr, size, vm))
80 return 0;
81
82 return (v_addr + size > (unsigned long) vm->addr + vm->size &&
83 v_addr < (unsigned long) vm->addr + vm->size) ||
84 (v_addr < (unsigned long) vm->addr &&
85 v_addr + size > (unsigned long) vm->addr);
86}
87
88/* Determine imalloc status of region described by v_addr and size.
89 * Can return one of the following:
90 * IM_REGION_UNUSED - Entire region is unallocated in imalloc space.
91 * IM_REGION_SUBSET - Region is a subset of a region that is already
92 * allocated in imalloc space.
93 * vm will be assigned to a ptr to the parent region.
94 * IM_REGION_EXISTS - Exact region already allocated in imalloc space.
95 * vm will be assigned to a ptr to the existing imlist
96 * member.
97 * IM_REGION_OVERLAPS - Region overlaps an allocated region in imalloc space.
98 * IM_REGION_SUPERSET - Region is a superset of a region that is already
99 * allocated in imalloc space.
100 */
101static int im_region_status(unsigned long v_addr, unsigned long size,
102 struct vm_struct **vm)
103{
104 struct vm_struct *tmp;
105
106 for (tmp = imlist; tmp; tmp = tmp->next)
107 if (v_addr < (unsigned long) tmp->addr + tmp->size)
108 break;
109
110 *vm = NULL;
111 if (tmp) {
112 if (im_region_overlaps(v_addr, size, tmp))
113 return IM_REGION_OVERLAP;
114
115 *vm = tmp;
116 if (im_region_is_subset(v_addr, size, tmp)) {
117 /* Return with tmp pointing to superset */
118 return IM_REGION_SUBSET;
119 }
120 if (im_region_is_superset(v_addr, size, tmp)) {
121 /* Return with tmp pointing to first subset */
122 return IM_REGION_SUPERSET;
123 }
124 else if (v_addr == (unsigned long) tmp->addr &&
125 size == tmp->size) {
126 /* Return with tmp pointing to exact region */
127 return IM_REGION_EXISTS;
128 }
129 }
130
131 return IM_REGION_UNUSED;
132}
133
134static struct vm_struct * split_im_region(unsigned long v_addr,
135 unsigned long size, struct vm_struct *parent)
136{
137 struct vm_struct *vm1 = NULL;
138 struct vm_struct *vm2 = NULL;
139 struct vm_struct *new_vm = NULL;
140
141 vm1 = kmalloc(sizeof(*vm1), GFP_KERNEL);
142 if (vm1 == NULL) {
143 printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
144 return NULL;
145 }
146
147 if (v_addr == (unsigned long) parent->addr) {
148 /* Use existing parent vm_struct to represent child, allocate
149 * new one for the remainder of parent range
150 */
151 vm1->size = parent->size - size;
152 vm1->addr = (void *) (v_addr + size);
153 vm1->next = parent->next;
154
155 parent->size = size;
156 parent->next = vm1;
157 new_vm = parent;
158 } else if (v_addr + size == (unsigned long) parent->addr +
159 parent->size) {
160 /* Allocate new vm_struct to represent child, use existing
161 * parent one for remainder of parent range
162 */
163 vm1->size = size;
164 vm1->addr = (void *) v_addr;
165 vm1->next = parent->next;
166 new_vm = vm1;
167
168 parent->size -= size;
169 parent->next = vm1;
170 } else {
171 /* Allocate two new vm_structs for the new child and
172 * uppermost remainder, and use existing parent one for the
173 * lower remainder of parent range
174 */
175 vm2 = kmalloc(sizeof(*vm2), GFP_KERNEL);
176 if (vm2 == NULL) {
177 printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
178 kfree(vm1);
179 return NULL;
180 }
181
182 vm1->size = size;
183 vm1->addr = (void *) v_addr;
184 vm1->next = vm2;
185 new_vm = vm1;
186
187 vm2->size = ((unsigned long) parent->addr + parent->size) -
188 (v_addr + size);
189 vm2->addr = (void *) v_addr + size;
190 vm2->next = parent->next;
191
192 parent->size = v_addr - (unsigned long) parent->addr;
193 parent->next = vm1;
194 }
195
196 return new_vm;
197}
198
199static struct vm_struct * __add_new_im_area(unsigned long req_addr,
200 unsigned long size)
201{
202 struct vm_struct **p, *tmp, *area;
203
204 for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
205 if (req_addr + size <= (unsigned long)tmp->addr)
206 break;
207 }
208
209 area = kmalloc(sizeof(*area), GFP_KERNEL);
210 if (!area)
211 return NULL;
212 area->flags = 0;
213 area->addr = (void *)req_addr;
214 area->size = size;
215 area->next = *p;
216 *p = area;
217
218 return area;
219}
220
221static struct vm_struct * __im_get_area(unsigned long req_addr,
222 unsigned long size,
223 int criteria)
224{
225 struct vm_struct *tmp;
226 int status;
227
228 status = im_region_status(req_addr, size, &tmp);
229 if ((criteria & status) == 0) {
230 return NULL;
231 }
232
233 switch (status) {
234 case IM_REGION_UNUSED:
235 tmp = __add_new_im_area(req_addr, size);
236 break;
237 case IM_REGION_SUBSET:
238 tmp = split_im_region(req_addr, size, tmp);
239 break;
240 case IM_REGION_EXISTS:
241 /* Return requested region */
242 break;
243 case IM_REGION_SUPERSET:
244 /* Return first existing subset of requested region */
245 break;
246 default:
247 printk(KERN_ERR "%s() unexpected imalloc region status\n",
248 __FUNCTION__);
249 tmp = NULL;
250 }
251
252 return tmp;
253}
254
255struct vm_struct * im_get_free_area(unsigned long size)
256{
257 struct vm_struct *area;
258 unsigned long addr;
259
260 mutex_lock(&imlist_mutex);
261 if (get_free_im_addr(size, &addr)) {
262 printk(KERN_ERR "%s() cannot obtain addr for size 0x%lx\n",
263 __FUNCTION__, size);
264 area = NULL;
265 goto next_im_done;
266 }
267
268 area = __im_get_area(addr, size, IM_REGION_UNUSED);
269 if (area == NULL) {
270 printk(KERN_ERR
271 "%s() cannot obtain area for addr 0x%lx size 0x%lx\n",
272 __FUNCTION__, addr, size);
273 }
274next_im_done:
275 mutex_unlock(&imlist_mutex);
276 return area;
277}
278
279struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
280 int criteria)
281{
282 struct vm_struct *area;
283
284 mutex_lock(&imlist_mutex);
285 area = __im_get_area(v_addr, size, criteria);
286 mutex_unlock(&imlist_mutex);
287 return area;
288}
289
290void im_free(void * addr)
291{
292 struct vm_struct **p, *tmp;
293
294 if (!addr)
295 return;
296 if ((unsigned long) addr & ~PAGE_MASK) {
297 printk(KERN_ERR "Trying to %s bad address (%p)\n", __FUNCTION__, addr);
298 return;
299 }
300 mutex_lock(&imlist_mutex);
301 for (p = &imlist ; (tmp = *p) ; p = &tmp->next) {
302 if (tmp->addr == addr) {
303 *p = tmp->next;
304 unmap_kernel_range((unsigned long)tmp->addr,
305 tmp->size);
306 kfree(tmp);
307 mutex_unlock(&imlist_mutex);
308 return;
309 }
310 }
311 mutex_unlock(&imlist_mutex);
312 printk(KERN_ERR "Trying to %s nonexistent area (%p)\n", __FUNCTION__,
313 addr);
314}
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 2558c34eedaa..f7a4066a57ea 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -90,16 +90,4 @@ static inline void flush_HPTE(unsigned context, unsigned long va,
90 else 90 else
91 _tlbie(va); 91 _tlbie(va);
92} 92}
93#else /* CONFIG_PPC64 */
94/* imalloc region types */
95#define IM_REGION_UNUSED 0x1
96#define IM_REGION_SUBSET 0x2
97#define IM_REGION_EXISTS 0x4
98#define IM_REGION_OVERLAP 0x8
99#define IM_REGION_SUPERSET 0x10
100
101extern struct vm_struct * im_get_free_area(unsigned long size);
102extern struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
103 int region_type);
104extern void im_free(void *addr);
105#endif 93#endif
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index fa5c828d3876..a895de73beae 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -34,41 +34,27 @@
34#include <linux/stddef.h> 34#include <linux/stddef.h>
35#include <linux/vmalloc.h> 35#include <linux/vmalloc.h>
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/delay.h>
38#include <linux/bootmem.h>
39#include <linux/highmem.h>
40#include <linux/idr.h>
41#include <linux/nodemask.h>
42#include <linux/module.h>
43 37
44#include <asm/pgalloc.h> 38#include <asm/pgalloc.h>
45#include <asm/page.h> 39#include <asm/page.h>
46#include <asm/prom.h> 40#include <asm/prom.h>
47#include <asm/lmb.h>
48#include <asm/rtas.h>
49#include <asm/io.h> 41#include <asm/io.h>
50#include <asm/mmu_context.h> 42#include <asm/mmu_context.h>
51#include <asm/pgtable.h> 43#include <asm/pgtable.h>
52#include <asm/mmu.h> 44#include <asm/mmu.h>
53#include <asm/uaccess.h>
54#include <asm/smp.h> 45#include <asm/smp.h>
55#include <asm/machdep.h> 46#include <asm/machdep.h>
56#include <asm/tlb.h> 47#include <asm/tlb.h>
57#include <asm/eeh.h>
58#include <asm/processor.h> 48#include <asm/processor.h>
59#include <asm/mmzone.h>
60#include <asm/cputable.h> 49#include <asm/cputable.h>
61#include <asm/sections.h> 50#include <asm/sections.h>
62#include <asm/system.h> 51#include <asm/system.h>
63#include <asm/iommu.h>
64#include <asm/abs_addr.h> 52#include <asm/abs_addr.h>
65#include <asm/vdso.h>
66#include <asm/firmware.h> 53#include <asm/firmware.h>
67 54
68#include "mmu_decl.h" 55#include "mmu_decl.h"
69 56
70unsigned long ioremap_bot = IMALLOC_BASE; 57unsigned long ioremap_bot = IOREMAP_BASE;
71static unsigned long phbs_io_bot = PHBS_IO_BASE;
72 58
73/* 59/*
74 * map_io_page currently only called by __ioremap 60 * map_io_page currently only called by __ioremap
@@ -102,8 +88,8 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
102 * entry in the hardware page table. 88 * entry in the hardware page table.
103 * 89 *
104 */ 90 */
105 if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags, 91 if (htab_bolt_mapping(ea, (unsigned long)ea + PAGE_SIZE,
106 mmu_io_psize)) { 92 pa, flags, mmu_io_psize)) {
107 printk(KERN_ERR "Failed to do bolted mapping IO " 93 printk(KERN_ERR "Failed to do bolted mapping IO "
108 "memory at %016lx !\n", pa); 94 "memory at %016lx !\n", pa);
109 return -ENOMEM; 95 return -ENOMEM;
@@ -113,8 +99,11 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
113} 99}
114 100
115 101
116static void __iomem * __ioremap_com(phys_addr_t addr, unsigned long pa, 102/**
117 unsigned long ea, unsigned long size, 103 * __ioremap_at - Low level function to establish the page tables
104 * for an IO mapping
105 */
106void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
118 unsigned long flags) 107 unsigned long flags)
119{ 108{
120 unsigned long i; 109 unsigned long i;
@@ -122,17 +111,35 @@ static void __iomem * __ioremap_com(phys_addr_t addr, unsigned long pa,
122 if ((flags & _PAGE_PRESENT) == 0) 111 if ((flags & _PAGE_PRESENT) == 0)
123 flags |= pgprot_val(PAGE_KERNEL); 112 flags |= pgprot_val(PAGE_KERNEL);
124 113
114 WARN_ON(pa & ~PAGE_MASK);
115 WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
116 WARN_ON(size & ~PAGE_MASK);
117
125 for (i = 0; i < size; i += PAGE_SIZE) 118 for (i = 0; i < size; i += PAGE_SIZE)
126 if (map_io_page(ea+i, pa+i, flags)) 119 if (map_io_page((unsigned long)ea+i, pa+i, flags))
127 return NULL; 120 return NULL;
128 121
129 return (void __iomem *) (ea + (addr & ~PAGE_MASK)); 122 return (void __iomem *)ea;
123}
124
125/**
126 * __iounmap_from - Low level function to tear down the page tables
127 * for an IO mapping. This is used for mappings that
128 * are manipulated manually, like partial unmapping of
129 * PCI IOs or ISA space.
130 */
131void __iounmap_at(void *ea, unsigned long size)
132{
133 WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
134 WARN_ON(size & ~PAGE_MASK);
135
136 unmap_kernel_range((unsigned long)ea, size);
130} 137}
131 138
132void __iomem * __ioremap(phys_addr_t addr, unsigned long size, 139void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
133 unsigned long flags) 140 unsigned long flags)
134{ 141{
135 unsigned long pa, ea; 142 phys_addr_t paligned;
136 void __iomem *ret; 143 void __iomem *ret;
137 144
138 /* 145 /*
@@ -144,27 +151,30 @@ void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
144 * IMALLOC_END 151 * IMALLOC_END
145 * 152 *
146 */ 153 */
147 pa = addr & PAGE_MASK; 154 paligned = addr & PAGE_MASK;
148 size = PAGE_ALIGN(addr + size) - pa; 155 size = PAGE_ALIGN(addr + size) - paligned;
149 156
150 if ((size == 0) || (pa == 0)) 157 if ((size == 0) || (paligned == 0))
151 return NULL; 158 return NULL;
152 159
153 if (mem_init_done) { 160 if (mem_init_done) {
154 struct vm_struct *area; 161 struct vm_struct *area;
155 area = im_get_free_area(size); 162
163 area = __get_vm_area(size, VM_IOREMAP,
164 ioremap_bot, IOREMAP_END);
156 if (area == NULL) 165 if (area == NULL)
157 return NULL; 166 return NULL;
158 ea = (unsigned long)(area->addr); 167 ret = __ioremap_at(paligned, area->addr, size, flags);
159 ret = __ioremap_com(addr, pa, ea, size, flags);
160 if (!ret) 168 if (!ret)
161 im_free(area->addr); 169 vunmap(area->addr);
162 } else { 170 } else {
163 ea = ioremap_bot; 171 ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
164 ret = __ioremap_com(addr, pa, ea, size, flags);
165 if (ret) 172 if (ret)
166 ioremap_bot += size; 173 ioremap_bot += size;
167 } 174 }
175
176 if (ret)
177 ret += addr & ~PAGE_MASK;
168 return ret; 178 return ret;
169} 179}
170 180
@@ -187,61 +197,9 @@ void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
187} 197}
188 198
189 199
190#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
191
192int __ioremap_explicit(phys_addr_t pa, unsigned long ea,
193 unsigned long size, unsigned long flags)
194{
195 struct vm_struct *area;
196 void __iomem *ret;
197
198 /* For now, require page-aligned values for pa, ea, and size */
199 if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
200 !IS_PAGE_ALIGNED(size)) {
201 printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
202 return 1;
203 }
204
205 if (!mem_init_done) {
206 /* Two things to consider in this case:
207 * 1) No records will be kept (imalloc, etc) that the region
208 * has been remapped
209 * 2) It won't be easy to iounmap() the region later (because
210 * of 1)
211 */
212 ;
213 } else {
214 area = im_get_area(ea, size,
215 IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
216 if (area == NULL) {
217 /* Expected when PHB-dlpar is in play */
218 return 1;
219 }
220 if (ea != (unsigned long) area->addr) {
221 printk(KERN_ERR "unexpected addr return from "
222 "im_get_area\n");
223 return 1;
224 }
225 }
226
227 ret = __ioremap_com(pa, pa, ea, size, flags);
228 if (ret == NULL) {
229 printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
230 return 1;
231 }
232 if (ret != (void *) ea) {
233 printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
234 return 1;
235 }
236
237 return 0;
238}
239
240/* 200/*
241 * Unmap an IO region and remove it from imalloc'd list. 201 * Unmap an IO region and remove it from imalloc'd list.
242 * Access to IO memory should be serialized by driver. 202 * Access to IO memory should be serialized by driver.
243 *
244 * XXX what about calls before mem_init_done (ie python_countermeasures())
245 */ 203 */
246void __iounmap(volatile void __iomem *token) 204void __iounmap(volatile void __iomem *token)
247{ 205{
@@ -250,9 +208,14 @@ void __iounmap(volatile void __iomem *token)
250 if (!mem_init_done) 208 if (!mem_init_done)
251 return; 209 return;
252 210
253 addr = (void *) ((unsigned long __force) token & PAGE_MASK); 211 addr = (void *) ((unsigned long __force)
254 212 PCI_FIX_ADDR(token) & PAGE_MASK);
255 im_free(addr); 213 if ((unsigned long)addr < ioremap_bot) {
214 printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
215 " at 0x%p\n", addr);
216 return;
217 }
218 vunmap(addr);
256} 219}
257 220
258void iounmap(volatile void __iomem *token) 221void iounmap(volatile void __iomem *token)
@@ -263,77 +226,8 @@ void iounmap(volatile void __iomem *token)
263 __iounmap(token); 226 __iounmap(token);
264} 227}
265 228
266static int iounmap_subset_regions(unsigned long addr, unsigned long size)
267{
268 struct vm_struct *area;
269
270 /* Check whether subsets of this region exist */
271 area = im_get_area(addr, size, IM_REGION_SUPERSET);
272 if (area == NULL)
273 return 1;
274
275 while (area) {
276 iounmap((void __iomem *) area->addr);
277 area = im_get_area(addr, size,
278 IM_REGION_SUPERSET);
279 }
280
281 return 0;
282}
283
284int __iounmap_explicit(volatile void __iomem *start, unsigned long size)
285{
286 struct vm_struct *area;
287 unsigned long addr;
288 int rc;
289
290 addr = (unsigned long __force) start & PAGE_MASK;
291
292 /* Verify that the region either exists or is a subset of an existing
293 * region. In the latter case, split the parent region to create
294 * the exact region
295 */
296 area = im_get_area(addr, size,
297 IM_REGION_EXISTS | IM_REGION_SUBSET);
298 if (area == NULL) {
299 /* Determine whether subset regions exist. If so, unmap */
300 rc = iounmap_subset_regions(addr, size);
301 if (rc) {
302 printk(KERN_ERR
303 "%s() cannot unmap nonexistent range 0x%lx\n",
304 __FUNCTION__, addr);
305 return 1;
306 }
307 } else {
308 iounmap((void __iomem *) area->addr);
309 }
310 /*
311 * FIXME! This can't be right:
312 iounmap(area->addr);
313 * Maybe it should be "iounmap(area);"
314 */
315 return 0;
316}
317
318EXPORT_SYMBOL(ioremap); 229EXPORT_SYMBOL(ioremap);
319EXPORT_SYMBOL(ioremap_flags); 230EXPORT_SYMBOL(ioremap_flags);
320EXPORT_SYMBOL(__ioremap); 231EXPORT_SYMBOL(__ioremap);
321EXPORT_SYMBOL(iounmap); 232EXPORT_SYMBOL(iounmap);
322EXPORT_SYMBOL(__iounmap); 233EXPORT_SYMBOL(__iounmap);
323
324static DEFINE_SPINLOCK(phb_io_lock);
325
326void __iomem * reserve_phb_iospace(unsigned long size)
327{
328 void __iomem *virt_addr;
329
330 if (phbs_io_bot >= IMALLOC_BASE)
331 panic("reserve_phb_iospace(): phb io space overflow\n");
332
333 spin_lock(&phb_io_lock);
334 virt_addr = (void __iomem *) phbs_io_bot;
335 phbs_io_bot += size;
336 spin_unlock(&phb_io_lock);
337
338 return virt_addr;
339}
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index 2bfc4d7e1aa2..fdecb7f764d6 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -239,3 +239,59 @@ void pte_free_finish(void)
239 pte_free_submit(*batchp); 239 pte_free_submit(*batchp);
240 *batchp = NULL; 240 *batchp = NULL;
241} 241}
242
243/**
244 * __flush_hash_table_range - Flush all HPTEs for a given address range
245 * from the hash table (and the TLB). But keeps
246 * the linux PTEs intact.
247 *
248 * @mm : mm_struct of the target address space (generally init_mm)
249 * @start : starting address
250 * @end : ending address (not included in the flush)
251 *
252 * This function is mostly to be used by some IO hotplug code in order
253 * to remove all hash entries from a given address range used to map IO
254 * space on a removed PCI-PCI bridge without tearing down the full mapping
255 * since 64K pages may overlap with other bridges when using 64K pages
256 * with 4K HW pages on IO space.
257 *
258 * Because of that usage pattern, it's only available with CONFIG_HOTPLUG
259 * and is implemented for small size rather than speed.
260 */
261#ifdef CONFIG_HOTPLUG
262
263void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
264 unsigned long end)
265{
266 unsigned long flags;
267
268 start = _ALIGN_DOWN(start, PAGE_SIZE);
269 end = _ALIGN_UP(end, PAGE_SIZE);
270
271 BUG_ON(!mm->pgd);
272
273 /* Note: Normally, we should only ever use a batch within a
274 * PTE locked section. This violates the rule, but will work
275 * since we don't actually modify the PTEs, we just flush the
276 * hash while leaving the PTEs intact (including their reference
277 * to being hashed). This is not the most performance oriented
278 * way to do things but is fine for our needs here.
279 */
280 local_irq_save(flags);
281 arch_enter_lazy_mmu_mode();
282 for (; start < end; start += PAGE_SIZE) {
283 pte_t *ptep = find_linux_pte(mm->pgd, start);
284 unsigned long pte;
285
286 if (ptep == NULL)
287 continue;
288 pte = pte_val(*ptep);
289 if (!(pte & _PAGE_HASHPTE))
290 continue;
291 hpte_need_flush(mm, start, ptep, pte, 0);
292 }
293 arch_leave_lazy_mmu_mode();
294 local_irq_restore(flags);
295}
296
297#endif /* CONFIG_HOTPLUG */
diff --git a/arch/powerpc/platforms/cell/io-workarounds.c b/arch/powerpc/platforms/cell/io-workarounds.c
index 7fb92f23f380..9d7c2ef940a8 100644
--- a/arch/powerpc/platforms/cell/io-workarounds.c
+++ b/arch/powerpc/platforms/cell/io-workarounds.c
@@ -102,7 +102,7 @@ static void spider_io_flush(const volatile void __iomem *addr)
102 vaddr = (unsigned long)PCI_FIX_ADDR(addr); 102 vaddr = (unsigned long)PCI_FIX_ADDR(addr);
103 103
104 /* Check if it's in allowed range for PIO */ 104 /* Check if it's in allowed range for PIO */
105 if (vaddr < PHBS_IO_BASE || vaddr >= IMALLOC_BASE) 105 if (vaddr < PHB_IO_BASE || vaddr > PHB_IO_END)
106 return; 106 return;
107 107
108 /* Try to find a PTE. If not, clear the paddr, we'll do 108 /* Try to find a PTE. If not, clear the paddr, we'll do
diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c
index 9c974227155e..23d876211874 100644
--- a/arch/powerpc/platforms/iseries/pci.c
+++ b/arch/powerpc/platforms/iseries/pci.c
@@ -742,6 +742,11 @@ void __init iSeries_pcibios_init(void)
742 /* Install IO hooks */ 742 /* Install IO hooks */
743 ppc_pci_io = iseries_pci_io; 743 ppc_pci_io = iseries_pci_io;
744 744
745 /* iSeries has no IO space in the common sense, it needs to set
746 * the IO base to 0
747 */
748 pci_io_base = 0;
749
745 if (root == NULL) { 750 if (root == NULL) {
746 printk(KERN_CRIT "iSeries_pcibios_init: can't find root " 751 printk(KERN_CRIT "iSeries_pcibios_init: can't find root "
747 "of device tree\n"); 752 "of device tree\n");
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index 7aaa5bbc9363..f357b9258875 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -519,23 +519,6 @@ void __devinit maple_pci_irq_fixup(struct pci_dev *dev)
519 DBG(" <- maple_pci_irq_fixup\n"); 519 DBG(" <- maple_pci_irq_fixup\n");
520} 520}
521 521
522static void __init maple_fixup_phb_resources(void)
523{
524 struct pci_controller *hose, *tmp;
525
526 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
527 unsigned long offset = (unsigned long)hose->io_base_virt - pci_io_base;
528
529 hose->io_resource.start += offset;
530 hose->io_resource.end += offset;
531
532 printk(KERN_INFO "PCI Host %d, io start: %llx; io end: %llx\n",
533 hose->global_number,
534 (unsigned long long)hose->io_resource.start,
535 (unsigned long long)hose->io_resource.end);
536 }
537}
538
539void __init maple_pci_init(void) 522void __init maple_pci_init(void)
540{ 523{
541 struct device_node *np, *root; 524 struct device_node *np, *root;
@@ -573,24 +556,6 @@ void __init maple_pci_init(void)
573 if (ht && add_bridge(ht) != 0) 556 if (ht && add_bridge(ht) != 0)
574 of_node_put(ht); 557 of_node_put(ht);
575 558
576 /*
577 * We need to call pci_setup_phb_io for the HT bridge first
578 * so it gets the I/O port numbers starting at 0, and we
579 * need to call it for the AGP bridge after that so it gets
580 * small positive I/O port numbers.
581 */
582 if (u3_ht)
583 pci_setup_phb_io(u3_ht, 1);
584 if (u3_agp)
585 pci_setup_phb_io(u3_agp, 0);
586 if (u4_pcie)
587 pci_setup_phb_io(u4_pcie, 0);
588
589 /* Fixup the IO resources on our host bridges as the common code
590 * does it only for childs of the host bridges
591 */
592 maple_fixup_phb_resources();
593
594 /* Setup the linkage between OF nodes and PHBs */ 559 /* Setup the linkage between OF nodes and PHBs */
595 pci_devs_phb_init(); 560 pci_devs_phb_init();
596 561
diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c
index bbc6dfcfaa91..5606f25760bc 100644
--- a/arch/powerpc/platforms/pasemi/pci.c
+++ b/arch/powerpc/platforms/pasemi/pci.c
@@ -150,29 +150,11 @@ static int __init add_bridge(struct device_node *dev)
150 printk(KERN_INFO "Found PA-PXP PCI host bridge.\n"); 150 printk(KERN_INFO "Found PA-PXP PCI host bridge.\n");
151 151
152 /* Interpret the "ranges" property */ 152 /* Interpret the "ranges" property */
153 /* This also maps the I/O region and sets isa_io/mem_base */
154 pci_process_bridge_OF_ranges(hose, dev, 1); 153 pci_process_bridge_OF_ranges(hose, dev, 1);
155 pci_setup_phb_io(hose, 1);
156 154
157 return 0; 155 return 0;
158} 156}
159 157
160
161static void __init pas_fixup_phb_resources(void)
162{
163 struct pci_controller *hose, *tmp;
164
165 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
166 unsigned long offset = (unsigned long)hose->io_base_virt - pci_io_base;
167 hose->io_resource.start += offset;
168 hose->io_resource.end += offset;
169 printk(KERN_INFO "PCI Host %d, io start: %lx; io end: %lx\n",
170 hose->global_number,
171 hose->io_resource.start, hose->io_resource.end);
172 }
173}
174
175
176void __init pas_pci_init(void) 158void __init pas_pci_init(void)
177{ 159{
178 struct device_node *np, *root; 160 struct device_node *np, *root;
@@ -190,8 +172,6 @@ void __init pas_pci_init(void)
190 172
191 of_node_put(root); 173 of_node_put(root);
192 174
193 pas_fixup_phb_resources();
194
195 /* Setup the linkage between OF nodes and PHBs */ 175 /* Setup the linkage between OF nodes and PHBs */
196 pci_devs_phb_init(); 176 pci_devs_phb_init();
197 177
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index c4af9e21ac93..8302e34a3cbf 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -1006,19 +1006,6 @@ void __devinit pmac_pci_irq_fixup(struct pci_dev *dev)
1006#endif /* CONFIG_PPC32 */ 1006#endif /* CONFIG_PPC32 */
1007} 1007}
1008 1008
1009#ifdef CONFIG_PPC64
1010static void __init pmac_fixup_phb_resources(void)
1011{
1012 struct pci_controller *hose, *tmp;
1013
1014 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
1015 printk(KERN_INFO "PCI Host %d, io start: %lx; io end: %lx\n",
1016 hose->global_number,
1017 hose->io_resource.start, hose->io_resource.end);
1018 }
1019}
1020#endif
1021
1022void __init pmac_pci_init(void) 1009void __init pmac_pci_init(void)
1023{ 1010{
1024 struct device_node *np, *root; 1011 struct device_node *np, *root;
@@ -1053,25 +1040,6 @@ void __init pmac_pci_init(void)
1053 if (ht && add_bridge(ht) != 0) 1040 if (ht && add_bridge(ht) != 0)
1054 of_node_put(ht); 1041 of_node_put(ht);
1055 1042
1056 /*
1057 * We need to call pci_setup_phb_io for the HT bridge first
1058 * so it gets the I/O port numbers starting at 0, and we
1059 * need to call it for the AGP bridge after that so it gets
1060 * small positive I/O port numbers.
1061 */
1062 if (u3_ht)
1063 pci_setup_phb_io(u3_ht, 1);
1064 if (u3_agp)
1065 pci_setup_phb_io(u3_agp, 0);
1066 if (u4_pcie)
1067 pci_setup_phb_io(u4_pcie, 0);
1068
1069 /*
1070 * On ppc64, fixup the IO resources on our host bridges as
1071 * the common code does it only for children of the host bridges
1072 */
1073 pmac_fixup_phb_resources();
1074
1075 /* Setup the linkage between OF nodes and PHBs */ 1043 /* Setup the linkage between OF nodes and PHBs */
1076 pci_devs_phb_init(); 1044 pci_devs_phb_init();
1077 1045
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 0b113ab90ba9..47f0e0857f0e 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -202,8 +202,6 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
202 rtas_setup_phb(phb); 202 rtas_setup_phb(phb);
203 pci_process_bridge_OF_ranges(phb, dn, 0); 203 pci_process_bridge_OF_ranges(phb, dn, 0);
204 204
205 pci_setup_phb_io_dynamic(phb, primary);
206
207 pci_devs_phb_init_dynamic(phb); 205 pci_devs_phb_init_dynamic(phb);
208 206
209 if (dn->child) 207 if (dn->child)
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 2729d559fd91..61e19f78b923 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -33,6 +33,8 @@ static inline void setup_kexec_cpu_down_xics(void) { }
33static inline void setup_kexec_cpu_down_mpic(void) { } 33static inline void setup_kexec_cpu_down_mpic(void) { }
34#endif 34#endif
35 35
36extern void pSeries_final_fixup(void);
37
36/* Poweron flag used for enabling auto ups restart */ 38/* Poweron flag used for enabling auto ups restart */
37extern unsigned long rtas_poweron_auto; 39extern unsigned long rtas_poweron_auto;
38 40
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index bb3c101c2c5a..deb6b5e35feb 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -159,8 +159,8 @@ static void dlpar_pci_add_bus(struct device_node *dn)
159 /* Claim new bus resources */ 159 /* Claim new bus resources */
160 pcibios_claim_one_bus(dev->bus); 160 pcibios_claim_one_bus(dev->bus);
161 161
162 /* ioremap() for child bus, which may or may not succeed */ 162 /* Map IO space for child bus, which may or may not succeed */
163 remap_bus_range(dev->subordinate); 163 pcibios_map_io_space(dev->subordinate);
164 164
165 /* Add new devices to global lists. Register in proc, sysfs. */ 165 /* Add new devices to global lists. Register in proc, sysfs. */
166 pci_bus_add_devices(phb->bus); 166 pci_bus_add_devices(phb->bus);
@@ -390,7 +390,7 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn)
390 } else 390 } else
391 pcibios_remove_pci_devices(bus); 391 pcibios_remove_pci_devices(bus);
392 392
393 if (unmap_bus_range(bus)) { 393 if (pcibios_unmap_io_space(bus)) {
394 printk(KERN_ERR "%s: failed to unmap bus range\n", 394 printk(KERN_ERR "%s: failed to unmap bus range\n",
395 __FUNCTION__); 395 __FUNCTION__);
396 return -ERANGE; 396 return -ERANGE;
diff --git a/include/asm-powerpc/floppy.h b/include/asm-powerpc/floppy.h
index afa700ded877..34146f0eea63 100644
--- a/include/asm-powerpc/floppy.h
+++ b/include/asm-powerpc/floppy.h
@@ -29,7 +29,7 @@
29#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL); 29#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL);
30 30
31#include <linux/pci.h> 31#include <linux/pci.h>
32#include <asm/ppc-pci.h> /* for ppc64_isabridge_dev */ 32#include <asm/ppc-pci.h> /* for isa_bridge_pcidev */
33 33
34#define fd_dma_setup(addr,size,mode,io) fd_ops->_dma_setup(addr,size,mode,io) 34#define fd_dma_setup(addr,size,mode,io) fd_ops->_dma_setup(addr,size,mode,io)
35 35
@@ -139,12 +139,12 @@ static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
139 if (bus_addr 139 if (bus_addr
140 && (addr != prev_addr || size != prev_size || dir != prev_dir)) { 140 && (addr != prev_addr || size != prev_size || dir != prev_dir)) {
141 /* different from last time -- unmap prev */ 141 /* different from last time -- unmap prev */
142 pci_unmap_single(ppc64_isabridge_dev, bus_addr, prev_size, prev_dir); 142 pci_unmap_single(isa_bridge_pcidev, bus_addr, prev_size, prev_dir);
143 bus_addr = 0; 143 bus_addr = 0;
144 } 144 }
145 145
146 if (!bus_addr) /* need to map it */ 146 if (!bus_addr) /* need to map it */
147 bus_addr = pci_map_single(ppc64_isabridge_dev, addr, size, dir); 147 bus_addr = pci_map_single(isa_bridge_pcidev, addr, size, dir);
148 148
149 /* remember this one as prev */ 149 /* remember this one as prev */
150 prev_addr = addr; 150 prev_addr = addr;
diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h
index 350c9bdb31dc..17efea5b594c 100644
--- a/include/asm-powerpc/io.h
+++ b/include/asm-powerpc/io.h
@@ -607,9 +607,9 @@ static inline void iosync(void)
607 * 607 *
608 * * iounmap undoes such a mapping and can be hooked 608 * * iounmap undoes such a mapping and can be hooked
609 * 609 *
610 * * __ioremap_explicit (and the pending __iounmap_explicit) are low level 610 * * __ioremap_at (and the pending __iounmap_at) are low level functions to
611 * functions to create hand-made mappings for use only by the PCI code 611 * create hand-made mappings for use only by the PCI code and cannot
612 * and cannot currently be hooked. 612 * currently be hooked. Must be page aligned.
613 * 613 *
614 * * __ioremap is the low level implementation used by ioremap and 614 * * __ioremap is the low level implementation used by ioremap and
615 * ioremap_flags and cannot be hooked (but can be used by a hook on one 615 * ioremap_flags and cannot be hooked (but can be used by a hook on one
@@ -629,12 +629,9 @@ extern void __iomem *__ioremap(phys_addr_t, unsigned long size,
629 unsigned long flags); 629 unsigned long flags);
630extern void __iounmap(volatile void __iomem *addr); 630extern void __iounmap(volatile void __iomem *addr);
631 631
632extern int __ioremap_explicit(phys_addr_t p_addr, unsigned long v_addr, 632extern void __iomem * __ioremap_at(phys_addr_t pa, void *ea,
633 unsigned long size, unsigned long flags); 633 unsigned long size, unsigned long flags);
634extern int __iounmap_explicit(volatile void __iomem *start, 634extern void __iounmap_at(void *ea, unsigned long size);
635 unsigned long size);
636
637extern void __iomem * reserve_phb_iospace(unsigned long size);
638 635
639/* Those are more 32 bits only functions */ 636/* Those are more 32 bits only functions */
640extern unsigned long iopa(unsigned long addr); 637extern unsigned long iopa(unsigned long addr);
@@ -651,8 +648,8 @@ extern void io_block_mapping(unsigned long virt, phys_addr_t phys,
651 */ 648 */
652#define HAVE_ARCH_PIO_SIZE 1 649#define HAVE_ARCH_PIO_SIZE 1
653#define PIO_OFFSET 0x00000000UL 650#define PIO_OFFSET 0x00000000UL
654#define PIO_MASK 0x3fffffffUL 651#define PIO_MASK (FULL_IO_SIZE - 1)
655#define PIO_RESERVED 0x40000000UL 652#define PIO_RESERVED (FULL_IO_SIZE)
656 653
657#define mmio_read16be(addr) readw_be(addr) 654#define mmio_read16be(addr) readw_be(addr)
658#define mmio_read32be(addr) readl_be(addr) 655#define mmio_read32be(addr) readl_be(addr)
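A hedged sketch of the calling convention for the new low-level helpers; the physical address, the effective address and the flags below are assumptions chosen for illustration, the real callers live in the 64-bit PCI code:

/* Sketch only: establish and later tear down a hand-made mapping at a fixed
 * effective address. pa, ea and size must all be page aligned; the call
 * returns the mapped address on success or NULL on failure.
 */
static void example_map_and_unmap_io_window(void)
{
	phys_addr_t pa = 0xf2000000ull;		/* assumed bridge IO window */
	void *ea = (void *)PHB_IO_BASE;		/* assumed target address   */
	unsigned long size = 0x10000ul;		/* 64K, illustrative        */
	void __iomem *v;

	v = __ioremap_at(pa, ea, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
	if (v == NULL)
		return;

	/* ... PIO accesses through v ... */

	__iounmap_at(ea, size);
}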
diff --git a/include/asm-powerpc/pci-bridge.h b/include/asm-powerpc/pci-bridge.h
index c49ce41cfa95..5261527ed7b1 100644
--- a/include/asm-powerpc/pci-bridge.h
+++ b/include/asm-powerpc/pci-bridge.h
@@ -31,6 +31,7 @@ struct pci_controller {
31 int last_busno; 31 int last_busno;
32 32
33 void __iomem *io_base_virt; 33 void __iomem *io_base_virt;
34 void *io_base_alloc;
34 resource_size_t io_base_phys; 35 resource_size_t io_base_phys;
35 36
36 /* Some machines have a non 1:1 mapping of 37 /* Some machines have a non 1:1 mapping of
@@ -167,6 +168,11 @@ static inline unsigned long pci_address_to_pio(phys_addr_t address)
167} 168}
168#endif 169#endif
169 170
171extern void isa_bridge_find_early(struct pci_controller *hose);
172
173extern int pcibios_unmap_io_space(struct pci_bus *bus);
174extern int pcibios_map_io_space(struct pci_bus *bus);
175
170/* Return values for ppc_md.pci_probe_mode function */ 176/* Return values for ppc_md.pci_probe_mode function */
171#define PCI_PROBE_NONE -1 /* Don't look at this bus at all */ 177#define PCI_PROBE_NONE -1 /* Don't look at this bus at all */
172#define PCI_PROBE_NORMAL 0 /* Do normal PCI probing */ 178#define PCI_PROBE_NORMAL 0 /* Do normal PCI probing */
diff --git a/include/asm-powerpc/pci.h b/include/asm-powerpc/pci.h
index ce0f13e8eb14..93e3752df6b7 100644
--- a/include/asm-powerpc/pci.h
+++ b/include/asm-powerpc/pci.h
@@ -220,10 +220,6 @@ static inline struct resource *pcibios_select_root(struct pci_dev *pdev,
220 return root; 220 return root;
221} 221}
222 222
223extern int unmap_bus_range(struct pci_bus *bus);
224
225extern int remap_bus_range(struct pci_bus *bus);
226
227extern void pcibios_fixup_device_resources(struct pci_dev *dev, 223extern void pcibios_fixup_device_resources(struct pci_dev *dev,
228 struct pci_bus *bus); 224 struct pci_bus *bus);
229 225
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
index 704c4e669fe0..9b0f51ccad05 100644
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -27,7 +27,7 @@ struct mm_struct;
27 */ 27 */
28#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \ 28#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
29 PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT) 29 PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
30#define PGTABLE_RANGE (1UL << PGTABLE_EADDR_SIZE) 30#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
31 31
32#if TASK_SIZE_USER64 > PGTABLE_RANGE 32#if TASK_SIZE_USER64 > PGTABLE_RANGE
33#error TASK_SIZE_USER64 exceeds pagetable range 33#error TASK_SIZE_USER64 exceeds pagetable range
@@ -37,19 +37,28 @@ struct mm_struct;
37#error TASK_SIZE_USER64 exceeds user VSID range 37#error TASK_SIZE_USER64 exceeds user VSID range
38#endif 38#endif
39 39
40
40/* 41/*
41 * Define the address range of the vmalloc VM area. 42 * Define the address range of the vmalloc VM area.
42 */ 43 */
43#define VMALLOC_START ASM_CONST(0xD000000000000000) 44#define VMALLOC_START ASM_CONST(0xD000000000000000)
44#define VMALLOC_SIZE ASM_CONST(0x80000000000) 45#define VMALLOC_SIZE (PGTABLE_RANGE >> 1)
45#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE) 46#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
46 47
47/* 48/*
48 * Define the address range of the imalloc VM area. 49 * Define the address ranges for MMIO and IO space :
50 *
51 * ISA_IO_BASE = VMALLOC_END, 64K reserved area
52 * PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
53 * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
49 */ 54 */
50#define PHBS_IO_BASE VMALLOC_END 55#define FULL_IO_SIZE 0x80000000ul
51#define IMALLOC_BASE (PHBS_IO_BASE + 0x80000000ul) /* Reserve 2 gigs for PHBs */ 56#define ISA_IO_BASE (VMALLOC_END)
52#define IMALLOC_END (VMALLOC_START + PGTABLE_RANGE) 57#define ISA_IO_END (VMALLOC_END + 0x10000ul)
58#define PHB_IO_BASE (ISA_IO_END)
59#define PHB_IO_END (VMALLOC_END + FULL_IO_SIZE)
60#define IOREMAP_BASE (PHB_IO_END)
61#define IOREMAP_END (VMALLOC_START + PGTABLE_RANGE)
53 62
54/* 63/*
55 * Region IDs 64 * Region IDs
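A small sketch, purely illustrative, of how a kernel virtual address falls into the layout defined above; the function name is an assumption, not an API added by this patch:

/* Sketch only: classify an address against the new ranges. ISA IO is a
 * single 64K window at VMALLOC_END, PHB IO spaces fill the rest of the 2G
 * FULL_IO_SIZE region, and everything from there up to
 * VMALLOC_START + PGTABLE_RANGE is the ioremap space that replaces the old
 * imalloc area.
 */
static const char *example_classify_vaddr(unsigned long vaddr)
{
	if (vaddr >= ISA_IO_BASE && vaddr < ISA_IO_END)
		return "ISA IO window";
	if (vaddr >= PHB_IO_BASE && vaddr < PHB_IO_END)
		return "PHB IO space";
	if (vaddr >= IOREMAP_BASE && vaddr < IOREMAP_END)
		return "ioremap space";
	return "outside the IO/ioremap region";
}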
diff --git a/include/asm-powerpc/ppc-pci.h b/include/asm-powerpc/ppc-pci.h
index 2a6ac69cadc9..b847aa10074b 100644
--- a/include/asm-powerpc/ppc-pci.h
+++ b/include/asm-powerpc/ppc-pci.h
@@ -26,7 +26,7 @@ extern int global_phb_number;
26 26
27extern void find_and_init_phbs(void); 27extern void find_and_init_phbs(void);
28 28
29extern struct pci_dev *ppc64_isabridge_dev; /* may be NULL if no ISA bus */ 29extern struct pci_dev *isa_bridge_pcidev; /* may be NULL if no ISA bus */
30 30
31/** Bus Unit ID macros; get low and hi 32-bits of the 64-bit BUID */ 31/** Bus Unit ID macros; get low and hi 32-bits of the 64-bit BUID */
32#define BUID_HI(buid) ((buid) >> 32) 32#define BUID_HI(buid) ((buid) >> 32)
@@ -47,8 +47,8 @@ extern void init_pci_config_tokens (void);
47extern unsigned long get_phb_buid (struct device_node *); 47extern unsigned long get_phb_buid (struct device_node *);
48extern int rtas_setup_phb(struct pci_controller *phb); 48extern int rtas_setup_phb(struct pci_controller *phb);
49 49
50/* From pSeries_pci.h */ 50/* From iSeries PCI */
51extern void pSeries_final_fixup(void); 51extern void iSeries_pcibios_init(void);
52 52
53extern unsigned long pci_probe_only; 53extern unsigned long pci_probe_only;
54 54
diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h
index 86e6266a028b..99a0439baa50 100644
--- a/include/asm-powerpc/tlbflush.h
+++ b/include/asm-powerpc/tlbflush.h
@@ -155,6 +155,11 @@ static inline void flush_tlb_kernel_range(unsigned long start,
155{ 155{
156} 156}
157 157
158/* Private function for use by PCI IO mapping code */
159extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
160 unsigned long end);
161
162
158#endif 163#endif
159 164
160/* 165/*