author:    Marcelo Tosatti <mtosatti@redhat.com>  2012-10-29 17:15:32 -0400
committer: Marcelo Tosatti <mtosatti@redhat.com>  2012-10-29 17:15:32 -0400
commit:    19bf7f8ac3f8131100027281c495dbbe00cd5ae0 (patch)
tree:      270b97e3ca47c0f62a1babca2ae37f79a76a309c /arch/powerpc/platforms
parent:    787c57c0fb393fe8a3974d300ddcfe30373386fe (diff)
parent:    35fd3dc58da675d659513384221349ef90749a01 (diff)

Merge remote-tracking branch 'master' into queue

Merge reason: development work has a dependency on KVM patches merged
upstream.

Conflicts:
    arch/powerpc/include/asm/Kbuild
    arch/powerpc/include/asm/kvm_para.h

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--  arch/powerpc/platforms/40x/ppc40x_simple.c |   2
-rw-r--r--  arch/powerpc/platforms/44x/currituck.c |  10
-rw-r--r--  arch/powerpc/platforms/512x/Kconfig |   1
-rw-r--r--  arch/powerpc/platforms/512x/clock.c |   6
-rw-r--r--  arch/powerpc/platforms/512x/mpc5121_generic.c |   2
-rw-r--r--  arch/powerpc/platforms/512x/mpc512x_shared.c |   6
-rw-r--r--  arch/powerpc/platforms/52xx/lite5200.c |   2
-rw-r--r--  arch/powerpc/platforms/52xx/media5200.c |   2
-rw-r--r--  arch/powerpc/platforms/52xx/mpc5200_simple.c |   1
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c |  35
-rw-r--r--  arch/powerpc/platforms/83xx/mpc837x_rdb.c |   2
-rw-r--r--  arch/powerpc/platforms/85xx/Kconfig |  21
-rw-r--r--  arch/powerpc/platforms/85xx/Makefile |   2
-rw-r--r--  arch/powerpc/platforms/85xx/common.c |  10
-rw-r--r--  arch/powerpc/platforms/85xx/corenet_ds.c |  38
-rw-r--r--  arch/powerpc/platforms/85xx/ge_imp3a.c |  62
-rw-r--r--  arch/powerpc/platforms/85xx/mpc8536_ds.c |  36
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_ads.c |  11
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_cds.c |  44
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_ds.c |  15
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_mds.c |  40
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_rdb.c |  30
-rw-r--r--  arch/powerpc/platforms/85xx/p1010rdb.c |  14
-rw-r--r--  arch/powerpc/platforms/85xx/p1022_ds.c |  36
-rw-r--r--  arch/powerpc/platforms/85xx/p1022_rdk.c | 167
-rw-r--r--  arch/powerpc/platforms/85xx/p1023_rds.c |   9
-rw-r--r--  arch/powerpc/platforms/85xx/p2041_rdb.c |   2
-rw-r--r--  arch/powerpc/platforms/85xx/p3041_ds.c |   2
-rw-r--r--  arch/powerpc/platforms/85xx/p4080_ds.c |   2
-rw-r--r--  arch/powerpc/platforms/85xx/p5020_ds.c |   2
-rw-r--r--  arch/powerpc/platforms/85xx/p5040_ds.c |  89
-rw-r--r--  arch/powerpc/platforms/85xx/qemu_e500.c |   5
-rw-r--r--  arch/powerpc/platforms/85xx/sbc8548.c |  21
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c | 220
-rw-r--r--  arch/powerpc/platforms/85xx/socrates.c |  11
-rw-r--r--  arch/powerpc/platforms/85xx/stx_gp3.c |  13
-rw-r--r--  arch/powerpc/platforms/85xx/tqm85xx.c |  23
-rw-r--r--  arch/powerpc/platforms/85xx/xes_mpc85xx.c |  56
-rw-r--r--  arch/powerpc/platforms/86xx/gef_ppc9a.c |  12
-rw-r--r--  arch/powerpc/platforms/86xx/gef_sbc310.c |  13
-rw-r--r--  arch/powerpc/platforms/86xx/gef_sbc610.c |  12
-rw-r--r--  arch/powerpc/platforms/86xx/mpc8610_hpcd.c |  21
-rw-r--r--  arch/powerpc/platforms/86xx/mpc86xx_hpcn.c |  42
-rw-r--r--  arch/powerpc/platforms/86xx/sbc8641d.c |  14
-rw-r--r--  arch/powerpc/platforms/cell/beat.c |   4
-rw-r--r--  arch/powerpc/platforms/cell/beat.h |   2
-rw-r--r--  arch/powerpc/platforms/cell/beat_htab.c |  45
-rw-r--r--  arch/powerpc/platforms/pasemi/iommu.c |   6
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 695
-rw-r--r--  arch/powerpc/platforms/powernv/pci-p5ioc2.c |   1
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c |   7
-rw-r--r--  arch/powerpc/platforms/powernv/pci.h |  21
-rw-r--r--  arch/powerpc/platforms/ps3/htab.c |  22
-rw-r--r--  arch/powerpc/platforms/ps3/setup.c |  10
-rw-r--r--  arch/powerpc/platforms/pseries/Makefile |   5
-rw-r--r--  arch/powerpc/platforms/pseries/eeh.c | 543
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_cache.c |  59
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_dev.c |  14
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_driver.c | 310
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_event.c |  59
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pe.c | 652
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pseries.c | 247
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_sysfs.c |   9
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-memory.c |  12
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c |  12
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c |  77
-rw-r--r--  arch/powerpc/platforms/pseries/msi.c |  26
-rw-r--r--  arch/powerpc/platforms/pseries/pci.c |   2
-rw-r--r--  arch/powerpc/platforms/pseries/pci_dlpar.c |  32
-rw-r--r--  arch/powerpc/platforms/pseries/processor_idle.c |  62
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c |  22
71 files changed, 2230 insertions(+), 1890 deletions(-)
diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c
index 97612068fae..969dddcf332 100644
--- a/arch/powerpc/platforms/40x/ppc40x_simple.c
+++ b/arch/powerpc/platforms/40x/ppc40x_simple.c
@@ -50,7 +50,7 @@ machine_device_initcall(ppc40x_simple, ppc40x_device_probe);
  * Again, if your board needs to do things differently then create a
  * board.c file for it rather than adding it to this list.
  */
-static const char *board[] __initdata = {
+static const char * const board[] __initconst = {
     "amcc,acadia",
     "amcc,haleakala",
     "amcc,kilauea",
diff --git a/arch/powerpc/platforms/44x/currituck.c b/arch/powerpc/platforms/44x/currituck.c
index 9f6c33d63a4..6bd89a0e0de 100644
--- a/arch/powerpc/platforms/44x/currituck.c
+++ b/arch/powerpc/platforms/44x/currituck.c
@@ -21,7 +21,6 @@
  */
 
 #include <linux/init.h>
-#include <linux/memblock.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/rtc.h>
@@ -159,13 +158,8 @@ static void __init ppc47x_setup_arch(void)
 
     /* No need to check the DMA config as we /know/ our windows are all of
      * RAM. Lets hope that doesn't change */
-#ifdef CONFIG_SWIOTLB
-    if ((memblock_end_of_DRAM() - 1) > 0xffffffff) {
-        ppc_swiotlb_enable = 1;
-        set_pci_dma_ops(&swiotlb_dma_ops);
-        ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
-    }
-#endif
+    swiotlb_detect_4g();
+
     ppc47x_smp_init();
 }
 
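For reference, the open-coded SWIOTLB check deleted above is exactly what the new swiotlb_detect_4g() helper factors out of every board file in this series. A minimal sketch of the helper, assuming it lives alongside the other Freescale PCI/SWIOTLB support code:

#ifdef CONFIG_SWIOTLB
void __init swiotlb_detect_4g(void)
{
    /* Bounce buffers are only needed when RAM extends past the
     * 32-bit boundary that these PCI DMA windows can address. */
    if ((memblock_end_of_DRAM() - 1) > 0xffffffff) {
        ppc_swiotlb_enable = 1;
        set_pci_dma_ops(&swiotlb_dma_ops);
        ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
    }
}
#endif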
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
index c16999802ec..b62508b113d 100644
--- a/arch/powerpc/platforms/512x/Kconfig
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -2,6 +2,7 @@ config PPC_MPC512x
     bool "512x-based boards"
     depends on 6xx
     select FSL_SOC
+    select FB_FSL_DIU
     select IPIC
     select PPC_CLOCK
     select PPC_PCI_CHOICE
diff --git a/arch/powerpc/platforms/512x/clock.c b/arch/powerpc/platforms/512x/clock.c
index 1d8700ff60b..9f771e05457 100644
--- a/arch/powerpc/platforms/512x/clock.c
+++ b/arch/powerpc/platforms/512x/clock.c
@@ -54,14 +54,16 @@ static DEFINE_MUTEX(clocks_mutex);
 static struct clk *mpc5121_clk_get(struct device *dev, const char *id)
 {
     struct clk *p, *clk = ERR_PTR(-ENOENT);
-    int dev_match = 0;
-    int id_match = 0;
+    int dev_match;
+    int id_match;
 
     if (dev == NULL || id == NULL)
         return clk;
 
     mutex_lock(&clocks_mutex);
     list_for_each_entry(p, &clocks, node) {
+        dev_match = id_match = 0;
+
         if (dev == p->dev)
             dev_match++;
         if (strcmp(id, p->name) == 0)
diff --git a/arch/powerpc/platforms/512x/mpc5121_generic.c b/arch/powerpc/platforms/512x/mpc5121_generic.c
index 926731f1ff0..ca1ca666999 100644
--- a/arch/powerpc/platforms/512x/mpc5121_generic.c
+++ b/arch/powerpc/platforms/512x/mpc5121_generic.c
@@ -26,7 +26,7 @@
 /*
  * list of supported boards
  */
-static const char *board[] __initdata = {
+static const char * const board[] __initconst = {
     "prt,prtlvt",
     NULL
 };
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index cfe958e94e1..1650e090ef3 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -191,8 +191,6 @@ mpc512x_valid_monitor_port(enum fsl_diu_monitor_port port)
 
 static struct fsl_diu_shared_fb __attribute__ ((__aligned__(8))) diu_shared_fb;
 
-#if defined(CONFIG_FB_FSL_DIU) || \
-    defined(CONFIG_FB_FSL_DIU_MODULE)
 static inline void mpc512x_free_bootmem(struct page *page)
 {
     __ClearPageReserved(page);
@@ -220,7 +218,6 @@ void mpc512x_release_bootmem(void)
     }
     diu_ops.release_bootmem = NULL;
 }
-#endif
 
 /*
  * Check if DIU was pre-initialized. If so, perform steps
@@ -323,15 +320,12 @@ void __init mpc512x_setup_diu(void)
         }
     }
 
-#if defined(CONFIG_FB_FSL_DIU) || \
-    defined(CONFIG_FB_FSL_DIU_MODULE)
     diu_ops.get_pixel_format = mpc512x_get_pixel_format;
     diu_ops.set_gamma_table = mpc512x_set_gamma_table;
     diu_ops.set_monitor_port = mpc512x_set_monitor_port;
     diu_ops.set_pixel_clock = mpc512x_set_pixel_clock;
     diu_ops.valid_monitor_port = mpc512x_valid_monitor_port;
     diu_ops.release_bootmem = mpc512x_release_bootmem;
-#endif
 }
 
 void __init mpc512x_init_IRQ(void)
diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c
index 01ffa64d2aa..448d862bcf3 100644
--- a/arch/powerpc/platforms/52xx/lite5200.c
+++ b/arch/powerpc/platforms/52xx/lite5200.c
@@ -172,7 +172,7 @@ static void __init lite5200_setup_arch(void)
     mpc52xx_setup_pci();
 }
 
-static const char *board[] __initdata = {
+static const char * const board[] __initconst = {
     "fsl,lite5200",
     "fsl,lite5200b",
     NULL,
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 17d91b7da31..070d315dd6c 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -232,7 +232,7 @@ static void __init media5200_setup_arch(void)
 }
 
 /* list of the supported boards */
-static const char *board[] __initdata = {
+static const char * const board[] __initconst = {
     "fsl,media5200",
     NULL
 };
diff --git a/arch/powerpc/platforms/52xx/mpc5200_simple.c b/arch/powerpc/platforms/52xx/mpc5200_simple.c
index c0aa04068d6..9cf36020cf0 100644
--- a/arch/powerpc/platforms/52xx/mpc5200_simple.c
+++ b/arch/powerpc/platforms/52xx/mpc5200_simple.c
@@ -52,6 +52,7 @@ static void __init mpc5200_simple_setup_arch(void)
 static const char *board[] __initdata = {
     "anonymous,a4m072",
     "anon,charon",
+    "ifm,o2d",
     "intercontrol,digsy-mtc",
     "manroland,mucmc52",
     "manroland,uc101",
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
index d61fb1c0c1a..2351f9e0fb6 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
@@ -170,7 +170,8 @@ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req)
     out_be32(lpbfifo.regs + LPBFIFO_REG_CONTROL, bit_fields);
 
     /* Kick it off */
-    out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01);
+    if (!lpbfifo.req->defer_xfer_start)
+        out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01);
     if (dma)
         bcom_enable(lpbfifo.bcom_cur_task);
 }
@@ -421,6 +422,38 @@ int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req)
 }
 EXPORT_SYMBOL(mpc52xx_lpbfifo_submit);
 
+int mpc52xx_lpbfifo_start_xfer(struct mpc52xx_lpbfifo_request *req)
+{
+    unsigned long flags;
+
+    if (!lpbfifo.regs)
+        return -ENODEV;
+
+    spin_lock_irqsave(&lpbfifo.lock, flags);
+
+    /*
+     * If the req pointer is already set and a transfer was
+     * started on submit, then this transfer is in progress
+     */
+    if (lpbfifo.req && !lpbfifo.req->defer_xfer_start) {
+        spin_unlock_irqrestore(&lpbfifo.lock, flags);
+        return -EBUSY;
+    }
+
+    /*
+     * If the req was previously submitted but not
+     * started, start it now
+     */
+    if (lpbfifo.req && lpbfifo.req == req &&
+        lpbfifo.req->defer_xfer_start) {
+        out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01);
+    }
+
+    spin_unlock_irqrestore(&lpbfifo.lock, flags);
+    return 0;
+}
+EXPORT_SYMBOL(mpc52xx_lpbfifo_start_xfer);
+
 void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req)
 {
     unsigned long flags;
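A hypothetical caller of the new deferred-start API (the defer_xfer_start field and both exported functions are the ones added above; the surrounding driver code is illustrative only):

static struct mpc52xx_lpbfifo_request req;

static int example_deferred_write(void)
{
    int ret;

    /* Arm the FIFO now, but hold off the actual transfer. */
    req.defer_xfer_start = 1;
    ret = mpc52xx_lpbfifo_submit(&req);
    if (ret)
        return ret;

    /* ... later, e.g. once an external trigger is ready ... */
    return mpc52xx_lpbfifo_start_xfer(&req);  /* writes PACKET_SIZE */
}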
diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
index 16c9c9cbbb7..eca1f0960ff 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
@@ -60,7 +60,7 @@ static void __init mpc837x_rdb_setup_arch(void)
 
 machine_device_initcall(mpc837x_rdb, mpc83xx_declare_of_platform_devices);
 
-static const char *board[] __initdata = {
+static const char * const board[] __initconst = {
     "fsl,mpc8377rdb",
     "fsl,mpc8378rdb",
     "fsl,mpc8379rdb",
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 159c01e9146..02d02a09942 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -104,6 +104,13 @@ config P1022_DS
     help
       This option enables support for the Freescale P1022DS reference board.
 
+config P1022_RDK
+    bool "Freescale / iVeia P1022 RDK"
+    select DEFAULT_UIMAGE
+    help
+      This option enables support for the Freescale / iVeia P1022RDK
+      reference board.
+
 config P1023_RDS
     bool "Freescale P1023 RDS"
     select DEFAULT_UIMAGE
@@ -254,6 +261,20 @@ config P5020_DS
     help
       This option enables support for the P5020 DS board
 
+config P5040_DS
+    bool "Freescale P5040 DS"
+    select DEFAULT_UIMAGE
+    select E500
+    select PPC_E500MC
+    select PHYS_64BIT
+    select SWIOTLB
+    select ARCH_REQUIRE_GPIOLIB
+    select GPIO_MPC8XXX
+    select HAS_RAPIDIO
+    select PPC_EPAPR_HV_PIC
+    help
+      This option enables support for the P5040 DS board
+
 config PPC_QEMU_E500
     bool "QEMU generic e500 platform"
     depends on EXPERIMENTAL
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index 3dfe8117503..76f679cb04a 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -15,11 +15,13 @@ obj-$(CONFIG_MPC85xx_MDS) += mpc85xx_mds.o
 obj-$(CONFIG_MPC85xx_RDB) += mpc85xx_rdb.o
 obj-$(CONFIG_P1010_RDB)   += p1010rdb.o
 obj-$(CONFIG_P1022_DS)    += p1022_ds.o
+obj-$(CONFIG_P1022_RDK)   += p1022_rdk.o
 obj-$(CONFIG_P1023_RDS)   += p1023_rds.o
 obj-$(CONFIG_P2041_RDB)   += p2041_rdb.o corenet_ds.o
 obj-$(CONFIG_P3041_DS)    += p3041_ds.o corenet_ds.o
 obj-$(CONFIG_P4080_DS)    += p4080_ds.o corenet_ds.o
 obj-$(CONFIG_P5020_DS)    += p5020_ds.o corenet_ds.o
+obj-$(CONFIG_P5040_DS)    += p5040_ds.o corenet_ds.o
 obj-$(CONFIG_STX_GP3)     += stx_gp3.o
 obj-$(CONFIG_TQM85xx)     += tqm85xx.o
 obj-$(CONFIG_SBC8548)     += sbc8548.o
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index 67dac22b436..d0861a0d836 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -27,6 +27,16 @@ static struct of_device_id __initdata mpc85xx_common_ids[] = {
     { .compatible = "fsl,mpc8548-guts", },
     /* Probably unnecessary? */
     { .compatible = "gpio-leds", },
+    /* For all PCI controllers */
+    { .compatible = "fsl,mpc8540-pci", },
+    { .compatible = "fsl,mpc8548-pcie", },
+    { .compatible = "fsl,p1022-pcie", },
+    { .compatible = "fsl,p1010-pcie", },
+    { .compatible = "fsl,p1023-pcie", },
+    { .compatible = "fsl,p4080-pcie", },
+    { .compatible = "fsl,qoriq-pcie-v2.4", },
+    { .compatible = "fsl,qoriq-pcie-v2.3", },
+    { .compatible = "fsl,qoriq-pcie-v2.2", },
     {},
 };
 
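With the PCI controllers now published as platform devices, each board's setup_arch() converges on the same shape. A sketch of that pattern, assuming fsl_pci_assign_primary() and the fsl_pci_primary hook come from sysdev/fsl_pci (boards that care which bridge is primary pin it first, as ge_imp3a and mpc85xx_cds do below):

static void __init example_board_setup_arch(void)
{
    mpc85xx_smp_init();

    /* Optionally pin a specific controller before the generic pick,
     * e.g. ge_imp3a selects the bridge at CCSR offset 0x9000:
     *   fsl_pci_primary = np;
     */
    fsl_pci_assign_primary();

    /* Enable SWIOTLB bounce buffering if RAM crosses 4 GiB. */
    swiotlb_detect_4g();
}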
diff --git a/arch/powerpc/platforms/85xx/corenet_ds.c b/arch/powerpc/platforms/85xx/corenet_ds.c
index 925b0287423..ed69c925071 100644
--- a/arch/powerpc/platforms/85xx/corenet_ds.c
+++ b/arch/powerpc/platforms/85xx/corenet_ds.c
@@ -16,7 +16,6 @@
 #include <linux/kdev_t.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
-#include <linux/memblock.h>
 
 #include <asm/time.h>
 #include <asm/machdep.h>
@@ -52,37 +51,16 @@ void __init corenet_ds_pic_init(void)
  */
 void __init corenet_ds_setup_arch(void)
 {
-#ifdef CONFIG_PCI
-    struct device_node *np;
-    struct pci_controller *hose;
-#endif
-    dma_addr_t max = 0xffffffff;
-
     mpc85xx_smp_init();
 
-#ifdef CONFIG_PCI
-    for_each_node_by_type(np, "pci") {
-        if (of_device_is_compatible(np, "fsl,p4080-pcie") ||
-            of_device_is_compatible(np, "fsl,qoriq-pcie-v2.2")) {
-            fsl_add_bridge(np, 0);
-            hose = pci_find_hose_for_OF_device(np);
-            max = min(max, hose->dma_window_base_cur +
-                    hose->dma_window_size);
-        }
-    }
-
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PCI) && defined(CONFIG_PPC64)
     pci_devs_phb_init();
 #endif
-#endif
 
-#ifdef CONFIG_SWIOTLB
-    if ((memblock_end_of_DRAM() - 1) > max) {
-        ppc_swiotlb_enable = 1;
-        set_pci_dma_ops(&swiotlb_dma_ops);
-        ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
-    }
-#endif
+    fsl_pci_assign_primary();
+
+    swiotlb_detect_4g();
+
     pr_info("%s board from Freescale Semiconductor\n", ppc_md.name);
 }
 
@@ -99,6 +77,12 @@ static const struct of_device_id of_device_ids[] __devinitconst = {
     {
         .compatible = "fsl,qoriq-pcie-v2.2",
     },
+    {
+        .compatible = "fsl,qoriq-pcie-v2.3",
+    },
+    {
+        .compatible = "fsl,qoriq-pcie-v2.4",
+    },
     /* The following two are for the Freescale hypervisor */
     {
         .name = "hypervisor",
diff --git a/arch/powerpc/platforms/85xx/ge_imp3a.c b/arch/powerpc/platforms/85xx/ge_imp3a.c
index b6a728b0a8c..e6285ae6f42 100644
--- a/arch/powerpc/platforms/85xx/ge_imp3a.c
+++ b/arch/powerpc/platforms/85xx/ge_imp3a.c
@@ -22,7 +22,6 @@
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
 #include <linux/of_platform.h>
-#include <linux/memblock.h>
 
 #include <asm/time.h>
 #include <asm/machdep.h>
@@ -84,53 +83,39 @@ void __init ge_imp3a_pic_init(void)
     of_node_put(cascade_node);
 }
 
-#ifdef CONFIG_PCI
-static int primary_phb_addr;
-#endif /* CONFIG_PCI */
-
-/*
- * Setup the architecture
- */
-static void __init ge_imp3a_setup_arch(void)
+static void ge_imp3a_pci_assign_primary(void)
 {
-    struct device_node *regs;
 #ifdef CONFIG_PCI
     struct device_node *np;
-    struct pci_controller *hose;
-#endif
-    dma_addr_t max = 0xffffffff;
+    struct resource rsrc;
 
-    if (ppc_md.progress)
-        ppc_md.progress("ge_imp3a_setup_arch()", 0);
-
-#ifdef CONFIG_PCI
     for_each_node_by_type(np, "pci") {
         if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
             of_device_is_compatible(np, "fsl,mpc8548-pcie") ||
             of_device_is_compatible(np, "fsl,p2020-pcie")) {
-            struct resource rsrc;
             of_address_to_resource(np, 0, &rsrc);
-            if ((rsrc.start & 0xfffff) == primary_phb_addr)
-                fsl_add_bridge(np, 1);
-            else
-                fsl_add_bridge(np, 0);
-
-            hose = pci_find_hose_for_OF_device(np);
-            max = min(max, hose->dma_window_base_cur +
-                    hose->dma_window_size);
+            if ((rsrc.start & 0xfffff) == 0x9000)
+                fsl_pci_primary = np;
         }
     }
 #endif
+}
+
+/*
+ * Setup the architecture
+ */
+static void __init ge_imp3a_setup_arch(void)
+{
+    struct device_node *regs;
+
+    if (ppc_md.progress)
+        ppc_md.progress("ge_imp3a_setup_arch()", 0);
 
     mpc85xx_smp_init();
 
-#ifdef CONFIG_SWIOTLB
-    if ((memblock_end_of_DRAM() - 1) > max) {
-        ppc_swiotlb_enable = 1;
-        set_pci_dma_ops(&swiotlb_dma_ops);
-        ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
-    }
-#endif
+    ge_imp3a_pci_assign_primary();
+
+    swiotlb_detect_4g();
 
     /* Remap basic board registers */
     regs = of_find_compatible_node(NULL, NULL, "ge,imp3a-fpga-regs");
@@ -215,17 +200,10 @@ static int __init ge_imp3a_probe(void)
 {
     unsigned long root = of_get_flat_dt_root();
 
-    if (of_flat_dt_is_compatible(root, "ge,IMP3A")) {
-#ifdef CONFIG_PCI
-        primary_phb_addr = 0x9000;
-#endif
-        return 1;
-    }
-
-    return 0;
+    return of_flat_dt_is_compatible(root, "ge,IMP3A");
 }
 
-machine_device_initcall(ge_imp3a, mpc85xx_common_publish_devices);
+machine_arch_initcall(ge_imp3a, mpc85xx_common_publish_devices);
 
 machine_arch_initcall(ge_imp3a, swiotlb_setup_bus_notifier);
 
diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c
index 767c7cf18a9..15ce4b55f11 100644
--- a/arch/powerpc/platforms/85xx/mpc8536_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c
@@ -17,7 +17,6 @@
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
 #include <linux/of_platform.h>
-#include <linux/memblock.h>
 
 #include <asm/time.h>
 #include <asm/machdep.h>
@@ -46,46 +45,17 @@ void __init mpc8536_ds_pic_init(void)
  */
 static void __init mpc8536_ds_setup_arch(void)
 {
-#ifdef CONFIG_PCI
-    struct device_node *np;
-    struct pci_controller *hose;
-#endif
-    dma_addr_t max = 0xffffffff;
-
     if (ppc_md.progress)
         ppc_md.progress("mpc8536_ds_setup_arch()", 0);
 
-#ifdef CONFIG_PCI
-    for_each_node_by_type(np, "pci") {
-        if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
-            of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
-            struct resource rsrc;
-            of_address_to_resource(np, 0, &rsrc);
-            if ((rsrc.start & 0xfffff) == 0x8000)
-                fsl_add_bridge(np, 1);
-            else
-                fsl_add_bridge(np, 0);
-
-            hose = pci_find_hose_for_OF_device(np);
-            max = min(max, hose->dma_window_base_cur +
-                    hose->dma_window_size);
-        }
-    }
-
-#endif
+    fsl_pci_assign_primary();
 
-#ifdef CONFIG_SWIOTLB
-    if ((memblock_end_of_DRAM() - 1) > max) {
-        ppc_swiotlb_enable = 1;
-        set_pci_dma_ops(&swiotlb_dma_ops);
-        ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
-    }
-#endif
+    swiotlb_detect_4g();
 
     printk("MPC8536 DS board from Freescale Semiconductor\n");
 }
 
-machine_device_initcall(mpc8536_ds, mpc85xx_common_publish_devices);
+machine_arch_initcall(mpc8536_ds, mpc85xx_common_publish_devices);
 
 machine_arch_initcall(mpc8536_ds, swiotlb_setup_bus_notifier);
 
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index 29ee8fcd75a..7d12a19aa7e 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -137,10 +137,6 @@ static void __init init_ioports(void)
 
 static void __init mpc85xx_ads_setup_arch(void)
 {
-#ifdef CONFIG_PCI
-    struct device_node *np;
-#endif
-
     if (ppc_md.progress)
         ppc_md.progress("mpc85xx_ads_setup_arch()", 0);
 
@@ -150,11 +146,10 @@ static void __init mpc85xx_ads_setup_arch(void)
 #endif
 
 #ifdef CONFIG_PCI
-    for_each_compatible_node(np, "pci", "fsl,mpc8540-pci")
-        fsl_add_bridge(np, 1);
-
     ppc_md.pci_exclude_device = mpc85xx_exclude_device;
 #endif
+
+    fsl_pci_assign_primary();
 }
 
 static void mpc85xx_ads_show_cpuinfo(struct seq_file *m)
@@ -173,7 +168,7 @@ static void mpc85xx_ads_show_cpuinfo(struct seq_file *m)
     seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
 }
 
-machine_device_initcall(mpc85xx_ads, mpc85xx_common_publish_devices);
+machine_arch_initcall(mpc85xx_ads, mpc85xx_common_publish_devices);
 
 /*
  * Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 11156fb53d8..c474505ad0d 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -276,6 +276,33 @@ machine_device_initcall(mpc85xx_cds, mpc85xx_cds_8259_attach);
 
 #endif /* CONFIG_PPC_I8259 */
 
+static void mpc85xx_cds_pci_assign_primary(void)
+{
+#ifdef CONFIG_PCI
+    struct device_node *np;
+
+    if (fsl_pci_primary)
+        return;
+
+    /*
+     * The MPC85xx CDS has an ISA bridge, but some device trees lack
+     * an isa node for it.  As a workaround, look for the i8259 node
+     * instead, so this routine copes with either flavour of device
+     * tree.
+     */
+    np = of_find_node_by_name(NULL, "i8259");
+    while ((fsl_pci_primary = of_get_parent(np))) {
+        of_node_put(np);
+        np = fsl_pci_primary;
+
+        if ((of_device_is_compatible(np, "fsl,mpc8540-pci") ||
+             of_device_is_compatible(np, "fsl,mpc8548-pcie")) &&
+            of_device_is_available(np))
+            return;
+    }
+#endif
+}
+
 /*
  * Setup the architecture
  */
@@ -309,21 +336,12 @@ static void __init mpc85xx_cds_setup_arch(void)
     }
 
 #ifdef CONFIG_PCI
-    for_each_node_by_type(np, "pci") {
-        if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
-            of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
-            struct resource rsrc;
-            of_address_to_resource(np, 0, &rsrc);
-            if ((rsrc.start & 0xfffff) == 0x8000)
-                fsl_add_bridge(np, 1);
-            else
-                fsl_add_bridge(np, 0);
-        }
-    }
-
     ppc_md.pci_irq_fixup = mpc85xx_cds_pci_irq_fixup;
     ppc_md.pci_exclude_device = mpc85xx_exclude_device;
 #endif
+
+    mpc85xx_cds_pci_assign_primary();
+    fsl_pci_assign_primary();
 }
 
 static void mpc85xx_cds_show_cpuinfo(struct seq_file *m)
@@ -355,7 +373,7 @@ static int __init mpc85xx_cds_probe(void)
     return of_flat_dt_is_compatible(root, "MPC85xxCDS");
 }
 
-machine_device_initcall(mpc85xx_cds, mpc85xx_common_publish_devices);
+machine_arch_initcall(mpc85xx_cds, mpc85xx_common_publish_devices);
 
 define_machine(mpc85xx_cds) {
     .name = "MPC85xx CDS",
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index 6d3265fe771..9ebb91ed96a 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -20,7 +20,6 @@
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
 #include <linux/of_platform.h>
-#include <linux/memblock.h>
 
 #include <asm/time.h>
 #include <asm/machdep.h>
@@ -129,13 +128,11 @@ static int mpc85xx_exclude_device(struct pci_controller *hose,
 }
 #endif /* CONFIG_PCI */
 
-static void __init mpc85xx_ds_pci_init(void)
+static void __init mpc85xx_ds_uli_init(void)
 {
 #ifdef CONFIG_PCI
     struct device_node *node;
 
-    fsl_pci_init();
-
     /* See if we have a ULI under the primary */
 
     node = of_find_node_by_name(NULL, "uli1575");
@@ -159,7 +156,9 @@ static void __init mpc85xx_ds_setup_arch(void)
     if (ppc_md.progress)
         ppc_md.progress("mpc85xx_ds_setup_arch()", 0);
 
-    mpc85xx_ds_pci_init();
+    swiotlb_detect_4g();
+    fsl_pci_assign_primary();
+    mpc85xx_ds_uli_init();
     mpc85xx_smp_init();
 
     printk("MPC85xx DS board from Freescale Semiconductor\n");
@@ -175,9 +174,9 @@ static int __init mpc8544_ds_probe(void)
     return !!of_flat_dt_is_compatible(root, "MPC8544DS");
 }
 
-machine_device_initcall(mpc8544_ds, mpc85xx_common_publish_devices);
-machine_device_initcall(mpc8572_ds, mpc85xx_common_publish_devices);
-machine_device_initcall(p2020_ds, mpc85xx_common_publish_devices);
+machine_arch_initcall(mpc8544_ds, mpc85xx_common_publish_devices);
+machine_arch_initcall(mpc8572_ds, mpc85xx_common_publish_devices);
+machine_arch_initcall(p2020_ds, mpc85xx_common_publish_devices);
 
 machine_arch_initcall(mpc8544_ds, swiotlb_setup_bus_notifier);
 machine_arch_initcall(mpc8572_ds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 8e4b094c553..8498f732347 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -327,44 +327,16 @@ static void __init mpc85xx_mds_qeic_init(void) { }
 
 static void __init mpc85xx_mds_setup_arch(void)
 {
-#ifdef CONFIG_PCI
-    struct pci_controller *hose;
-    struct device_node *np;
-#endif
-    dma_addr_t max = 0xffffffff;
-
     if (ppc_md.progress)
         ppc_md.progress("mpc85xx_mds_setup_arch()", 0);
 
-#ifdef CONFIG_PCI
-    for_each_node_by_type(np, "pci") {
-        if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
-            of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
-            struct resource rsrc;
-            of_address_to_resource(np, 0, &rsrc);
-            if ((rsrc.start & 0xfffff) == 0x8000)
-                fsl_add_bridge(np, 1);
-            else
-                fsl_add_bridge(np, 0);
-
-            hose = pci_find_hose_for_OF_device(np);
-            max = min(max, hose->dma_window_base_cur +
-                    hose->dma_window_size);
-        }
-    }
-#endif
-
     mpc85xx_smp_init();
 
     mpc85xx_mds_qe_init();
 
-#ifdef CONFIG_SWIOTLB
-    if ((memblock_end_of_DRAM() - 1) > max) {
-        ppc_swiotlb_enable = 1;
-        set_pci_dma_ops(&swiotlb_dma_ops);
-        ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
-    }
-#endif
+    fsl_pci_assign_primary();
+
+    swiotlb_detect_4g();
 }
 
 
@@ -409,9 +381,9 @@ static int __init mpc85xx_publish_devices(void)
     return mpc85xx_common_publish_devices();
 }
 
-machine_device_initcall(mpc8568_mds, mpc85xx_publish_devices);
-machine_device_initcall(mpc8569_mds, mpc85xx_publish_devices);
-machine_device_initcall(p1021_mds, mpc85xx_common_publish_devices);
+machine_arch_initcall(mpc8568_mds, mpc85xx_publish_devices);
+machine_arch_initcall(mpc8569_mds, mpc85xx_publish_devices);
+machine_arch_initcall(p1021_mds, mpc85xx_common_publish_devices);
 
 machine_arch_initcall(mpc8568_mds, swiotlb_setup_bus_notifier);
 machine_arch_initcall(mpc8569_mds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index 1910fdcb75b..ede8771d6f0 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -86,23 +86,17 @@ void __init mpc85xx_rdb_pic_init(void)
  */
 static void __init mpc85xx_rdb_setup_arch(void)
 {
-#if defined(CONFIG_PCI) || defined(CONFIG_QUICC_ENGINE)
+#ifdef CONFIG_QUICC_ENGINE
     struct device_node *np;
 #endif
 
     if (ppc_md.progress)
         ppc_md.progress("mpc85xx_rdb_setup_arch()", 0);
 
-#ifdef CONFIG_PCI
-    for_each_node_by_type(np, "pci") {
-        if (of_device_is_compatible(np, "fsl,mpc8548-pcie"))
-            fsl_add_bridge(np, 0);
-    }
-
-#endif
-
     mpc85xx_smp_init();
 
+    fsl_pci_assign_primary();
+
 #ifdef CONFIG_QUICC_ENGINE
     np = of_find_compatible_node(NULL, NULL, "fsl,qe");
     if (!np) {
@@ -161,15 +155,15 @@ qe_fail:
     printk(KERN_INFO "MPC85xx RDB board from Freescale Semiconductor\n");
 }
 
-machine_device_initcall(p2020_rdb, mpc85xx_common_publish_devices);
-machine_device_initcall(p2020_rdb_pc, mpc85xx_common_publish_devices);
-machine_device_initcall(p1020_mbg_pc, mpc85xx_common_publish_devices);
-machine_device_initcall(p1020_rdb, mpc85xx_common_publish_devices);
-machine_device_initcall(p1020_rdb_pc, mpc85xx_common_publish_devices);
-machine_device_initcall(p1020_utm_pc, mpc85xx_common_publish_devices);
-machine_device_initcall(p1021_rdb_pc, mpc85xx_common_publish_devices);
-machine_device_initcall(p1025_rdb, mpc85xx_common_publish_devices);
-machine_device_initcall(p1024_rdb, mpc85xx_common_publish_devices);
+machine_arch_initcall(p2020_rdb, mpc85xx_common_publish_devices);
+machine_arch_initcall(p2020_rdb_pc, mpc85xx_common_publish_devices);
+machine_arch_initcall(p1020_mbg_pc, mpc85xx_common_publish_devices);
+machine_arch_initcall(p1020_rdb, mpc85xx_common_publish_devices);
+machine_arch_initcall(p1020_rdb_pc, mpc85xx_common_publish_devices);
+machine_arch_initcall(p1020_utm_pc, mpc85xx_common_publish_devices);
+machine_arch_initcall(p1021_rdb_pc, mpc85xx_common_publish_devices);
+machine_arch_initcall(p1025_rdb, mpc85xx_common_publish_devices);
+machine_arch_initcall(p1024_rdb, mpc85xx_common_publish_devices);
 
 /*
  * Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c
index dbaf44354f0..0252961392d 100644
--- a/arch/powerpc/platforms/85xx/p1010rdb.c
+++ b/arch/powerpc/platforms/85xx/p1010rdb.c
@@ -46,25 +46,15 @@ void __init p1010_rdb_pic_init(void)
  */
 static void __init p1010_rdb_setup_arch(void)
 {
-#ifdef CONFIG_PCI
-    struct device_node *np;
-#endif
-
     if (ppc_md.progress)
         ppc_md.progress("p1010_rdb_setup_arch()", 0);
 
-#ifdef CONFIG_PCI
-    for_each_node_by_type(np, "pci") {
-        if (of_device_is_compatible(np, "fsl,p1010-pcie"))
-            fsl_add_bridge(np, 0);
-    }
-
-#endif
+    fsl_pci_assign_primary();
 
     printk(KERN_INFO "P1010 RDB board from Freescale Semiconductor\n");
 }
 
-machine_device_initcall(p1010_rdb, mpc85xx_common_publish_devices);
+machine_arch_initcall(p1010_rdb, mpc85xx_common_publish_devices);
 machine_arch_initcall(p1010_rdb, swiotlb_setup_bus_notifier);
 
 /*
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index 3c732acf331..848a3e98e1c 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -18,7 +18,6 @@
 
 #include <linux/pci.h>
 #include <linux/of_platform.h>
-#include <linux/memblock.h>
 #include <asm/div64.h>
 #include <asm/mpic.h>
 #include <asm/swiotlb.h>
@@ -507,32 +506,9 @@ early_param("video", early_video_setup);
  */
 static void __init p1022_ds_setup_arch(void)
 {
-#ifdef CONFIG_PCI
-    struct device_node *np;
-#endif
-    dma_addr_t max = 0xffffffff;
-
     if (ppc_md.progress)
         ppc_md.progress("p1022_ds_setup_arch()", 0);
 
-#ifdef CONFIG_PCI
-    for_each_compatible_node(np, "pci", "fsl,p1022-pcie") {
-        struct resource rsrc;
-        struct pci_controller *hose;
-
-        of_address_to_resource(np, 0, &rsrc);
-
-        if ((rsrc.start & 0xfffff) == 0x8000)
-            fsl_add_bridge(np, 1);
-        else
-            fsl_add_bridge(np, 0);
-
-        hose = pci_find_hose_for_OF_device(np);
-        max = min(max, hose->dma_window_base_cur +
-                  hose->dma_window_size);
-    }
-#endif
-
 #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
     diu_ops.get_pixel_format = p1022ds_get_pixel_format;
     diu_ops.set_gamma_table = p1022ds_set_gamma_table;
@@ -601,18 +577,14 @@ static void __init p1022_ds_setup_arch(void)
 
     mpc85xx_smp_init();
 
-#ifdef CONFIG_SWIOTLB
-    if ((memblock_end_of_DRAM() - 1) > max) {
-        ppc_swiotlb_enable = 1;
-        set_pci_dma_ops(&swiotlb_dma_ops);
-        ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
-    }
-#endif
+    fsl_pci_assign_primary();
+
+    swiotlb_detect_4g();
 
     pr_info("Freescale P1022 DS reference board\n");
 }
 
-machine_device_initcall(p1022_ds, mpc85xx_common_publish_devices);
+machine_arch_initcall(p1022_ds, mpc85xx_common_publish_devices);
 
 machine_arch_initcall(p1022_ds, swiotlb_setup_bus_notifier);
 
diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c
new file mode 100644
index 00000000000..55ffa1cc380
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/p1022_rdk.c
@@ -0,0 +1,167 @@
+/*
+ * P1022 RDK board specific routines
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+ * Based on p1022_ds.c
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/pci.h>
+#include <linux/of_platform.h>
+#include <asm/div64.h>
+#include <asm/mpic.h>
+#include <asm/swiotlb.h>
+
+#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
+#include <asm/udbg.h>
+#include <asm/fsl_guts.h>
+#include "smp.h"
+
+#include "mpc85xx.h"
+
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+
+/* DIU Pixel Clock bits of the CLKDVDR Global Utilities register */
+#define CLKDVDR_PXCKEN      0x80000000
+#define CLKDVDR_PXCKINV     0x10000000
+#define CLKDVDR_PXCKDLY     0x06000000
+#define CLKDVDR_PXCLK_MASK  0x00FF0000
+
+/**
+ * p1022rdk_set_monitor_port: switch the output to a different monitor port
+ */
+static void p1022rdk_set_monitor_port(enum fsl_diu_monitor_port port)
+{
+    if (port != FSL_DIU_PORT_DVI) {
+        pr_err("p1022rdk: unsupported monitor port %i\n", port);
+        return;
+    }
+}
+
+/**
+ * p1022rdk_set_pixel_clock: program the DIU's clock
+ *
+ * @pixclock: the period, in picoseconds, of the clock
+ */
+void p1022rdk_set_pixel_clock(unsigned int pixclock)
+{
+    struct device_node *guts_np = NULL;
+    struct ccsr_guts __iomem *guts;
+    unsigned long freq;
+    u64 temp;
+    u32 pxclk;
+
+    /* Map the global utilities registers. */
+    guts_np = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts");
+    if (!guts_np) {
+        pr_err("p1022rdk: missing global utilities device node\n");
+        return;
+    }
+
+    guts = of_iomap(guts_np, 0);
+    of_node_put(guts_np);
+    if (!guts) {
+        pr_err("p1022rdk: could not map global utilities device\n");
+        return;
+    }
+
+    /* Convert pixclock from a period to a frequency */
+    temp = 1000000000000ULL;
+    do_div(temp, pixclock);
+    freq = temp;
+
+    /*
+     * 'pxclk' is the ratio of the platform clock to the pixel clock.
+     * This number is programmed into the CLKDVDR register, and the valid
+     * range of values is 2-255.
+     */
+    pxclk = DIV_ROUND_CLOSEST(fsl_get_sys_freq(), freq);
+    pxclk = clamp_t(u32, pxclk, 2, 255);
+
+    /* Disable the pixel clock, and set it to non-inverted and no delay */
+    clrbits32(&guts->clkdvdr,
+              CLKDVDR_PXCKEN | CLKDVDR_PXCKDLY | CLKDVDR_PXCLK_MASK);
+
+    /* Enable the clock and set the pxclk */
+    setbits32(&guts->clkdvdr, CLKDVDR_PXCKEN | (pxclk << 16));
+
+    iounmap(guts);
+}
+
+/**
+ * p1022rdk_valid_monitor_port: set the monitor port for sysfs
+ */
+enum fsl_diu_monitor_port
+p1022rdk_valid_monitor_port(enum fsl_diu_monitor_port port)
+{
+    return FSL_DIU_PORT_DVI;
+}
+
+#endif
+
+void __init p1022_rdk_pic_init(void)
+{
+    struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
+        MPIC_SINGLE_DEST_CPU,
+        0, 256, " OpenPIC  ");
+    BUG_ON(mpic == NULL);
+    mpic_init(mpic);
+}
+
+/*
+ * Setup the architecture
+ */
+static void __init p1022_rdk_setup_arch(void)
+{
+    if (ppc_md.progress)
+        ppc_md.progress("p1022_rdk_setup_arch()", 0);
+
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+    diu_ops.set_monitor_port = p1022rdk_set_monitor_port;
+    diu_ops.set_pixel_clock = p1022rdk_set_pixel_clock;
+    diu_ops.valid_monitor_port = p1022rdk_valid_monitor_port;
+#endif
+
+    mpc85xx_smp_init();
+
+    fsl_pci_assign_primary();
+
+    swiotlb_detect_4g();
+
+    pr_info("Freescale / iVeia P1022 RDK reference board\n");
+}
+
+machine_arch_initcall(p1022_rdk, mpc85xx_common_publish_devices);
+
+machine_arch_initcall(p1022_rdk, swiotlb_setup_bus_notifier);
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init p1022_rdk_probe(void)
+{
+    unsigned long root = of_get_flat_dt_root();
+
+    return of_flat_dt_is_compatible(root, "fsl,p1022rdk");
+}
+
+define_machine(p1022_rdk) {
+    .name               = "P1022 RDK",
+    .probe              = p1022_rdk_probe,
+    .setup_arch         = p1022_rdk_setup_arch,
+    .init_IRQ           = p1022_rdk_pic_init,
+#ifdef CONFIG_PCI
+    .pcibios_fixup_bus  = fsl_pcibios_fixup_bus,
+#endif
+    .get_irq            = mpic_get_irq,
+    .restart            = fsl_rstcr_restart,
+    .calibrate_decr     = generic_calibrate_decr,
+    .progress           = udbg_progress,
+};
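To make the divider math in p1022rdk_set_pixel_clock() concrete, here is a worked example with a hypothetical 400 MHz platform clock (the actual value comes from fsl_get_sys_freq()):

/*
 * Worked example (hypothetical 400 MHz platform clock):
 *   pixclock = 25000 ps  ->  freq  = 10^12 / 25000 = 40 MHz
 *   pxclk    = DIV_ROUND_CLOSEST(400000000, 40000000) = 10
 * so CLKDVDR is programmed with PXCKEN | (10 << 16).
 */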
diff --git a/arch/powerpc/platforms/85xx/p1023_rds.c b/arch/powerpc/platforms/85xx/p1023_rds.c
index 2990e8b13dc..9cc60a73883 100644
--- a/arch/powerpc/platforms/85xx/p1023_rds.c
+++ b/arch/powerpc/platforms/85xx/p1023_rds.c
@@ -80,15 +80,12 @@ static void __init mpc85xx_rds_setup_arch(void)
         }
     }
 
-#ifdef CONFIG_PCI
-    for_each_compatible_node(np, "pci", "fsl,p1023-pcie")
-        fsl_add_bridge(np, 0);
-#endif
-
     mpc85xx_smp_init();
+
+    fsl_pci_assign_primary();
 }
 
-machine_device_initcall(p1023_rds, mpc85xx_common_publish_devices);
+machine_arch_initcall(p1023_rds, mpc85xx_common_publish_devices);
 
 static void __init mpc85xx_rds_pic_init(void)
 {
diff --git a/arch/powerpc/platforms/85xx/p2041_rdb.c b/arch/powerpc/platforms/85xx/p2041_rdb.c
index 6541fa2630c..000c0892fc4 100644
--- a/arch/powerpc/platforms/85xx/p2041_rdb.c
+++ b/arch/powerpc/platforms/85xx/p2041_rdb.c
@@ -80,7 +80,7 @@ define_machine(p2041_rdb) {
     .power_save = e500_idle,
 };
 
-machine_device_initcall(p2041_rdb, corenet_ds_publish_devices);
+machine_arch_initcall(p2041_rdb, corenet_ds_publish_devices);
 
 #ifdef CONFIG_SWIOTLB
 machine_arch_initcall(p2041_rdb, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/p3041_ds.c b/arch/powerpc/platforms/85xx/p3041_ds.c
index f238efa7589..b3edc205daa 100644
--- a/arch/powerpc/platforms/85xx/p3041_ds.c
+++ b/arch/powerpc/platforms/85xx/p3041_ds.c
@@ -82,7 +82,7 @@ define_machine(p3041_ds) {
     .power_save = e500_idle,
 };
 
-machine_device_initcall(p3041_ds, corenet_ds_publish_devices);
+machine_arch_initcall(p3041_ds, corenet_ds_publish_devices);
 
 #ifdef CONFIG_SWIOTLB
 machine_arch_initcall(p3041_ds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/p4080_ds.c b/arch/powerpc/platforms/85xx/p4080_ds.c
index c92417dc657..54df10632ae 100644
--- a/arch/powerpc/platforms/85xx/p4080_ds.c
+++ b/arch/powerpc/platforms/85xx/p4080_ds.c
@@ -81,7 +81,7 @@ define_machine(p4080_ds) {
     .power_save = e500_idle,
 };
 
-machine_device_initcall(p4080_ds, corenet_ds_publish_devices);
+machine_arch_initcall(p4080_ds, corenet_ds_publish_devices);
 #ifdef CONFIG_SWIOTLB
 machine_arch_initcall(p4080_ds, swiotlb_setup_bus_notifier);
 #endif
diff --git a/arch/powerpc/platforms/85xx/p5020_ds.c b/arch/powerpc/platforms/85xx/p5020_ds.c
index 17bef15a85e..753a42c29d4 100644
--- a/arch/powerpc/platforms/85xx/p5020_ds.c
+++ b/arch/powerpc/platforms/85xx/p5020_ds.c
@@ -91,7 +91,7 @@ define_machine(p5020_ds) {
 #endif
 };
 
-machine_device_initcall(p5020_ds, corenet_ds_publish_devices);
+machine_arch_initcall(p5020_ds, corenet_ds_publish_devices);
 
 #ifdef CONFIG_SWIOTLB
 machine_arch_initcall(p5020_ds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/p5040_ds.c b/arch/powerpc/platforms/85xx/p5040_ds.c
new file mode 100644
index 00000000000..11381851828
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/p5040_ds.c
@@ -0,0 +1,89 @@
+/*
+ * P5040 DS Setup
+ *
+ * Copyright 2009-2010 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+
+#include <asm/machdep.h>
+#include <asm/udbg.h>
+#include <asm/mpic.h>
+
+#include <linux/of_fdt.h>
+
+#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
+#include <asm/ehv_pic.h>
+
+#include "corenet_ds.h"
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init p5040_ds_probe(void)
+{
+    unsigned long root = of_get_flat_dt_root();
+#ifdef CONFIG_SMP
+    extern struct smp_ops_t smp_85xx_ops;
+#endif
+
+    if (of_flat_dt_is_compatible(root, "fsl,P5040DS"))
+        return 1;
+
+    /* Check if we're running under the Freescale hypervisor */
+    if (of_flat_dt_is_compatible(root, "fsl,P5040DS-hv")) {
+        ppc_md.init_IRQ = ehv_pic_init;
+        ppc_md.get_irq = ehv_pic_get_irq;
+        ppc_md.restart = fsl_hv_restart;
+        ppc_md.power_off = fsl_hv_halt;
+        ppc_md.halt = fsl_hv_halt;
+#ifdef CONFIG_SMP
+        /*
+         * Disable the timebase sync operations because we can't write
+         * to the timebase registers under the hypervisor.
+         */
+        smp_85xx_ops.give_timebase = NULL;
+        smp_85xx_ops.take_timebase = NULL;
+#endif
+        return 1;
+    }
+
+    return 0;
+}
+
+define_machine(p5040_ds) {
+    .name               = "P5040 DS",
+    .probe              = p5040_ds_probe,
+    .setup_arch         = corenet_ds_setup_arch,
+    .init_IRQ           = corenet_ds_pic_init,
+#ifdef CONFIG_PCI
+    .pcibios_fixup_bus  = fsl_pcibios_fixup_bus,
+#endif
+/* coreint doesn't play nice with lazy EE, use legacy mpic for now */
+#ifdef CONFIG_PPC64
+    .get_irq            = mpic_get_irq,
+#else
+    .get_irq            = mpic_get_coreint_irq,
+#endif
+    .restart            = fsl_rstcr_restart,
+    .calibrate_decr     = generic_calibrate_decr,
+    .progress           = udbg_progress,
+#ifdef CONFIG_PPC64
+    .power_save         = book3e_idle,
+#else
+    .power_save         = e500_idle,
+#endif
+};
+
+machine_arch_initcall(p5040_ds, corenet_ds_publish_devices);
+
+#ifdef CONFIG_SWIOTLB
+machine_arch_initcall(p5040_ds, swiotlb_setup_bus_notifier);
+#endif
diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c
index 95a2e53af71..f6ea5618c73 100644
--- a/arch/powerpc/platforms/85xx/qemu_e500.c
+++ b/arch/powerpc/platforms/85xx/qemu_e500.c
@@ -41,7 +41,8 @@ static void __init qemu_e500_setup_arch(void)
 {
     ppc_md.progress("qemu_e500_setup_arch()", 0);
 
-    fsl_pci_init();
+    fsl_pci_assign_primary();
+    swiotlb_detect_4g();
     mpc85xx_smp_init();
 }
 
@@ -55,7 +56,7 @@ static int __init qemu_e500_probe(void)
     return !!of_flat_dt_is_compatible(root, "fsl,qemu-e500");
 }
 
-machine_device_initcall(qemu_e500, mpc85xx_common_publish_devices);
+machine_arch_initcall(qemu_e500, mpc85xx_common_publish_devices);
 
 define_machine(qemu_e500) {
     .name = "QEMU e500",
diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c
index cd3a66bdb54..f6212182591 100644
--- a/arch/powerpc/platforms/85xx/sbc8548.c
+++ b/arch/powerpc/platforms/85xx/sbc8548.c
@@ -88,26 +88,11 @@ static int __init sbc8548_hw_rev(void)
  */
 static void __init sbc8548_setup_arch(void)
 {
-#ifdef CONFIG_PCI
-    struct device_node *np;
-#endif
-
     if (ppc_md.progress)
         ppc_md.progress("sbc8548_setup_arch()", 0);
 
-#ifdef CONFIG_PCI
-    for_each_node_by_type(np, "pci") {
-        if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
-            of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
-            struct resource rsrc;
-            of_address_to_resource(np, 0, &rsrc);
-            if ((rsrc.start & 0xfffff) == 0x8000)
-                fsl_add_bridge(np, 1);
-            else
-                fsl_add_bridge(np, 0);
-        }
-    }
-#endif
+    fsl_pci_assign_primary();
+
     sbc_rev = sbc8548_hw_rev();
 }
 
@@ -128,7 +113,7 @@ static void sbc8548_show_cpuinfo(struct seq_file *m)
     seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
 }
 
-machine_device_initcall(sbc8548, mpc85xx_common_publish_devices);
+machine_arch_initcall(sbc8548, mpc85xx_common_publish_devices);
 
 /*
  * Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index ff4249044a3..6fcfa12e5c5 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -2,7 +2,7 @@
  * Author: Andy Fleming <afleming@freescale.com>
  *         Kumar Gala <galak@kernel.crashing.org>
  *
- * Copyright 2006-2008, 2011 Freescale Semiconductor Inc.
+ * Copyright 2006-2008, 2011-2012 Freescale Semiconductor Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -17,6 +17,7 @@
 #include <linux/of.h>
 #include <linux/kexec.h>
 #include <linux/highmem.h>
+#include <linux/cpu.h>
 
 #include <asm/machdep.h>
 #include <asm/pgtable.h>
@@ -24,33 +25,118 @@
24#include <asm/mpic.h> 25#include <asm/mpic.h>
25#include <asm/cacheflush.h> 26#include <asm/cacheflush.h>
26#include <asm/dbell.h> 27#include <asm/dbell.h>
28#include <asm/fsl_guts.h>
27 29
28#include <sysdev/fsl_soc.h> 30#include <sysdev/fsl_soc.h>
29#include <sysdev/mpic.h> 31#include <sysdev/mpic.h>
30#include "smp.h" 32#include "smp.h"
31 33
32extern void __early_start(void); 34struct epapr_spin_table {
33 35 u32 addr_h;
34#define BOOT_ENTRY_ADDR_UPPER 0 36 u32 addr_l;
35#define BOOT_ENTRY_ADDR_LOWER 1 37 u32 r3_h;
36#define BOOT_ENTRY_R3_UPPER 2 38 u32 r3_l;
37#define BOOT_ENTRY_R3_LOWER 3 39 u32 reserved;
38#define BOOT_ENTRY_RESV 4 40 u32 pir;
39#define BOOT_ENTRY_PIR 5 41};
40#define BOOT_ENTRY_R6_UPPER 6 42
41#define BOOT_ENTRY_R6_LOWER 7 43static struct ccsr_guts __iomem *guts;
42#define NUM_BOOT_ENTRY 8 44static u64 timebase;
43#define SIZE_BOOT_ENTRY (NUM_BOOT_ENTRY * sizeof(u32)) 45static int tb_req;
44 46static int tb_valid;
45static int __init 47
46smp_85xx_kick_cpu(int nr) 48static void mpc85xx_timebase_freeze(int freeze)
49{
50 uint32_t mask;
51
52 mask = CCSR_GUTS_DEVDISR_TB0 | CCSR_GUTS_DEVDISR_TB1;
53 if (freeze)
54 setbits32(&guts->devdisr, mask);
55 else
56 clrbits32(&guts->devdisr, mask);
57
58 in_be32(&guts->devdisr);
59}
60
61static void mpc85xx_give_timebase(void)
62{
63 unsigned long flags;
64
65 local_irq_save(flags);
66
67 while (!tb_req)
68 barrier();
69 tb_req = 0;
70
71 mpc85xx_timebase_freeze(1);
72 timebase = get_tb();
73 mb();
74 tb_valid = 1;
75
76 while (tb_valid)
77 barrier();
78
79 mpc85xx_timebase_freeze(0);
80
81 local_irq_restore(flags);
82}
83
84static void mpc85xx_take_timebase(void)
85{
86 unsigned long flags;
87
88 local_irq_save(flags);
89
90 tb_req = 1;
91 while (!tb_valid)
92 barrier();
93
94 set_tb(timebase >> 32, timebase & 0xffffffff);
95 isync();
96 tb_valid = 0;
97
98 local_irq_restore(flags);
99}
100
101#ifdef CONFIG_HOTPLUG_CPU
102static void __cpuinit smp_85xx_mach_cpu_die(void)
103{
104 unsigned int cpu = smp_processor_id();
105 u32 tmp;
106
107 local_irq_disable();
108 idle_task_exit();
109 generic_set_cpu_dead(cpu);
110 mb();
111
112 mtspr(SPRN_TCR, 0);
113
114 __flush_disable_L1();
115 tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP;
116 mtspr(SPRN_HID0, tmp);
117 isync();
118
119 /* Enter NAP mode. */
120 tmp = mfmsr();
121 tmp |= MSR_WE;
122 mb();
123 mtmsr(tmp);
124 isync();
125
126 while (1)
127 ;
128}
129#endif
130
131static int __cpuinit smp_85xx_kick_cpu(int nr)
47{ 132{
48 unsigned long flags; 133 unsigned long flags;
49 const u64 *cpu_rel_addr; 134 const u64 *cpu_rel_addr;
50 __iomem u32 *bptr_vaddr; 135 __iomem struct epapr_spin_table *spin_table;
51 struct device_node *np; 136 struct device_node *np;
52 int n = 0, hw_cpu = get_hard_smp_processor_id(nr); 137 int hw_cpu = get_hard_smp_processor_id(nr);
53 int ioremappable; 138 int ioremappable;
139 int ret = 0;
54 140
55 WARN_ON(nr < 0 || nr >= NR_CPUS); 141 WARN_ON(nr < 0 || nr >= NR_CPUS);
56 WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS); 142 WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);
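
The open-coded BOOT_ENTRY_* word offsets are replaced by struct
epapr_spin_table, so the same ePAPR-defined layout is now accessed through
named fields. Illustrated with the two stores this function performs (old
form first, new form below; both taken from this hunk and the next):

	/* Before: the spin table as an anonymous array of u32 words. */
	out_be32(bptr_vaddr + BOOT_ENTRY_PIR, hw_cpu);
	out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));

	/* After: same layout and byte offsets, but typed fields. */
	out_be32(&spin_table->pir, hw_cpu);
	out_be32(&spin_table->addr_l, __pa(__early_start));
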
@@ -75,46 +161,81 @@ smp_85xx_kick_cpu(int nr)
75 161
76 /* Map the spin table */ 162 /* Map the spin table */
77 if (ioremappable) 163 if (ioremappable)
78 bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY); 164 spin_table = ioremap(*cpu_rel_addr,
165 sizeof(struct epapr_spin_table));
79 else 166 else
80 bptr_vaddr = phys_to_virt(*cpu_rel_addr); 167 spin_table = phys_to_virt(*cpu_rel_addr);
81 168
82 local_irq_save(flags); 169 local_irq_save(flags);
83
84 out_be32(bptr_vaddr + BOOT_ENTRY_PIR, hw_cpu);
85#ifdef CONFIG_PPC32 170#ifdef CONFIG_PPC32
86 out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start)); 171#ifdef CONFIG_HOTPLUG_CPU
172 /* Corresponding to generic_set_cpu_dead() */
173 generic_set_cpu_up(nr);
174
175 if (system_state == SYSTEM_RUNNING) {
176 out_be32(&spin_table->addr_l, 0);
177
178 /*
179 * We don't set the BPTR register here since it already points
180 * to the boot page properly.
181 */
182 mpic_reset_core(hw_cpu);
183
184 /* wait until core is ready... */
185 if (!spin_event_timeout(in_be32(&spin_table->addr_l) == 1,
186 10000, 100)) {
187 pr_err("%s: timeout waiting for core %d to reset\n",
188 __func__, hw_cpu);
189 ret = -ENOENT;
190 goto out;
191 }
192
193 /* clear the acknowledge status */
194 __secondary_hold_acknowledge = -1;
195 }
196#endif
197 out_be32(&spin_table->pir, hw_cpu);
198 out_be32(&spin_table->addr_l, __pa(__early_start));
87 199
88 if (!ioremappable) 200 if (!ioremappable)
89 flush_dcache_range((ulong)bptr_vaddr, 201 flush_dcache_range((ulong)spin_table,
90 (ulong)(bptr_vaddr + SIZE_BOOT_ENTRY)); 202 (ulong)spin_table + sizeof(struct epapr_spin_table));
91 203
92 /* Wait a bit for the CPU to ack. */ 204 /* Wait a bit for the CPU to ack. */
93 while ((__secondary_hold_acknowledge != hw_cpu) && (++n < 1000)) 205 if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu,
94 mdelay(1); 206 10000, 100)) {
207 pr_err("%s: timeout waiting for core %d to ack\n",
208 __func__, hw_cpu);
209 ret = -ENOENT;
210 goto out;
211 }
212out:
95#else 213#else
96 smp_generic_kick_cpu(nr); 214 smp_generic_kick_cpu(nr);
97 215
98 out_be64((u64 *)(bptr_vaddr + BOOT_ENTRY_ADDR_UPPER), 216 out_be32(&spin_table->pir, hw_cpu);
99 __pa((u64)*((unsigned long long *) generic_secondary_smp_init))); 217 out_be64((u64 *)(&spin_table->addr_h),
218 __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
100 219
101 if (!ioremappable) 220 if (!ioremappable)
102 flush_dcache_range((ulong)bptr_vaddr, 221 flush_dcache_range((ulong)spin_table,
103 (ulong)(bptr_vaddr + SIZE_BOOT_ENTRY)); 222 (ulong)spin_table + sizeof(struct epapr_spin_table));
104#endif 223#endif
105 224
106 local_irq_restore(flags); 225 local_irq_restore(flags);
107 226
108 if (ioremappable) 227 if (ioremappable)
109 iounmap(bptr_vaddr); 228 iounmap(spin_table);
110
111 pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
112 229
113 return 0; 230 return ret;
114} 231}
115 232
116struct smp_ops_t smp_85xx_ops = { 233struct smp_ops_t smp_85xx_ops = {
117 .kick_cpu = smp_85xx_kick_cpu, 234 .kick_cpu = smp_85xx_kick_cpu,
235#ifdef CONFIG_HOTPLUG_CPU
236 .cpu_disable = generic_cpu_disable,
237 .cpu_die = generic_cpu_die,
238#endif
118#ifdef CONFIG_KEXEC 239#ifdef CONFIG_KEXEC
119 .give_timebase = smp_generic_give_timebase, 240 .give_timebase = smp_generic_give_timebase,
120 .take_timebase = smp_generic_take_timebase, 241 .take_timebase = smp_generic_take_timebase,
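
The bounded waits above use spin_event_timeout() from asm/delay.h, which
polls a condition every <delay> microseconds for at most <timeout>
microseconds and evaluates to the condition's final value. It replaces the
older hand-rolled loop; roughly:

	/* Before: poll for up to ~1000 ms in 1 ms steps, no error path. */
	while ((__secondary_hold_acknowledge != hw_cpu) && (++n < 1000))
		mdelay(1);

	/* After: poll every 100 us for up to 10 ms, and fail cleanly
	 * when the secondary never answers. */
	if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu,
				10000, 100)) {
		pr_err("%s: timeout waiting for core %d to ack\n",
		       __func__, hw_cpu);
		ret = -ENOENT;
	}
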
@@ -218,8 +339,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
218} 339}
219#endif /* CONFIG_KEXEC */ 340#endif /* CONFIG_KEXEC */
220 341
221static void __init 342static void __cpuinit smp_85xx_setup_cpu(int cpu_nr)
222smp_85xx_setup_cpu(int cpu_nr)
223{ 343{
224 if (smp_85xx_ops.probe == smp_mpic_probe) 344 if (smp_85xx_ops.probe == smp_mpic_probe)
225 mpic_setup_this_cpu(); 345 mpic_setup_this_cpu();
@@ -228,6 +348,16 @@ smp_85xx_setup_cpu(int cpu_nr)
228 doorbell_setup_this_cpu(); 348 doorbell_setup_this_cpu();
229} 349}
230 350
351static const struct of_device_id mpc85xx_smp_guts_ids[] = {
352 { .compatible = "fsl,mpc8572-guts", },
353 { .compatible = "fsl,p1020-guts", },
354 { .compatible = "fsl,p1021-guts", },
355 { .compatible = "fsl,p1022-guts", },
356 { .compatible = "fsl,p1023-guts", },
357 { .compatible = "fsl,p2020-guts", },
358 {},
359};
360
231void __init mpc85xx_smp_init(void) 361void __init mpc85xx_smp_init(void)
232{ 362{
233 struct device_node *np; 363 struct device_node *np;
@@ -249,6 +379,22 @@ void __init mpc85xx_smp_init(void)
249 smp_85xx_ops.cause_ipi = doorbell_cause_ipi; 379 smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
250 } 380 }
251 381
382 np = of_find_matching_node(NULL, mpc85xx_smp_guts_ids);
383 if (np) {
384 guts = of_iomap(np, 0);
385 of_node_put(np);
386 if (!guts) {
387 pr_err("%s: Could not map guts node address\n",
388 __func__);
389 return;
390 }
391 smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
392 smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
393#ifdef CONFIG_HOTPLUG_CPU
394 ppc_md.cpu_die = smp_85xx_mach_cpu_die;
395#endif
396 }
397
252 smp_ops = &smp_85xx_ops; 398 smp_ops = &smp_85xx_ops;
253 399
254#ifdef CONFIG_KEXEC 400#ifdef CONFIG_KEXEC
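
The give/take pair installed here synchronizes a hot-plugged core's
timebase through a two-flag rendezvous (tb_req/tb_valid), with the GUTS
DEVDISR bits holding both cores' timebases frozen while the donor's value
is copied. The in_be32() read-back after each DEVDISR update is presumably
there to make sure the write has posted before the timebase is sampled or
restarted; in isolation:

	/* Freeze: gate timebase updates for both cores. */
	setbits32(&guts->devdisr,
		  CCSR_GUTS_DEVDISR_TB0 | CCSR_GUTS_DEVDISR_TB1);
	in_be32(&guts->devdisr);

	/* ...donor samples get_tb(); receiver applies it with set_tb(),
	 * which writes the 64-bit value via the 32-bit TBU/TBL SPRs... */

	/* Unfreeze: both timebases restart in lockstep. */
	clrbits32(&guts->devdisr,
		  CCSR_GUTS_DEVDISR_TB0 | CCSR_GUTS_DEVDISR_TB1);
	in_be32(&guts->devdisr);
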
diff --git a/arch/powerpc/platforms/85xx/socrates.c b/arch/powerpc/platforms/85xx/socrates.c
index b9c6daa07b6..ae368e0e107 100644
--- a/arch/powerpc/platforms/85xx/socrates.c
+++ b/arch/powerpc/platforms/85xx/socrates.c
@@ -66,20 +66,13 @@ static void __init socrates_pic_init(void)
66 */ 66 */
67static void __init socrates_setup_arch(void) 67static void __init socrates_setup_arch(void)
68{ 68{
69#ifdef CONFIG_PCI
70 struct device_node *np;
71#endif
72
73 if (ppc_md.progress) 69 if (ppc_md.progress)
74 ppc_md.progress("socrates_setup_arch()", 0); 70 ppc_md.progress("socrates_setup_arch()", 0);
75 71
76#ifdef CONFIG_PCI 72 fsl_pci_assign_primary();
77 for_each_compatible_node(np, "pci", "fsl,mpc8540-pci")
78 fsl_add_bridge(np, 1);
79#endif
80} 73}
81 74
82machine_device_initcall(socrates, mpc85xx_common_publish_devices); 75machine_arch_initcall(socrates, mpc85xx_common_publish_devices);
83 76
84/* 77/*
85 * Called very early, device-tree isn't unflattened 78 * Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c
index e0508002b08..6f4939b6309 100644
--- a/arch/powerpc/platforms/85xx/stx_gp3.c
+++ b/arch/powerpc/platforms/85xx/stx_gp3.c
@@ -60,21 +60,14 @@ static void __init stx_gp3_pic_init(void)
60 */ 60 */
61static void __init stx_gp3_setup_arch(void) 61static void __init stx_gp3_setup_arch(void)
62{ 62{
63#ifdef CONFIG_PCI
64 struct device_node *np;
65#endif
66
67 if (ppc_md.progress) 63 if (ppc_md.progress)
68 ppc_md.progress("stx_gp3_setup_arch()", 0); 64 ppc_md.progress("stx_gp3_setup_arch()", 0);
69 65
66 fsl_pci_assign_primary();
67
70#ifdef CONFIG_CPM2 68#ifdef CONFIG_CPM2
71 cpm2_reset(); 69 cpm2_reset();
72#endif 70#endif
73
74#ifdef CONFIG_PCI
75 for_each_compatible_node(np, "pci", "fsl,mpc8540-pci")
76 fsl_add_bridge(np, 1);
77#endif
78} 71}
79 72
80static void stx_gp3_show_cpuinfo(struct seq_file *m) 73static void stx_gp3_show_cpuinfo(struct seq_file *m)
@@ -93,7 +86,7 @@ static void stx_gp3_show_cpuinfo(struct seq_file *m)
93 seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); 86 seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
94} 87}
95 88
96machine_device_initcall(stx_gp3, mpc85xx_common_publish_devices); 89machine_arch_initcall(stx_gp3, mpc85xx_common_publish_devices);
97 90
98/* 91/*
99 * Called very early, device-tree isn't unflattened 92 * Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c
index 3e70a2035e5..b4e58cdc09a 100644
--- a/arch/powerpc/platforms/85xx/tqm85xx.c
+++ b/arch/powerpc/platforms/85xx/tqm85xx.c
@@ -59,10 +59,6 @@ static void __init tqm85xx_pic_init(void)
59 */ 59 */
60static void __init tqm85xx_setup_arch(void) 60static void __init tqm85xx_setup_arch(void)
61{ 61{
62#ifdef CONFIG_PCI
63 struct device_node *np;
64#endif
65
66 if (ppc_md.progress) 62 if (ppc_md.progress)
67 ppc_md.progress("tqm85xx_setup_arch()", 0); 63 ppc_md.progress("tqm85xx_setup_arch()", 0);
68 64
@@ -70,20 +66,7 @@ static void __init tqm85xx_setup_arch(void)
70 cpm2_reset(); 66 cpm2_reset();
71#endif 67#endif
72 68
73#ifdef CONFIG_PCI 69 fsl_pci_assign_primary();
74 for_each_node_by_type(np, "pci") {
75 if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
76 of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
77 struct resource rsrc;
78 if (!of_address_to_resource(np, 0, &rsrc)) {
79 if ((rsrc.start & 0xfffff) == 0x8000)
80 fsl_add_bridge(np, 1);
81 else
82 fsl_add_bridge(np, 0);
83 }
84 }
85 }
86#endif
87} 70}
88 71
89static void tqm85xx_show_cpuinfo(struct seq_file *m) 72static void tqm85xx_show_cpuinfo(struct seq_file *m)
@@ -123,9 +106,9 @@ static void __devinit tqm85xx_ti1520_fixup(struct pci_dev *pdev)
123DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1520, 106DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1520,
124 tqm85xx_ti1520_fixup); 107 tqm85xx_ti1520_fixup);
125 108
126machine_device_initcall(tqm85xx, mpc85xx_common_publish_devices); 109machine_arch_initcall(tqm85xx, mpc85xx_common_publish_devices);
127 110
128static const char *board[] __initdata = { 111static const char * const board[] __initconst = {
129 "tqc,tqm8540", 112 "tqc,tqm8540",
130 "tqc,tqm8541", 113 "tqc,tqm8541",
131 "tqc,tqm8548", 114 "tqc,tqm8548",
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index 41c687550ea..dcbf7e42dce 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -111,18 +111,11 @@ static void xes_mpc85xx_fixups(void)
111 } 111 }
112} 112}
113 113
114#ifdef CONFIG_PCI
115static int primary_phb_addr;
116#endif
117
118/* 114/*
119 * Setup the architecture 115 * Setup the architecture
120 */ 116 */
121static void __init xes_mpc85xx_setup_arch(void) 117static void __init xes_mpc85xx_setup_arch(void)
122{ 118{
123#ifdef CONFIG_PCI
124 struct device_node *np;
125#endif
126 struct device_node *root; 119 struct device_node *root;
127 const char *model = "Unknown"; 120 const char *model = "Unknown";
128 121
@@ -137,26 +130,14 @@ static void __init xes_mpc85xx_setup_arch(void)
137 130
138 xes_mpc85xx_fixups(); 131 xes_mpc85xx_fixups();
139 132
140#ifdef CONFIG_PCI
141 for_each_node_by_type(np, "pci") {
142 if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
143 of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
144 struct resource rsrc;
145 of_address_to_resource(np, 0, &rsrc);
146 if ((rsrc.start & 0xfffff) == primary_phb_addr)
147 fsl_add_bridge(np, 1);
148 else
149 fsl_add_bridge(np, 0);
150 }
151 }
152#endif
153
154 mpc85xx_smp_init(); 133 mpc85xx_smp_init();
134
135 fsl_pci_assign_primary();
155} 136}
156 137
157machine_device_initcall(xes_mpc8572, mpc85xx_common_publish_devices); 138machine_arch_initcall(xes_mpc8572, mpc85xx_common_publish_devices);
158machine_device_initcall(xes_mpc8548, mpc85xx_common_publish_devices); 139machine_arch_initcall(xes_mpc8548, mpc85xx_common_publish_devices);
159machine_device_initcall(xes_mpc8540, mpc85xx_common_publish_devices); 140machine_arch_initcall(xes_mpc8540, mpc85xx_common_publish_devices);
160 141
161/* 142/*
162 * Called very early, device-tree isn't unflattened 143 * Called very early, device-tree isn't unflattened
@@ -165,42 +146,21 @@ static int __init xes_mpc8572_probe(void)
165{ 146{
166 unsigned long root = of_get_flat_dt_root(); 147 unsigned long root = of_get_flat_dt_root();
167 148
168 if (of_flat_dt_is_compatible(root, "xes,MPC8572")) { 149 return of_flat_dt_is_compatible(root, "xes,MPC8572");
169#ifdef CONFIG_PCI
170 primary_phb_addr = 0x8000;
171#endif
172 return 1;
173 } else {
174 return 0;
175 }
176} 150}
177 151
178static int __init xes_mpc8548_probe(void) 152static int __init xes_mpc8548_probe(void)
179{ 153{
180 unsigned long root = of_get_flat_dt_root(); 154 unsigned long root = of_get_flat_dt_root();
181 155
182 if (of_flat_dt_is_compatible(root, "xes,MPC8548")) { 156 return of_flat_dt_is_compatible(root, "xes,MPC8548");
183#ifdef CONFIG_PCI
184 primary_phb_addr = 0xb000;
185#endif
186 return 1;
187 } else {
188 return 0;
189 }
190} 157}
191 158
192static int __init xes_mpc8540_probe(void) 159static int __init xes_mpc8540_probe(void)
193{ 160{
194 unsigned long root = of_get_flat_dt_root(); 161 unsigned long root = of_get_flat_dt_root();
195 162
196 if (of_flat_dt_is_compatible(root, "xes,MPC8540")) { 163 return of_flat_dt_is_compatible(root, "xes,MPC8540");
197#ifdef CONFIG_PCI
198 primary_phb_addr = 0xb000;
199#endif
200 return 1;
201 } else {
202 return 0;
203 }
204} 164}
205 165
206define_machine(xes_mpc8572) { 166define_machine(xes_mpc8572) {
diff --git a/arch/powerpc/platforms/86xx/gef_ppc9a.c b/arch/powerpc/platforms/86xx/gef_ppc9a.c
index 563aafa8629..bf5338754c5 100644
--- a/arch/powerpc/platforms/86xx/gef_ppc9a.c
+++ b/arch/powerpc/platforms/86xx/gef_ppc9a.c
@@ -73,13 +73,6 @@ static void __init gef_ppc9a_init_irq(void)
73static void __init gef_ppc9a_setup_arch(void) 73static void __init gef_ppc9a_setup_arch(void)
74{ 74{
75 struct device_node *regs; 75 struct device_node *regs;
76#ifdef CONFIG_PCI
77 struct device_node *np;
78
79 for_each_compatible_node(np, "pci", "fsl,mpc8641-pcie") {
80 fsl_add_bridge(np, 1);
81 }
82#endif
83 76
84 printk(KERN_INFO "GE Intelligent Platforms PPC9A 6U VME SBC\n"); 77 printk(KERN_INFO "GE Intelligent Platforms PPC9A 6U VME SBC\n");
85 78
@@ -87,6 +80,8 @@ static void __init gef_ppc9a_setup_arch(void)
87 mpc86xx_smp_init(); 80 mpc86xx_smp_init();
88#endif 81#endif
89 82
83 fsl_pci_assign_primary();
84
90 /* Remap basic board registers */ 85 /* Remap basic board registers */
91 regs = of_find_compatible_node(NULL, NULL, "gef,ppc9a-fpga-regs"); 86 regs = of_find_compatible_node(NULL, NULL, "gef,ppc9a-fpga-regs");
92 if (regs) { 87 if (regs) {
@@ -221,6 +216,7 @@ static long __init mpc86xx_time_init(void)
221static __initdata struct of_device_id of_bus_ids[] = { 216static __initdata struct of_device_id of_bus_ids[] = {
222 { .compatible = "simple-bus", }, 217 { .compatible = "simple-bus", },
223 { .compatible = "gianfar", }, 218 { .compatible = "gianfar", },
219 { .compatible = "fsl,mpc8641-pcie", },
224 {}, 220 {},
225}; 221};
226 222
@@ -231,7 +227,7 @@ static int __init declare_of_platform_devices(void)
231 227
232 return 0; 228 return 0;
233} 229}
234machine_device_initcall(gef_ppc9a, declare_of_platform_devices); 230machine_arch_initcall(gef_ppc9a, declare_of_platform_devices);
235 231
236define_machine(gef_ppc9a) { 232define_machine(gef_ppc9a) {
237 .name = "GE PPC9A", 233 .name = "GE PPC9A",
diff --git a/arch/powerpc/platforms/86xx/gef_sbc310.c b/arch/powerpc/platforms/86xx/gef_sbc310.c
index cc6a91ae088..0b7851330a0 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc310.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc310.c
@@ -73,20 +73,14 @@ static void __init gef_sbc310_init_irq(void)
73static void __init gef_sbc310_setup_arch(void) 73static void __init gef_sbc310_setup_arch(void)
74{ 74{
75 struct device_node *regs; 75 struct device_node *regs;
76#ifdef CONFIG_PCI
77 struct device_node *np;
78
79 for_each_compatible_node(np, "pci", "fsl,mpc8641-pcie") {
80 fsl_add_bridge(np, 1);
81 }
82#endif
83
84 printk(KERN_INFO "GE Intelligent Platforms SBC310 6U VPX SBC\n"); 76 printk(KERN_INFO "GE Intelligent Platforms SBC310 6U VPX SBC\n");
85 77
86#ifdef CONFIG_SMP 78#ifdef CONFIG_SMP
87 mpc86xx_smp_init(); 79 mpc86xx_smp_init();
88#endif 80#endif
89 81
82 fsl_pci_assign_primary();
83
90 /* Remap basic board registers */ 84 /* Remap basic board registers */
91 regs = of_find_compatible_node(NULL, NULL, "gef,fpga-regs"); 85 regs = of_find_compatible_node(NULL, NULL, "gef,fpga-regs");
92 if (regs) { 86 if (regs) {
@@ -209,6 +203,7 @@ static long __init mpc86xx_time_init(void)
209static __initdata struct of_device_id of_bus_ids[] = { 203static __initdata struct of_device_id of_bus_ids[] = {
210 { .compatible = "simple-bus", }, 204 { .compatible = "simple-bus", },
211 { .compatible = "gianfar", }, 205 { .compatible = "gianfar", },
206 { .compatible = "fsl,mpc8641-pcie", },
212 {}, 207 {},
213}; 208};
214 209
@@ -219,7 +214,7 @@ static int __init declare_of_platform_devices(void)
219 214
220 return 0; 215 return 0;
221} 216}
222machine_device_initcall(gef_sbc310, declare_of_platform_devices); 217machine_arch_initcall(gef_sbc310, declare_of_platform_devices);
223 218
224define_machine(gef_sbc310) { 219define_machine(gef_sbc310) {
225 .name = "GE SBC310", 220 .name = "GE SBC310",
diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c
index aead6b337f4..b9eb174897b 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc610.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc610.c
@@ -73,13 +73,6 @@ static void __init gef_sbc610_init_irq(void)
73static void __init gef_sbc610_setup_arch(void) 73static void __init gef_sbc610_setup_arch(void)
74{ 74{
75 struct device_node *regs; 75 struct device_node *regs;
76#ifdef CONFIG_PCI
77 struct device_node *np;
78
79 for_each_compatible_node(np, "pci", "fsl,mpc8641-pcie") {
80 fsl_add_bridge(np, 1);
81 }
82#endif
83 76
84 printk(KERN_INFO "GE Intelligent Platforms SBC610 6U VPX SBC\n"); 77 printk(KERN_INFO "GE Intelligent Platforms SBC610 6U VPX SBC\n");
85 78
@@ -87,6 +80,8 @@ static void __init gef_sbc610_setup_arch(void)
87 mpc86xx_smp_init(); 80 mpc86xx_smp_init();
88#endif 81#endif
89 82
83 fsl_pci_assign_primary();
84
90 /* Remap basic board registers */ 85 /* Remap basic board registers */
91 regs = of_find_compatible_node(NULL, NULL, "gef,fpga-regs"); 86 regs = of_find_compatible_node(NULL, NULL, "gef,fpga-regs");
92 if (regs) { 87 if (regs) {
@@ -198,6 +193,7 @@ static long __init mpc86xx_time_init(void)
198static __initdata struct of_device_id of_bus_ids[] = { 193static __initdata struct of_device_id of_bus_ids[] = {
199 { .compatible = "simple-bus", }, 194 { .compatible = "simple-bus", },
200 { .compatible = "gianfar", }, 195 { .compatible = "gianfar", },
196 { .compatible = "fsl,mpc8641-pcie", },
201 {}, 197 {},
202}; 198};
203 199
@@ -208,7 +204,7 @@ static int __init declare_of_platform_devices(void)
208 204
209 return 0; 205 return 0;
210} 206}
211machine_device_initcall(gef_sbc610, declare_of_platform_devices); 207machine_arch_initcall(gef_sbc610, declare_of_platform_devices);
212 208
213define_machine(gef_sbc610) { 209define_machine(gef_sbc610) {
214 .name = "GE SBC610", 210 .name = "GE SBC610",
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index 62cd3c555bf..a817398a56d 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -91,6 +91,9 @@ static struct of_device_id __initdata mpc8610_ids[] = {
91 { .compatible = "simple-bus", }, 91 { .compatible = "simple-bus", },
92 /* So that the DMA channel nodes can be probed individually: */ 92 /* So that the DMA channel nodes can be probed individually: */
93 { .compatible = "fsl,eloplus-dma", }, 93 { .compatible = "fsl,eloplus-dma", },
94 /* PCI controllers */
95 { .compatible = "fsl,mpc8610-pci", },
96 { .compatible = "fsl,mpc8641-pcie", },
94 {} 97 {}
95}; 98};
96 99
@@ -107,7 +110,7 @@ static int __init mpc8610_declare_of_platform_devices(void)
107 110
108 return 0; 111 return 0;
109} 112}
110machine_device_initcall(mpc86xx_hpcd, mpc8610_declare_of_platform_devices); 113machine_arch_initcall(mpc86xx_hpcd, mpc8610_declare_of_platform_devices);
111 114
112#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) 115#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
113 116
@@ -278,25 +281,13 @@ mpc8610hpcd_valid_monitor_port(enum fsl_diu_monitor_port port)
278static void __init mpc86xx_hpcd_setup_arch(void) 281static void __init mpc86xx_hpcd_setup_arch(void)
279{ 282{
280 struct resource r; 283 struct resource r;
281 struct device_node *np;
282 unsigned char *pixis; 284 unsigned char *pixis;
283 285
284 if (ppc_md.progress) 286 if (ppc_md.progress)
285 ppc_md.progress("mpc86xx_hpcd_setup_arch()", 0); 287 ppc_md.progress("mpc86xx_hpcd_setup_arch()", 0);
286 288
287#ifdef CONFIG_PCI 289 fsl_pci_assign_primary();
288 for_each_node_by_type(np, "pci") { 290
289 if (of_device_is_compatible(np, "fsl,mpc8610-pci")
290 || of_device_is_compatible(np, "fsl,mpc8641-pcie")) {
291 struct resource rsrc;
292 of_address_to_resource(np, 0, &rsrc);
293 if ((rsrc.start & 0xfffff) == 0xa000)
294 fsl_add_bridge(np, 1);
295 else
296 fsl_add_bridge(np, 0);
297 }
298 }
299#endif
300#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) 291#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
301 diu_ops.get_pixel_format = mpc8610hpcd_get_pixel_format; 292 diu_ops.get_pixel_format = mpc8610hpcd_get_pixel_format;
302 diu_ops.set_gamma_table = mpc8610hpcd_set_gamma_table; 293 diu_ops.set_gamma_table = mpc8610hpcd_set_gamma_table;
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index 817245bc021..e8bf3fae560 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -19,7 +19,6 @@
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/seq_file.h> 20#include <linux/seq_file.h>
21#include <linux/of_platform.h> 21#include <linux/of_platform.h>
22#include <linux/memblock.h>
23 22
24#include <asm/time.h> 23#include <asm/time.h>
25#include <asm/machdep.h> 24#include <asm/machdep.h>
@@ -51,15 +50,8 @@ extern int uli_exclude_device(struct pci_controller *hose,
51static int mpc86xx_exclude_device(struct pci_controller *hose, 50static int mpc86xx_exclude_device(struct pci_controller *hose,
52 u_char bus, u_char devfn) 51 u_char bus, u_char devfn)
53{ 52{
54 struct device_node* node; 53 if (hose->dn == fsl_pci_primary)
55 struct resource rsrc;
56
57 node = hose->dn;
58 of_address_to_resource(node, 0, &rsrc);
59
60 if ((rsrc.start & 0xfffff) == 0x8000) {
61 return uli_exclude_device(hose, bus, devfn); 54 return uli_exclude_device(hose, bus, devfn);
62 }
63 55
64 return PCIBIOS_SUCCESSFUL; 56 return PCIBIOS_SUCCESSFUL;
65} 57}
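
This exclude-device hook shows the other half of the fsl_pci_assign_primary()
conversion: the helper records the primary bridge's device node in the
fsl_pci_primary global (in sysdev/fsl_pci.c), so boards no longer decode the
controller's CCSR register offset themselves. Side by side:

	/* Before: identify the primary bridge by its register offset. */
	of_address_to_resource(hose->dn, 0, &rsrc);
	if ((rsrc.start & 0xfffff) == 0x8000)
		return uli_exclude_device(hose, bus, devfn);

	/* After: a plain pointer comparison against the node that
	 * fsl_pci_assign_primary() selected. */
	if (hose->dn == fsl_pci_primary)
		return uli_exclude_device(hose, bus, devfn);
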
@@ -69,30 +61,11 @@ static int mpc86xx_exclude_device(struct pci_controller *hose,
69static void __init 61static void __init
70mpc86xx_hpcn_setup_arch(void) 62mpc86xx_hpcn_setup_arch(void)
71{ 63{
72#ifdef CONFIG_PCI
73 struct device_node *np;
74 struct pci_controller *hose;
75#endif
76 dma_addr_t max = 0xffffffff;
77
78 if (ppc_md.progress) 64 if (ppc_md.progress)
79 ppc_md.progress("mpc86xx_hpcn_setup_arch()", 0); 65 ppc_md.progress("mpc86xx_hpcn_setup_arch()", 0);
80 66
81#ifdef CONFIG_PCI 67#ifdef CONFIG_PCI
82 for_each_compatible_node(np, "pci", "fsl,mpc8641-pcie") {
83 struct resource rsrc;
84 of_address_to_resource(np, 0, &rsrc);
85 if ((rsrc.start & 0xfffff) == 0x8000)
86 fsl_add_bridge(np, 1);
87 else
88 fsl_add_bridge(np, 0);
89 hose = pci_find_hose_for_OF_device(np);
90 max = min(max, hose->dma_window_base_cur +
91 hose->dma_window_size);
92 }
93
94 ppc_md.pci_exclude_device = mpc86xx_exclude_device; 68 ppc_md.pci_exclude_device = mpc86xx_exclude_device;
95
96#endif 69#endif
97 70
98 printk("MPC86xx HPCN board from Freescale Semiconductor\n"); 71 printk("MPC86xx HPCN board from Freescale Semiconductor\n");
@@ -101,13 +74,9 @@ mpc86xx_hpcn_setup_arch(void)
101 mpc86xx_smp_init(); 74 mpc86xx_smp_init();
102#endif 75#endif
103 76
104#ifdef CONFIG_SWIOTLB 77 fsl_pci_assign_primary();
105 if ((memblock_end_of_DRAM() - 1) > max) { 78
106 ppc_swiotlb_enable = 1; 79 swiotlb_detect_4g();
107 set_pci_dma_ops(&swiotlb_dma_ops);
108 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
109 }
110#endif
111} 80}
112 81
113 82
@@ -162,6 +131,7 @@ static __initdata struct of_device_id of_bus_ids[] = {
162 { .compatible = "simple-bus", }, 131 { .compatible = "simple-bus", },
163 { .compatible = "fsl,srio", }, 132 { .compatible = "fsl,srio", },
164 { .compatible = "gianfar", }, 133 { .compatible = "gianfar", },
134 { .compatible = "fsl,mpc8641-pcie", },
165 {}, 135 {},
166}; 136};
167 137
@@ -171,7 +141,7 @@ static int __init declare_of_platform_devices(void)
171 141
172 return 0; 142 return 0;
173} 143}
174machine_device_initcall(mpc86xx_hpcn, declare_of_platform_devices); 144machine_arch_initcall(mpc86xx_hpcn, declare_of_platform_devices);
175machine_arch_initcall(mpc86xx_hpcn, swiotlb_setup_bus_notifier); 145machine_arch_initcall(mpc86xx_hpcn, swiotlb_setup_bus_notifier);
176 146
177define_machine(mpc86xx_hpcn) { 147define_machine(mpc86xx_hpcn) {
diff --git a/arch/powerpc/platforms/86xx/sbc8641d.c b/arch/powerpc/platforms/86xx/sbc8641d.c
index e7007d0d949..b47a8fd0f3d 100644
--- a/arch/powerpc/platforms/86xx/sbc8641d.c
+++ b/arch/powerpc/platforms/86xx/sbc8641d.c
@@ -38,23 +38,16 @@
38static void __init 38static void __init
39sbc8641_setup_arch(void) 39sbc8641_setup_arch(void)
40{ 40{
41#ifdef CONFIG_PCI
42 struct device_node *np;
43#endif
44
45 if (ppc_md.progress) 41 if (ppc_md.progress)
46 ppc_md.progress("sbc8641_setup_arch()", 0); 42 ppc_md.progress("sbc8641_setup_arch()", 0);
47 43
48#ifdef CONFIG_PCI
49 for_each_compatible_node(np, "pci", "fsl,mpc8641-pcie")
50 fsl_add_bridge(np, 0);
51#endif
52
53 printk("SBC8641 board from Wind River\n"); 44 printk("SBC8641 board from Wind River\n");
54 45
55#ifdef CONFIG_SMP 46#ifdef CONFIG_SMP
56 mpc86xx_smp_init(); 47 mpc86xx_smp_init();
57#endif 48#endif
49
50 fsl_pci_assign_primary();
58} 51}
59 52
60 53
@@ -102,6 +95,7 @@ mpc86xx_time_init(void)
102static __initdata struct of_device_id of_bus_ids[] = { 95static __initdata struct of_device_id of_bus_ids[] = {
103 { .compatible = "simple-bus", }, 96 { .compatible = "simple-bus", },
104 { .compatible = "gianfar", }, 97 { .compatible = "gianfar", },
98 { .compatible = "fsl,mpc8641-pcie", },
105 {}, 99 {},
106}; 100};
107 101
@@ -111,7 +105,7 @@ static int __init declare_of_platform_devices(void)
111 105
112 return 0; 106 return 0;
113} 107}
114machine_device_initcall(sbc8641, declare_of_platform_devices); 108machine_arch_initcall(sbc8641, declare_of_platform_devices);
115 109
116define_machine(sbc8641) { 110define_machine(sbc8641) {
117 .name = "SBC8641D", 111 .name = "SBC8641D",
diff --git a/arch/powerpc/platforms/cell/beat.c b/arch/powerpc/platforms/cell/beat.c
index 852592b2b71..affcf566d46 100644
--- a/arch/powerpc/platforms/cell/beat.c
+++ b/arch/powerpc/platforms/cell/beat.c
@@ -136,9 +136,9 @@ ssize_t beat_nvram_get_size(void)
136 return BEAT_NVRAM_SIZE; 136 return BEAT_NVRAM_SIZE;
137} 137}
138 138
139int beat_set_xdabr(unsigned long dabr) 139int beat_set_xdabr(unsigned long dabr, unsigned long dabrx)
140{ 140{
141 if (beat_set_dabr(dabr, DABRX_KERNEL | DABRX_USER)) 141 if (beat_set_dabr(dabr, dabrx))
142 return -1; 142 return -1;
143 return 0; 143 return 0;
144} 144}
diff --git a/arch/powerpc/platforms/cell/beat.h b/arch/powerpc/platforms/cell/beat.h
index 32c8efcedc8..bfcb8e351ae 100644
--- a/arch/powerpc/platforms/cell/beat.h
+++ b/arch/powerpc/platforms/cell/beat.h
@@ -32,7 +32,7 @@ void beat_get_rtc_time(struct rtc_time *);
32ssize_t beat_nvram_get_size(void); 32ssize_t beat_nvram_get_size(void);
33ssize_t beat_nvram_read(char *, size_t, loff_t *); 33ssize_t beat_nvram_read(char *, size_t, loff_t *);
34ssize_t beat_nvram_write(char *, size_t, loff_t *); 34ssize_t beat_nvram_write(char *, size_t, loff_t *);
35int beat_set_xdabr(unsigned long); 35int beat_set_xdabr(unsigned long, unsigned long);
36void beat_power_save(void); 36void beat_power_save(void);
37void beat_kexec_cpu_down(int, int); 37void beat_kexec_cpu_down(int, int);
38 38
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
index 943c9d39aa1..0f6f83988b3 100644
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ b/arch/powerpc/platforms/cell/beat_htab.c
@@ -88,7 +88,7 @@ static inline unsigned int beat_read_mask(unsigned hpte_group)
88} 88}
89 89
90static long beat_lpar_hpte_insert(unsigned long hpte_group, 90static long beat_lpar_hpte_insert(unsigned long hpte_group,
91 unsigned long va, unsigned long pa, 91 unsigned long vpn, unsigned long pa,
92 unsigned long rflags, unsigned long vflags, 92 unsigned long rflags, unsigned long vflags,
93 int psize, int ssize) 93 int psize, int ssize)
94{ 94{
@@ -103,7 +103,7 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group,
103 "rflags=%lx, vflags=%lx, psize=%d)\n", 103 "rflags=%lx, vflags=%lx, psize=%d)\n",
104 hpte_group, va, pa, rflags, vflags, psize); 104 hpte_group, va, pa, rflags, vflags, psize);
105 105
106 hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) | 106 hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) |
107 vflags | HPTE_V_VALID; 107 vflags | HPTE_V_VALID;
108 hpte_r = hpte_encode_r(pa, psize) | rflags; 108 hpte_r = hpte_encode_r(pa, psize) | rflags;
109 109
@@ -184,14 +184,14 @@ static void beat_lpar_hptab_clear(void)
184 */ 184 */
185static long beat_lpar_hpte_updatepp(unsigned long slot, 185static long beat_lpar_hpte_updatepp(unsigned long slot,
186 unsigned long newpp, 186 unsigned long newpp,
187 unsigned long va, 187 unsigned long vpn,
188 int psize, int ssize, int local) 188 int psize, int ssize, int local)
189{ 189{
190 unsigned long lpar_rc; 190 unsigned long lpar_rc;
191 u64 dummy0, dummy1; 191 u64 dummy0, dummy1;
192 unsigned long want_v; 192 unsigned long want_v;
193 193
194 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 194 want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
195 195
196 DBG_LOW(" update: " 196 DBG_LOW(" update: "
197 "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ", 197 "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
@@ -220,15 +220,15 @@ static long beat_lpar_hpte_updatepp(unsigned long slot,
220 return 0; 220 return 0;
221} 221}
222 222
223static long beat_lpar_hpte_find(unsigned long va, int psize) 223static long beat_lpar_hpte_find(unsigned long vpn, int psize)
224{ 224{
225 unsigned long hash; 225 unsigned long hash;
226 unsigned long i, j; 226 unsigned long i, j;
227 long slot; 227 long slot;
228 unsigned long want_v, hpte_v; 228 unsigned long want_v, hpte_v;
229 229
230 hash = hpt_hash(va, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M); 230 hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M);
231 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 231 want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
232 232
233 for (j = 0; j < 2; j++) { 233 for (j = 0; j < 2; j++) {
234 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 234 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -255,14 +255,15 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
255 unsigned long ea, 255 unsigned long ea,
256 int psize, int ssize) 256 int psize, int ssize)
257{ 257{
258 unsigned long lpar_rc, slot, vsid, va; 258 unsigned long vpn;
259 unsigned long lpar_rc, slot, vsid;
259 u64 dummy0, dummy1; 260 u64 dummy0, dummy1;
260 261
261 vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M); 262 vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
262 va = (vsid << 28) | (ea & 0x0fffffff); 263 vpn = hpt_vpn(ea, vsid, MMU_SEGSIZE_256M);
263 264
264 raw_spin_lock(&beat_htab_lock); 265 raw_spin_lock(&beat_htab_lock);
265 slot = beat_lpar_hpte_find(va, psize); 266 slot = beat_lpar_hpte_find(vpn, psize);
266 BUG_ON(slot == -1); 267 BUG_ON(slot == -1);
267 268
268 lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7, 269 lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7,
@@ -272,7 +273,7 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
272 BUG_ON(lpar_rc != 0); 273 BUG_ON(lpar_rc != 0);
273} 274}
274 275
275static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va, 276static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
276 int psize, int ssize, int local) 277 int psize, int ssize, int local)
277{ 278{
278 unsigned long want_v; 279 unsigned long want_v;
@@ -282,7 +283,7 @@ static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
282 283
283 DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", 284 DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
284 slot, va, psize, local); 285 slot, va, psize, local);
285 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 286 want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
286 287
287 raw_spin_lock_irqsave(&beat_htab_lock, flags); 288 raw_spin_lock_irqsave(&beat_htab_lock, flags);
288 dummy1 = beat_lpar_hpte_getword0(slot); 289 dummy1 = beat_lpar_hpte_getword0(slot);
@@ -311,7 +312,7 @@ void __init hpte_init_beat(void)
311} 312}
312 313
313static long beat_lpar_hpte_insert_v3(unsigned long hpte_group, 314static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
314 unsigned long va, unsigned long pa, 315 unsigned long vpn, unsigned long pa,
315 unsigned long rflags, unsigned long vflags, 316 unsigned long rflags, unsigned long vflags,
316 int psize, int ssize) 317 int psize, int ssize)
317{ 318{
@@ -322,11 +323,11 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
322 return -1; 323 return -1;
323 324
324 if (!(vflags & HPTE_V_BOLTED)) 325 if (!(vflags & HPTE_V_BOLTED))
325 DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, " 326 DBG_LOW("hpte_insert(group=%lx, vpn=%016lx, pa=%016lx, "
326 "rflags=%lx, vflags=%lx, psize=%d)\n", 327 "rflags=%lx, vflags=%lx, psize=%d)\n",
327 hpte_group, va, pa, rflags, vflags, psize); 328 hpte_group, vpn, pa, rflags, vflags, psize);
328 329
329 hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) | 330 hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) |
330 vflags | HPTE_V_VALID; 331 vflags | HPTE_V_VALID;
331 hpte_r = hpte_encode_r(pa, psize) | rflags; 332 hpte_r = hpte_encode_r(pa, psize) | rflags;
332 333
@@ -364,14 +365,14 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
364 */ 365 */
365static long beat_lpar_hpte_updatepp_v3(unsigned long slot, 366static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
366 unsigned long newpp, 367 unsigned long newpp,
367 unsigned long va, 368 unsigned long vpn,
368 int psize, int ssize, int local) 369 int psize, int ssize, int local)
369{ 370{
370 unsigned long lpar_rc; 371 unsigned long lpar_rc;
371 unsigned long want_v; 372 unsigned long want_v;
372 unsigned long pss; 373 unsigned long pss;
373 374
374 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 375 want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
375 pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc; 376 pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
376 377
377 DBG_LOW(" update: " 378 DBG_LOW(" update: "
@@ -392,16 +393,16 @@ static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
392 return 0; 393 return 0;
393} 394}
394 395
395static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long va, 396static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long vpn,
396 int psize, int ssize, int local) 397 int psize, int ssize, int local)
397{ 398{
398 unsigned long want_v; 399 unsigned long want_v;
399 unsigned long lpar_rc; 400 unsigned long lpar_rc;
400 unsigned long pss; 401 unsigned long pss;
401 402
402 DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", 403 DBG_LOW(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
403 slot, va, psize, local); 404 slot, vpn, psize, local);
404 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 405 want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
405 pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc; 406 pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
406 407
407 lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss); 408 lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss);
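
The beat_htab changes track a tree-wide switch of the hash-table backend
interfaces from raw virtual addresses (va) to virtual page numbers (vpn).
The updateboltedpp hunk above shows the conversion at a call site; the
usual motivation, assumed here, is that a full VA can overflow an unsigned
long once the virtual address space is extended, while the page number
still fits:

	/* Before: build the full VA by hand. */
	vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
	va   = (vsid << 28) | (ea & 0x0fffffff);
	slot = beat_lpar_hpte_find(va, psize);

	/* After: derive the VPN with the common helper. */
	vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
	vpn  = hpt_vpn(ea, vsid, MMU_SEGSIZE_256M);
	slot = beat_lpar_hpte_find(vpn, psize);
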
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 14943ef0191..7d2d036754b 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -19,12 +19,12 @@
19 19
20#undef DEBUG 20#undef DEBUG
21 21
22#include <linux/memblock.h>
22#include <linux/types.h> 23#include <linux/types.h>
23#include <linux/spinlock.h> 24#include <linux/spinlock.h>
24#include <linux/pci.h> 25#include <linux/pci.h>
25#include <asm/iommu.h> 26#include <asm/iommu.h>
26#include <asm/machdep.h> 27#include <asm/machdep.h>
27#include <asm/abs_addr.h>
28#include <asm/firmware.h> 28#include <asm/firmware.h>
29 29
30#define IOBMAP_PAGE_SHIFT 12 30#define IOBMAP_PAGE_SHIFT 12
@@ -99,7 +99,7 @@ static int iobmap_build(struct iommu_table *tbl, long index,
99 ip = ((u32 *)tbl->it_base) + index; 99 ip = ((u32 *)tbl->it_base) + index;
100 100
101 while (npages--) { 101 while (npages--) {
102 rpn = virt_to_abs(uaddr) >> IOBMAP_PAGE_SHIFT; 102 rpn = __pa(uaddr) >> IOBMAP_PAGE_SHIFT;
103 103
104 *(ip++) = IOBMAP_L2E_V | rpn; 104 *(ip++) = IOBMAP_L2E_V | rpn;
105 /* invalidate tlb, can be optimized more */ 105 /* invalidate tlb, can be optimized more */
@@ -258,7 +258,7 @@ void __init alloc_iobmap_l2(void)
258 return; 258 return;
259#endif 259#endif
260 /* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */ 260 /* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */
261 iob_l2_base = (u32 *)abs_to_virt(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000)); 261 iob_l2_base = (u32 *)__va(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000));
262 262
263 printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base); 263 printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base);
264} 264}
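
The pasemi change is mechanical: by this point virt_to_abs() and
abs_to_virt() were effectively plain aliases for __pa() and __va() (the
distinction only mattered on legacy iSeries), so dropping asm/abs_addr.h
just spells the linear-map conversions directly:

	/* Physical page number of a kernel-virtual buffer. */
	rpn = __pa(uaddr) >> IOBMAP_PAGE_SHIFT;

	/* Kernel-virtual view of a physical allocation. */
	iob_l2_base = (u32 *)__va(memblock_alloc_base(1UL << 21, 1UL << 21,
						      0x80000000));
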
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 0e7eccc0f88..471aa3ccd9f 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -30,19 +30,10 @@
30#include <asm/opal.h> 30#include <asm/opal.h>
31#include <asm/iommu.h> 31#include <asm/iommu.h>
32#include <asm/tce.h> 32#include <asm/tce.h>
33#include <asm/abs_addr.h>
34 33
35#include "powernv.h" 34#include "powernv.h"
36#include "pci.h" 35#include "pci.h"
37 36
38struct resource_wrap {
39 struct list_head link;
40 resource_size_t size;
41 resource_size_t align;
42 struct pci_dev *dev; /* Set if it's a device */
43 struct pci_bus *bus; /* Set if it's a bridge */
44};
45
46static int __pe_printk(const char *level, const struct pnv_ioda_pe *pe, 37static int __pe_printk(const char *level, const struct pnv_ioda_pe *pe,
47 struct va_format *vaf) 38 struct va_format *vaf)
48{ 39{
@@ -78,273 +69,6 @@ define_pe_printk_level(pe_err, KERN_ERR);
78define_pe_printk_level(pe_warn, KERN_WARNING); 69define_pe_printk_level(pe_warn, KERN_WARNING);
79define_pe_printk_level(pe_info, KERN_INFO); 70define_pe_printk_level(pe_info, KERN_INFO);
80 71
81
82/* Calculate resource usage & alignment requirement of a single
83 * device. This will also assign all resources within the device
84 * for a given type starting at 0 for the biggest one and then
85 * assigning in decreasing order of size.
86 */
87static void __devinit pnv_ioda_calc_dev(struct pci_dev *dev, unsigned int flags,
88 resource_size_t *size,
89 resource_size_t *align)
90{
91 resource_size_t start;
92 struct resource *r;
93 int i;
94
95 pr_devel(" -> CDR %s\n", pci_name(dev));
96
97 *size = *align = 0;
98
99 /* Clear the resources out and mark them all unset */
100 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
101 r = &dev->resource[i];
102 if (!(r->flags & flags))
103 continue;
104 if (r->start) {
105 r->end -= r->start;
106 r->start = 0;
107 }
108 r->flags |= IORESOURCE_UNSET;
109 }
110
111 /* We currently keep all memory resources together, we
112 * will handle prefetch & 64-bit separately in the future
113 * but for now we stick everybody in M32
114 */
115 start = 0;
116 for (;;) {
117 resource_size_t max_size = 0;
118 int max_no = -1;
119
120 /* Find next biggest resource */
121 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
122 r = &dev->resource[i];
123 if (!(r->flags & IORESOURCE_UNSET) ||
124 !(r->flags & flags))
125 continue;
126 if (resource_size(r) > max_size) {
127 max_size = resource_size(r);
128 max_no = i;
129 }
130 }
131 if (max_no < 0)
132 break;
133 r = &dev->resource[max_no];
134 if (max_size > *align)
135 *align = max_size;
136 *size += max_size;
137 r->start = start;
138 start += max_size;
139 r->end = r->start + max_size - 1;
140 r->flags &= ~IORESOURCE_UNSET;
141 pr_devel(" -> R%d %016llx..%016llx\n",
142 max_no, r->start, r->end);
143 }
144 pr_devel(" <- CDR %s size=%llx align=%llx\n",
145 pci_name(dev), *size, *align);
146}
147
148/* Allocate a resource "wrap" for a given device or bridge and
149 * insert it at the right position in the sorted list
150 */
151static void __devinit pnv_ioda_add_wrap(struct list_head *list,
152 struct pci_bus *bus,
153 struct pci_dev *dev,
154 resource_size_t size,
155 resource_size_t align)
156{
157 struct resource_wrap *w1, *w = kzalloc(sizeof(*w), GFP_KERNEL);
158
159 w->size = size;
160 w->align = align;
161 w->dev = dev;
162 w->bus = bus;
163
164 list_for_each_entry(w1, list, link) {
165 if (w1->align < align) {
166 list_add_tail(&w->link, &w1->link);
167 return;
168 }
169 }
170 list_add_tail(&w->link, list);
171}
172
173/* Offset device resources of a given type */
174static void __devinit pnv_ioda_offset_dev(struct pci_dev *dev,
175 unsigned int flags,
176 resource_size_t offset)
177{
178 struct resource *r;
179 int i;
180
181 pr_devel(" -> ODR %s [%x] +%016llx\n", pci_name(dev), flags, offset);
182
183 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
184 r = &dev->resource[i];
185 if (r->flags & flags) {
186 dev->resource[i].start += offset;
187 dev->resource[i].end += offset;
188 }
189 }
190
191 pr_devel(" <- ODR %s [%x] +%016llx\n", pci_name(dev), flags, offset);
192}
193
194/* Offset bus resources (& all children) of a given type */
195static void __devinit pnv_ioda_offset_bus(struct pci_bus *bus,
196 unsigned int flags,
197 resource_size_t offset)
198{
199 struct resource *r;
200 struct pci_dev *dev;
201 struct pci_bus *cbus;
202 int i;
203
204 pr_devel(" -> OBR %s [%x] +%016llx\n",
205 bus->self ? pci_name(bus->self) : "root", flags, offset);
206
207 pci_bus_for_each_resource(bus, r, i) {
208 if (r && (r->flags & flags)) {
209 r->start += offset;
210 r->end += offset;
211 }
212 }
213 list_for_each_entry(dev, &bus->devices, bus_list)
214 pnv_ioda_offset_dev(dev, flags, offset);
215 list_for_each_entry(cbus, &bus->children, node)
216 pnv_ioda_offset_bus(cbus, flags, offset);
217
218 pr_devel(" <- OBR %s [%x]\n",
219 bus->self ? pci_name(bus->self) : "root", flags);
220}
221
222/* This is the guts of our IODA resource allocation. This is called
223 * recursively for each bus in the system. It calculates all the
224 * necessary size and requirements for children and assign them
225 * resources such that:
226 *
227 * - Each function fits in it's own contiguous set of IO/M32
228 * segment
229 *
230 * - All segments behind a P2P bridge are contiguous and obey
231 * alignment constraints of those bridges
232 */
233static void __devinit pnv_ioda_calc_bus(struct pci_bus *bus, unsigned int flags,
234 resource_size_t *size,
235 resource_size_t *align)
236{
237 struct pci_controller *hose = pci_bus_to_host(bus);
238 struct pnv_phb *phb = hose->private_data;
239 resource_size_t dev_size, dev_align, start;
240 resource_size_t min_align, min_balign;
241 struct pci_dev *cdev;
242 struct pci_bus *cbus;
243 struct list_head head;
244 struct resource_wrap *w;
245 unsigned int bres;
246
247 *size = *align = 0;
248
249 pr_devel("-> CBR %s [%x]\n",
250 bus->self ? pci_name(bus->self) : "root", flags);
251
252 /* Calculate alignment requirements based on the type
253 * of resource we are working on
254 */
255 if (flags & IORESOURCE_IO) {
256 bres = 0;
257 min_align = phb->ioda.io_segsize;
258 min_balign = 0x1000;
259 } else {
260 bres = 1;
261 min_align = phb->ioda.m32_segsize;
262 min_balign = 0x100000;
263 }
264
265 /* Gather all our children resources ordered by alignment */
266 INIT_LIST_HEAD(&head);
267
268 /* - Busses */
269 list_for_each_entry(cbus, &bus->children, node) {
270 pnv_ioda_calc_bus(cbus, flags, &dev_size, &dev_align);
271 pnv_ioda_add_wrap(&head, cbus, NULL, dev_size, dev_align);
272 }
273
274 /* - Devices */
275 list_for_each_entry(cdev, &bus->devices, bus_list) {
276 pnv_ioda_calc_dev(cdev, flags, &dev_size, &dev_align);
277 /* Align them to segment size */
278 if (dev_align < min_align)
279 dev_align = min_align;
280 pnv_ioda_add_wrap(&head, NULL, cdev, dev_size, dev_align);
281 }
282 if (list_empty(&head))
283 goto empty;
284
285 /* Now we can do two things: assign offsets to them within that
286 * level and get our total alignment & size requirements. The
287 * assignment algorithm is going to be uber-trivial for now, we
288 * can try to be smarter later at filling out holes.
289 */
290 if (bus->self) {
291 /* No offset for downstream bridges */
292 start = 0;
293 } else {
294 /* Offset from the root */
295 if (flags & IORESOURCE_IO)
296 /* Don't hand out IO 0 */
297 start = hose->io_resource.start + 0x1000;
298 else
299 start = hose->mem_resources[0].start;
300 }
301 while(!list_empty(&head)) {
302 w = list_first_entry(&head, struct resource_wrap, link);
303 list_del(&w->link);
304 if (w->size) {
305 if (start) {
306 start = ALIGN(start, w->align);
307 if (w->dev)
308 pnv_ioda_offset_dev(w->dev,flags,start);
309 else if (w->bus)
310 pnv_ioda_offset_bus(w->bus,flags,start);
311 }
312 if (w->align > *align)
313 *align = w->align;
314 }
315 start += w->size;
316 kfree(w);
317 }
318 *size = start;
319
320 /* Align and setup bridge resources */
321 *align = max_t(resource_size_t, *align,
322 max_t(resource_size_t, min_align, min_balign));
323 *size = ALIGN(*size,
324 max_t(resource_size_t, min_align, min_balign));
325 empty:
326 /* Only setup P2P's, not the PHB itself */
327 if (bus->self) {
328 struct resource *res = bus->resource[bres];
329
330 if (WARN_ON(res == NULL))
331 return;
332
333 /*
334 * FIXME: We should probably export and call
335 * pci_bridge_check_ranges() to properly re-initialize
336 * the PCI portion of the flags here, and to detect
337 * what the bridge actually supports.
338 */
339 res->start = 0;
340 res->flags = (*size) ? flags : 0;
341 res->end = (*size) ? (*size - 1) : 0;
342 }
343
344 pr_devel("<- CBR %s [%x] *size=%016llx *align=%016llx\n",
345 bus->self ? pci_name(bus->self) : "root", flags,*size,*align);
346}
347
348static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev) 72static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev)
349{ 73{
350 struct device_node *np; 74 struct device_node *np;
@@ -355,172 +79,6 @@ static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev)
355 return PCI_DN(np); 79 return PCI_DN(np);
356} 80}
357 81
358static void __devinit pnv_ioda_setup_pe_segments(struct pci_dev *dev)
359{
360 struct pci_controller *hose = pci_bus_to_host(dev->bus);
361 struct pnv_phb *phb = hose->private_data;
362 struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
363 unsigned int pe, i;
364 resource_size_t pos;
365 struct resource io_res;
366 struct resource m32_res;
367 struct pci_bus_region region;
368 int rc;
369
370 /* Anything not referenced in the device-tree gets PE#0 */
371 pe = pdn ? pdn->pe_number : 0;
372
373 /* Calculate the device min/max */
374 io_res.start = m32_res.start = (resource_size_t)-1;
375 io_res.end = m32_res.end = 0;
376 io_res.flags = IORESOURCE_IO;
377 m32_res.flags = IORESOURCE_MEM;
378
379 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
380 struct resource *r = NULL;
381 if (dev->resource[i].flags & IORESOURCE_IO)
382 r = &io_res;
383 if (dev->resource[i].flags & IORESOURCE_MEM)
384 r = &m32_res;
385 if (!r)
386 continue;
387 if (dev->resource[i].start < r->start)
388 r->start = dev->resource[i].start;
389 if (dev->resource[i].end > r->end)
390 r->end = dev->resource[i].end;
391 }
392
393 /* Setup IO segments */
394 if (io_res.start < io_res.end) {
395 pcibios_resource_to_bus(dev, &region, &io_res);
396 pos = region.start;
397 i = pos / phb->ioda.io_segsize;
398 while(i < phb->ioda.total_pe && pos <= region.end) {
399 if (phb->ioda.io_segmap[i]) {
400 pr_err("%s: Trying to use IO seg #%d which is"
401 " already used by PE# %d\n",
402 pci_name(dev), i,
403 phb->ioda.io_segmap[i]);
404 /* XXX DO SOMETHING TO DISABLE DEVICE ? */
405 break;
406 }
407 phb->ioda.io_segmap[i] = pe;
408 rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe,
409 OPAL_IO_WINDOW_TYPE,
410 0, i);
411 if (rc != OPAL_SUCCESS) {
412 pr_err("%s: OPAL error %d setting up mapping"
413 " for IO seg# %d\n",
414 pci_name(dev), rc, i);
415 /* XXX DO SOMETHING TO DISABLE DEVICE ? */
416 break;
417 }
418 pos += phb->ioda.io_segsize;
419 i++;
420 };
421 }
422
423 /* Setup M32 segments */
424 if (m32_res.start < m32_res.end) {
425 pcibios_resource_to_bus(dev, &region, &m32_res);
426 pos = region.start;
427 i = pos / phb->ioda.m32_segsize;
428 while(i < phb->ioda.total_pe && pos <= region.end) {
429 if (phb->ioda.m32_segmap[i]) {
430 pr_err("%s: Trying to use M32 seg #%d which is"
431 " already used by PE# %d\n",
432 pci_name(dev), i,
433 phb->ioda.m32_segmap[i]);
434 /* XXX DO SOMETHING TO DISABLE DEVICE ? */
435 break;
436 }
437 phb->ioda.m32_segmap[i] = pe;
438 rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe,
439 OPAL_M32_WINDOW_TYPE,
440 0, i);
441 if (rc != OPAL_SUCCESS) {
442 pr_err("%s: OPAL error %d setting up mapping"
443 " for M32 seg# %d\n",
444 pci_name(dev), rc, i);
445 /* XXX DO SOMETHING TO DISABLE DEVICE ? */
446 break;
447 }
448 pos += phb->ioda.m32_segsize;
449 i++;
450 }
451 }
452}
453
454/* Check if a resource still fits in the total IO or M32 range
455 * for a given PHB
456 */
457static int __devinit pnv_ioda_resource_fit(struct pci_controller *hose,
458 struct resource *r)
459{
460 struct resource *bounds;
461
462 if (r->flags & IORESOURCE_IO)
463 bounds = &hose->io_resource;
464 else if (r->flags & IORESOURCE_MEM)
465 bounds = &hose->mem_resources[0];
466 else
467 return 1;
468
469 if (r->start >= bounds->start && r->end <= bounds->end)
470 return 1;
471 r->flags = 0;
472 return 0;
473}
474
475static void __devinit pnv_ioda_update_resources(struct pci_bus *bus)
476{
477 struct pci_controller *hose = pci_bus_to_host(bus);
478 struct pci_bus *cbus;
479 struct pci_dev *cdev;
480 unsigned int i;
481
482 /* We used to clear all device enables here. However it looks like
483 * clearing MEM enable causes Obsidian (IPR SCS) to go bonkers,
484 * and shoot fatal errors to the PHB which in turns fences itself
485 * and we can't recover from that ... yet. So for now, let's leave
486 * the enables as-is and hope for the best.
487 */
488
489 /* Check if bus resources fit in our IO or M32 range */
490 for (i = 0; bus->self && (i < 2); i++) {
491 struct resource *r = bus->resource[i];
492 if (r && !pnv_ioda_resource_fit(hose, r))
493 pr_err("%s: Bus %d resource %d disabled, no room\n",
494 pci_name(bus->self), bus->number, i);
495 }
496
497 /* Update self if it's not a PHB */
498 if (bus->self)
499 pci_setup_bridge(bus);
500
501 /* Update child devices */
502 list_for_each_entry(cdev, &bus->devices, bus_list) {
503 /* Check if resource fits, if not, disabled it */
504 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
505 struct resource *r = &cdev->resource[i];
506 if (!pnv_ioda_resource_fit(hose, r))
507 pr_err("%s: Resource %d disabled, no room\n",
508 pci_name(cdev), i);
509 }
510
511 /* Assign segments */
512 pnv_ioda_setup_pe_segments(cdev);
513
514 /* Update HW BARs */
515 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
516 pci_update_resource(cdev, i);
517 }
518
519 /* Update child busses */
520 list_for_each_entry(cbus, &bus->children, node)
521 pnv_ioda_update_resources(cbus);
522}
523
524static int __devinit pnv_ioda_alloc_pe(struct pnv_phb *phb) 82static int __devinit pnv_ioda_alloc_pe(struct pnv_phb *phb)
525{ 83{
526 unsigned long pe; 84 unsigned long pe;
@@ -548,7 +106,7 @@ static void __devinit pnv_ioda_free_pe(struct pnv_phb *phb, int pe)
548 * but in the meantime, we need to protect them to avoid warnings 106 * but in the meantime, we need to protect them to avoid warnings
549 */ 107 */
550#ifdef CONFIG_PCI_MSI 108#ifdef CONFIG_PCI_MSI
551static struct pnv_ioda_pe * __devinit __pnv_ioda_get_one_pe(struct pci_dev *dev) 109static struct pnv_ioda_pe * __devinit pnv_ioda_get_pe(struct pci_dev *dev)
552{ 110{
553 struct pci_controller *hose = pci_bus_to_host(dev->bus); 111 struct pci_controller *hose = pci_bus_to_host(dev->bus);
554 struct pnv_phb *phb = hose->private_data; 112 struct pnv_phb *phb = hose->private_data;
@@ -560,19 +118,6 @@ static struct pnv_ioda_pe * __devinit __pnv_ioda_get_one_pe(struct pci_dev *dev)
560 return NULL; 118 return NULL;
561 return &phb->ioda.pe_array[pdn->pe_number]; 119 return &phb->ioda.pe_array[pdn->pe_number];
562} 120}
563
564static struct pnv_ioda_pe * __devinit pnv_ioda_get_pe(struct pci_dev *dev)
565{
566 struct pnv_ioda_pe *pe = __pnv_ioda_get_one_pe(dev);
567
568 while (!pe && dev->bus->self) {
569 dev = dev->bus->self;
570 pe = __pnv_ioda_get_one_pe(dev);
571 if (pe)
572 pe = pe->bus_pe;
573 }
574 return pe;
575}
576#endif /* CONFIG_PCI_MSI */ 121#endif /* CONFIG_PCI_MSI */
577 122
578static int __devinit pnv_ioda_configure_pe(struct pnv_phb *phb, 123static int __devinit pnv_ioda_configure_pe(struct pnv_phb *phb,
@@ -589,7 +134,11 @@ static int __devinit pnv_ioda_configure_pe(struct pnv_phb *phb,
589 dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER; 134 dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
590 fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER; 135 fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
591 parent = pe->pbus->self; 136 parent = pe->pbus->self;
592 count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1; 137 if (pe->flags & PNV_IODA_PE_BUS_ALL)
138 count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
139 else
140 count = 1;
141
593 switch(count) { 142 switch(count) {
594 case 1: bcomp = OpalPciBusAll; break; 143 case 1: bcomp = OpalPciBusAll; break;
595 case 2: bcomp = OpalPciBus7Bits; break; 144 case 2: bcomp = OpalPciBus7Bits; break;
@@ -666,13 +215,13 @@ static void __devinit pnv_ioda_link_pe_by_weight(struct pnv_phb *phb,
666{ 215{
667 struct pnv_ioda_pe *lpe; 216 struct pnv_ioda_pe *lpe;
668 217
669 list_for_each_entry(lpe, &phb->ioda.pe_list, link) { 218 list_for_each_entry(lpe, &phb->ioda.pe_dma_list, dma_link) {
670 if (lpe->dma_weight < pe->dma_weight) { 219 if (lpe->dma_weight < pe->dma_weight) {
671 list_add_tail(&pe->link, &lpe->link); 220 list_add_tail(&pe->dma_link, &lpe->dma_link);
672 return; 221 return;
673 } 222 }
674 } 223 }
675 list_add_tail(&pe->link, &phb->ioda.pe_list); 224 list_add_tail(&pe->dma_link, &phb->ioda.pe_dma_list);
676} 225}
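
The hunk above renames the DMA-weight list: PEs are kept sorted by descending dma_weight so that heavier DMA users get TCE space first. The ordering invariant can be shown with a standalone sketch (plain C with a hypothetical node type, not the kernel list API):

    #include <stdio.h>
    #include <stdlib.h>

    /* hypothetical stand-in for pnv_ioda_pe; only the weight matters here */
    struct node {
    	unsigned int dma_weight;
    	struct node *next;
    };

    /* keep the list sorted by descending dma_weight, as
     * pnv_ioda_link_pe_by_weight() above does with list_add_tail() */
    static void insert_by_weight(struct node **head, struct node *n)
    {
    	while (*head && (*head)->dma_weight >= n->dma_weight)
    		head = &(*head)->next;
    	n->next = *head;
    	*head = n;
    }

    int main(void)
    {
    	unsigned int w[] = { 3, 10, 5 };
    	struct node *head = NULL, *n;
    	int i;

    	for (i = 0; i < 3; i++) {
    		n = malloc(sizeof(*n));
    		n->dma_weight = w[i];
    		insert_by_weight(&head, n);
    	}
    	for (n = head; n; n = n->next)
    		printf("%u ", n->dma_weight);	/* prints: 10 5 3 */
    	printf("\n");
    	return 0;
    }

Ties land after existing equal-weight entries, matching the list_add_tail() placement in the kernel routine.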
677 226
678static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev) 227static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
@@ -699,6 +248,7 @@ static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
699 return 10; 248 return 10;
700} 249}
701 250
251#if 0
702static struct pnv_ioda_pe * __devinit pnv_ioda_setup_dev_PE(struct pci_dev *dev) 252static struct pnv_ioda_pe * __devinit pnv_ioda_setup_dev_PE(struct pci_dev *dev)
703{ 253{
704 struct pci_controller *hose = pci_bus_to_host(dev->bus); 254 struct pci_controller *hose = pci_bus_to_host(dev->bus);
@@ -767,6 +317,7 @@ static struct pnv_ioda_pe * __devinit pnv_ioda_setup_dev_PE(struct pci_dev *dev)
767 317
768 return pe; 318 return pe;
769} 319}
320#endif /* Useful for SRIOV case */
770 321
771static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe) 322static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
772{ 323{
@@ -784,34 +335,33 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
784 pdn->pcidev = dev; 335 pdn->pcidev = dev;
785 pdn->pe_number = pe->pe_number; 336 pdn->pe_number = pe->pe_number;
786 pe->dma_weight += pnv_ioda_dma_weight(dev); 337 pe->dma_weight += pnv_ioda_dma_weight(dev);
787 if (dev->subordinate) 338 if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
788 pnv_ioda_setup_same_PE(dev->subordinate, pe); 339 pnv_ioda_setup_same_PE(dev->subordinate, pe);
789 } 340 }
790} 341}
791 342
792static void __devinit pnv_ioda_setup_bus_PE(struct pci_dev *dev, 343/*
 793 struct pnv_ioda_pe *ppe) 344 * There are two types of PCI bus sensitive PEs: one comprises a
 345 * single PCI bus, while the other contains the primary PCI bus and its
 346 * subordinate PCI devices and buses. The second type is normally
 347 * originated by a PCIe-to-PCI bridge or PLX switch downstream ports.
348 */
349static void __devinit pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
794{ 350{
795 struct pci_controller *hose = pci_bus_to_host(dev->bus); 351 struct pci_controller *hose = pci_bus_to_host(bus);
796 struct pnv_phb *phb = hose->private_data; 352 struct pnv_phb *phb = hose->private_data;
797 struct pci_bus *bus = dev->subordinate;
798 struct pnv_ioda_pe *pe; 353 struct pnv_ioda_pe *pe;
799 int pe_num; 354 int pe_num;
800 355
801 if (!bus) {
802 pr_warning("%s: Bridge without a subordinate bus !\n",
803 pci_name(dev));
804 return;
805 }
806 pe_num = pnv_ioda_alloc_pe(phb); 356 pe_num = pnv_ioda_alloc_pe(phb);
807 if (pe_num == IODA_INVALID_PE) { 357 if (pe_num == IODA_INVALID_PE) {
808 pr_warning("%s: Not enough PE# available, disabling bus\n", 358 pr_warning("%s: Not enough PE# available for PCI bus %04x:%02x\n",
809 pci_name(dev)); 359 __func__, pci_domain_nr(bus), bus->number);
810 return; 360 return;
811 } 361 }
812 362
813 pe = &phb->ioda.pe_array[pe_num]; 363 pe = &phb->ioda.pe_array[pe_num];
814 ppe->bus_pe = pe; 364 pe->flags = (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
815 pe->pbus = bus; 365 pe->pbus = bus;
816 pe->pdev = NULL; 366 pe->pdev = NULL;
817 pe->tce32_seg = -1; 367 pe->tce32_seg = -1;
@@ -819,8 +369,12 @@ static void __devinit pnv_ioda_setup_bus_PE(struct pci_dev *dev,
819 pe->rid = bus->busn_res.start << 8; 369 pe->rid = bus->busn_res.start << 8;
820 pe->dma_weight = 0; 370 pe->dma_weight = 0;
821 371
822 pe_info(pe, "Secondary busses %pR associated with PE\n", 372 if (all)
823 &bus->busn_res); 373 pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n",
374 bus->busn_res.start, bus->busn_res.end, pe_num);
375 else
376 pe_info(pe, "Secondary bus %d associated with PE#%d\n",
377 bus->busn_res.start, pe_num);
824 378
825 if (pnv_ioda_configure_pe(phb, pe)) { 379 if (pnv_ioda_configure_pe(phb, pe)) {
826 /* XXX What do we do here ? */ 380 /* XXX What do we do here ? */
@@ -833,6 +387,9 @@ static void __devinit pnv_ioda_setup_bus_PE(struct pci_dev *dev,
833 /* Associate it with all child devices */ 387 /* Associate it with all child devices */
834 pnv_ioda_setup_same_PE(bus, pe); 388 pnv_ioda_setup_same_PE(bus, pe);
835 389
390 /* Put PE to the list */
391 list_add_tail(&pe->list, &phb->ioda.pe_list);
392
836 /* Account for one DMA PE if at least one DMA capable device exist 393 /* Account for one DMA PE if at least one DMA capable device exist
837 * below the bridge 394 * below the bridge
838 */ 395 */
@@ -848,17 +405,33 @@ static void __devinit pnv_ioda_setup_bus_PE(struct pci_dev *dev,
848static void __devinit pnv_ioda_setup_PEs(struct pci_bus *bus) 405static void __devinit pnv_ioda_setup_PEs(struct pci_bus *bus)
849{ 406{
850 struct pci_dev *dev; 407 struct pci_dev *dev;
851 struct pnv_ioda_pe *pe; 408
409 pnv_ioda_setup_bus_PE(bus, 0);
852 410
853 list_for_each_entry(dev, &bus->devices, bus_list) { 411 list_for_each_entry(dev, &bus->devices, bus_list) {
854 pe = pnv_ioda_setup_dev_PE(dev); 412 if (dev->subordinate) {
855 if (pe == NULL) 413 if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
856 continue; 414 pnv_ioda_setup_bus_PE(dev->subordinate, 1);
857 /* Leaving the PCIe domain ... single PE# */ 415 else
858 if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE) 416 pnv_ioda_setup_PEs(dev->subordinate);
859 pnv_ioda_setup_bus_PE(dev, pe); 417 }
860 else if (dev->subordinate) 418 }
861 pnv_ioda_setup_PEs(dev->subordinate); 419}
420
421/*
 422 * Configure PEs so that the downstream PCI buses and devices
 423 * get their associated PE#. Unfortunately, we haven't yet
 424 * figured out a way to identify PLX bridges. So for now we
 425 * simply assign the PCI bus and everything subordinate to the
 426 * root port to a PE# here. This scheme is expected to change
 427 * once PLX bridges can be detected correctly.
428 */
429static void __devinit pnv_pci_ioda_setup_PEs(void)
430{
431 struct pci_controller *hose, *tmp;
432
433 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
434 pnv_ioda_setup_PEs(hose->bus);
862 } 435 }
863} 436}
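
A hedged sketch of the traversal implemented by pnv_ioda_setup_PEs() above, with toy bus/device types standing in for the PCI core structures: every bus gets a PE of its own, and a conventional-PCI subtree behind a PCIe-to-PCI bridge is folded into a single all-covering PE.

    #include <stdio.h>

    struct bus;

    struct dev {
    	int is_pcie_to_pci_bridge;
    	struct bus *subordinate;	/* NULL for endpoints */
    	struct dev *next;
    };

    struct bus {
    	int number;
    	struct dev *devices;
    };

    static void make_bus_pe(struct bus *b, int all)
    {
    	printf("bus %d -> new PE%s\n", b->number,
    	       all ? " (covers whole subtree)" : "");
    }

    /* each PCIe bus gets its own PE; a conventional PCI subtree
     * behind a PCIe-to-PCI bridge collapses into one PE */
    static void setup_PEs(struct bus *b)
    {
    	struct dev *d;

    	make_bus_pe(b, 0);
    	for (d = b->devices; d; d = d->next) {
    		if (!d->subordinate)
    			continue;
    		if (d->is_pcie_to_pci_bridge)
    			make_bus_pe(d->subordinate, 1);
    		else
    			setup_PEs(d->subordinate);
    	}
    }

    int main(void)
    {
    	struct bus leaf = { 2, NULL };
    	struct dev bridge = { 1, &leaf, NULL };	/* PCIe-to-PCI bridge */
    	struct bus root = { 0, &bridge };

    	setup_PEs(&root);	/* bus 0 -> own PE; bus 2 -> subtree PE */
    	return 0;
    }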
864 437
@@ -1000,7 +573,7 @@ static void __devinit pnv_ioda_setup_dma(struct pnv_phb *phb)
1000 remaining = phb->ioda.tce32_count; 573 remaining = phb->ioda.tce32_count;
1001 tw = phb->ioda.dma_weight; 574 tw = phb->ioda.dma_weight;
1002 base = 0; 575 base = 0;
1003 list_for_each_entry(pe, &phb->ioda.pe_list, link) { 576 list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) {
1004 if (!pe->dma_weight) 577 if (!pe->dma_weight)
1005 continue; 578 continue;
1006 if (!remaining) { 579 if (!remaining) {
@@ -1109,36 +682,115 @@ static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
1109static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { } 682static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
1110#endif /* CONFIG_PCI_MSI */ 683#endif /* CONFIG_PCI_MSI */
1111 684
1112/* This is the starting point of our IODA specific resource 685/*
 1113 * allocation process 686 * This function is supposed to be called per PE, from top to
 687 * bottom, so the I/O or MMIO segment assigned to a parent PE
 688 * can be overridden by its child PEs if necessary.
1114 */ 689 */
1115static void __devinit pnv_pci_ioda_fixup_phb(struct pci_controller *hose) 690static void __devinit pnv_ioda_setup_pe_seg(struct pci_controller *hose,
691 struct pnv_ioda_pe *pe)
1116{ 692{
1117 resource_size_t size, align; 693 struct pnv_phb *phb = hose->private_data;
1118 struct pci_bus *child; 694 struct pci_bus_region region;
695 struct resource *res;
696 int i, index;
697 int rc;
1119 698
1120 /* Associate PEs per functions */ 699 /*
 1121 pnv_ioda_setup_PEs(hose->bus); 700 * NOTE: We only handle PCI bus based PEs for now. PCI
 701 * device based PEs, for example for SR-IOV VFs, will be
 702 * sorted out later.
703 */
704 BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));
1122 705
1123 /* Calculate all resources */ 706 pci_bus_for_each_resource(pe->pbus, res, i) {
1124 pnv_ioda_calc_bus(hose->bus, IORESOURCE_IO, &size, &align); 707 if (!res || !res->flags ||
1125 pnv_ioda_calc_bus(hose->bus, IORESOURCE_MEM, &size, &align); 708 res->start > res->end)
709 continue;
1126 710
1127 /* Apply then to HW */ 711 if (res->flags & IORESOURCE_IO) {
1128 pnv_ioda_update_resources(hose->bus); 712 region.start = res->start - phb->ioda.io_pci_base;
713 region.end = res->end - phb->ioda.io_pci_base;
714 index = region.start / phb->ioda.io_segsize;
715
716 while (index < phb->ioda.total_pe &&
717 region.start <= region.end) {
718 phb->ioda.io_segmap[index] = pe->pe_number;
719 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
720 pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
721 if (rc != OPAL_SUCCESS) {
722 pr_err("%s: OPAL error %d when mapping IO "
723 "segment #%d to PE#%d\n",
724 __func__, rc, index, pe->pe_number);
725 break;
726 }
727
728 region.start += phb->ioda.io_segsize;
729 index++;
730 }
731 } else if (res->flags & IORESOURCE_MEM) {
732 region.start = res->start -
733 hose->pci_mem_offset -
734 phb->ioda.m32_pci_base;
735 region.end = res->end -
736 hose->pci_mem_offset -
737 phb->ioda.m32_pci_base;
738 index = region.start / phb->ioda.m32_segsize;
739
740 while (index < phb->ioda.total_pe &&
741 region.start <= region.end) {
742 phb->ioda.m32_segmap[index] = pe->pe_number;
743 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
744 pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
745 if (rc != OPAL_SUCCESS) {
746 pr_err("%s: OPAL error %d when mapping M32 "
747 "segment#%d to PE#%d",
748 __func__, rc, index, pe->pe_number);
749 break;
750 }
751
752 region.start += phb->ioda.m32_segsize;
753 index++;
754 }
755 }
756 }
757}
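
The segment walk above is plain integer arithmetic: make the resource window-relative, divide by the segment size, and claim every segment the range overlaps. A minimal sketch with made-up numbers (where the real code issues an opal_pci_map_pe_mmio_window() call per segment, this just prints):

    #include <stdio.h>

    /* walk every fixed-size segment that [start, end] overlaps, the
     * way pnv_ioda_setup_pe_seg() does for IO and M32 windows; start
     * and end are already window-relative (base subtracted) */
    static void map_segments(unsigned long start, unsigned long end,
    			 unsigned long segsize, unsigned int total_pe,
    			 int pe_number)
    {
    	unsigned int index = start / segsize;

    	while (index < total_pe && start <= end) {
    		printf("segment #%u -> PE#%d\n", index, pe_number);
    		start += segsize;
    		index++;
    	}
    }

    int main(void)
    {
    	/* a 64K-segmented window: 0x30000..0x4ffff spans segments 3 and 4 */
    	map_segments(0x30000, 0x4ffff, 0x10000, 256, 5);
    	return 0;
    }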
1129 758
1130 /* Setup DMA */ 759static void __devinit pnv_pci_ioda_setup_seg(void)
1131 pnv_ioda_setup_dma(hose->private_data); 760{
761 struct pci_controller *tmp, *hose;
762 struct pnv_phb *phb;
763 struct pnv_ioda_pe *pe;
1132 764
1133 /* Configure PCI Express settings */ 765 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
1134 list_for_each_entry(child, &hose->bus->children, node) { 766 phb = hose->private_data;
1135 struct pci_dev *self = child->self; 767 list_for_each_entry(pe, &phb->ioda.pe_list, list) {
1136 if (!self) 768 pnv_ioda_setup_pe_seg(hose, pe);
1137 continue; 769 }
1138 pcie_bus_configure_settings(child, self->pcie_mpss); 770 }
771}
772
773static void __devinit pnv_pci_ioda_setup_DMA(void)
774{
775 struct pci_controller *hose, *tmp;
776 struct pnv_phb *phb;
777
778 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
779 pnv_ioda_setup_dma(hose->private_data);
780
781 /* Mark the PHB initialization done */
782 phb = hose->private_data;
783 phb->initialized = 1;
1139 } 784 }
1140} 785}
1141 786
787static void __devinit pnv_pci_ioda_fixup(void)
788{
789 pnv_pci_ioda_setup_PEs();
790 pnv_pci_ioda_setup_seg();
791 pnv_pci_ioda_setup_DMA();
792}
793
1142/* 794/*
1143 * Returns the alignment for I/O or memory windows for P2P 795 * Returns the alignment for I/O or memory windows for P2P
1144 * bridges. That actually depends on how PEs are segmented. 796 * bridges. That actually depends on how PEs are segmented.
@@ -1182,10 +834,22 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
1182 */ 834 */
1183static int __devinit pnv_pci_enable_device_hook(struct pci_dev *dev) 835static int __devinit pnv_pci_enable_device_hook(struct pci_dev *dev)
1184{ 836{
1185 struct pci_dn *pdn = pnv_ioda_get_pdn(dev); 837 struct pci_controller *hose = pci_bus_to_host(dev->bus);
838 struct pnv_phb *phb = hose->private_data;
839 struct pci_dn *pdn;
1186 840
 841 /* The function is probably called while the PEs have
 842 * not been created yet. For example, resource reassignment
 843 * during the PCI probe period. We just skip the check if
 844 * the PEs aren't ready yet.
845 */
846 if (!phb->initialized)
847 return 0;
848
849 pdn = pnv_ioda_get_pdn(dev);
1187 if (!pdn || pdn->pe_number == IODA_INVALID_PE) 850 if (!pdn || pdn->pe_number == IODA_INVALID_PE)
1188 return -EINVAL; 851 return -EINVAL;
852
1189 return 0; 853 return 0;
1190} 854}
1191 855
@@ -1276,9 +940,9 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np)
1276 /* Allocate aux data & arrays */ 940 /* Allocate aux data & arrays */
1277 size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long)); 941 size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
1278 m32map_off = size; 942 m32map_off = size;
1279 size += phb->ioda.total_pe; 943 size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
1280 iomap_off = size; 944 iomap_off = size;
1281 size += phb->ioda.total_pe; 945 size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
1282 pemap_off = size; 946 pemap_off = size;
1283 size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe); 947 size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
1284 aux = alloc_bootmem(size); 948 aux = alloc_bootmem(size);
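
The hunk above also fixes the aux sizing: the two segment-map sizes previously dropped the sizeof() factor. The pattern itself is one bootmem allocation carved into the PE bitmap, two segment maps, and the PE array via running byte offsets. A rough user-space analogue (malloc instead of alloc_bootmem, illustrative sizes, alignment kept trivial):

    #include <stdio.h>
    #include <stdlib.h>

    struct pe { int num; };

    int main(void)
    {
    	unsigned int total_pe = 128;	/* illustrative */
    	size_t size, m32map_off, iomap_off, pemap_off;
    	unsigned int *m32_segmap, *io_segmap;
    	struct pe *pe_array;
    	char *aux;

    	size = (total_pe + 7) / 8;		  /* PE allocation bitmap */
    	m32map_off = size;
    	size += total_pe * sizeof(m32_segmap[0]); /* M32 segment map */
    	iomap_off = size;
    	size += total_pe * sizeof(io_segmap[0]);  /* IO segment map */
    	pemap_off = size;
    	size += total_pe * sizeof(struct pe);	  /* PE array */

    	aux = calloc(1, size);
    	if (!aux)
    		return 1;

    	m32_segmap = (unsigned int *)(aux + m32map_off);
    	io_segmap = (unsigned int *)(aux + iomap_off);
    	pe_array = (struct pe *)(aux + pemap_off);

    	m32_segmap[0] = io_segmap[0] = 0;
    	pe_array[0].num = 0;
    	printf("one %zu-byte allocation, four views\n", size);
    	free(aux);
    	return 0;
    }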
@@ -1289,6 +953,7 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np)
1289 phb->ioda.pe_array = aux + pemap_off; 953 phb->ioda.pe_array = aux + pemap_off;
1290 set_bit(0, phb->ioda.pe_alloc); 954 set_bit(0, phb->ioda.pe_alloc);
1291 955
956 INIT_LIST_HEAD(&phb->ioda.pe_dma_list);
1292 INIT_LIST_HEAD(&phb->ioda.pe_list); 957 INIT_LIST_HEAD(&phb->ioda.pe_list);
1293 958
1294 /* Calculate how many 32-bit TCE segments we have */ 959 /* Calculate how many 32-bit TCE segments we have */
@@ -1337,15 +1002,17 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np)
1337 /* Setup MSI support */ 1002 /* Setup MSI support */
1338 pnv_pci_init_ioda_msis(phb); 1003 pnv_pci_init_ioda_msis(phb);
1339 1004
1340 /* We set both PCI_PROBE_ONLY and PCI_REASSIGN_ALL_RSRC. This is an 1005 /*
1341 * odd combination which essentially means that we skip all resource 1006 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
 1342 * fixups and assignments in the generic code, and do it all 1007 * to let the PCI core do resource assignment. The PCI core
 1343 * ourselves here 1008 * is expected to apply correct I/O and MMIO alignment to the
 1009 * P2P bridge BARs so that each PCI bus (excluding the child
 1010 * P2P bridges) can form an individual PE.
1344 */ 1011 */
1345 ppc_md.pcibios_fixup_phb = pnv_pci_ioda_fixup_phb; 1012 ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
1346 ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook; 1013 ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook;
1347 ppc_md.pcibios_window_alignment = pnv_pci_window_alignment; 1014 ppc_md.pcibios_window_alignment = pnv_pci_window_alignment;
1348 pci_add_flags(PCI_PROBE_ONLY | PCI_REASSIGN_ALL_RSRC); 1015 pci_add_flags(PCI_REASSIGN_ALL_RSRC);
1349 1016
1350 /* Reset IODA tables to a clean state */ 1017 /* Reset IODA tables to a clean state */
1351 rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET); 1018 rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET);
diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
index 264967770c3..6b4bef4e9d8 100644
--- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c
+++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
@@ -30,7 +30,6 @@
30#include <asm/opal.h> 30#include <asm/opal.h>
31#include <asm/iommu.h> 31#include <asm/iommu.h>
32#include <asm/tce.h> 32#include <asm/tce.h>
33#include <asm/abs_addr.h>
34 33
35#include "powernv.h" 34#include "powernv.h"
36#include "pci.h" 35#include "pci.h"
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index be3cfc5ceab..c01688a1a74 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -30,7 +30,6 @@
30#include <asm/opal.h> 30#include <asm/opal.h>
31#include <asm/iommu.h> 31#include <asm/iommu.h>
32#include <asm/tce.h> 32#include <asm/tce.h>
33#include <asm/abs_addr.h>
34#include <asm/firmware.h> 33#include <asm/firmware.h>
35 34
36#include "powernv.h" 35#include "powernv.h"
@@ -447,6 +446,11 @@ static void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
447 pnv_tce_invalidate(tbl, tces, tcep - 1); 446 pnv_tce_invalidate(tbl, tces, tcep - 1);
448} 447}
449 448
449static unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
450{
451 return ((u64 *)tbl->it_base)[index - tbl->it_offset];
452}
453
450void pnv_pci_setup_iommu_table(struct iommu_table *tbl, 454void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
451 void *tce_mem, u64 tce_size, 455 void *tce_mem, u64 tce_size,
452 u64 dma_offset) 456 u64 dma_offset)
@@ -597,6 +601,7 @@ void __init pnv_pci_init(void)
597 ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup; 601 ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup;
598 ppc_md.tce_build = pnv_tce_build; 602 ppc_md.tce_build = pnv_tce_build;
599 ppc_md.tce_free = pnv_tce_free; 603 ppc_md.tce_free = pnv_tce_free;
604 ppc_md.tce_get = pnv_tce_get;
600 ppc_md.pci_probe_mode = pnv_pci_probe_mode; 605 ppc_md.pci_probe_mode = pnv_pci_probe_mode;
601 set_pci_dma_ops(&dma_iommu_ops); 606 set_pci_dma_ops(&dma_iommu_ops);
602 607
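
The new pnv_tce_get() callback is a direct read of the TCE array: it_base is the virtual address of the entries and it_offset the first IO page number the table covers. A cut-down mock of the same indexing (hypothetical stand-in struct, not the kernel's iommu_table):

    #include <stdio.h>
    #include <stdint.h>

    /* cut-down stand-in for the kernel's iommu_table */
    struct tce_table {
    	uint64_t *it_base;	/* virtual address of the TCE entries */
    	long it_offset;		/* first IO page number the table covers */
    };

    /* same indexing as the pnv_tce_get() added above */
    static uint64_t tce_get(struct tce_table *tbl, long index)
    {
    	return tbl->it_base[index - tbl->it_offset];
    }

    int main(void)
    {
    	uint64_t entries[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };
    	struct tce_table tbl = { entries, 100 };

    	/* IO page 102 is the third entry: prints 0x3000 */
    	printf("0x%llx\n", (unsigned long long)tce_get(&tbl, 102));
    	return 0;
    }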
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 8bc47963464..7cfb7c883de 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -17,9 +17,14 @@ enum pnv_phb_model {
17}; 17};
18 18
19#define PNV_PCI_DIAG_BUF_SIZE 4096 19#define PNV_PCI_DIAG_BUF_SIZE 4096
20#define PNV_IODA_PE_DEV (1 << 0) /* PE has single PCI device */
21#define PNV_IODA_PE_BUS (1 << 1) /* PE has primary PCI bus */
22#define PNV_IODA_PE_BUS_ALL (1 << 2) /* PE has subordinate buses */
20 23
21/* Data associated with a PE, including IOMMU tracking etc.. */ 24/* Data associated with a PE, including IOMMU tracking etc.. */
22struct pnv_ioda_pe { 25struct pnv_ioda_pe {
26 unsigned long flags;
27
23 /* A PE can be associated with a single device or an 28 /* A PE can be associated with a single device or an
24 * entire bus (& children). In the former case, pdev 29 * entire bus (& children). In the former case, pdev
25 * is populated, in the later case, pbus is. 30 * is populated, in the later case, pbus is.
@@ -40,11 +45,6 @@ struct pnv_ioda_pe {
40 */ 45 */
41 unsigned int dma_weight; 46 unsigned int dma_weight;
42 47
43 /* This is a PCI-E -> PCI-X bridge, this points to the
44 * corresponding bus PE
45 */
46 struct pnv_ioda_pe *bus_pe;
47
48 /* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */ 48 /* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */
49 int tce32_seg; 49 int tce32_seg;
50 int tce32_segcount; 50 int tce32_segcount;
@@ -59,7 +59,8 @@ struct pnv_ioda_pe {
59 int mve_number; 59 int mve_number;
60 60
61 /* Link in list of PE#s */ 61 /* Link in list of PE#s */
62 struct list_head link; 62 struct list_head dma_link;
63 struct list_head list;
63}; 64};
64 65
65struct pnv_phb { 66struct pnv_phb {
@@ -68,6 +69,7 @@ struct pnv_phb {
68 enum pnv_phb_model model; 69 enum pnv_phb_model model;
69 u64 opal_id; 70 u64 opal_id;
70 void __iomem *regs; 71 void __iomem *regs;
72 int initialized;
71 spinlock_t lock; 73 spinlock_t lock;
72 74
73#ifdef CONFIG_PCI_MSI 75#ifdef CONFIG_PCI_MSI
@@ -107,6 +109,11 @@ struct pnv_phb {
107 unsigned int *io_segmap; 109 unsigned int *io_segmap;
108 struct pnv_ioda_pe *pe_array; 110 struct pnv_ioda_pe *pe_array;
109 111
 112 /* List of used PEs, sorted by
 113 * creation order
 114 */
115 struct list_head pe_list;
116
110 /* Reverse map of PEs, will have to extend if 117 /* Reverse map of PEs, will have to extend if
111 * we are to support more than 256 PEs, indexed 118 * we are to support more than 256 PEs, indexed
112 * bus { bus, devfn } 119 * bus { bus, devfn }
@@ -125,7 +132,7 @@ struct pnv_phb {
 125 /* List of used PEs, sorted at 132 /* List of used PEs, sorted at
126 * boot for resource allocation purposes 133 * boot for resource allocation purposes
127 */ 134 */
128 struct list_head pe_list; 135 struct list_head pe_dma_list;
129 } ioda; 136 } ioda;
130 }; 137 };
131 138
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
index 3124cf791eb..d00d7b0a3bd 100644
--- a/arch/powerpc/platforms/ps3/htab.c
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -43,7 +43,7 @@ enum ps3_lpar_vas_id {
43 43
44static DEFINE_SPINLOCK(ps3_htab_lock); 44static DEFINE_SPINLOCK(ps3_htab_lock);
45 45
46static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va, 46static long ps3_hpte_insert(unsigned long hpte_group, unsigned long vpn,
47 unsigned long pa, unsigned long rflags, unsigned long vflags, 47 unsigned long pa, unsigned long rflags, unsigned long vflags,
48 int psize, int ssize) 48 int psize, int ssize)
49{ 49{
@@ -61,7 +61,7 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
61 */ 61 */
62 vflags &= ~HPTE_V_SECONDARY; 62 vflags &= ~HPTE_V_SECONDARY;
63 63
64 hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID; 64 hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
65 hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags; 65 hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags;
66 66
67 spin_lock_irqsave(&ps3_htab_lock, flags); 67 spin_lock_irqsave(&ps3_htab_lock, flags);
@@ -75,8 +75,8 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
75 75
76 if (result) { 76 if (result) {
 77 /* all entries bolted! */ 77 /* all entries bolted! */
78 pr_info("%s:result=%d va=%lx pa=%lx ix=%lx v=%llx r=%llx\n", 78 pr_info("%s:result=%d vpn=%lx pa=%lx ix=%lx v=%llx r=%llx\n",
79 __func__, result, va, pa, hpte_group, hpte_v, hpte_r); 79 __func__, result, vpn, pa, hpte_group, hpte_v, hpte_r);
80 BUG(); 80 BUG();
81 } 81 }
82 82
@@ -107,7 +107,7 @@ static long ps3_hpte_remove(unsigned long hpte_group)
107} 107}
108 108
109static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp, 109static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
110 unsigned long va, int psize, int ssize, int local) 110 unsigned long vpn, int psize, int ssize, int local)
111{ 111{
112 int result; 112 int result;
113 u64 hpte_v, want_v, hpte_rs; 113 u64 hpte_v, want_v, hpte_rs;
@@ -115,7 +115,7 @@ static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
115 unsigned long flags; 115 unsigned long flags;
116 long ret; 116 long ret;
117 117
118 want_v = hpte_encode_v(va, psize, ssize); 118 want_v = hpte_encode_v(vpn, psize, ssize);
119 119
120 spin_lock_irqsave(&ps3_htab_lock, flags); 120 spin_lock_irqsave(&ps3_htab_lock, flags);
121 121
@@ -125,8 +125,8 @@ static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
125 &hpte_rs); 125 &hpte_rs);
126 126
127 if (result) { 127 if (result) {
128 pr_info("%s: res=%d read va=%lx slot=%lx psize=%d\n", 128 pr_info("%s: res=%d read vpn=%lx slot=%lx psize=%d\n",
129 __func__, result, va, slot, psize); 129 __func__, result, vpn, slot, psize);
130 BUG(); 130 BUG();
131 } 131 }
132 132
@@ -159,7 +159,7 @@ static void ps3_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
159 panic("ps3_hpte_updateboltedpp() not implemented"); 159 panic("ps3_hpte_updateboltedpp() not implemented");
160} 160}
161 161
162static void ps3_hpte_invalidate(unsigned long slot, unsigned long va, 162static void ps3_hpte_invalidate(unsigned long slot, unsigned long vpn,
163 int psize, int ssize, int local) 163 int psize, int ssize, int local)
164{ 164{
165 unsigned long flags; 165 unsigned long flags;
@@ -170,8 +170,8 @@ static void ps3_hpte_invalidate(unsigned long slot, unsigned long va,
170 result = lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT, slot, 0, 0); 170 result = lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT, slot, 0, 0);
171 171
172 if (result) { 172 if (result) {
173 pr_info("%s: res=%d va=%lx slot=%lx psize=%d\n", 173 pr_info("%s: res=%d vpn=%lx slot=%lx psize=%d\n",
174 __func__, result, va, slot, psize); 174 __func__, result, vpn, slot, psize);
175 BUG(); 175 BUG();
176 } 176 }
177 177
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 2d664c5a83b..3f509f86432 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -184,11 +184,15 @@ early_param("ps3flash", early_parse_ps3flash);
184#define prealloc_ps3flash_bounce_buffer() do { } while (0) 184#define prealloc_ps3flash_bounce_buffer() do { } while (0)
185#endif 185#endif
186 186
187static int ps3_set_dabr(unsigned long dabr) 187static int ps3_set_dabr(unsigned long dabr, unsigned long dabrx)
188{ 188{
189 enum {DABR_USER = 1, DABR_KERNEL = 2,}; 189 /* Have to set at least one bit in the DABRX */
190 if (dabrx == 0 && dabr == 0)
191 dabrx = DABRX_USER;
 192 /* the hypervisor only allows us to set BTI, kernel and user */
193 dabrx &= DABRX_BTI | DABRX_KERNEL | DABRX_USER;
190 194
191 return lv1_set_dabr(dabr, DABR_KERNEL | DABR_USER) ? -1 : 0; 195 return lv1_set_dabr(dabr, dabrx) ? -1 : 0;
192} 196}
193 197
194static void __init ps3_setup_arch(void) 198static void __init ps3_setup_arch(void)
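
The rewritten ps3_set_dabr() refuses an all-zero DABRX (the hypervisor requires at least one context bit) and masks the value down to the breakpoint-translation, kernel and user bits before the lv1 call. The same validation as a pure function; the bit positions below follow the usual powerpc DABRX_* layout but should be treated as illustrative:

    #include <stdio.h>

    /* assumed DABRX bit layout, for illustration only */
    #define DABRX_USER	(1UL << 0)
    #define DABRX_KERNEL	(1UL << 1)
    #define DABRX_BTI	(1UL << 3)

    static unsigned long sanitize_dabrx(unsigned long dabr, unsigned long dabrx)
    {
    	/* the hypervisor rejects a DABRX with no context bits set */
    	if (dabrx == 0 && dabr == 0)
    		dabrx = DABRX_USER;
    	/* only BTI, kernel and user are settable through lv1 */
    	return dabrx & (DABRX_BTI | DABRX_KERNEL | DABRX_USER);
    }

    int main(void)
    {
    	printf("%lx\n", sanitize_dabrx(0, 0));		/* 1: forced to USER */
    	printf("%lx\n", sanitize_dabrx(0x100, 0xff));	/* b: stray bits dropped */
    	return 0;
    }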
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index c222189f5bb..890622b87c8 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -6,8 +6,9 @@ obj-y := lpar.o hvCall.o nvram.o reconfig.o \
6 firmware.o power.o dlpar.o mobility.o 6 firmware.o power.o dlpar.o mobility.o
7obj-$(CONFIG_SMP) += smp.o 7obj-$(CONFIG_SMP) += smp.o
8obj-$(CONFIG_SCANLOG) += scanlog.o 8obj-$(CONFIG_SCANLOG) += scanlog.o
9obj-$(CONFIG_EEH) += eeh.o eeh_dev.o eeh_cache.o eeh_driver.o \ 9obj-$(CONFIG_EEH) += eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \
10 eeh_event.o eeh_sysfs.o eeh_pseries.o 10 eeh_driver.o eeh_event.o eeh_sysfs.o \
11 eeh_pseries.o
11obj-$(CONFIG_KEXEC) += kexec.o 12obj-$(CONFIG_KEXEC) += kexec.o
12obj-$(CONFIG_PCI) += pci.o pci_dlpar.o 13obj-$(CONFIG_PCI) += pci.o pci_dlpar.o
13obj-$(CONFIG_PSERIES_MSI) += msi.o 14obj-$(CONFIG_PSERIES_MSI) += msi.o
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index ecd394cf34e..9a04322b173 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -92,6 +92,20 @@ struct eeh_ops *eeh_ops = NULL;
92int eeh_subsystem_enabled; 92int eeh_subsystem_enabled;
93EXPORT_SYMBOL(eeh_subsystem_enabled); 93EXPORT_SYMBOL(eeh_subsystem_enabled);
94 94
95/*
96 * EEH probe mode support. The intention is to support multiple
 97 * platforms for EEH. Some platforms like pSeries do PCI enumeration
 98 * based on the device tree, while others like powernv probe
 99 * PCI devices from hardware. The flag distinguishes the two.
 100 * In addition, struct eeh_ops::probe is invoked for a particular
 101 * OF node or PCI device so that the corresponding PE is created
 102 * there.
103 */
104int eeh_probe_mode;
105
106/* Global EEH mutex */
107DEFINE_MUTEX(eeh_mutex);
108
95/* Lock to avoid races due to multiple reports of an error */ 109/* Lock to avoid races due to multiple reports of an error */
96static DEFINE_RAW_SPINLOCK(confirm_error_lock); 110static DEFINE_RAW_SPINLOCK(confirm_error_lock);
97 111
@@ -204,22 +218,12 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len)
204 } 218 }
205 } 219 }
206 220
207 /* Gather status on devices under the bridge */
208 if (dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) {
209 struct device_node *child;
210
211 for_each_child_of_node(dn, child) {
212 if (of_node_to_eeh_dev(child))
213 n += eeh_gather_pci_data(of_node_to_eeh_dev(child), buf+n, len-n);
214 }
215 }
216
217 return n; 221 return n;
218} 222}
219 223
220/** 224/**
221 * eeh_slot_error_detail - Generate combined log including driver log and error log 225 * eeh_slot_error_detail - Generate combined log including driver log and error log
222 * @edev: device to report error log for 226 * @pe: EEH PE
223 * @severity: temporary or permanent error log 227 * @severity: temporary or permanent error log
224 * 228 *
225 * This routine should be called to generate the combined log, which 229 * This routine should be called to generate the combined log, which
@@ -227,17 +231,22 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len)
227 * out from the config space of the corresponding PCI device, while 231 * out from the config space of the corresponding PCI device, while
228 * the error log is fetched through platform dependent function call. 232 * the error log is fetched through platform dependent function call.
229 */ 233 */
230void eeh_slot_error_detail(struct eeh_dev *edev, int severity) 234void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
231{ 235{
232 size_t loglen = 0; 236 size_t loglen = 0;
233 pci_regs_buf[0] = 0; 237 struct eeh_dev *edev;
234 238
235 eeh_pci_enable(edev, EEH_OPT_THAW_MMIO); 239 eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
236 eeh_ops->configure_bridge(eeh_dev_to_of_node(edev)); 240 eeh_ops->configure_bridge(pe);
237 eeh_restore_bars(edev); 241 eeh_pe_restore_bars(pe);
238 loglen = eeh_gather_pci_data(edev, pci_regs_buf, EEH_PCI_REGS_LOG_LEN);
239 242
240 eeh_ops->get_log(eeh_dev_to_of_node(edev), severity, pci_regs_buf, loglen); 243 pci_regs_buf[0] = 0;
244 eeh_pe_for_each_dev(pe, edev) {
245 loglen += eeh_gather_pci_data(edev, pci_regs_buf,
246 EEH_PCI_REGS_LOG_LEN);
247 }
248
249 eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
241} 250}
242 251
243/** 252/**
@@ -261,126 +270,8 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
261} 270}
262 271
263/** 272/**
264 * eeh_find_device_pe - Retrieve the PE for the given device 273 * eeh_dev_check_failure - Check if all 1's data is due to EEH slot freeze
265 * @dn: device node 274 * @edev: eeh device
266 *
267 * Return the PE under which this device lies
268 */
269struct device_node *eeh_find_device_pe(struct device_node *dn)
270{
271 while (dn->parent && of_node_to_eeh_dev(dn->parent) &&
272 (of_node_to_eeh_dev(dn->parent)->mode & EEH_MODE_SUPPORTED)) {
273 dn = dn->parent;
274 }
275 return dn;
276}
277
278/**
279 * __eeh_mark_slot - Mark all child devices as failed
280 * @parent: parent device
281 * @mode_flag: failure flag
282 *
283 * Mark all devices that are children of this device as failed.
284 * Mark the device driver too, so that it can see the failure
285 * immediately; this is critical, since some drivers poll
286 * status registers in interrupts ... If a driver is polling,
287 * and the slot is frozen, then the driver can deadlock in
288 * an interrupt context, which is bad.
289 */
290static void __eeh_mark_slot(struct device_node *parent, int mode_flag)
291{
292 struct device_node *dn;
293
294 for_each_child_of_node(parent, dn) {
295 if (of_node_to_eeh_dev(dn)) {
296 /* Mark the pci device driver too */
297 struct pci_dev *dev = of_node_to_eeh_dev(dn)->pdev;
298
299 of_node_to_eeh_dev(dn)->mode |= mode_flag;
300
301 if (dev && dev->driver)
302 dev->error_state = pci_channel_io_frozen;
303
304 __eeh_mark_slot(dn, mode_flag);
305 }
306 }
307}
308
309/**
310 * eeh_mark_slot - Mark the indicated device and its children as failed
311 * @dn: parent device
312 * @mode_flag: failure flag
313 *
314 * Mark the indicated device and its child devices as failed.
315 * The device drivers are marked as failed as well.
316 */
317void eeh_mark_slot(struct device_node *dn, int mode_flag)
318{
319 struct pci_dev *dev;
320 dn = eeh_find_device_pe(dn);
321
322 /* Back up one, since config addrs might be shared */
323 if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent))
324 dn = dn->parent;
325
326 of_node_to_eeh_dev(dn)->mode |= mode_flag;
327
328 /* Mark the pci device too */
329 dev = of_node_to_eeh_dev(dn)->pdev;
330 if (dev)
331 dev->error_state = pci_channel_io_frozen;
332
333 __eeh_mark_slot(dn, mode_flag);
334}
335
336/**
337 * __eeh_clear_slot - Clear failure flag for the child devices
338 * @parent: parent device
339 * @mode_flag: flag to be cleared
340 *
341 * Clear failure flag for the child devices.
342 */
343static void __eeh_clear_slot(struct device_node *parent, int mode_flag)
344{
345 struct device_node *dn;
346
347 for_each_child_of_node(parent, dn) {
348 if (of_node_to_eeh_dev(dn)) {
349 of_node_to_eeh_dev(dn)->mode &= ~mode_flag;
350 of_node_to_eeh_dev(dn)->check_count = 0;
351 __eeh_clear_slot(dn, mode_flag);
352 }
353 }
354}
355
356/**
357 * eeh_clear_slot - Clear failure flag for the indicated device and its children
358 * @dn: parent device
359 * @mode_flag: flag to be cleared
360 *
361 * Clear failure flag for the indicated device and its children.
362 */
363void eeh_clear_slot(struct device_node *dn, int mode_flag)
364{
365 unsigned long flags;
366 raw_spin_lock_irqsave(&confirm_error_lock, flags);
367
368 dn = eeh_find_device_pe(dn);
369
370 /* Back up one, since config addrs might be shared */
371 if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent))
372 dn = dn->parent;
373
374 of_node_to_eeh_dev(dn)->mode &= ~mode_flag;
375 of_node_to_eeh_dev(dn)->check_count = 0;
376 __eeh_clear_slot(dn, mode_flag);
377 raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
378}
379
380/**
381 * eeh_dn_check_failure - Check if all 1's data is due to EEH slot freeze
382 * @dn: device node
383 * @dev: pci device, if known
384 * 275 *
385 * Check for an EEH failure for the given device node. Call this 276 * Check for an EEH failure for the given device node. Call this
386 * routine if the result of a read was all 0xff's and you want to 277 * routine if the result of a read was all 0xff's and you want to
@@ -392,11 +283,13 @@ void eeh_clear_slot(struct device_node *dn, int mode_flag)
392 * 283 *
393 * It is safe to call this routine in an interrupt context. 284 * It is safe to call this routine in an interrupt context.
394 */ 285 */
395int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) 286int eeh_dev_check_failure(struct eeh_dev *edev)
396{ 287{
397 int ret; 288 int ret;
398 unsigned long flags; 289 unsigned long flags;
399 struct eeh_dev *edev; 290 struct device_node *dn;
291 struct pci_dev *dev;
292 struct eeh_pe *pe;
400 int rc = 0; 293 int rc = 0;
401 const char *location; 294 const char *location;
402 295
@@ -405,23 +298,23 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
405 if (!eeh_subsystem_enabled) 298 if (!eeh_subsystem_enabled)
406 return 0; 299 return 0;
407 300
408 if (!dn) { 301 if (!edev) {
409 eeh_stats.no_dn++; 302 eeh_stats.no_dn++;
410 return 0; 303 return 0;
411 } 304 }
412 dn = eeh_find_device_pe(dn); 305 dn = eeh_dev_to_of_node(edev);
413 edev = of_node_to_eeh_dev(dn); 306 dev = eeh_dev_to_pci_dev(edev);
307 pe = edev->pe;
414 308
415 /* Access to IO BARs might get this far and still not want checking. */ 309 /* Access to IO BARs might get this far and still not want checking. */
416 if (!(edev->mode & EEH_MODE_SUPPORTED) || 310 if (!pe) {
417 edev->mode & EEH_MODE_NOCHECK) {
418 eeh_stats.ignored_check++; 311 eeh_stats.ignored_check++;
419 pr_debug("EEH: Ignored check (%x) for %s %s\n", 312 pr_debug("EEH: Ignored check for %s %s\n",
420 edev->mode, eeh_pci_name(dev), dn->full_name); 313 eeh_pci_name(dev), dn->full_name);
421 return 0; 314 return 0;
422 } 315 }
423 316
424 if (!edev->config_addr && !edev->pe_config_addr) { 317 if (!pe->addr && !pe->config_addr) {
425 eeh_stats.no_cfg_addr++; 318 eeh_stats.no_cfg_addr++;
426 return 0; 319 return 0;
427 } 320 }
@@ -434,13 +327,13 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
434 */ 327 */
435 raw_spin_lock_irqsave(&confirm_error_lock, flags); 328 raw_spin_lock_irqsave(&confirm_error_lock, flags);
436 rc = 1; 329 rc = 1;
437 if (edev->mode & EEH_MODE_ISOLATED) { 330 if (pe->state & EEH_PE_ISOLATED) {
438 edev->check_count++; 331 pe->check_count++;
439 if (edev->check_count % EEH_MAX_FAILS == 0) { 332 if (pe->check_count % EEH_MAX_FAILS == 0) {
440 location = of_get_property(dn, "ibm,loc-code", NULL); 333 location = of_get_property(dn, "ibm,loc-code", NULL);
441 printk(KERN_ERR "EEH: %d reads ignored for recovering device at " 334 printk(KERN_ERR "EEH: %d reads ignored for recovering device at "
442 "location=%s driver=%s pci addr=%s\n", 335 "location=%s driver=%s pci addr=%s\n",
443 edev->check_count, location, 336 pe->check_count, location,
444 eeh_driver_name(dev), eeh_pci_name(dev)); 337 eeh_driver_name(dev), eeh_pci_name(dev));
445 printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n", 338 printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n",
446 eeh_driver_name(dev)); 339 eeh_driver_name(dev));
@@ -456,7 +349,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
456 * function zero of a multi-function device. 349 * function zero of a multi-function device.
457 * In any case they must share a common PHB. 350 * In any case they must share a common PHB.
458 */ 351 */
459 ret = eeh_ops->get_state(dn, NULL); 352 ret = eeh_ops->get_state(pe, NULL);
460 353
461 /* Note that config-io to empty slots may fail; 354 /* Note that config-io to empty slots may fail;
462 * they are empty when they don't have children. 355 * they are empty when they don't have children.
@@ -469,7 +362,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
469 (ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) == 362 (ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) ==
470 (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) { 363 (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) {
471 eeh_stats.false_positives++; 364 eeh_stats.false_positives++;
472 edev->false_positives ++; 365 pe->false_positives++;
473 rc = 0; 366 rc = 0;
474 goto dn_unlock; 367 goto dn_unlock;
475 } 368 }
@@ -480,10 +373,10 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
480 * with other functions on this device, and functions under 373 * with other functions on this device, and functions under
481 * bridges. 374 * bridges.
482 */ 375 */
483 eeh_mark_slot(dn, EEH_MODE_ISOLATED); 376 eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
484 raw_spin_unlock_irqrestore(&confirm_error_lock, flags); 377 raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
485 378
486 eeh_send_failure_event(edev); 379 eeh_send_failure_event(pe);
487 380
488 /* Most EEH events are due to device driver bugs. Having 381 /* Most EEH events are due to device driver bugs. Having
489 * a stack trace will help the device-driver authors figure 382 * a stack trace will help the device-driver authors figure
@@ -497,7 +390,7 @@ dn_unlock:
497 return rc; 390 return rc;
498} 391}
499 392
500EXPORT_SYMBOL_GPL(eeh_dn_check_failure); 393EXPORT_SYMBOL_GPL(eeh_dev_check_failure);
501 394
502/** 395/**
503 * eeh_check_failure - Check if all 1's data is due to EEH slot freeze 396 * eeh_check_failure - Check if all 1's data is due to EEH slot freeze
@@ -514,21 +407,19 @@ EXPORT_SYMBOL_GPL(eeh_dn_check_failure);
514unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val) 407unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
515{ 408{
516 unsigned long addr; 409 unsigned long addr;
517 struct pci_dev *dev; 410 struct eeh_dev *edev;
518 struct device_node *dn;
519 411
520 /* Finding the phys addr + pci device; this is pretty quick. */ 412 /* Finding the phys addr + pci device; this is pretty quick. */
521 addr = eeh_token_to_phys((unsigned long __force) token); 413 addr = eeh_token_to_phys((unsigned long __force) token);
522 dev = pci_addr_cache_get_device(addr); 414 edev = eeh_addr_cache_get_dev(addr);
523 if (!dev) { 415 if (!edev) {
524 eeh_stats.no_device++; 416 eeh_stats.no_device++;
525 return val; 417 return val;
526 } 418 }
527 419
528 dn = pci_device_to_OF_node(dev); 420 eeh_dev_check_failure(edev);
529 eeh_dn_check_failure(dn, dev);
530 421
531 pci_dev_put(dev); 422 pci_dev_put(eeh_dev_to_pci_dev(edev));
532 return val; 423 return val;
533} 424}
534 425
@@ -537,23 +428,22 @@ EXPORT_SYMBOL(eeh_check_failure);
537 428
538/** 429/**
539 * eeh_pci_enable - Enable MMIO or DMA transfers for this slot 430 * eeh_pci_enable - Enable MMIO or DMA transfers for this slot
540 * @edev: pci device node 431 * @pe: EEH PE
541 * 432 *
542 * This routine should be called to reenable frozen MMIO or DMA 433 * This routine should be called to reenable frozen MMIO or DMA
543 * so that it would work correctly again. It's useful while doing 434 * so that it would work correctly again. It's useful while doing
544 * recovery or log collection on the indicated device. 435 * recovery or log collection on the indicated device.
545 */ 436 */
546int eeh_pci_enable(struct eeh_dev *edev, int function) 437int eeh_pci_enable(struct eeh_pe *pe, int function)
547{ 438{
548 int rc; 439 int rc;
549 struct device_node *dn = eeh_dev_to_of_node(edev);
550 440
551 rc = eeh_ops->set_option(dn, function); 441 rc = eeh_ops->set_option(pe, function);
552 if (rc) 442 if (rc)
553 printk(KERN_WARNING "EEH: Unexpected state change %d, err=%d dn=%s\n", 443 pr_warning("%s: Unexpected state change %d on PHB#%d-PE#%x, err=%d\n",
554 function, rc, dn->full_name); 444 __func__, function, pe->phb->global_number, pe->addr, rc);
555 445
556 rc = eeh_ops->wait_state(dn, PCI_BUS_RESET_WAIT_MSEC); 446 rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
557 if (rc > 0 && (rc & EEH_STATE_MMIO_ENABLED) && 447 if (rc > 0 && (rc & EEH_STATE_MMIO_ENABLED) &&
558 (function == EEH_OPT_THAW_MMIO)) 448 (function == EEH_OPT_THAW_MMIO))
559 return 0; 449 return 0;
@@ -571,17 +461,24 @@ int eeh_pci_enable(struct eeh_dev *edev, int function)
571 */ 461 */
572int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) 462int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
573{ 463{
574 struct device_node *dn = pci_device_to_OF_node(dev); 464 struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
465 struct eeh_pe *pe = edev->pe;
466
467 if (!pe) {
468 pr_err("%s: No PE found on PCI device %s\n",
469 __func__, pci_name(dev));
470 return -EINVAL;
471 }
575 472
576 switch (state) { 473 switch (state) {
577 case pcie_deassert_reset: 474 case pcie_deassert_reset:
578 eeh_ops->reset(dn, EEH_RESET_DEACTIVATE); 475 eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
579 break; 476 break;
580 case pcie_hot_reset: 477 case pcie_hot_reset:
581 eeh_ops->reset(dn, EEH_RESET_HOT); 478 eeh_ops->reset(pe, EEH_RESET_HOT);
582 break; 479 break;
583 case pcie_warm_reset: 480 case pcie_warm_reset:
584 eeh_ops->reset(dn, EEH_RESET_FUNDAMENTAL); 481 eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
585 break; 482 break;
586 default: 483 default:
587 return -EINVAL; 484 return -EINVAL;
@@ -591,66 +488,37 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
591} 488}
592 489
593/** 490/**
594 * __eeh_set_pe_freset - Check the required reset for child devices 491 * eeh_set_pe_freset - Check the required reset for the indicated device
595 * @parent: parent device 492 * @data: EEH device
596 * @freset: return value 493 * @flag: return value
597 *
598 * Each device might have its preferred reset type: fundamental or
599 * hot reset. The routine is used to collect the information from
600 * the child devices so that they could be reset accordingly.
601 */
602void __eeh_set_pe_freset(struct device_node *parent, unsigned int *freset)
603{
604 struct device_node *dn;
605
606 for_each_child_of_node(parent, dn) {
607 if (of_node_to_eeh_dev(dn)) {
608 struct pci_dev *dev = of_node_to_eeh_dev(dn)->pdev;
609
610 if (dev && dev->driver)
611 *freset |= dev->needs_freset;
612
613 __eeh_set_pe_freset(dn, freset);
614 }
615 }
616}
617
618/**
619 * eeh_set_pe_freset - Check the required reset for the indicated device and its children
620 * @dn: parent device
621 * @freset: return value
622 * 494 *
623 * Each device might have its preferred reset type: fundamental or 495 * Each device might have its preferred reset type: fundamental or
 624 * hot reset. The routine is used to collect the information for 496 * hot reset. The routine is used to collect the information for
 625 * the indicated device and its children so that the whole set of 497 * the indicated device and its children so that the whole set of
626 * devices could be reset properly. 498 * devices could be reset properly.
627 */ 499 */
628void eeh_set_pe_freset(struct device_node *dn, unsigned int *freset) 500static void *eeh_set_dev_freset(void *data, void *flag)
629{ 501{
630 struct pci_dev *dev; 502 struct pci_dev *dev;
631 dn = eeh_find_device_pe(dn); 503 unsigned int *freset = (unsigned int *)flag;
632 504 struct eeh_dev *edev = (struct eeh_dev *)data;
633 /* Back up one, since config addrs might be shared */
634 if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent))
635 dn = dn->parent;
636 505
637 dev = of_node_to_eeh_dev(dn)->pdev; 506 dev = eeh_dev_to_pci_dev(edev);
638 if (dev) 507 if (dev)
639 *freset |= dev->needs_freset; 508 *freset |= dev->needs_freset;
640 509
641 __eeh_set_pe_freset(dn, freset); 510 return NULL;
642} 511}
643 512
644/** 513/**
645 * eeh_reset_pe_once - Assert the pci #RST line for 1/4 second 514 * eeh_reset_pe_once - Assert the pci #RST line for 1/4 second
646 * @edev: pci device node to be reset. 515 * @pe: EEH PE
647 * 516 *
648 * Assert the PCI #RST line for 1/4 second. 517 * Assert the PCI #RST line for 1/4 second.
649 */ 518 */
650static void eeh_reset_pe_once(struct eeh_dev *edev) 519static void eeh_reset_pe_once(struct eeh_pe *pe)
651{ 520{
652 unsigned int freset = 0; 521 unsigned int freset = 0;
653 struct device_node *dn = eeh_dev_to_of_node(edev);
654 522
655 /* Determine type of EEH reset required for 523 /* Determine type of EEH reset required for
656 * Partitionable Endpoint, a hot-reset (1) 524 * Partitionable Endpoint, a hot-reset (1)
@@ -658,12 +526,12 @@ static void eeh_reset_pe_once(struct eeh_dev *edev)
658 * A fundamental reset required by any device under 526 * A fundamental reset required by any device under
659 * Partitionable Endpoint trumps hot-reset. 527 * Partitionable Endpoint trumps hot-reset.
660 */ 528 */
661 eeh_set_pe_freset(dn, &freset); 529 eeh_pe_dev_traverse(pe, eeh_set_dev_freset, &freset);
662 530
663 if (freset) 531 if (freset)
664 eeh_ops->reset(dn, EEH_RESET_FUNDAMENTAL); 532 eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
665 else 533 else
666 eeh_ops->reset(dn, EEH_RESET_HOT); 534 eeh_ops->reset(pe, EEH_RESET_HOT);
667 535
668 /* The PCI bus requires that the reset be held high for at least 536 /* The PCI bus requires that the reset be held high for at least
669 * a 100 milliseconds. We wait a bit longer 'just in case'. 537 * a 100 milliseconds. We wait a bit longer 'just in case'.
@@ -675,9 +543,9 @@ static void eeh_reset_pe_once(struct eeh_dev *edev)
675 * pci slot reset line is dropped. Make sure we don't miss 543 * pci slot reset line is dropped. Make sure we don't miss
676 * these, and clear the flag now. 544 * these, and clear the flag now.
677 */ 545 */
678 eeh_clear_slot(dn, EEH_MODE_ISOLATED); 546 eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
679 547
680 eeh_ops->reset(dn, EEH_RESET_DEACTIVATE); 548 eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
681 549
682 /* After a PCI slot has been reset, the PCI Express spec requires 550 /* After a PCI slot has been reset, the PCI Express spec requires
683 * a 1.5 second idle time for the bus to stabilize, before starting 551 * a 1.5 second idle time for the bus to stabilize, before starting
@@ -689,116 +557,36 @@ static void eeh_reset_pe_once(struct eeh_dev *edev)
689 557
690/** 558/**
691 * eeh_reset_pe - Reset the indicated PE 559 * eeh_reset_pe - Reset the indicated PE
692 * @edev: PCI device associated EEH device 560 * @pe: EEH PE
693 * 561 *
 694 * This routine should be called to reset the indicated device, including 562 * This routine should be called to reset the indicated device, including
695 * PE. A PE might include multiple PCI devices and sometimes PCI bridges 563 * PE. A PE might include multiple PCI devices and sometimes PCI bridges
696 * might be involved as well. 564 * might be involved as well.
697 */ 565 */
698int eeh_reset_pe(struct eeh_dev *edev) 566int eeh_reset_pe(struct eeh_pe *pe)
699{ 567{
700 int i, rc; 568 int i, rc;
701 struct device_node *dn = eeh_dev_to_of_node(edev);
702 569
703 /* Take three shots at resetting the bus */ 570 /* Take three shots at resetting the bus */
704 for (i=0; i<3; i++) { 571 for (i=0; i<3; i++) {
705 eeh_reset_pe_once(edev); 572 eeh_reset_pe_once(pe);
706 573
707 rc = eeh_ops->wait_state(dn, PCI_BUS_RESET_WAIT_MSEC); 574 rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
708 if (rc == (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) 575 if (rc == (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE))
709 return 0; 576 return 0;
710 577
711 if (rc < 0) { 578 if (rc < 0) {
712 printk(KERN_ERR "EEH: unrecoverable slot failure %s\n", 579 pr_err("%s: Unrecoverable slot failure on PHB#%d-PE#%x",
713 dn->full_name); 580 __func__, pe->phb->global_number, pe->addr);
714 return -1; 581 return -1;
715 } 582 }
716 printk(KERN_ERR "EEH: bus reset %d failed on slot %s, rc=%d\n", 583 pr_err("EEH: bus reset %d failed on PHB#%d-PE#%x, rc=%d\n",
717 i+1, dn->full_name, rc); 584 i+1, pe->phb->global_number, pe->addr, rc);
718 } 585 }
719 586
720 return -1; 587 return -1;
721} 588}
722 589
723/** Save and restore of PCI BARs
724 *
725 * Although firmware will set up BARs during boot, it doesn't
 726 * set up device BARs after a device reset, although it will,
727 * if requested, set up bridge configuration. Thus, we need to
728 * configure the PCI devices ourselves.
729 */
730
731/**
732 * eeh_restore_one_device_bars - Restore the Base Address Registers for one device
733 * @edev: PCI device associated EEH device
734 *
735 * Loads the PCI configuration space base address registers,
 736 * the expansion ROM base address, the latency timer, etc.
737 * from the saved values in the device node.
738 */
739static inline void eeh_restore_one_device_bars(struct eeh_dev *edev)
740{
741 int i;
742 u32 cmd;
743 struct device_node *dn = eeh_dev_to_of_node(edev);
744
745 if (!edev->phb)
746 return;
747
748 for (i=4; i<10; i++) {
749 eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]);
750 }
751
752 /* 12 == Expansion ROM Address */
753 eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]);
754
755#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
756#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
757
758 eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1,
759 SAVED_BYTE(PCI_CACHE_LINE_SIZE));
760
761 eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1,
762 SAVED_BYTE(PCI_LATENCY_TIMER));
763
764 /* max latency, min grant, interrupt pin and line */
765 eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]);
766
767 /* Restore PERR & SERR bits, some devices require it,
768 * don't touch the other command bits
769 */
770 eeh_ops->read_config(dn, PCI_COMMAND, 4, &cmd);
771 if (edev->config_space[1] & PCI_COMMAND_PARITY)
772 cmd |= PCI_COMMAND_PARITY;
773 else
774 cmd &= ~PCI_COMMAND_PARITY;
775 if (edev->config_space[1] & PCI_COMMAND_SERR)
776 cmd |= PCI_COMMAND_SERR;
777 else
778 cmd &= ~PCI_COMMAND_SERR;
779 eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd);
780}
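
The BYTE_SWAP() macro in the removed code deserves a worked example: the saved config space is an array of host-endian u32s, so on big-endian powerpc the PCI byte at offset OFF sits at 4*(OFF/4) + (3 - OFF%4) within the byte view, which simplifies to the kernel's 8*(OFF/4) + 3 - OFF. A tiny check of that arithmetic:

    #include <stdio.h>

    /* index of PCI config byte OFF within the saved u32 array when the
     * words are stored big-endian: 4*(OFF/4) + (3 - OFF%4), which is
     * exactly the kernel's 8*(OFF/4) + 3 - OFF */
    #define BYTE_SWAP(OFF)	(8 * ((OFF) / 4) + 3 - (OFF))

    int main(void)
    {
    	printf("0x0c -> %d\n", BYTE_SWAP(0x0c));	/* 15: PCI_CACHE_LINE_SIZE */
    	printf("0x0d -> %d\n", BYTE_SWAP(0x0d));	/* 14: PCI_LATENCY_TIMER */
    	return 0;
    }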
781
782/**
783 * eeh_restore_bars - Restore the PCI config space info
784 * @edev: EEH device
785 *
786 * This routine performs a recursive walk to the children
787 * of this device as well.
788 */
789void eeh_restore_bars(struct eeh_dev *edev)
790{
791 struct device_node *dn;
792 if (!edev)
793 return;
794
795 if ((edev->mode & EEH_MODE_SUPPORTED) && !IS_BRIDGE(edev->class_code))
796 eeh_restore_one_device_bars(edev);
797
798 for_each_child_of_node(eeh_dev_to_of_node(edev), dn)
799 eeh_restore_bars(of_node_to_eeh_dev(dn));
800}
801
802/** 590/**
803 * eeh_save_bars - Save device bars 591 * eeh_save_bars - Save device bars
804 * @edev: PCI device associated EEH device 592 * @edev: PCI device associated EEH device
@@ -808,7 +596,7 @@ void eeh_restore_bars(struct eeh_dev *edev)
808 * PCI devices are added individually; but, for the restore, 596 * PCI devices are added individually; but, for the restore,
809 * an entire slot is reset at a time. 597 * an entire slot is reset at a time.
810 */ 598 */
811static void eeh_save_bars(struct eeh_dev *edev) 599void eeh_save_bars(struct eeh_dev *edev)
812{ 600{
813 int i; 601 int i;
814 struct device_node *dn; 602 struct device_node *dn;
@@ -822,102 +610,6 @@ static void eeh_save_bars(struct eeh_dev *edev)
822} 610}
823 611
824/** 612/**
825 * eeh_early_enable - Early enable EEH on the indicated device
826 * @dn: device node
827 * @data: BUID
828 *
829 * Enable EEH functionality on the specified PCI device. The function
830 * is expected to be called before real PCI probing is done. However,
831 * the PHBs have been initialized at this point.
832 */
833static void *eeh_early_enable(struct device_node *dn, void *data)
834{
835 int ret;
836 const u32 *class_code = of_get_property(dn, "class-code", NULL);
837 const u32 *vendor_id = of_get_property(dn, "vendor-id", NULL);
838 const u32 *device_id = of_get_property(dn, "device-id", NULL);
839 const u32 *regs;
840 int enable;
841 struct eeh_dev *edev = of_node_to_eeh_dev(dn);
842
843 edev->class_code = 0;
844 edev->mode = 0;
845 edev->check_count = 0;
846 edev->freeze_count = 0;
847 edev->false_positives = 0;
848
849 if (!of_device_is_available(dn))
850 return NULL;
851
852 /* Ignore bad nodes. */
853 if (!class_code || !vendor_id || !device_id)
854 return NULL;
855
856 /* There is nothing to check on PCI to ISA bridges */
857 if (dn->type && !strcmp(dn->type, "isa")) {
858 edev->mode |= EEH_MODE_NOCHECK;
859 return NULL;
860 }
861 edev->class_code = *class_code;
862
863 /* Ok... see if this device supports EEH. Some do, some don't,
864 * and the only way to find out is to check each and every one.
865 */
866 regs = of_get_property(dn, "reg", NULL);
867 if (regs) {
868 /* First register entry is addr (00BBSS00) */
869 /* Try to enable eeh */
870 ret = eeh_ops->set_option(dn, EEH_OPT_ENABLE);
871
872 enable = 0;
873 if (ret == 0) {
874 edev->config_addr = regs[0];
875
876 /* If the newer, better, ibm,get-config-addr-info is supported,
877 * then use that instead.
878 */
879 edev->pe_config_addr = eeh_ops->get_pe_addr(dn);
880
881 /* Some older systems (Power4) allow the
882 * ibm,set-eeh-option call to succeed even on nodes
883 * where EEH is not supported. Verify support
884 * explicitly.
885 */
886 ret = eeh_ops->get_state(dn, NULL);
887 if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT)
888 enable = 1;
889 }
890
891 if (enable) {
892 eeh_subsystem_enabled = 1;
893 edev->mode |= EEH_MODE_SUPPORTED;
894
895 pr_debug("EEH: %s: eeh enabled, config=%x pe_config=%x\n",
896 dn->full_name, edev->config_addr,
897 edev->pe_config_addr);
898 } else {
899
900 /* This device doesn't support EEH, but it may have an
901 * EEH parent, in which case we mark it as supported.
902 */
903 if (dn->parent && of_node_to_eeh_dev(dn->parent) &&
904 (of_node_to_eeh_dev(dn->parent)->mode & EEH_MODE_SUPPORTED)) {
905 /* Parent supports EEH. */
906 edev->mode |= EEH_MODE_SUPPORTED;
907 edev->config_addr = of_node_to_eeh_dev(dn->parent)->config_addr;
908 return NULL;
909 }
910 }
911 } else {
912 printk(KERN_WARNING "EEH: %s: unable to get reg property.\n",
913 dn->full_name);
914 }
915
916 eeh_save_bars(edev);
917 return NULL;
918}
919
920/**
921 * eeh_ops_register - Register platform dependent EEH operations 613 * eeh_ops_register - Register platform dependent EEH operations
922 * @ops: platform dependent EEH operations 614 * @ops: platform dependent EEH operations
923 * 615 *
@@ -982,7 +674,7 @@ int __exit eeh_ops_unregister(const char *name)
982 * Even if force-off is set, the EEH hardware is still enabled, so that 674 * Even if force-off is set, the EEH hardware is still enabled, so that
983 * newer systems can boot. 675 * newer systems can boot.
984 */ 676 */
985void __init eeh_init(void) 677static int __init eeh_init(void)
986{ 678{
987 struct pci_controller *hose, *tmp; 679 struct pci_controller *hose, *tmp;
988 struct device_node *phb; 680 struct device_node *phb;
@@ -992,27 +684,34 @@ void __init eeh_init(void)
992 if (!eeh_ops) { 684 if (!eeh_ops) {
993 pr_warning("%s: Platform EEH operation not found\n", 685 pr_warning("%s: Platform EEH operation not found\n",
994 __func__); 686 __func__);
995 return; 687 return -EEXIST;
996 } else if ((ret = eeh_ops->init())) { 688 } else if ((ret = eeh_ops->init())) {
997 pr_warning("%s: Failed to call platform init function (%d)\n", 689 pr_warning("%s: Failed to call platform init function (%d)\n",
998 __func__, ret); 690 __func__, ret);
999 return; 691 return ret;
1000 } 692 }
1001 693
1002 raw_spin_lock_init(&confirm_error_lock); 694 raw_spin_lock_init(&confirm_error_lock);
1003 695
1004 /* Enable EEH for all adapters */ 696 /* Enable EEH for all adapters */
1005 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { 697 if (eeh_probe_mode_devtree()) {
1006 phb = hose->dn; 698 list_for_each_entry_safe(hose, tmp,
1007 traverse_pci_devices(phb, eeh_early_enable, NULL); 699 &hose_list, list_node) {
700 phb = hose->dn;
701 traverse_pci_devices(phb, eeh_ops->of_probe, NULL);
702 }
1008 } 703 }
1009 704
1010 if (eeh_subsystem_enabled) 705 if (eeh_subsystem_enabled)
1011 printk(KERN_INFO "EEH: PCI Enhanced I/O Error Handling Enabled\n"); 706 pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n");
1012 else 707 else
1013 printk(KERN_WARNING "EEH: No capable adapters found\n"); 708 pr_warning("EEH: No capable adapters found\n");
709
710 return ret;
1014} 711}
1015 712
713core_initcall_sync(eeh_init);
714
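A note on ordering: eeh_init() is now a core_initcall_sync(), while eeh_dev_phb_init() in eeh_dev.c below registers as a plain core_initcall(). Within one initcall level the plain calls run before their _sync counterparts, so the eeh_dev structures are guaranteed to exist before eeh_init() probes them:

        core_initcall(eeh_dev_phb_init);        /* creates the eeh_dev structures */
        core_initcall_sync(eeh_init);           /* then probes and enables EEH    */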
1016/** 715/**
1017 * eeh_add_device_early - Enable EEH for the indicated device_node 716 * eeh_add_device_early - Enable EEH for the indicated device_node
1018 * @dn: device node for which to set up EEH 717 * @dn: device node for which to set up EEH
@@ -1029,7 +728,7 @@ static void eeh_add_device_early(struct device_node *dn)
1029{ 728{
1030 struct pci_controller *phb; 729 struct pci_controller *phb;
1031 730
1032 if (!dn || !of_node_to_eeh_dev(dn)) 731 if (!of_node_to_eeh_dev(dn))
1033 return; 732 return;
1034 phb = of_node_to_eeh_dev(dn)->phb; 733 phb = of_node_to_eeh_dev(dn)->phb;
1035 734
@@ -1037,7 +736,8 @@ static void eeh_add_device_early(struct device_node *dn)
1037 if (NULL == phb || 0 == phb->buid) 736 if (NULL == phb || 0 == phb->buid)
1038 return; 737 return;
1039 738
1040 eeh_early_enable(dn, NULL); 739 /* FIXME: hotplug support on POWERNV */
740 eeh_ops->of_probe(dn, NULL);
1041} 741}
1042 742
1043/** 743/**
@@ -1087,7 +787,7 @@ static void eeh_add_device_late(struct pci_dev *dev)
1087 edev->pdev = dev; 787 edev->pdev = dev;
1088 dev->dev.archdata.edev = edev; 788 dev->dev.archdata.edev = edev;
1089 789
1090 pci_addr_cache_insert_device(dev); 790 eeh_addr_cache_insert_dev(dev);
1091 eeh_sysfs_add_device(dev); 791 eeh_sysfs_add_device(dev);
1092} 792}
1093 793
@@ -1117,6 +817,7 @@ EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
1117/** 817/**
1118 * eeh_remove_device - Undo EEH setup for the indicated pci device 818 * eeh_remove_device - Undo EEH setup for the indicated pci device
1119 * @dev: pci device to be removed 819 * @dev: pci device to be removed
820 * @purge_pe: remove the PE or not
1120 * 821 *
1121 * This routine should be called when a device is removed from 822 * This routine should be called when a device is removed from
1122 * a running system (e.g. by hotplug or dlpar). It unregisters 823 * a running system (e.g. by hotplug or dlpar). It unregisters
@@ -1124,7 +825,7 @@ EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
1124 * this device will no longer be detected after this call; thus, 825 * this device will no longer be detected after this call; thus,
1125 * i/o errors affecting this slot may leave this device unusable. 826 * i/o errors affecting this slot may leave this device unusable.
1126 */ 827 */
1127static void eeh_remove_device(struct pci_dev *dev) 828static void eeh_remove_device(struct pci_dev *dev, int purge_pe)
1128{ 829{
1129 struct eeh_dev *edev; 830 struct eeh_dev *edev;
1130 831
@@ -1143,28 +844,30 @@ static void eeh_remove_device(struct pci_dev *dev)
1143 dev->dev.archdata.edev = NULL; 844 dev->dev.archdata.edev = NULL;
1144 pci_dev_put(dev); 845 pci_dev_put(dev);
1145 846
1146 pci_addr_cache_remove_device(dev); 847 eeh_rmv_from_parent_pe(edev, purge_pe);
848 eeh_addr_cache_rmv_dev(dev);
1147 eeh_sysfs_remove_device(dev); 849 eeh_sysfs_remove_device(dev);
1148} 850}
1149 851
1150/** 852/**
1151 * eeh_remove_bus_device - Undo EEH setup for the indicated PCI device 853 * eeh_remove_bus_device - Undo EEH setup for the indicated PCI device
1152 * @dev: PCI device 854 * @dev: PCI device
855 * @purge_pe: remove the corresponding PE or not
1153 * 856 *
1154 * This routine must be called when a device is removed from the 857 * This routine must be called when a device is removed from the
1155 * running system through hotplug or dlpar. The corresponding 858 * running system through hotplug or dlpar. The corresponding
1156 * PCI address cache will be removed. 859 * PCI address cache will be removed.
1157 */ 860 */
1158void eeh_remove_bus_device(struct pci_dev *dev) 861void eeh_remove_bus_device(struct pci_dev *dev, int purge_pe)
1159{ 862{
1160 struct pci_bus *bus = dev->subordinate; 863 struct pci_bus *bus = dev->subordinate;
1161 struct pci_dev *child, *tmp; 864 struct pci_dev *child, *tmp;
1162 865
1163 eeh_remove_device(dev); 866 eeh_remove_device(dev, purge_pe);
1164 867
1165 if (bus && dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { 868 if (bus && dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1166 list_for_each_entry_safe(child, tmp, &bus->devices, bus_list) 869 list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
1167 eeh_remove_bus_device(child); 870 eeh_remove_bus_device(child, purge_pe);
1168 } 871 }
1169} 872}
1170EXPORT_SYMBOL_GPL(eeh_remove_bus_device); 873EXPORT_SYMBOL_GPL(eeh_remove_bus_device);
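The recursion above visits every function behind a bridge, so one call tears down a whole subtree. A hedged sketch of how the new purge_pe flag is meant to be used, based on the two behaviours visible in this patch (__pcibios_remove_pci_devices(bus, 0) keeps PEs across a reset):

        /* Illustrative only: the flag decides whether the PE goes away too. */
        eeh_remove_bus_device(dev, 1);  /* hot-unplug: purge the PE as well    */
        eeh_remove_bus_device(dev, 0);  /* EEH reset: keep the PE for recovery */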
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c
index e5ae1c687c6..5a4c8790305 100644
--- a/arch/powerpc/platforms/pseries/eeh_cache.c
+++ b/arch/powerpc/platforms/pseries/eeh_cache.c
@@ -50,6 +50,7 @@ struct pci_io_addr_range {
50 struct rb_node rb_node; 50 struct rb_node rb_node;
51 unsigned long addr_lo; 51 unsigned long addr_lo;
52 unsigned long addr_hi; 52 unsigned long addr_hi;
53 struct eeh_dev *edev;
53 struct pci_dev *pcidev; 54 struct pci_dev *pcidev;
54 unsigned int flags; 55 unsigned int flags;
55}; 56};
@@ -59,7 +60,7 @@ static struct pci_io_addr_cache {
59 spinlock_t piar_lock; 60 spinlock_t piar_lock;
60} pci_io_addr_cache_root; 61} pci_io_addr_cache_root;
61 62
62static inline struct pci_dev *__pci_addr_cache_get_device(unsigned long addr) 63static inline struct eeh_dev *__eeh_addr_cache_get_device(unsigned long addr)
63{ 64{
64 struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node; 65 struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node;
65 66
@@ -74,7 +75,7 @@ static inline struct pci_dev *__pci_addr_cache_get_device(unsigned long addr)
74 n = n->rb_right; 75 n = n->rb_right;
75 } else { 76 } else {
76 pci_dev_get(piar->pcidev); 77 pci_dev_get(piar->pcidev);
77 return piar->pcidev; 78 return piar->edev;
78 } 79 }
79 } 80 }
80 } 81 }
@@ -83,7 +84,7 @@ static inline struct pci_dev *__pci_addr_cache_get_device(unsigned long addr)
83} 84}
84 85
85/** 86/**
86 * pci_addr_cache_get_device - Get device, given only address 87 * eeh_addr_cache_get_dev - Get device, given only address
87 * @addr: mmio (PIO) phys address or i/o port number 88 * @addr: mmio (PIO) phys address or i/o port number
88 * 89 *
89 * Given an mmio phys address, or a port number, find a pci device 90 * Given an mmio phys address, or a port number, find a pci device
@@ -92,15 +93,15 @@ static inline struct pci_dev *__pci_addr_cache_get_device(unsigned long addr)
92 * from zero (that is, they do *not* have pci_io_addr added in). 93 * from zero (that is, they do *not* have pci_io_addr added in).
93 * It is safe to call this function within an interrupt. 94 * It is safe to call this function within an interrupt.
94 */ 95 */
95struct pci_dev *pci_addr_cache_get_device(unsigned long addr) 96struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr)
96{ 97{
97 struct pci_dev *dev; 98 struct eeh_dev *edev;
98 unsigned long flags; 99 unsigned long flags;
99 100
100 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags); 101 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
101 dev = __pci_addr_cache_get_device(addr); 102 edev = __eeh_addr_cache_get_device(addr);
102 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags); 103 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
103 return dev; 104 return edev;
104} 105}
105 106
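For readers unfamiliar with the structure: the cache keys non-overlapping [addr_lo, addr_hi] ranges in an rb-tree, so the lookup above is an ordinary binary-search-tree walk. A self-contained sketch of the same idea (the types and names here are hypothetical, not the kernel's):

        #include <stddef.h>

        /* Nodes hold non-overlapping [lo, hi] intervals, ordered by address. */
        struct range_node {
                unsigned long lo, hi;
                struct range_node *left, *right;
                void *payload;                  /* e.g. the eeh_dev */
        };

        static void *range_lookup(struct range_node *n, unsigned long addr)
        {
                while (n) {
                        if (addr < n->lo)
                                n = n->left;
                        else if (addr > n->hi)
                                n = n->right;
                        else
                                return n->payload;      /* lo <= addr <= hi */
                }
                return NULL;                            /* not cached */
        }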
106#ifdef DEBUG 107#ifdef DEBUG
@@ -108,7 +109,7 @@ struct pci_dev *pci_addr_cache_get_device(unsigned long addr)
108 * Handy-dandy debug print routine, does nothing more 109 * Handy-dandy debug print routine, does nothing more
109 * than print out the contents of our addr cache. 110 * than print out the contents of our addr cache.
110 */ 111 */
111static void pci_addr_cache_print(struct pci_io_addr_cache *cache) 112static void eeh_addr_cache_print(struct pci_io_addr_cache *cache)
112{ 113{
113 struct rb_node *n; 114 struct rb_node *n;
114 int cnt = 0; 115 int cnt = 0;
@@ -117,7 +118,7 @@ static void pci_addr_cache_print(struct pci_io_addr_cache *cache)
117 while (n) { 118 while (n) {
118 struct pci_io_addr_range *piar; 119 struct pci_io_addr_range *piar;
119 piar = rb_entry(n, struct pci_io_addr_range, rb_node); 120 piar = rb_entry(n, struct pci_io_addr_range, rb_node);
120 printk(KERN_DEBUG "PCI: %s addr range %d [%lx-%lx]: %s\n", 121 pr_debug("PCI: %s addr range %d [%lx-%lx]: %s\n",
121 (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt, 122 (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt,
122 piar->addr_lo, piar->addr_hi, pci_name(piar->pcidev)); 123 piar->addr_lo, piar->addr_hi, pci_name(piar->pcidev));
123 cnt++; 124 cnt++;
@@ -128,7 +129,7 @@ static void pci_addr_cache_print(struct pci_io_addr_cache *cache)
128 129
129/* Insert address range into the rb tree. */ 130/* Insert address range into the rb tree. */
130static struct pci_io_addr_range * 131static struct pci_io_addr_range *
131pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo, 132eeh_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
132 unsigned long ahi, unsigned int flags) 133 unsigned long ahi, unsigned int flags)
133{ 134{
134 struct rb_node **p = &pci_io_addr_cache_root.rb_root.rb_node; 135 struct rb_node **p = &pci_io_addr_cache_root.rb_root.rb_node;
@@ -146,23 +147,24 @@ pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
146 } else { 147 } else {
147 if (dev != piar->pcidev || 148 if (dev != piar->pcidev ||
148 alo != piar->addr_lo || ahi != piar->addr_hi) { 149 alo != piar->addr_lo || ahi != piar->addr_hi) {
149 printk(KERN_WARNING "PIAR: overlapping address range\n"); 150 pr_warning("PIAR: overlapping address range\n");
150 } 151 }
151 return piar; 152 return piar;
152 } 153 }
153 } 154 }
154 piar = kmalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC); 155 piar = kzalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC);
155 if (!piar) 156 if (!piar)
156 return NULL; 157 return NULL;
157 158
158 pci_dev_get(dev); 159 pci_dev_get(dev);
159 piar->addr_lo = alo; 160 piar->addr_lo = alo;
160 piar->addr_hi = ahi; 161 piar->addr_hi = ahi;
162 piar->edev = pci_dev_to_eeh_dev(dev);
161 piar->pcidev = dev; 163 piar->pcidev = dev;
162 piar->flags = flags; 164 piar->flags = flags;
163 165
164#ifdef DEBUG 166#ifdef DEBUG
165 printk(KERN_DEBUG "PIAR: insert range=[%lx:%lx] dev=%s\n", 167 pr_debug("PIAR: insert range=[%lx:%lx] dev=%s\n",
166 alo, ahi, pci_name(dev)); 168 alo, ahi, pci_name(dev));
167#endif 169#endif
168 170
@@ -172,7 +174,7 @@ pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
172 return piar; 174 return piar;
173} 175}
174 176
175static void __pci_addr_cache_insert_device(struct pci_dev *dev) 177static void __eeh_addr_cache_insert_dev(struct pci_dev *dev)
176{ 178{
177 struct device_node *dn; 179 struct device_node *dn;
178 struct eeh_dev *edev; 180 struct eeh_dev *edev;
@@ -180,7 +182,7 @@ static void __pci_addr_cache_insert_device(struct pci_dev *dev)
180 182
181 dn = pci_device_to_OF_node(dev); 183 dn = pci_device_to_OF_node(dev);
182 if (!dn) { 184 if (!dn) {
183 printk(KERN_WARNING "PCI: no pci dn found for dev=%s\n", pci_name(dev)); 185 pr_warning("PCI: no pci dn found for dev=%s\n", pci_name(dev));
184 return; 186 return;
185 } 187 }
186 188
@@ -192,8 +194,7 @@ static void __pci_addr_cache_insert_device(struct pci_dev *dev)
192 } 194 }
193 195
194 /* Skip any devices for which EEH is not enabled. */ 196 /* Skip any devices for which EEH is not enabled. */
195 if (!(edev->mode & EEH_MODE_SUPPORTED) || 197 if (!edev->pe) {
196 edev->mode & EEH_MODE_NOCHECK) {
197#ifdef DEBUG 198#ifdef DEBUG
198 pr_info("PCI: skip building address cache for=%s - %s\n", 199 pr_info("PCI: skip building address cache for=%s - %s\n",
199 pci_name(dev), dn->full_name); 200 pci_name(dev), dn->full_name);
@@ -212,19 +213,19 @@ static void __pci_addr_cache_insert_device(struct pci_dev *dev)
212 continue; 213 continue;
213 if (start == 0 || ~start == 0 || end == 0 || ~end == 0) 214 if (start == 0 || ~start == 0 || end == 0 || ~end == 0)
214 continue; 215 continue;
215 pci_addr_cache_insert(dev, start, end, flags); 216 eeh_addr_cache_insert(dev, start, end, flags);
216 } 217 }
217} 218}
218 219
219/** 220/**
220 * pci_addr_cache_insert_device - Add a device to the address cache 221 * eeh_addr_cache_insert_dev - Add a device to the address cache
221 * @dev: PCI device whose I/O addresses we are interested in. 222 * @dev: PCI device whose I/O addresses we are interested in.
222 * 223 *
223 * In order to support the fast lookup of devices based on addresses, 224 * In order to support the fast lookup of devices based on addresses,
224 * we maintain a cache of devices that can be quickly searched. 225 * we maintain a cache of devices that can be quickly searched.
225 * This routine adds a device to that cache. 226 * This routine adds a device to that cache.
226 */ 227 */
227void pci_addr_cache_insert_device(struct pci_dev *dev) 228void eeh_addr_cache_insert_dev(struct pci_dev *dev)
228{ 229{
229 unsigned long flags; 230 unsigned long flags;
230 231
@@ -233,11 +234,11 @@ void pci_addr_cache_insert_device(struct pci_dev *dev)
233 return; 234 return;
234 235
235 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags); 236 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
236 __pci_addr_cache_insert_device(dev); 237 __eeh_addr_cache_insert_dev(dev);
237 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags); 238 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
238} 239}
239 240
240static inline void __pci_addr_cache_remove_device(struct pci_dev *dev) 241static inline void __eeh_addr_cache_rmv_dev(struct pci_dev *dev)
241{ 242{
242 struct rb_node *n; 243 struct rb_node *n;
243 244
@@ -258,7 +259,7 @@ restart:
258} 259}
259 260
260/** 261/**
261 * pci_addr_cache_remove_device - remove pci device from addr cache 262 * eeh_addr_cache_rmv_dev - remove pci device from addr cache
262 * @dev: device to remove 263 * @dev: device to remove
263 * 264 *
264 * Remove a device from the addr-cache tree. 265 * Remove a device from the addr-cache tree.
@@ -266,17 +267,17 @@ restart:
266 * the tree multiple times (once per resource). 267 * the tree multiple times (once per resource).
267 * But so what; device removal doesn't need to be that fast. 268 * But so what; device removal doesn't need to be that fast.
268 */ 269 */
269void pci_addr_cache_remove_device(struct pci_dev *dev) 270void eeh_addr_cache_rmv_dev(struct pci_dev *dev)
270{ 271{
271 unsigned long flags; 272 unsigned long flags;
272 273
273 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags); 274 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
274 __pci_addr_cache_remove_device(dev); 275 __eeh_addr_cache_rmv_dev(dev);
275 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags); 276 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
276} 277}
277 278
278/** 279/**
279 * pci_addr_cache_build - Build a cache of I/O addresses 280 * eeh_addr_cache_build - Build a cache of I/O addresses
280 * 281 *
281 * Build a cache of pci i/o addresses. This cache will be used to 282 * Build a cache of pci i/o addresses. This cache will be used to
282 * find the pci device that corresponds to a given address. 283 * find the pci device that corresponds to a given address.
@@ -284,7 +285,7 @@ void pci_addr_cache_remove_device(struct pci_dev *dev)
284 * Must be run late in boot process, after the pci controllers 285 * Must be run late in boot process, after the pci controllers
285 * have been scanned for devices (after all device resources are known). 286 * have been scanned for devices (after all device resources are known).
286 */ 287 */
287void __init pci_addr_cache_build(void) 288void __init eeh_addr_cache_build(void)
288{ 289{
289 struct device_node *dn; 290 struct device_node *dn;
290 struct eeh_dev *edev; 291 struct eeh_dev *edev;
@@ -293,7 +294,7 @@ void __init pci_addr_cache_build(void)
293 spin_lock_init(&pci_io_addr_cache_root.piar_lock); 294 spin_lock_init(&pci_io_addr_cache_root.piar_lock);
294 295
295 for_each_pci_dev(dev) { 296 for_each_pci_dev(dev) {
296 pci_addr_cache_insert_device(dev); 297 eeh_addr_cache_insert_dev(dev);
297 298
298 dn = pci_device_to_OF_node(dev); 299 dn = pci_device_to_OF_node(dev);
299 if (!dn) 300 if (!dn)
@@ -312,7 +313,7 @@ void __init pci_addr_cache_build(void)
312 313
313#ifdef DEBUG 314#ifdef DEBUG
314 /* Verify tree built up above, echo back the list of addrs. */ 315 /* Verify tree built up above, echo back the list of addrs. */
315 pci_addr_cache_print(&pci_io_addr_cache_root); 316 eeh_addr_cache_print(&pci_io_addr_cache_root);
316#endif 317#endif
317} 318}
318 319
diff --git a/arch/powerpc/platforms/pseries/eeh_dev.c b/arch/powerpc/platforms/pseries/eeh_dev.c
index c4507d09590..66442341d3a 100644
--- a/arch/powerpc/platforms/pseries/eeh_dev.c
+++ b/arch/powerpc/platforms/pseries/eeh_dev.c
@@ -55,7 +55,7 @@ void * __devinit eeh_dev_init(struct device_node *dn, void *data)
55 struct eeh_dev *edev; 55 struct eeh_dev *edev;
56 56
57 /* Allocate EEH device */ 57 /* Allocate EEH device */
58 edev = zalloc_maybe_bootmem(sizeof(*edev), GFP_KERNEL); 58 edev = kzalloc(sizeof(*edev), GFP_KERNEL);
59 if (!edev) { 59 if (!edev) {
60 pr_warning("%s: out of memory\n", __func__); 60 pr_warning("%s: out of memory\n", __func__);
61 return NULL; 61 return NULL;
@@ -65,6 +65,7 @@ void * __devinit eeh_dev_init(struct device_node *dn, void *data)
65 PCI_DN(dn)->edev = edev; 65 PCI_DN(dn)->edev = edev;
66 edev->dn = dn; 66 edev->dn = dn;
67 edev->phb = phb; 67 edev->phb = phb;
68 INIT_LIST_HEAD(&edev->list);
68 69
69 return NULL; 70 return NULL;
70} 71}
@@ -80,6 +81,9 @@ void __devinit eeh_dev_phb_init_dynamic(struct pci_controller *phb)
80{ 81{
81 struct device_node *dn = phb->dn; 82 struct device_node *dn = phb->dn;
82 83
84 /* EEH PE for PHB */
85 eeh_phb_pe_create(phb);
86
83 /* EEH device for PHB */ 87 /* EEH device for PHB */
84 eeh_dev_init(dn, phb); 88 eeh_dev_init(dn, phb);
85 89
@@ -93,10 +97,16 @@ void __devinit eeh_dev_phb_init_dynamic(struct pci_controller *phb)
93 * Scan all the existing PHBs and create EEH devices for their OF 97 * Scan all the existing PHBs and create EEH devices for their OF
94 * nodes and their children OF nodes 98 * nodes and their children OF nodes
95 */ 99 */
96void __init eeh_dev_phb_init(void) 100static int __init eeh_dev_phb_init(void)
97{ 101{
98 struct pci_controller *phb, *tmp; 102 struct pci_controller *phb, *tmp;
99 103
100 list_for_each_entry_safe(phb, tmp, &hose_list, list_node) 104 list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
101 eeh_dev_phb_init_dynamic(phb); 105 eeh_dev_phb_init_dynamic(phb);
106
107 pr_info("EEH: devices created\n");
108
109 return 0;
102} 110}
111
112core_initcall(eeh_dev_phb_init);
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index baf92cd9dfa..a3fefb61097 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -25,6 +25,7 @@
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/irq.h> 27#include <linux/irq.h>
28#include <linux/module.h>
28#include <linux/pci.h> 29#include <linux/pci.h>
29#include <asm/eeh.h> 30#include <asm/eeh.h>
30#include <asm/eeh_event.h> 31#include <asm/eeh_event.h>
@@ -47,6 +48,41 @@ static inline const char *eeh_pcid_name(struct pci_dev *pdev)
47 return ""; 48 return "";
48} 49}
49 50
51/**
52 * eeh_pcid_get - Get the PCI device driver
53 * @pdev: PCI device
54 *
55 * The function is used to retrieve the PCI device driver for
 56 * the indicated PCI device. It also takes a reference on the
 57 * driver's module to prevent it from being unloaded on the fly;
 58 * otherwise a kernel crash could result.
59 */
60static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
61{
62 if (!pdev || !pdev->driver)
63 return NULL;
64
65 if (!try_module_get(pdev->driver->driver.owner))
66 return NULL;
67
68 return pdev->driver;
69}
70
71/**
72 * eeh_pcid_put - Dereference on the PCI device driver
73 * @pdev: PCI device
74 *
 75 * The function drops the module reference taken on the PCI
 76 * device driver of the indicated PCI device.
77 */
78static inline void eeh_pcid_put(struct pci_dev *pdev)
79{
80 if (!pdev || !pdev->driver)
81 return;
82
83 module_put(pdev->driver->driver.owner);
84}
85
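Every successful eeh_pcid_get() must be paired with an eeh_pcid_put(), including on early-return paths, which is why each report callback below releases the reference before returning. A minimal usage sketch (the callback itself is hypothetical):

        static void *report_something(struct pci_dev *dev)
        {
                struct pci_driver *driver = eeh_pcid_get(dev);

                if (!driver)
                        return NULL;            /* no driver, or module gone */

                if (driver->err_handler && driver->err_handler->resume)
                        driver->err_handler->resume(dev);

                eeh_pcid_put(dev);              /* drop the module reference */
                return NULL;
        }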
50#if 0 86#if 0
51static void print_device_node_tree(struct pci_dn *pdn, int dent) 87static void print_device_node_tree(struct pci_dn *pdn, int dent)
52{ 88{
@@ -93,7 +129,7 @@ static void eeh_disable_irq(struct pci_dev *dev)
93 if (!irq_has_action(dev->irq)) 129 if (!irq_has_action(dev->irq))
94 return; 130 return;
95 131
96 edev->mode |= EEH_MODE_IRQ_DISABLED; 132 edev->mode |= EEH_DEV_IRQ_DISABLED;
97 disable_irq_nosync(dev->irq); 133 disable_irq_nosync(dev->irq);
98} 134}
99 135
@@ -108,36 +144,44 @@ static void eeh_enable_irq(struct pci_dev *dev)
108{ 144{
109 struct eeh_dev *edev = pci_dev_to_eeh_dev(dev); 145 struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
110 146
111 if ((edev->mode) & EEH_MODE_IRQ_DISABLED) { 147 if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
112 edev->mode &= ~EEH_MODE_IRQ_DISABLED; 148 edev->mode &= ~EEH_DEV_IRQ_DISABLED;
113 enable_irq(dev->irq); 149 enable_irq(dev->irq);
114 } 150 }
115} 151}
116 152
117/** 153/**
118 * eeh_report_error - Report pci error to each device driver 154 * eeh_report_error - Report pci error to each device driver
119 * @dev: PCI device 155 * @data: eeh device
120 * @userdata: return value 156 * @userdata: return value
121 * 157 *
122 * Report an EEH error to each device driver, collect up and 158 * Report an EEH error to each device driver, collect up and
123 * merge the device driver responses. Cumulative response 159 * merge the device driver responses. Cumulative response
124 * passed back in "userdata". 160 * passed back in "userdata".
125 */ 161 */
126static int eeh_report_error(struct pci_dev *dev, void *userdata) 162static void *eeh_report_error(void *data, void *userdata)
127{ 163{
164 struct eeh_dev *edev = (struct eeh_dev *)data;
165 struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
128 enum pci_ers_result rc, *res = userdata; 166 enum pci_ers_result rc, *res = userdata;
129 struct pci_driver *driver = dev->driver; 167 struct pci_driver *driver;
130 168
 169 /* We might not have the associated PCI device;
 170 * if so, continue to the next one.
171 */
172 if (!dev) return NULL;
131 dev->error_state = pci_channel_io_frozen; 173 dev->error_state = pci_channel_io_frozen;
132 174
133 if (!driver) 175 driver = eeh_pcid_get(dev);
134 return 0; 176 if (!driver) return NULL;
135 177
136 eeh_disable_irq(dev); 178 eeh_disable_irq(dev);
137 179
138 if (!driver->err_handler || 180 if (!driver->err_handler ||
139 !driver->err_handler->error_detected) 181 !driver->err_handler->error_detected) {
140 return 0; 182 eeh_pcid_put(dev);
183 return NULL;
184 }
141 185
142 rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen); 186 rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);
143 187
@@ -145,27 +189,34 @@ static int eeh_report_error(struct pci_dev *dev, void *userdata)
145 if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; 189 if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
146 if (*res == PCI_ERS_RESULT_NONE) *res = rc; 190 if (*res == PCI_ERS_RESULT_NONE) *res = rc;
147 191
148 return 0; 192 eeh_pcid_put(dev);
193 return NULL;
149} 194}
150 195
151/** 196/**
152 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled 197 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
153 * @dev: PCI device 198 * @data: eeh device
154 * @userdata: return value 199 * @userdata: return value
155 * 200 *
156 * Tells each device driver that IO ports, MMIO and config space I/O 201 * Tells each device driver that IO ports, MMIO and config space I/O
157 * are now enabled. Collects up and merges the device driver responses. 202 * are now enabled. Collects up and merges the device driver responses.
158 * Cumulative response passed back in "userdata". 203 * Cumulative response passed back in "userdata".
159 */ 204 */
160static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata) 205static void *eeh_report_mmio_enabled(void *data, void *userdata)
161{ 206{
207 struct eeh_dev *edev = (struct eeh_dev *)data;
208 struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
162 enum pci_ers_result rc, *res = userdata; 209 enum pci_ers_result rc, *res = userdata;
163 struct pci_driver *driver = dev->driver; 210 struct pci_driver *driver;
164 211
165 if (!driver || 212 driver = eeh_pcid_get(dev);
166 !driver->err_handler || 213 if (!driver) return NULL;
167 !driver->err_handler->mmio_enabled) 214
168 return 0; 215 if (!driver->err_handler ||
216 !driver->err_handler->mmio_enabled) {
217 eeh_pcid_put(dev);
218 return NULL;
219 }
169 220
170 rc = driver->err_handler->mmio_enabled(dev); 221 rc = driver->err_handler->mmio_enabled(dev);
171 222
@@ -173,12 +224,13 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
173 if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; 224 if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
174 if (*res == PCI_ERS_RESULT_NONE) *res = rc; 225 if (*res == PCI_ERS_RESULT_NONE) *res = rc;
175 226
176 return 0; 227 eeh_pcid_put(dev);
228 return NULL;
177} 229}
178 230
179/** 231/**
180 * eeh_report_reset - Tell device that slot has been reset 232 * eeh_report_reset - Tell device that slot has been reset
181 * @dev: PCI device 233 * @data: eeh device
182 * @userdata: return value 234 * @userdata: return value
183 * 235 *
184 * This routine must be called while EEH tries to reset particular 236 * This routine must be called while EEH tries to reset particular
@@ -186,21 +238,26 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
186 * some actions, usually to save data the driver needs so that the 238 * some actions, usually to save data the driver needs so that the
187 * driver can work again while the device is recovered. 239 * driver can work again while the device is recovered.
188 */ 240 */
189static int eeh_report_reset(struct pci_dev *dev, void *userdata) 241static void *eeh_report_reset(void *data, void *userdata)
190{ 242{
243 struct eeh_dev *edev = (struct eeh_dev *)data;
244 struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
191 enum pci_ers_result rc, *res = userdata; 245 enum pci_ers_result rc, *res = userdata;
192 struct pci_driver *driver = dev->driver; 246 struct pci_driver *driver;
193
194 if (!driver)
195 return 0;
196 247
248 if (!dev) return NULL;
197 dev->error_state = pci_channel_io_normal; 249 dev->error_state = pci_channel_io_normal;
198 250
251 driver = eeh_pcid_get(dev);
252 if (!driver) return NULL;
253
199 eeh_enable_irq(dev); 254 eeh_enable_irq(dev);
200 255
201 if (!driver->err_handler || 256 if (!driver->err_handler ||
202 !driver->err_handler->slot_reset) 257 !driver->err_handler->slot_reset) {
203 return 0; 258 eeh_pcid_put(dev);
259 return NULL;
260 }
204 261
205 rc = driver->err_handler->slot_reset(dev); 262 rc = driver->err_handler->slot_reset(dev);
206 if ((*res == PCI_ERS_RESULT_NONE) || 263 if ((*res == PCI_ERS_RESULT_NONE) ||
@@ -208,109 +265,115 @@ static int eeh_report_reset(struct pci_dev *dev, void *userdata)
208 if (*res == PCI_ERS_RESULT_DISCONNECT && 265 if (*res == PCI_ERS_RESULT_DISCONNECT &&
209 rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; 266 rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
210 267
211 return 0; 268 eeh_pcid_put(dev);
269 return NULL;
212} 270}
213 271
214/** 272/**
215 * eeh_report_resume - Tell device to resume normal operations 273 * eeh_report_resume - Tell device to resume normal operations
216 * @dev: PCI device 274 * @data: eeh device
217 * @userdata: return value 275 * @userdata: return value
218 * 276 *
219 * This routine must be called to notify the device driver that it 277 * This routine must be called to notify the device driver that it
220 * could resume so that the device driver can do some initialization 278 * could resume so that the device driver can do some initialization
221 * to make the recovered device work again. 279 * to make the recovered device work again.
222 */ 280 */
223static int eeh_report_resume(struct pci_dev *dev, void *userdata) 281static void *eeh_report_resume(void *data, void *userdata)
224{ 282{
225 struct pci_driver *driver = dev->driver; 283 struct eeh_dev *edev = (struct eeh_dev *)data;
284 struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
285 struct pci_driver *driver;
226 286
287 if (!dev) return NULL;
227 dev->error_state = pci_channel_io_normal; 288 dev->error_state = pci_channel_io_normal;
228 289
229 if (!driver) 290 driver = eeh_pcid_get(dev);
230 return 0; 291 if (!driver) return NULL;
231 292
232 eeh_enable_irq(dev); 293 eeh_enable_irq(dev);
233 294
234 if (!driver->err_handler || 295 if (!driver->err_handler ||
235 !driver->err_handler->resume) 296 !driver->err_handler->resume) {
236 return 0; 297 eeh_pcid_put(dev);
298 return NULL;
299 }
237 300
238 driver->err_handler->resume(dev); 301 driver->err_handler->resume(dev);
239 302
240 return 0; 303 eeh_pcid_put(dev);
304 return NULL;
241} 305}
242 306
243/** 307/**
244 * eeh_report_failure - Tell device driver that device is dead. 308 * eeh_report_failure - Tell device driver that device is dead.
245 * @dev: PCI device 309 * @data: eeh device
246 * @userdata: return value 310 * @userdata: return value
247 * 311 *
248 * This informs the device driver that the device is permanently 312 * This informs the device driver that the device is permanently
249 * dead, and that no further recovery attempts will be made on it. 313 * dead, and that no further recovery attempts will be made on it.
250 */ 314 */
251static int eeh_report_failure(struct pci_dev *dev, void *userdata) 315static void *eeh_report_failure(void *data, void *userdata)
252{ 316{
253 struct pci_driver *driver = dev->driver; 317 struct eeh_dev *edev = (struct eeh_dev *)data;
318 struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
319 struct pci_driver *driver;
254 320
321 if (!dev) return NULL;
255 dev->error_state = pci_channel_io_perm_failure; 322 dev->error_state = pci_channel_io_perm_failure;
256 323
257 if (!driver) 324 driver = eeh_pcid_get(dev);
258 return 0; 325 if (!driver) return NULL;
259 326
260 eeh_disable_irq(dev); 327 eeh_disable_irq(dev);
261 328
262 if (!driver->err_handler || 329 if (!driver->err_handler ||
263 !driver->err_handler->error_detected) 330 !driver->err_handler->error_detected) {
264 return 0; 331 eeh_pcid_put(dev);
332 return NULL;
333 }
265 334
266 driver->err_handler->error_detected(dev, pci_channel_io_perm_failure); 335 driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
267 336
268 return 0; 337 eeh_pcid_put(dev);
338 return NULL;
269} 339}
270 340
271/** 341/**
272 * eeh_reset_device - Perform actual reset of a pci slot 342 * eeh_reset_device - Perform actual reset of a pci slot
273 * @edev: PE associated EEH device 343 * @pe: EEH PE
 274 * @bus: PCI bus corresponding to the isolated slot 344 * @bus: PCI bus corresponding to the isolated slot
275 * 345 *
 276 * This routine must be called to reset the indicated PE. 346 * This routine must be called to reset the indicated PE.
277 * During the reset, udev might be invoked because those affected 347 * During the reset, udev might be invoked because those affected
278 * PCI devices will be removed and then added. 348 * PCI devices will be removed and then added.
279 */ 349 */
280static int eeh_reset_device(struct eeh_dev *edev, struct pci_bus *bus) 350static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
281{ 351{
282 struct device_node *dn;
283 int cnt, rc; 352 int cnt, rc;
284 353
285 /* pcibios will clear the counter; save the value */ 354 /* pcibios will clear the counter; save the value */
286 cnt = edev->freeze_count; 355 cnt = pe->freeze_count;
287 356
357 /*
 358 * We don't remove the corresponding PE instances because
 359 * we need the information afterwards. The attached EEH
 360 * devices are expected to be re-attached shortly when
 361 * pcibios_add_pci_devices() is called.
362 */
288 if (bus) 363 if (bus)
289 pcibios_remove_pci_devices(bus); 364 __pcibios_remove_pci_devices(bus, 0);
290 365
291 /* Reset the pci controller. (Asserts RST#; resets config space). 366 /* Reset the pci controller. (Asserts RST#; resets config space).
292 * Reconfigure bridges and devices. Don't try to bring the system 367 * Reconfigure bridges and devices. Don't try to bring the system
293 * up if the reset failed for some reason. 368 * up if the reset failed for some reason.
294 */ 369 */
295 rc = eeh_reset_pe(edev); 370 rc = eeh_reset_pe(pe);
296 if (rc) 371 if (rc)
297 return rc; 372 return rc;
298 373
299 /* Walk over all functions on this device. */ 374 /* Restore PE */
300 dn = eeh_dev_to_of_node(edev); 375 eeh_ops->configure_bridge(pe);
301 if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent)) 376 eeh_pe_restore_bars(pe);
302 dn = dn->parent->child;
303
304 while (dn) {
305 struct eeh_dev *pedev = of_node_to_eeh_dev(dn);
306
307 /* On Power4, always true because eeh_pe_config_addr=0 */
308 if (edev->pe_config_addr == pedev->pe_config_addr) {
309 eeh_ops->configure_bridge(dn);
310 eeh_restore_bars(pedev);
311 }
312 dn = dn->sibling;
313 }
314 377
315 /* Give the system 5 seconds to finish running the user-space 378 /* Give the system 5 seconds to finish running the user-space
316 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes, 379 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
@@ -322,7 +385,7 @@ static int eeh_reset_device(struct eeh_dev *edev, struct pci_bus *bus)
322 ssleep(5); 385 ssleep(5);
323 pcibios_add_pci_devices(bus); 386 pcibios_add_pci_devices(bus);
324 } 387 }
325 edev->freeze_count = cnt; 388 pe->freeze_count = cnt;
326 389
327 return 0; 390 return 0;
328} 391}
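Distilled from eeh_reset_device() above, the recovery reset now follows a fixed order (illustrative pseudo-sequence, not compilable in isolation; the sleep and rescan happen only when a bus was passed in):

        __pcibios_remove_pci_devices(bus, 0);   /* unbind drivers, keep PE info    */
        eeh_reset_pe(pe);                       /* assert RST#, reset config space */
        eeh_ops->configure_bridge(pe);          /* reconfigure the bridges         */
        eeh_pe_restore_bars(pe);                /* restore the saved BARs          */
        ssleep(5);                              /* let hotplug scripts settle      */
        pcibios_add_pci_devices(bus);           /* rescan and rebind drivers       */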
@@ -334,7 +397,7 @@ static int eeh_reset_device(struct eeh_dev *edev, struct pci_bus *bus)
334 397
335/** 398/**
336 * eeh_handle_event - Reset a PCI device after hard lockup. 399 * eeh_handle_event - Reset a PCI device after hard lockup.
337 * @event: EEH event 400 * @pe: EEH PE
338 * 401 *
 339 * When the PHB detects address or data parity errors on a particular PCI 402 * When the PHB detects address or data parity errors on a particular PCI
 340 * slot, the associated PE will be frozen. Besides, DMAs occurring 403 * slot, the associated PE will be frozen. Besides, DMAs occurring
@@ -349,69 +412,24 @@ static int eeh_reset_device(struct eeh_dev *edev, struct pci_bus *bus)
349 * drivers (which cause a second set of hotplug events to go out to 412 * drivers (which cause a second set of hotplug events to go out to
350 * userspace). 413 * userspace).
351 */ 414 */
352struct eeh_dev *handle_eeh_events(struct eeh_event *event) 415void eeh_handle_event(struct eeh_pe *pe)
353{ 416{
354 struct device_node *frozen_dn;
355 struct eeh_dev *frozen_edev;
356 struct pci_bus *frozen_bus; 417 struct pci_bus *frozen_bus;
357 int rc = 0; 418 int rc = 0;
358 enum pci_ers_result result = PCI_ERS_RESULT_NONE; 419 enum pci_ers_result result = PCI_ERS_RESULT_NONE;
359 const char *location, *pci_str, *drv_str, *bus_pci_str, *bus_drv_str;
360
361 frozen_dn = eeh_find_device_pe(eeh_dev_to_of_node(event->edev));
362 if (!frozen_dn) {
363 location = of_get_property(eeh_dev_to_of_node(event->edev), "ibm,loc-code", NULL);
364 location = location ? location : "unknown";
365 printk(KERN_ERR "EEH: Error: Cannot find partition endpoint "
366 "for location=%s pci addr=%s\n",
367 location, eeh_pci_name(eeh_dev_to_pci_dev(event->edev)));
368 return NULL;
369 }
370
371 frozen_bus = pcibios_find_pci_bus(frozen_dn);
372 location = of_get_property(frozen_dn, "ibm,loc-code", NULL);
373 location = location ? location : "unknown";
374
375 /* There are two different styles for coming up with the PE.
376 * In the old style, it was the highest EEH-capable device
377 * which was always an EADS pci bridge. In the new style,
378 * there might not be any EADS bridges, and even when there are,
379 * the firmware marks them as "EEH incapable". So another
380 * two-step is needed to find the pci bus..
381 */
382 if (!frozen_bus)
383 frozen_bus = pcibios_find_pci_bus(frozen_dn->parent);
384 420
421 frozen_bus = eeh_pe_bus_get(pe);
385 if (!frozen_bus) { 422 if (!frozen_bus) {
386 printk(KERN_ERR "EEH: Cannot find PCI bus " 423 pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
387 "for location=%s dn=%s\n", 424 __func__, pe->phb->global_number, pe->addr);
388 location, frozen_dn->full_name); 425 return;
389 return NULL;
390 } 426 }
391 427
392 frozen_edev = of_node_to_eeh_dev(frozen_dn); 428 pe->freeze_count++;
393 frozen_edev->freeze_count++; 429 if (pe->freeze_count > EEH_MAX_ALLOWED_FREEZES)
394 pci_str = eeh_pci_name(eeh_dev_to_pci_dev(event->edev));
395 drv_str = eeh_pcid_name(eeh_dev_to_pci_dev(event->edev));
396
397 if (frozen_edev->freeze_count > EEH_MAX_ALLOWED_FREEZES)
398 goto excess_failures; 430 goto excess_failures;
399 431 pr_warning("EEH: This PCI device has failed %d times in the last hour\n",
400 printk(KERN_WARNING 432 pe->freeze_count);
401 "EEH: This PCI device has failed %d times in the last hour:\n",
402 frozen_edev->freeze_count);
403
404 if (frozen_edev->pdev) {
405 bus_pci_str = pci_name(frozen_edev->pdev);
406 bus_drv_str = eeh_pcid_name(frozen_edev->pdev);
407 printk(KERN_WARNING
408 "EEH: Bus location=%s driver=%s pci addr=%s\n",
409 location, bus_drv_str, bus_pci_str);
410 }
411
412 printk(KERN_WARNING
413 "EEH: Device location=%s driver=%s pci addr=%s\n",
414 location, drv_str, pci_str);
415 433
416 /* Walk the various device drivers attached to this slot through 434 /* Walk the various device drivers attached to this slot through
417 * a reset sequence, giving each an opportunity to do what it needs 435 * a reset sequence, giving each an opportunity to do what it needs
@@ -419,12 +437,12 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event)
419 * status ... if any child can't handle the reset, then the entire 437 * status ... if any child can't handle the reset, then the entire
420 * slot is dlpar removed and added. 438 * slot is dlpar removed and added.
421 */ 439 */
422 pci_walk_bus(frozen_bus, eeh_report_error, &result); 440 eeh_pe_dev_traverse(pe, eeh_report_error, &result);
423 441
424 /* Get the current PCI slot state. This can take a long time, 442 /* Get the current PCI slot state. This can take a long time,
425 * sometimes over 3 seconds for certain systems. 443 * sometimes over 3 seconds for certain systems.
426 */ 444 */
427 rc = eeh_ops->wait_state(eeh_dev_to_of_node(frozen_edev), MAX_WAIT_FOR_RECOVERY*1000); 445 rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
428 if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) { 446 if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
429 printk(KERN_WARNING "EEH: Permanent failure\n"); 447 printk(KERN_WARNING "EEH: Permanent failure\n");
430 goto hard_fail; 448 goto hard_fail;
@@ -434,14 +452,14 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event)
434 * don't post the error log until after all dev drivers 452 * don't post the error log until after all dev drivers
435 * have been informed. 453 * have been informed.
436 */ 454 */
437 eeh_slot_error_detail(frozen_edev, EEH_LOG_TEMP); 455 eeh_slot_error_detail(pe, EEH_LOG_TEMP);
438 456
439 /* If all device drivers were EEH-unaware, then shut 457 /* If all device drivers were EEH-unaware, then shut
440 * down all of the device drivers, and hope they 458 * down all of the device drivers, and hope they
 441 * go down willingly, without panicking the system. 459 * go down willingly, without panicking the system.
442 */ 460 */
443 if (result == PCI_ERS_RESULT_NONE) { 461 if (result == PCI_ERS_RESULT_NONE) {
444 rc = eeh_reset_device(frozen_edev, frozen_bus); 462 rc = eeh_reset_device(pe, frozen_bus);
445 if (rc) { 463 if (rc) {
446 printk(KERN_WARNING "EEH: Unable to reset, rc=%d\n", rc); 464 printk(KERN_WARNING "EEH: Unable to reset, rc=%d\n", rc);
447 goto hard_fail; 465 goto hard_fail;
@@ -450,7 +468,7 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event)
450 468
451 /* If all devices reported they can proceed, then re-enable MMIO */ 469 /* If all devices reported they can proceed, then re-enable MMIO */
452 if (result == PCI_ERS_RESULT_CAN_RECOVER) { 470 if (result == PCI_ERS_RESULT_CAN_RECOVER) {
453 rc = eeh_pci_enable(frozen_edev, EEH_OPT_THAW_MMIO); 471 rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
454 472
455 if (rc < 0) 473 if (rc < 0)
456 goto hard_fail; 474 goto hard_fail;
@@ -458,13 +476,13 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event)
458 result = PCI_ERS_RESULT_NEED_RESET; 476 result = PCI_ERS_RESULT_NEED_RESET;
459 } else { 477 } else {
460 result = PCI_ERS_RESULT_NONE; 478 result = PCI_ERS_RESULT_NONE;
461 pci_walk_bus(frozen_bus, eeh_report_mmio_enabled, &result); 479 eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result);
462 } 480 }
463 } 481 }
464 482
465 /* If all devices reported they can proceed, then re-enable DMA */ 483 /* If all devices reported they can proceed, then re-enable DMA */
466 if (result == PCI_ERS_RESULT_CAN_RECOVER) { 484 if (result == PCI_ERS_RESULT_CAN_RECOVER) {
467 rc = eeh_pci_enable(frozen_edev, EEH_OPT_THAW_DMA); 485 rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
468 486
469 if (rc < 0) 487 if (rc < 0)
470 goto hard_fail; 488 goto hard_fail;
@@ -482,13 +500,13 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event)
482 500
483 /* If any device called out for a reset, then reset the slot */ 501 /* If any device called out for a reset, then reset the slot */
484 if (result == PCI_ERS_RESULT_NEED_RESET) { 502 if (result == PCI_ERS_RESULT_NEED_RESET) {
485 rc = eeh_reset_device(frozen_edev, NULL); 503 rc = eeh_reset_device(pe, NULL);
486 if (rc) { 504 if (rc) {
487 printk(KERN_WARNING "EEH: Cannot reset, rc=%d\n", rc); 505 printk(KERN_WARNING "EEH: Cannot reset, rc=%d\n", rc);
488 goto hard_fail; 506 goto hard_fail;
489 } 507 }
490 result = PCI_ERS_RESULT_NONE; 508 result = PCI_ERS_RESULT_NONE;
491 pci_walk_bus(frozen_bus, eeh_report_reset, &result); 509 eeh_pe_dev_traverse(pe, eeh_report_reset, &result);
492 } 510 }
493 511
494 /* All devices should claim they have recovered by now. */ 512 /* All devices should claim they have recovered by now. */
@@ -499,9 +517,9 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event)
499 } 517 }
500 518
501 /* Tell all device drivers that they can resume operations */ 519 /* Tell all device drivers that they can resume operations */
502 pci_walk_bus(frozen_bus, eeh_report_resume, NULL); 520 eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);
503 521
504 return frozen_edev; 522 return;
505 523
506excess_failures: 524excess_failures:
507 /* 525 /*
@@ -509,30 +527,26 @@ excess_failures:
509 * are due to poorly seated PCI cards. Only 10% or so are 527 * are due to poorly seated PCI cards. Only 10% or so are
510 * due to actual, failed cards. 528 * due to actual, failed cards.
511 */ 529 */
512 printk(KERN_ERR 530 pr_err("EEH: PHB#%d-PE#%x has failed %d times in the\n"
513 "EEH: PCI device at location=%s driver=%s pci addr=%s\n" 531 "last hour and has been permanently disabled.\n"
514 "has failed %d times in the last hour " 532 "Please try reseating or replacing it.\n",
515 "and has been permanently disabled.\n" 533 pe->phb->global_number, pe->addr,
516 "Please try reseating this device or replacing it.\n", 534 pe->freeze_count);
517 location, drv_str, pci_str, frozen_edev->freeze_count);
518 goto perm_error; 535 goto perm_error;
519 536
520hard_fail: 537hard_fail:
521 printk(KERN_ERR 538 pr_err("EEH: Unable to recover from failure from PHB#%d-PE#%x.\n"
522 "EEH: Unable to recover from failure of PCI device " 539 "Please try reseating or replacing it\n",
523 "at location=%s driver=%s pci addr=%s\n" 540 pe->phb->global_number, pe->addr);
524 "Please try reseating this device or replacing it.\n",
525 location, drv_str, pci_str);
526 541
527perm_error: 542perm_error:
528 eeh_slot_error_detail(frozen_edev, EEH_LOG_PERM); 543 eeh_slot_error_detail(pe, EEH_LOG_PERM);
529 544
530 /* Notify all devices that they're about to go down. */ 545 /* Notify all devices that they're about to go down. */
531 pci_walk_bus(frozen_bus, eeh_report_failure, NULL); 546 eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);
532 547
533 /* Shut down the device drivers for good. */ 548 /* Shut down the device drivers for good. */
534 pcibios_remove_pci_devices(frozen_bus); 549 if (frozen_bus)
535 550 pcibios_remove_pci_devices(frozen_bus);
536 return NULL;
537} 551}
538 552
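The *res bookkeeping scattered through the report callbacks merges pci_ers_result values by rough severity. The exact precedence differs slightly per phase, but the general rule can be sketched as one helper (the function name is hypothetical):

        #include <linux/pci.h>

        /* DISCONNECT dominates, then NEED_RESET; NONE is replaced by
         * whatever a driver reported. */
        static enum pci_ers_result merge_result(enum pci_ers_result old,
                                                enum pci_ers_result rc)
        {
                if (old == PCI_ERS_RESULT_DISCONNECT ||
                    rc == PCI_ERS_RESULT_DISCONNECT)
                        return PCI_ERS_RESULT_DISCONNECT;
                if (old == PCI_ERS_RESULT_NEED_RESET ||
                    rc == PCI_ERS_RESULT_NEED_RESET)
                        return PCI_ERS_RESULT_NEED_RESET;
                if (old == PCI_ERS_RESULT_NONE)
                        return rc;
                return old;
        }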
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index fb506317ebb..185bedd926d 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -23,6 +23,7 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/workqueue.h> 25#include <linux/workqueue.h>
26#include <linux/kthread.h>
26#include <asm/eeh_event.h> 27#include <asm/eeh_event.h>
27#include <asm/ppc-pci.h> 28#include <asm/ppc-pci.h>
28 29
@@ -57,9 +58,7 @@ static int eeh_event_handler(void * dummy)
57{ 58{
58 unsigned long flags; 59 unsigned long flags;
59 struct eeh_event *event; 60 struct eeh_event *event;
60 struct eeh_dev *edev; 61 struct eeh_pe *pe;
61
62 set_task_comm(current, "eehd");
63 62
64 spin_lock_irqsave(&eeh_eventlist_lock, flags); 63 spin_lock_irqsave(&eeh_eventlist_lock, flags);
65 event = NULL; 64 event = NULL;
@@ -76,28 +75,23 @@ static int eeh_event_handler(void * dummy)
76 75
77 /* Serialize processing of EEH events */ 76 /* Serialize processing of EEH events */
78 mutex_lock(&eeh_event_mutex); 77 mutex_lock(&eeh_event_mutex);
79 edev = event->edev; 78 pe = event->pe;
80 eeh_mark_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING); 79 eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
81 80 pr_info("EEH: Detected PCI bus error on PHB#%d-PE#%x\n",
82 printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n", 81 pe->phb->global_number, pe->addr);
83 eeh_pci_name(edev->pdev));
84 82
85 set_current_state(TASK_INTERRUPTIBLE); /* Don't add to load average */ 83 set_current_state(TASK_INTERRUPTIBLE); /* Don't add to load average */
86 edev = handle_eeh_events(event); 84 eeh_handle_event(pe);
87 85 eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
88 if (edev) {
89 eeh_clear_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING);
90 pci_dev_put(edev->pdev);
91 }
92 86
93 kfree(event); 87 kfree(event);
94 mutex_unlock(&eeh_event_mutex); 88 mutex_unlock(&eeh_event_mutex);
95 89
96 /* If there are no new errors after an hour, clear the counter. */ 90 /* If there are no new errors after an hour, clear the counter. */
97 if (edev && edev->freeze_count>0) { 91 if (pe && pe->freeze_count > 0) {
98 msleep_interruptible(3600*1000); 92 msleep_interruptible(3600*1000);
99 if (edev->freeze_count>0) 93 if (pe->freeze_count > 0)
100 edev->freeze_count--; 94 pe->freeze_count--;
101 95
102 } 96 }
103 97
@@ -113,42 +107,29 @@ static int eeh_event_handler(void * dummy)
113 */ 107 */
114static void eeh_thread_launcher(struct work_struct *dummy) 108static void eeh_thread_launcher(struct work_struct *dummy)
115{ 109{
116 if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0) 110 if (IS_ERR(kthread_run(eeh_event_handler, NULL, "eehd")))
117 printk(KERN_ERR "Failed to start EEH daemon\n"); 111 printk(KERN_ERR "Failed to start EEH daemon\n");
118} 112}
119 113
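The kernel_thread() plus set_task_comm() pair is replaced with kthread_run(), which creates, names and wakes the thread in one call and reports failure via an ERR_PTR. A minimal sketch of the launch pattern:

        #include <linux/err.h>
        #include <linux/kthread.h>

        static int start_handler(int (*fn)(void *))
        {
                struct task_struct *tsk = kthread_run(fn, NULL, "eehd");

                if (IS_ERR(tsk))
                        return PTR_ERR(tsk);    /* e.g. -ENOMEM */
                return 0;
        }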
120/** 114/**
121 * eeh_send_failure_event - Generate a PCI error event 115 * eeh_send_failure_event - Generate a PCI error event
122 * @edev: EEH device 116 * @pe: EEH PE
123 * 117 *
124 * This routine can be called within an interrupt context; 118 * This routine can be called within an interrupt context;
125 * the actual event will be delivered in a normal context 119 * the actual event will be delivered in a normal context
126 * (from a workqueue). 120 * (from a workqueue).
127 */ 121 */
128int eeh_send_failure_event(struct eeh_dev *edev) 122int eeh_send_failure_event(struct eeh_pe *pe)
129{ 123{
130 unsigned long flags; 124 unsigned long flags;
131 struct eeh_event *event; 125 struct eeh_event *event;
132 struct device_node *dn = eeh_dev_to_of_node(edev);
133 const char *location;
134
135 if (!mem_init_done) {
136 printk(KERN_ERR "EEH: event during early boot not handled\n");
137 location = of_get_property(dn, "ibm,loc-code", NULL);
138 printk(KERN_ERR "EEH: device node = %s\n", dn->full_name);
139 printk(KERN_ERR "EEH: PCI location = %s\n", location);
140 return 1;
141 }
142 event = kmalloc(sizeof(*event), GFP_ATOMIC);
143 if (event == NULL) {
144 printk(KERN_ERR "EEH: out of memory, event not handled\n");
145 return 1;
146 }
147
148 if (edev->pdev)
149 pci_dev_get(edev->pdev);
150 126
151 event->edev = edev; 127 event = kzalloc(sizeof(*event), GFP_ATOMIC);
128 if (!event) {
129 pr_err("EEH: out of memory, event not handled\n");
130 return -ENOMEM;
131 }
132 event->pe = pe;
152 133
153 /* We may or may not be called in an interrupt context */ 134 /* We may or may not be called in an interrupt context */
154 spin_lock_irqsave(&eeh_eventlist_lock, flags); 135 spin_lock_irqsave(&eeh_eventlist_lock, flags);
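eeh_send_failure_event() can be called from interrupt context, hence the GFP_ATOMIC allocation and the irqsave lock around the list. A self-contained sketch of the same producer pattern (all names here are hypothetical):

        #include <linux/list.h>
        #include <linux/slab.h>
        #include <linux/spinlock.h>

        static DEFINE_SPINLOCK(evt_lock);
        static LIST_HEAD(evt_list);

        struct evt {
                struct list_head link;
                void *payload;
        };

        /* Safe from interrupt context: atomic allocation, irqsave lock. */
        static int queue_event(void *payload)
        {
                unsigned long flags;
                struct evt *e = kzalloc(sizeof(*e), GFP_ATOMIC);

                if (!e)
                        return -ENOMEM;
                e->payload = payload;

                spin_lock_irqsave(&evt_lock, flags);
                list_add_tail(&e->link, &evt_list);
                spin_unlock_irqrestore(&evt_lock, flags);
                return 0;
        }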
diff --git a/arch/powerpc/platforms/pseries/eeh_pe.c b/arch/powerpc/platforms/pseries/eeh_pe.c
new file mode 100644
index 00000000000..797cd181dc3
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/eeh_pe.c
@@ -0,0 +1,652 @@
1/*
 2 * This file implements PEs based on the information from the
 3 * platforms. Basically, there are 3 types of PEs: PHB/Bus/Device.
 4 * All the PEs are organized as a hierarchy tree. The first level
 5 * of the tree is associated with the existing PHBs, since a
 6 * particular PE is only meaningful within one PHB domain.
7 *
8 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2012.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24
25#include <linux/export.h>
26#include <linux/gfp.h>
27#include <linux/init.h>
28#include <linux/kernel.h>
29#include <linux/pci.h>
30#include <linux/string.h>
31
32#include <asm/pci-bridge.h>
33#include <asm/ppc-pci.h>
34
35static LIST_HEAD(eeh_phb_pe);
36
37/**
38 * eeh_pe_alloc - Allocate PE
39 * @phb: PCI controller
40 * @type: PE type
41 *
42 * Allocate PE instance dynamically.
43 */
44static struct eeh_pe *eeh_pe_alloc(struct pci_controller *phb, int type)
45{
46 struct eeh_pe *pe;
47
48 /* Allocate PHB PE */
49 pe = kzalloc(sizeof(struct eeh_pe), GFP_KERNEL);
50 if (!pe) return NULL;
51
52 /* Initialize PHB PE */
53 pe->type = type;
54 pe->phb = phb;
55 INIT_LIST_HEAD(&pe->child_list);
56 INIT_LIST_HEAD(&pe->child);
57 INIT_LIST_HEAD(&pe->edevs);
58
59 return pe;
60}
61
62/**
63 * eeh_phb_pe_create - Create PHB PE
64 * @phb: PCI controller
65 *
 66 * The function should be called when a PHB is detected during
 67 * system boot or PCI hotplug in order to create the PHB PE.
68 */
69int __devinit eeh_phb_pe_create(struct pci_controller *phb)
70{
71 struct eeh_pe *pe;
72
73 /* Allocate PHB PE */
74 pe = eeh_pe_alloc(phb, EEH_PE_PHB);
75 if (!pe) {
76 pr_err("%s: out of memory!\n", __func__);
77 return -ENOMEM;
78 }
79
80 /* Put it into the list */
81 eeh_lock();
82 list_add_tail(&pe->child, &eeh_phb_pe);
83 eeh_unlock();
84
85 pr_debug("EEH: Add PE for PHB#%d\n", phb->global_number);
86
87 return 0;
88}
89
90/**
91 * eeh_phb_pe_get - Retrieve PHB PE based on the given PHB
92 * @phb: PCI controller
93 *
 94 * The PEs form a hierarchy tree. The first layer of the
95 * hierarchy tree is composed of PHB PEs. The function is used
96 * to retrieve the corresponding PHB PE according to the given PHB.
97 */
98static struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb)
99{
100 struct eeh_pe *pe;
101
102 list_for_each_entry(pe, &eeh_phb_pe, child) {
103 /*
104 * Actually, we needn't check the type since
 105 * the type of a PHB PE was determined when
 106 * it was created.
107 */
108 if ((pe->type & EEH_PE_PHB) && pe->phb == phb)
109 return pe;
110 }
111
112 return NULL;
113}
114
115/**
116 * eeh_pe_next - Retrieve the next PE in the tree
117 * @pe: current PE
118 * @root: root PE
119 *
120 * The function is used to retrieve the next PE in the
121 * hierarchy PE tree.
122 */
123static struct eeh_pe *eeh_pe_next(struct eeh_pe *pe,
124 struct eeh_pe *root)
125{
126 struct list_head *next = pe->child_list.next;
127
128 if (next == &pe->child_list) {
129 while (1) {
130 if (pe == root)
131 return NULL;
132 next = pe->child.next;
133 if (next != &pe->parent->child_list)
134 break;
135 pe = pe->parent;
136 }
137 }
138
139 return list_entry(next, struct eeh_pe, child);
140}
141
142/**
143 * eeh_pe_traverse - Traverse PEs in the specified PHB
144 * @root: root PE
145 * @fn: callback
146 * @flag: extra parameter to callback
147 *
148 * The function is used to traverse the specified PE and its
 149 * child PEs. The traversal terminates once the callback
 150 * returns something other than NULL, or there are no more
 151 * PEs to traverse.
152 */
153static void *eeh_pe_traverse(struct eeh_pe *root,
154 eeh_traverse_func fn, void *flag)
155{
156 struct eeh_pe *pe;
157 void *ret;
158
159 for (pe = root; pe; pe = eeh_pe_next(pe, root)) {
160 ret = fn(pe, flag);
161 if (ret) return ret;
162 }
163
164 return NULL;
165}
166
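eeh_pe_next() above implements an iterative pre-order walk: descend to the first child if one exists, otherwise climb until a sibling is found or the root is reached. Since eeh_pe_traverse() is static to this file, the following is purely illustrative of the callback contract (data is the current PE; return NULL to continue, non-NULL to stop):

        /* Hypothetical callback counting PEs via eeh_pe_traverse(). */
        static void *count_pe(void *data, void *flag)
        {
                int *cnt = flag;

                (*cnt)++;
                return NULL;    /* keep traversing */
        }

        /* ... int cnt = 0; eeh_pe_traverse(root, count_pe, &cnt); ... */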
167/**
168 * eeh_pe_dev_traverse - Traverse the devices from the PE
169 * @root: EEH PE
170 * @fn: function callback
171 * @flag: extra parameter to callback
172 *
173 * The function is used to traverse the devices of the specified
174 * PE and its child PEs.
175 */
176void *eeh_pe_dev_traverse(struct eeh_pe *root,
177 eeh_traverse_func fn, void *flag)
178{
179 struct eeh_pe *pe;
180 struct eeh_dev *edev;
181 void *ret;
182
183 if (!root) {
184 pr_warning("%s: Invalid PE %p\n", __func__, root);
185 return NULL;
186 }
187
188 eeh_lock();
189
190 /* Traverse root PE */
191 for (pe = root; pe; pe = eeh_pe_next(pe, root)) {
192 eeh_pe_for_each_dev(pe, edev) {
193 ret = fn(edev, flag);
194 if (ret) {
195 eeh_unlock();
196 return ret;
197 }
198 }
199 }
200
201 eeh_unlock();
202
203 return NULL;
204}
205
206/**
207 * __eeh_pe_get - Check the PE address
208 * @data: EEH PE
209 * @flag: EEH device
210 *
211 * For one particular PE, it can be identified by PE address
 212 * or traditional BDF address. A BDF address is composed of
 213 * Bus/Device/Function numbers. The extra data referred to by flag
214 * indicates which type of address should be used.
215 */
216static void *__eeh_pe_get(void *data, void *flag)
217{
218 struct eeh_pe *pe = (struct eeh_pe *)data;
219 struct eeh_dev *edev = (struct eeh_dev *)flag;
220
221 /* Unexpected PHB PE */
222 if (pe->type & EEH_PE_PHB)
223 return NULL;
224
225 /* We prefer PE address */
226 if (edev->pe_config_addr &&
227 (edev->pe_config_addr == pe->addr))
228 return pe;
229
230 /* Try BDF address */
 231 if (edev->config_addr &&
232 (edev->config_addr == pe->config_addr))
233 return pe;
234
235 return NULL;
236}
237
238/**
239 * eeh_pe_get - Search PE based on the given address
240 * @edev: EEH device
241 *
242 * Search for the corresponding PE based on the address carried
243 * by the EEH device. The function is used to check whether a
244 * PE has already been created for that address. Note that the
245 * PE address has 2 formats: the traditional address, which is
246 * composed of the PCI bus/device/function numbers, or the
247 * unified PE address.
248 */
249static struct eeh_pe *eeh_pe_get(struct eeh_dev *edev)
250{
251 struct eeh_pe *root = eeh_phb_pe_get(edev->phb);
252 struct eeh_pe *pe;
253
254 pe = eeh_pe_traverse(root, __eeh_pe_get, edev);
255
256 return pe;
257}
258
259/**
260 * eeh_pe_get_parent - Retrieve the parent PE
261 * @edev: EEH device
262 *
263 * All PEs existing in the system are organized as a hierarchy
264 * tree. The function is used to retrieve the parent PE according
265 * to the parent EEH device.
266 */
267static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev)
268{
269 struct device_node *dn;
270 struct eeh_dev *parent;
271
272 /*
273 * An indirect parent EEH device might already
274 * have an associated PE while the direct parent
275 * EEH device doesn't have one yet.
276 */
277 dn = edev->dn->parent;
278 while (dn) {
279 /* We're poking out of PCI territory */
280 if (!PCI_DN(dn)) return NULL;
281
282 parent = of_node_to_eeh_dev(dn);
283 /* The node doesn't have an associated EEH device */
284 if (!parent) return NULL;
285
286 if (parent->pe)
287 return parent->pe;
288
289 dn = dn->parent;
290 }
291
292 return NULL;
293}
294
295/**
296 * eeh_add_to_parent_pe - Add EEH device to parent PE
297 * @edev: EEH device
298 *
299 * Add the EEH device to its parent PE. If the parent PE already
300 * exists, the PE type will be changed to EEH_PE_BUS. Otherwise,
301 * we have to create a new PE to hold the EEH device, and the new
302 * PE will be linked to its parent PE as well.
303 */
304int eeh_add_to_parent_pe(struct eeh_dev *edev)
305{
306 struct eeh_pe *pe, *parent;
307
308 eeh_lock();
309
310 /*
311 * Check whether the PE already exists according
312 * to the PE address. If it does, the PE should be
313 * composed of a PCI bus and its subordinate
314 * components.
315 */
316 pe = eeh_pe_get(edev);
317 if (pe && !(pe->type & EEH_PE_INVALID)) {
318 if (!edev->pe_config_addr) {
319 eeh_unlock();
320 pr_err("%s: PE with addr 0x%x already exists\n",
321 __func__, edev->config_addr);
322 return -EEXIST;
323 }
324
325 /* Mark the PE as type of PCI bus */
326 pe->type = EEH_PE_BUS;
327 edev->pe = pe;
328
329 /* Put the edev to PE */
330 list_add_tail(&edev->list, &pe->edevs);
331 eeh_unlock();
332 pr_debug("EEH: Add %s to Bus PE#%x\n",
333 edev->dn->full_name, pe->addr);
334
335 return 0;
336 } else if (pe && (pe->type & EEH_PE_INVALID)) {
337 list_add_tail(&edev->list, &pe->edevs);
338 edev->pe = pe;
339 /*
340 * We get here because of PCI hotplug triggered by
341 * EEH recovery. We need to clear EEH_PE_INVALID up to the top.
342 */
343 parent = pe;
344 while (parent) {
345 if (!(parent->type & EEH_PE_INVALID))
346 break;
347 parent->type &= ~EEH_PE_INVALID;
348 parent = parent->parent;
349 }
350 eeh_unlock();
351 pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
352 edev->dn->full_name, pe->addr, pe->parent->addr);
353
354 return 0;
355 }
356
357 /* Create a new EEH PE */
358 pe = eeh_pe_alloc(edev->phb, EEH_PE_DEVICE);
359 if (!pe) {
360 eeh_unlock();
361 pr_err("%s: out of memory!\n", __func__);
362 return -ENOMEM;
363 }
364 pe->addr = edev->pe_config_addr;
365 pe->config_addr = edev->config_addr;
366
367 /*
368 * Put the new EEH PE into hierarchy tree. If the parent
369 * can't be found, the newly created PE will be attached
370 * to PHB directly. Otherwise, we have to associate the
371 * PE with its parent.
372 */
373 parent = eeh_pe_get_parent(edev);
374 if (!parent) {
375 parent = eeh_phb_pe_get(edev->phb);
376 if (!parent) {
377 eeh_unlock();
378 pr_err("%s: No PHB PE is found (PHB Domain=%d)\n",
379 __func__, edev->phb->global_number);
380 edev->pe = NULL;
381 kfree(pe);
382 return -EEXIST;
383 }
384 }
385 pe->parent = parent;
386
387 /*
388 * Put the newly created PE into the child list and
389 * link the EEH device accordingly.
390 */
391 list_add_tail(&pe->child, &parent->child_list);
392 list_add_tail(&edev->list, &pe->edevs);
393 edev->pe = pe;
394 eeh_unlock();
395 pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
396 edev->dn->full_name, pe->addr, pe->parent->addr);
397
398 return 0;
399}
400
401/**
402 * eeh_rmv_from_parent_pe - Remove one EEH device from the associated PE
403 * @edev: EEH device
404 * @purge_pe: remove PE or not
405 *
406 * The PE hierarchy tree might be changed when doing PCI hotplug.
407 * Also, the PCI devices or buses could be removed from the system
408 * during EEH recovery. So we have to call this function to remove
409 * the corresponding PE accordingly if necessary.
410 */
411int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe)
412{
413 struct eeh_pe *pe, *parent, *child;
414 int cnt;
415
416 if (!edev->pe) {
417 pr_warning("%s: No PE found for EEH device %s\n",
418 __func__, edev->dn->full_name);
419 return -EEXIST;
420 }
421
422 eeh_lock();
423
424 /* Remove the EEH device */
425 pe = edev->pe;
426 edev->pe = NULL;
427 list_del(&edev->list);
428
429 /*
430 * Check if the parent PE includes any EEH devices.
431 * If not, we should delete it. Also, we should
432 * delete the parent PE if it has neither associated
433 * child PEs nor EEH devices.
434 */
435 while (1) {
436 parent = pe->parent;
437 if (pe->type & EEH_PE_PHB)
438 break;
439
440 if (purge_pe) {
441 if (list_empty(&pe->edevs) &&
442 list_empty(&pe->child_list)) {
443 list_del(&pe->child);
444 kfree(pe);
445 } else {
446 break;
447 }
448 } else {
449 if (list_empty(&pe->edevs)) {
450 cnt = 0;
451 list_for_each_entry(child, &pe->child_list, child) {
452 if (!(child->type & EEH_PE_INVALID)) {
453 cnt++;
454 break;
455 }
456 }
457
458 if (!cnt)
459 pe->type |= EEH_PE_INVALID;
460 else
461 break;
462 }
463 }
464
465 pe = parent;
466 }
467
468 eeh_unlock();
469
470 return 0;
471}
472
473/**
474 * __eeh_pe_state_mark - Mark the state for the PE
475 * @data: EEH PE
476 * @flag: state
477 *
478 * The function is used to mark the indicated state for the given
479 * PE. The associated PCI devices will be put into the I/O
480 * frozen state as well.
481 */
482static void *__eeh_pe_state_mark(void *data, void *flag)
483{
484 struct eeh_pe *pe = (struct eeh_pe *)data;
485 int state = *((int *)flag);
486 struct eeh_dev *tmp;
487 struct pci_dev *pdev;
488
489 /*
490 * Mark the PE with the indicated state. Also,
491 * the associated PCI device will be put into
492 * I/O frozen state to avoid I/O accesses from
493 * the PCI device driver.
494 */
495 pe->state |= state;
496 eeh_pe_for_each_dev(pe, tmp) {
497 pdev = eeh_dev_to_pci_dev(tmp);
498 if (pdev)
499 pdev->error_state = pci_channel_io_frozen;
500 }
501
502 return NULL;
503}
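The pci_channel_io_frozen assignment above is what device drivers observe; a well-behaved driver checks the channel state (directly, or through the pci_channel_offline() helper from linux/pci.h) before touching its hardware. An illustrative driver-side guard, not code from this file:

	/* Bail out early once the error channel is frozen or dead */
	if (pci_channel_offline(pdev))
		return -EIO;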
504
505/**
506 * eeh_pe_state_mark - Mark specified state for PE and its associated device
507 * @pe: EEH PE
508 *
509 * EEH error affects the current PE and its child PEs. The function
510 * is used to mark appropriate state for the affected PEs and the
511 * associated devices.
512 */
513void eeh_pe_state_mark(struct eeh_pe *pe, int state)
514{
515 eeh_lock();
516 eeh_pe_traverse(pe, __eeh_pe_state_mark, &state);
517 eeh_unlock();
518}
519
520/**
521 * __eeh_pe_state_clear - Clear state for the PE
522 * @data: EEH PE
523 * @flag: state
524 *
525 * The function is used to clear the indicated state from the
526 * given PE. We also clear the check count of the PE
527 * at the same time.
528 */
529static void *__eeh_pe_state_clear(void *data, void *flag)
530{
531 struct eeh_pe *pe = (struct eeh_pe *)data;
532 int state = *((int *)flag);
533
534 pe->state &= ~state;
535 pe->check_count = 0;
536
537 return NULL;
538}
539
540/**
541 * eeh_pe_state_clear - Clear state for the PE and its children
542 * @pe: PE
543 * @state: state to be cleared
544 *
545 * When the PE and its children have been recovered from error,
546 * we need to clear the error state for them. The function is
547 * used for that purpose.
548 */
549void eeh_pe_state_clear(struct eeh_pe *pe, int state)
550{
551 eeh_lock();
552 eeh_pe_traverse(pe, __eeh_pe_state_clear, &state);
553 eeh_unlock();
554}
555
556/**
557 * eeh_restore_one_device_bars - Restore the Base Address Registers for one device
558 * @data: EEH device
559 * @flag: Unused
560 *
561 * Loads the PCI configuration space base address registers,
562 * the expansion ROM base address, the latency timer, etc.
563 * from the saved values in the device node.
564 */
565static void *eeh_restore_one_device_bars(void *data, void *flag)
566{
567 int i;
568 u32 cmd;
569 struct eeh_dev *edev = (struct eeh_dev *)data;
570 struct device_node *dn = eeh_dev_to_of_node(edev);
571
572 for (i = 4; i < 10; i++)
573 eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]);
574 /* 12 == Expansion ROM Address */
575 eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]);
576
577#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
578#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
579
580 eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1,
581 SAVED_BYTE(PCI_CACHE_LINE_SIZE));
582 eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1,
583 SAVED_BYTE(PCI_LATENCY_TIMER));
584
585 /* max latency, min grant, interrupt pin and line */
586 eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]);
587
588 /*
589 * Restore the PERR & SERR bits; some devices require it.
590 * Don't touch the other command bits.
591 */
592 eeh_ops->read_config(dn, PCI_COMMAND, 4, &cmd);
593 if (edev->config_space[1] & PCI_COMMAND_PARITY)
594 cmd |= PCI_COMMAND_PARITY;
595 else
596 cmd &= ~PCI_COMMAND_PARITY;
597 if (edev->config_space[1] & PCI_COMMAND_SERR)
598 cmd |= PCI_COMMAND_SERR;
599 else
600 cmd &= ~PCI_COMMAND_SERR;
601 eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd);
602
603 return NULL;
604}
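The BYTE_SWAP() macro above maps a config-space byte offset into the saved u32 array: 8*(OFF/4)+3-OFF equals 4*(OFF/4) + (3 - OFF%4), i.e. the same 32-bit word with its byte order reversed, which is how a big-endian CPU addresses little-endian config words saved as u32s. A standalone sketch of the arithmetic:

#include <stdio.h>

#define BYTE_SWAP(OFF)	(8*((OFF)/4)+3-(OFF))

int main(void)
{
	/* PCI_CACHE_LINE_SIZE is config byte 0x0c, PCI_LATENCY_TIMER 0x0d */
	printf("0x0c -> %d\n", BYTE_SWAP(0x0c));	/* prints 15 */
	printf("0x0d -> %d\n", BYTE_SWAP(0x0d));	/* prints 14 */
	return 0;
}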
605
606/**
607 * eeh_pe_restore_bars - Restore the PCI config space info
608 * @pe: EEH PE
609 *
610 * This routine performs a recursive walk to the children
611 * of this device as well.
612 */
613void eeh_pe_restore_bars(struct eeh_pe *pe)
614{
615 /*
616 * We needn't take the EEH lock here since eeh_pe_dev_traverse()
617 * takes it internally.
618 */
619 eeh_pe_dev_traverse(pe, eeh_restore_one_device_bars, NULL);
620}
621
622/**
623 * eeh_pe_bus_get - Retrieve PCI bus according to the given PE
624 * @pe: EEH PE
625 *
626 * Retrieve the PCI bus according to the given PE. Basically,
627 * there are 3 types of PEs: PHB/Bus/Device. For a PHB PE, the
628 * primary PCI bus will be retrieved. The parent bus will be
629 * returned for a BUS PE. However, there is no associated PCI
630 * bus for a DEVICE PE.
631 */
632struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
633{
634 struct pci_bus *bus = NULL;
635 struct eeh_dev *edev;
636 struct pci_dev *pdev;
637
638 eeh_lock();
639
640 if (pe->type & EEH_PE_PHB) {
641 bus = pe->phb->bus;
642 } else if (pe->type & EEH_PE_BUS) {
643 edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
644 pdev = eeh_dev_to_pci_dev(edev);
645 if (pdev)
646 bus = pdev->bus;
647 }
648
649 eeh_unlock();
650
651 return bus;
652}
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index c33360ec4f4..19506f93573 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -129,27 +129,117 @@ static int pseries_eeh_init(void)
129 eeh_error_buf_size = RTAS_ERROR_LOG_MAX; 129 eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
130 } 130 }
131 131
132 /* Set EEH probe mode */
133 eeh_probe_mode_set(EEH_PROBE_MODE_DEVTREE);
134
132 return 0; 135 return 0;
133} 136}
134 137
135/** 138/**
139 * pseries_eeh_of_probe - EEH probe on the given device
140 * @dn: OF node
141 * @flag: Unused
142 *
143 * When the EEH module is installed during system boot, all PCI
144 * devices are checked one by one to see if they support EEH. The
145 * function is introduced for that purpose.
146 */
147static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
148{
149 struct eeh_dev *edev;
150 struct eeh_pe pe;
151 const u32 *class_code, *vendor_id, *device_id;
152 const u32 *regs;
153 int enable = 0;
154 int ret;
155
156 /* Retrieve OF node and eeh device */
157 edev = of_node_to_eeh_dev(dn);
158 if (!of_device_is_available(dn))
159 return NULL;
160
161 /* Retrieve class/vendor/device IDs */
162 class_code = of_get_property(dn, "class-code", NULL);
163 vendor_id = of_get_property(dn, "vendor-id", NULL);
164 device_id = of_get_property(dn, "device-id", NULL);
165
166 /* Skip for bad OF node or PCI-ISA bridge */
167 if (!class_code || !vendor_id || !device_id)
168 return NULL;
169 if (dn->type && !strcmp(dn->type, "isa"))
170 return NULL;
171
172 /* Update class code and mode of eeh device */
173 edev->class_code = *class_code;
174 edev->mode = 0;
175
176 /* Retrieve the device address */
177 regs = of_get_property(dn, "reg", NULL);
178 if (!regs) {
179 pr_warning("%s: OF node property %s::reg not found\n",
180 __func__, dn->full_name);
181 return NULL;
182 }
183
184 /* Initialize the fake PE */
185 memset(&pe, 0, sizeof(struct eeh_pe));
186 pe.phb = edev->phb;
187 pe.config_addr = regs[0];
188
189 /* Enable EEH on the device */
190 ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
191 if (!ret) {
192 edev->config_addr = regs[0];
193 /* Retrieve PE address */
194 edev->pe_config_addr = eeh_ops->get_pe_addr(&pe);
195 pe.addr = edev->pe_config_addr;
196
197 /* Some older systems (Power4) allow the ibm,set-eeh-option
198 * call to succeed even on nodes where EEH is not supported.
199 * Verify support explicitly.
200 */
201 ret = eeh_ops->get_state(&pe, NULL);
202 if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT)
203 enable = 1;
204
205 if (enable) {
206 eeh_subsystem_enabled = 1;
207 eeh_add_to_parent_pe(edev);
208
209 pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n",
210 __func__, dn->full_name, pe.phb->global_number,
211 pe.addr, pe.config_addr);
212 } else if (dn->parent && of_node_to_eeh_dev(dn->parent) &&
213 (of_node_to_eeh_dev(dn->parent))->pe) {
214 /* This device doesn't support EEH, but it may have an
215 * EEH parent, in which case we mark it as supported.
216 */
217 edev->config_addr = of_node_to_eeh_dev(dn->parent)->config_addr;
218 edev->pe_config_addr = of_node_to_eeh_dev(dn->parent)->pe_config_addr;
219 eeh_add_to_parent_pe(edev);
220 }
221 }
222
223 /* Save memory bars */
224 eeh_save_bars(edev);
225
226 return NULL;
227}
228
229/**
136 * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable 230 * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable
137 * @dn: device node 231 * @pe: EEH PE
138 * @option: operation to be issued 232 * @option: operation to be issued
139 * 233 *
140 * The function is used to control the EEH functionality globally. 234 * The function is used to control the EEH functionality globally.
141 * Currently, the following options are supported according to PAPR: 235 * Currently, the following options are supported according to PAPR:
142 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA 236 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
143 */ 237 */
144static int pseries_eeh_set_option(struct device_node *dn, int option) 238static int pseries_eeh_set_option(struct eeh_pe *pe, int option)
145{ 239{
146 int ret = 0; 240 int ret = 0;
147 struct eeh_dev *edev;
148 const u32 *reg;
149 int config_addr; 241 int config_addr;
150 242
151 edev = of_node_to_eeh_dev(dn);
152
153 /* 243 /*
154 * When we're enabling or disabling EEH functionality on 244 * When we're enabling or disabling EEH functionality on
155 * the particular PE, the PE config address is possibly 245 * the particular PE, the PE config address is possibly
@@ -159,15 +249,11 @@ static int pseries_eeh_set_option(struct device_node *dn, int option)
159 switch (option) { 249 switch (option) {
160 case EEH_OPT_DISABLE: 250 case EEH_OPT_DISABLE:
161 case EEH_OPT_ENABLE: 251 case EEH_OPT_ENABLE:
162 reg = of_get_property(dn, "reg", NULL);
163 config_addr = reg[0];
164 break;
165
166 case EEH_OPT_THAW_MMIO: 252 case EEH_OPT_THAW_MMIO:
167 case EEH_OPT_THAW_DMA: 253 case EEH_OPT_THAW_DMA:
168 config_addr = edev->config_addr; 254 config_addr = pe->config_addr;
169 if (edev->pe_config_addr) 255 if (pe->addr)
170 config_addr = edev->pe_config_addr; 256 config_addr = pe->addr;
171 break; 257 break;
172 258
173 default: 259 default:
@@ -177,15 +263,15 @@ static int pseries_eeh_set_option(struct device_node *dn, int option)
177 } 263 }
178 264
179 ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL, 265 ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
180 config_addr, BUID_HI(edev->phb->buid), 266 config_addr, BUID_HI(pe->phb->buid),
181 BUID_LO(edev->phb->buid), option); 267 BUID_LO(pe->phb->buid), option);
182 268
183 return ret; 269 return ret;
184} 270}
185 271
186/** 272/**
187 * pseries_eeh_get_pe_addr - Retrieve PE address 273 * pseries_eeh_get_pe_addr - Retrieve PE address
188 * @dn: device node 274 * @pe: EEH PE
189 * 275 *
190 * Retrieve the associated PE address. Actually, there are 2 RTAS 276 * Retrieve the associated PE address. Actually, there are 2 RTAS
191 * function calls dedicated to the purpose. We need to implement 277 * function calls dedicated to the purpose. We need to implement
@@ -196,14 +282,11 @@ static int pseries_eeh_set_option(struct device_node *dn, int option)
196 * It's notable that zero'ed return value means invalid PE config 282 * It's notable that zero'ed return value means invalid PE config
197 * address. 283 * address.
198 */ 284 */
199static int pseries_eeh_get_pe_addr(struct device_node *dn) 285static int pseries_eeh_get_pe_addr(struct eeh_pe *pe)
200{ 286{
201 struct eeh_dev *edev;
202 int ret = 0; 287 int ret = 0;
203 int rets[3]; 288 int rets[3];
204 289
205 edev = of_node_to_eeh_dev(dn);
206
207 if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) { 290 if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
208 /* 291 /*
209 * First of all, we need to make sure there is one PE 292 * First of all, we need to make sure there is one PE
@@ -211,18 +294,18 @@ static int pseries_eeh_get_pe_addr(struct device_node *dn)
211 * meaningless. 294 * meaningless.
212 */ 295 */
213 ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets, 296 ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
214 edev->config_addr, BUID_HI(edev->phb->buid), 297 pe->config_addr, BUID_HI(pe->phb->buid),
215 BUID_LO(edev->phb->buid), 1); 298 BUID_LO(pe->phb->buid), 1);
216 if (ret || (rets[0] == 0)) 299 if (ret || (rets[0] == 0))
217 return 0; 300 return 0;
218 301
219 /* Retrieve the associated PE config address */ 302 /* Retrieve the associated PE config address */
220 ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets, 303 ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
221 edev->config_addr, BUID_HI(edev->phb->buid), 304 pe->config_addr, BUID_HI(pe->phb->buid),
222 BUID_LO(edev->phb->buid), 0); 305 BUID_LO(pe->phb->buid), 0);
223 if (ret) { 306 if (ret) {
224 pr_warning("%s: Failed to get PE address for %s\n", 307 pr_warning("%s: Failed to get address for PHB#%d-PE#%x\n",
225 __func__, dn->full_name); 308 __func__, pe->phb->global_number, pe->config_addr);
226 return 0; 309 return 0;
227 } 310 }
228 311
@@ -231,11 +314,11 @@ static int pseries_eeh_get_pe_addr(struct device_node *dn)
231 314
232 if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) { 315 if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
233 ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets, 316 ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets,
234 edev->config_addr, BUID_HI(edev->phb->buid), 317 pe->config_addr, BUID_HI(pe->phb->buid),
235 BUID_LO(edev->phb->buid), 0); 318 BUID_LO(pe->phb->buid), 0);
236 if (ret) { 319 if (ret) {
237 pr_warning("%s: Failed to get PE address for %s\n", 320 pr_warning("%s: Failed to get address for PHB#%d-PE#%x\n",
238 __func__, dn->full_name); 321 __func__, pe->phb->global_number, pe->config_addr);
239 return 0; 322 return 0;
240 } 323 }
241 324
@@ -247,7 +330,7 @@ static int pseries_eeh_get_pe_addr(struct device_node *dn)
247 330
248/** 331/**
249 * pseries_eeh_get_state - Retrieve PE state 332 * pseries_eeh_get_state - Retrieve PE state
250 * @dn: PE associated device node 333 * @pe: EEH PE
251 * @state: return value 334 * @state: return value
252 * 335 *
253 * Retrieve the state of the specified PE. On RTAS compliant 336 * Retrieve the state of the specified PE. On RTAS compliant
@@ -258,30 +341,28 @@ static int pseries_eeh_get_pe_addr(struct device_node *dn)
258 * RTAS calls for the purpose, we need to try the new one and back 341 * RTAS calls for the purpose, we need to try the new one and back
259 * to the old one if the new one couldn't work properly. 342 * to the old one if the new one couldn't work properly.
260 */ 343 */
261static int pseries_eeh_get_state(struct device_node *dn, int *state) 344static int pseries_eeh_get_state(struct eeh_pe *pe, int *state)
262{ 345{
263 struct eeh_dev *edev;
264 int config_addr; 346 int config_addr;
265 int ret; 347 int ret;
266 int rets[4]; 348 int rets[4];
267 int result; 349 int result;
268 350
269 /* Figure out PE config address if possible */ 351 /* Figure out PE config address if possible */
270 edev = of_node_to_eeh_dev(dn); 352 config_addr = pe->config_addr;
271 config_addr = edev->config_addr; 353 if (pe->addr)
272 if (edev->pe_config_addr) 354 config_addr = pe->addr;
273 config_addr = edev->pe_config_addr;
274 355
275 if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) { 356 if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
276 ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets, 357 ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets,
277 config_addr, BUID_HI(edev->phb->buid), 358 config_addr, BUID_HI(pe->phb->buid),
278 BUID_LO(edev->phb->buid)); 359 BUID_LO(pe->phb->buid));
279 } else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) { 360 } else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) {
280 /* Fake PE unavailable info */ 361 /* Fake PE unavailable info */
281 rets[2] = 0; 362 rets[2] = 0;
282 ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets, 363 ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
283 config_addr, BUID_HI(edev->phb->buid), 364 config_addr, BUID_HI(pe->phb->buid),
284 BUID_LO(edev->phb->buid)); 365 BUID_LO(pe->phb->buid));
285 } else { 366 } else {
286 return EEH_STATE_NOT_SUPPORT; 367 return EEH_STATE_NOT_SUPPORT;
287 } 368 }
@@ -333,34 +414,32 @@ static int pseries_eeh_get_state(struct device_node *dn, int *state)
333 414
334/** 415/**
335 * pseries_eeh_reset - Reset the specified PE 416 * pseries_eeh_reset - Reset the specified PE
336 * @dn: PE associated device node 417 * @pe: EEH PE
337 * @option: reset option 418 * @option: reset option
338 * 419 *
339 * Reset the specified PE 420 * Reset the specified PE
340 */ 421 */
341static int pseries_eeh_reset(struct device_node *dn, int option) 422static int pseries_eeh_reset(struct eeh_pe *pe, int option)
342{ 423{
343 struct eeh_dev *edev;
344 int config_addr; 424 int config_addr;
345 int ret; 425 int ret;
346 426
347 /* Figure out PE address */ 427 /* Figure out PE address */
348 edev = of_node_to_eeh_dev(dn); 428 config_addr = pe->config_addr;
349 config_addr = edev->config_addr; 429 if (pe->addr)
350 if (edev->pe_config_addr) 430 config_addr = pe->addr;
351 config_addr = edev->pe_config_addr;
352 431
353 /* Reset PE through RTAS call */ 432 /* Reset PE through RTAS call */
354 ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL, 433 ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
355 config_addr, BUID_HI(edev->phb->buid), 434 config_addr, BUID_HI(pe->phb->buid),
356 BUID_LO(edev->phb->buid), option); 435 BUID_LO(pe->phb->buid), option);
357 436
358 /* If fundamental-reset not supported, try hot-reset */ 437 /* If fundamental-reset not supported, try hot-reset */
359 if (option == EEH_RESET_FUNDAMENTAL && 438 if (option == EEH_RESET_FUNDAMENTAL &&
360 ret == -8) { 439 ret == -8) {
361 ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL, 440 ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
362 config_addr, BUID_HI(edev->phb->buid), 441 config_addr, BUID_HI(pe->phb->buid),
363 BUID_LO(edev->phb->buid), EEH_RESET_HOT); 442 BUID_LO(pe->phb->buid), EEH_RESET_HOT);
364 } 443 }
365 444
366 return ret; 445 return ret;
@@ -368,13 +447,13 @@ static int pseries_eeh_reset(struct device_node *dn, int option)
368 447
369/** 448/**
370 * pseries_eeh_wait_state - Wait for PE state 449 * pseries_eeh_wait_state - Wait for PE state
371 * @dn: PE associated device node 450 * @pe: EEH PE
372 * @max_wait: maximal period in microsecond 451 * @max_wait: maximal period in microsecond
373 * 452 *
374 * Wait for the state of associated PE. It might take some time 453 * Wait for the state of associated PE. It might take some time
375 * to retrieve the PE's state. 454 * to retrieve the PE's state.
376 */ 455 */
377static int pseries_eeh_wait_state(struct device_node *dn, int max_wait) 456static int pseries_eeh_wait_state(struct eeh_pe *pe, int max_wait)
378{ 457{
379 int ret; 458 int ret;
380 int mwait; 459 int mwait;
@@ -391,7 +470,7 @@ static int pseries_eeh_wait_state(struct device_node *dn, int max_wait)
391#define EEH_STATE_MAX_WAIT_TIME (300 * 1000) 470#define EEH_STATE_MAX_WAIT_TIME (300 * 1000)
392 471
393 while (1) { 472 while (1) {
394 ret = pseries_eeh_get_state(dn, &mwait); 473 ret = pseries_eeh_get_state(pe, &mwait);
395 474
396 /* 475 /*
397 * If the PE's state is temporarily unavailable, 476 * If the PE's state is temporarily unavailable,
@@ -426,7 +505,7 @@ static int pseries_eeh_wait_state(struct device_node *dn, int max_wait)
426 505
427/** 506/**
428 * pseries_eeh_get_log - Retrieve error log 507 * pseries_eeh_get_log - Retrieve error log
429 * @dn: device node 508 * @pe: EEH PE
430 * @severity: temporary or permanent error log 509 * @severity: temporary or permanent error log
431 * @drv_log: driver log to be combined with retrieved error log 510 * @drv_log: driver log to be combined with retrieved error log
432 * @len: length of driver log 511 * @len: length of driver log
@@ -435,24 +514,22 @@ static int pseries_eeh_wait_state(struct device_node *dn, int max_wait)
435 * Actually, the error will be retrieved through the dedicated 514 * Actually, the error will be retrieved through the dedicated
436 * RTAS call. 515 * RTAS call.
437 */ 516 */
438static int pseries_eeh_get_log(struct device_node *dn, int severity, char *drv_log, unsigned long len) 517static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len)
439{ 518{
440 struct eeh_dev *edev;
441 int config_addr; 519 int config_addr;
442 unsigned long flags; 520 unsigned long flags;
443 int ret; 521 int ret;
444 522
445 edev = of_node_to_eeh_dev(dn);
446 spin_lock_irqsave(&slot_errbuf_lock, flags); 523 spin_lock_irqsave(&slot_errbuf_lock, flags);
447 memset(slot_errbuf, 0, eeh_error_buf_size); 524 memset(slot_errbuf, 0, eeh_error_buf_size);
448 525
449 /* Figure out the PE address */ 526 /* Figure out the PE address */
450 config_addr = edev->config_addr; 527 config_addr = pe->config_addr;
451 if (edev->pe_config_addr) 528 if (pe->addr)
452 config_addr = edev->pe_config_addr; 529 config_addr = pe->addr;
453 530
454 ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr, 531 ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr,
455 BUID_HI(edev->phb->buid), BUID_LO(edev->phb->buid), 532 BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid),
456 virt_to_phys(drv_log), len, 533 virt_to_phys(drv_log), len,
457 virt_to_phys(slot_errbuf), eeh_error_buf_size, 534 virt_to_phys(slot_errbuf), eeh_error_buf_size,
458 severity); 535 severity);
@@ -465,40 +542,38 @@ static int pseries_eeh_get_log(struct device_node *dn, int severity, char *drv_l
465 542
466/** 543/**
467 * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE 544 * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE
468 * @dn: PE associated device node 545 * @pe: EEH PE
469 * 546 *
470 * The function will be called to reconfigure the bridges included 547 * The function will be called to reconfigure the bridges included
471 * in the specified PE so that the malfunctioning PE can be recovered 548 * in the specified PE so that the malfunctioning PE can be recovered
472 * again. 549 * again.
473 */ 550 */
474static int pseries_eeh_configure_bridge(struct device_node *dn) 551static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
475{ 552{
476 struct eeh_dev *edev;
477 int config_addr; 553 int config_addr;
478 int ret; 554 int ret;
479 555
480 /* Figure out the PE address */ 556 /* Figure out the PE address */
481 edev = of_node_to_eeh_dev(dn); 557 config_addr = pe->config_addr;
482 config_addr = edev->config_addr; 558 if (pe->addr)
483 if (edev->pe_config_addr) 559 config_addr = pe->addr;
484 config_addr = edev->pe_config_addr;
485 560
486 /* Use new configure-pe function, if supported */ 561 /* Use new configure-pe function, if supported */
487 if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) { 562 if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
488 ret = rtas_call(ibm_configure_pe, 3, 1, NULL, 563 ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
489 config_addr, BUID_HI(edev->phb->buid), 564 config_addr, BUID_HI(pe->phb->buid),
490 BUID_LO(edev->phb->buid)); 565 BUID_LO(pe->phb->buid));
491 } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) { 566 } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
492 ret = rtas_call(ibm_configure_bridge, 3, 1, NULL, 567 ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
493 config_addr, BUID_HI(edev->phb->buid), 568 config_addr, BUID_HI(pe->phb->buid),
494 BUID_LO(edev->phb->buid)); 569 BUID_LO(pe->phb->buid));
495 } else { 570 } else {
496 return -EFAULT; 571 return -EFAULT;
497 } 572 }
498 573
499 if (ret) 574 if (ret)
500 pr_warning("%s: Unable to configure bridge %d for %s\n", 575 pr_warning("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
501 __func__, ret, dn->full_name); 576 __func__, pe->phb->global_number, pe->addr, ret);
502 577
503 return ret; 578 return ret;
504} 579}
@@ -542,6 +617,8 @@ static int pseries_eeh_write_config(struct device_node *dn, int where, int size,
542static struct eeh_ops pseries_eeh_ops = { 617static struct eeh_ops pseries_eeh_ops = {
543 .name = "pseries", 618 .name = "pseries",
544 .init = pseries_eeh_init, 619 .init = pseries_eeh_init,
620 .of_probe = pseries_eeh_of_probe,
621 .dev_probe = NULL,
545 .set_option = pseries_eeh_set_option, 622 .set_option = pseries_eeh_set_option,
546 .get_pe_addr = pseries_eeh_get_pe_addr, 623 .get_pe_addr = pseries_eeh_get_pe_addr,
547 .get_state = pseries_eeh_get_state, 624 .get_state = pseries_eeh_get_state,
@@ -559,7 +636,21 @@ static struct eeh_ops pseries_eeh_ops = {
559 * EEH initialization on pseries platform. This function should be 636 * EEH initialization on pseries platform. This function should be
560 * called before any EEH related functions. 637 * called before any EEH related functions.
561 */ 638 */
562int __init eeh_pseries_init(void) 639static int __init eeh_pseries_init(void)
563{ 640{
564 return eeh_ops_register(&pseries_eeh_ops); 641 int ret = -EINVAL;
642
643 if (!machine_is(pseries))
644 return ret;
645
646 ret = eeh_ops_register(&pseries_eeh_ops);
647 if (!ret)
648 pr_info("EEH: pSeries platform initialized\n");
649 else
650 pr_info("EEH: pSeries platform initialization failure (%d)\n",
651 ret);
652
653 return ret;
565} 654}
655
656early_initcall(eeh_pseries_init);
diff --git a/arch/powerpc/platforms/pseries/eeh_sysfs.c b/arch/powerpc/platforms/pseries/eeh_sysfs.c
index 243b3510d70..d37708360f2 100644
--- a/arch/powerpc/platforms/pseries/eeh_sysfs.c
+++ b/arch/powerpc/platforms/pseries/eeh_sysfs.c
@@ -53,9 +53,6 @@ static DEVICE_ATTR(_name, S_IRUGO, eeh_show_##_name, NULL);
53EEH_SHOW_ATTR(eeh_mode, mode, "0x%x"); 53EEH_SHOW_ATTR(eeh_mode, mode, "0x%x");
54EEH_SHOW_ATTR(eeh_config_addr, config_addr, "0x%x"); 54EEH_SHOW_ATTR(eeh_config_addr, config_addr, "0x%x");
55EEH_SHOW_ATTR(eeh_pe_config_addr, pe_config_addr, "0x%x"); 55EEH_SHOW_ATTR(eeh_pe_config_addr, pe_config_addr, "0x%x");
56EEH_SHOW_ATTR(eeh_check_count, check_count, "%d" );
57EEH_SHOW_ATTR(eeh_freeze_count, freeze_count, "%d" );
58EEH_SHOW_ATTR(eeh_false_positives, false_positives, "%d" );
59 56
60void eeh_sysfs_add_device(struct pci_dev *pdev) 57void eeh_sysfs_add_device(struct pci_dev *pdev)
61{ 58{
@@ -64,9 +61,6 @@ void eeh_sysfs_add_device(struct pci_dev *pdev)
64 rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode); 61 rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode);
65 rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr); 62 rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr);
66 rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); 63 rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
67 rc += device_create_file(&pdev->dev, &dev_attr_eeh_check_count);
68 rc += device_create_file(&pdev->dev, &dev_attr_eeh_false_positives);
69 rc += device_create_file(&pdev->dev, &dev_attr_eeh_freeze_count);
70 64
71 if (rc) 65 if (rc)
72 printk(KERN_WARNING "EEH: Unable to create sysfs entries\n"); 66 printk(KERN_WARNING "EEH: Unable to create sysfs entries\n");
@@ -77,8 +71,5 @@ void eeh_sysfs_remove_device(struct pci_dev *pdev)
77 device_remove_file(&pdev->dev, &dev_attr_eeh_mode); 71 device_remove_file(&pdev->dev, &dev_attr_eeh_mode);
78 device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr); 72 device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr);
79 device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); 73 device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
80 device_remove_file(&pdev->dev, &dev_attr_eeh_check_count);
81 device_remove_file(&pdev->dev, &dev_attr_eeh_false_positives);
82 device_remove_file(&pdev->dev, &dev_attr_eeh_freeze_count);
83} 74}
84 75
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 11d8e0544ac..ecdb0a6b317 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -78,6 +78,8 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_siz
78 unsigned long start, start_pfn; 78 unsigned long start, start_pfn;
79 struct zone *zone; 79 struct zone *zone;
80 int ret; 80 int ret;
81 unsigned long section;
82 unsigned long sections_to_remove;
81 83
82 start_pfn = base >> PAGE_SHIFT; 84 start_pfn = base >> PAGE_SHIFT;
83 85
@@ -97,9 +99,13 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_siz
97 * to sysfs "state" file and we can't remove sysfs entries 99 * to sysfs "state" file and we can't remove sysfs entries
98 * while writing to it. So we have to defer it to here. 100 * while writing to it. So we have to defer it to here.
99 */ 101 */
100 ret = __remove_pages(zone, start_pfn, memblock_size >> PAGE_SHIFT); 102 sections_to_remove = (memblock_size >> PAGE_SHIFT) / PAGES_PER_SECTION;
101 if (ret) 103 for (section = 0; section < sections_to_remove; section++) {
102 return ret; 104 unsigned long pfn = start_pfn + section * PAGES_PER_SECTION;
105 ret = __remove_pages(zone, pfn, PAGES_PER_SECTION);
106 if (ret)
107 return ret;
108 }
103 109
104 /* 110 /*
105 * Update memory regions for memory remove 111 * Update memory regions for memory remove
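The loop added in this hunk tears the memblock down one sparse-memory section at a time rather than in a single __remove_pages() call. With illustrative, assumed geometry (64K pages, 256 pages per section), a 256MB memblock works out as follows:

/* Assumed geometry for illustration: PAGE_SHIFT=16, PAGES_PER_SECTION=256 */
unsigned long memblock_size      = 256UL << 20;			/* 256 MB      */
unsigned long pages              = memblock_size >> 16;		/* 4096 pages  */
unsigned long sections_to_remove = pages / 256;			/* 16 sections */
/* __remove_pages() then runs 16 times, once per 16 MB section */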
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index bca220f2873..6153eea27ce 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -28,6 +28,7 @@
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/mm.h> 30#include <linux/mm.h>
31#include <linux/memblock.h>
31#include <linux/spinlock.h> 32#include <linux/spinlock.h>
32#include <linux/sched.h> /* for show_stack */ 33#include <linux/sched.h> /* for show_stack */
33#include <linux/string.h> 34#include <linux/string.h>
@@ -41,7 +42,6 @@
41#include <asm/iommu.h> 42#include <asm/iommu.h>
42#include <asm/pci-bridge.h> 43#include <asm/pci-bridge.h>
43#include <asm/machdep.h> 44#include <asm/machdep.h>
44#include <asm/abs_addr.h>
45#include <asm/pSeries_reconfig.h> 45#include <asm/pSeries_reconfig.h>
46#include <asm/firmware.h> 46#include <asm/firmware.h>
47#include <asm/tce.h> 47#include <asm/tce.h>
@@ -99,7 +99,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
99 99
100 while (npages--) { 100 while (npages--) {
101 /* can't move this out since we might cross MEMBLOCK boundary */ 101 /* can't move this out since we might cross MEMBLOCK boundary */
102 rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT; 102 rpn = __pa(uaddr) >> TCE_SHIFT;
103 *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT; 103 *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
104 104
105 uaddr += TCE_PAGE_SIZE; 105 uaddr += TCE_PAGE_SIZE;
@@ -148,7 +148,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
148 int ret = 0; 148 int ret = 0;
149 long tcenum_start = tcenum, npages_start = npages; 149 long tcenum_start = tcenum, npages_start = npages;
150 150
151 rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT; 151 rpn = __pa(uaddr) >> TCE_SHIFT;
152 proto_tce = TCE_PCI_READ; 152 proto_tce = TCE_PCI_READ;
153 if (direction != DMA_TO_DEVICE) 153 if (direction != DMA_TO_DEVICE)
154 proto_tce |= TCE_PCI_WRITE; 154 proto_tce |= TCE_PCI_WRITE;
@@ -217,7 +217,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
217 __get_cpu_var(tce_page) = tcep; 217 __get_cpu_var(tce_page) = tcep;
218 } 218 }
219 219
220 rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT; 220 rpn = __pa(uaddr) >> TCE_SHIFT;
221 proto_tce = TCE_PCI_READ; 221 proto_tce = TCE_PCI_READ;
222 if (direction != DMA_TO_DEVICE) 222 if (direction != DMA_TO_DEVICE)
223 proto_tce |= TCE_PCI_WRITE; 223 proto_tce |= TCE_PCI_WRITE;
@@ -237,7 +237,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
237 237
238 rc = plpar_tce_put_indirect((u64)tbl->it_index, 238 rc = plpar_tce_put_indirect((u64)tbl->it_index,
239 (u64)tcenum << 12, 239 (u64)tcenum << 12,
240 (u64)virt_to_abs(tcep), 240 (u64)__pa(tcep),
241 limit); 241 limit);
242 242
243 npages -= limit; 243 npages -= limit;
@@ -441,7 +441,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
441 441
442 rc = plpar_tce_put_indirect(liobn, 442 rc = plpar_tce_put_indirect(liobn,
443 dma_offset, 443 dma_offset,
444 (u64)virt_to_abs(tcep), 444 (u64)__pa(tcep),
445 limit); 445 limit);
446 446
447 num_tce -= limit; 447 num_tce -= limit;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 5f3ef876ded..0da39fed355 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -31,7 +31,6 @@
31#include <asm/page.h> 31#include <asm/page.h>
32#include <asm/pgtable.h> 32#include <asm/pgtable.h>
33#include <asm/machdep.h> 33#include <asm/machdep.h>
34#include <asm/abs_addr.h>
35#include <asm/mmu_context.h> 34#include <asm/mmu_context.h>
36#include <asm/iommu.h> 35#include <asm/iommu.h>
37#include <asm/tlbflush.h> 36#include <asm/tlbflush.h>
@@ -108,9 +107,9 @@ void vpa_init(int cpu)
108} 107}
109 108
110static long pSeries_lpar_hpte_insert(unsigned long hpte_group, 109static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
111 unsigned long va, unsigned long pa, 110 unsigned long vpn, unsigned long pa,
112 unsigned long rflags, unsigned long vflags, 111 unsigned long rflags, unsigned long vflags,
113 int psize, int ssize) 112 int psize, int ssize)
114{ 113{
115 unsigned long lpar_rc; 114 unsigned long lpar_rc;
116 unsigned long flags; 115 unsigned long flags;
@@ -118,11 +117,11 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
118 unsigned long hpte_v, hpte_r; 117 unsigned long hpte_v, hpte_r;
119 118
120 if (!(vflags & HPTE_V_BOLTED)) 119 if (!(vflags & HPTE_V_BOLTED))
121 pr_devel("hpte_insert(group=%lx, va=%016lx, pa=%016lx, " 120 pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
122 "rflags=%lx, vflags=%lx, psize=%d)\n", 121 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
123 hpte_group, va, pa, rflags, vflags, psize); 122 hpte_group, vpn, pa, rflags, vflags, psize);
124 123
125 hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID; 124 hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
126 hpte_r = hpte_encode_r(pa, psize) | rflags; 125 hpte_r = hpte_encode_r(pa, psize) | rflags;
127 126
128 if (!(vflags & HPTE_V_BOLTED)) 127 if (!(vflags & HPTE_V_BOLTED))
@@ -227,22 +226,6 @@ static void pSeries_lpar_hptab_clear(void)
227} 226}
228 227
229/* 228/*
230 * This computes the AVPN and B fields of the first dword of a HPTE,
231 * for use when we want to match an existing PTE. The bottom 7 bits
232 * of the returned value are zero.
233 */
234static inline unsigned long hpte_encode_avpn(unsigned long va, int psize,
235 int ssize)
236{
237 unsigned long v;
238
239 v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
240 v <<= HPTE_V_AVPN_SHIFT;
241 v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
242 return v;
243}
244
245/*
246 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and 229 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
247 * the low 3 bits of flags happen to line up. So no transform is needed. 230 * the low 3 bits of flags happen to line up. So no transform is needed.
248 * We can probably optimize here and assume the high bits of newpp are 231 * We can probably optimize here and assume the high bits of newpp are
@@ -250,14 +233,14 @@ static inline unsigned long hpte_encode_avpn(unsigned long va, int psize,
250 */ 233 */
251static long pSeries_lpar_hpte_updatepp(unsigned long slot, 234static long pSeries_lpar_hpte_updatepp(unsigned long slot,
252 unsigned long newpp, 235 unsigned long newpp,
253 unsigned long va, 236 unsigned long vpn,
254 int psize, int ssize, int local) 237 int psize, int ssize, int local)
255{ 238{
256 unsigned long lpar_rc; 239 unsigned long lpar_rc;
257 unsigned long flags = (newpp & 7) | H_AVPN; 240 unsigned long flags = (newpp & 7) | H_AVPN;
258 unsigned long want_v; 241 unsigned long want_v;
259 242
260 want_v = hpte_encode_avpn(va, psize, ssize); 243 want_v = hpte_encode_avpn(vpn, psize, ssize);
261 244
262 pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...", 245 pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
263 want_v, slot, flags, psize); 246 want_v, slot, flags, psize);
@@ -295,15 +278,15 @@ static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
295 return dword0; 278 return dword0;
296} 279}
297 280
298static long pSeries_lpar_hpte_find(unsigned long va, int psize, int ssize) 281static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
299{ 282{
300 unsigned long hash; 283 unsigned long hash;
301 unsigned long i; 284 unsigned long i;
302 long slot; 285 long slot;
303 unsigned long want_v, hpte_v; 286 unsigned long want_v, hpte_v;
304 287
305 hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize); 288 hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
306 want_v = hpte_encode_avpn(va, psize, ssize); 289 want_v = hpte_encode_avpn(vpn, psize, ssize);
307 290
308 /* Bolted entries are always in the primary group */ 291 /* Bolted entries are always in the primary group */
309 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 292 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -323,12 +306,13 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
323 unsigned long ea, 306 unsigned long ea,
324 int psize, int ssize) 307 int psize, int ssize)
325{ 308{
326 unsigned long lpar_rc, slot, vsid, va, flags; 309 unsigned long vpn;
310 unsigned long lpar_rc, slot, vsid, flags;
327 311
328 vsid = get_kernel_vsid(ea, ssize); 312 vsid = get_kernel_vsid(ea, ssize);
329 va = hpt_va(ea, vsid, ssize); 313 vpn = hpt_vpn(ea, vsid, ssize);
330 314
331 slot = pSeries_lpar_hpte_find(va, psize, ssize); 315 slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
332 BUG_ON(slot == -1); 316 BUG_ON(slot == -1);
333 317
334 flags = newpp & 7; 318 flags = newpp & 7;
@@ -337,17 +321,17 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
337 BUG_ON(lpar_rc != H_SUCCESS); 321 BUG_ON(lpar_rc != H_SUCCESS);
338} 322}
339 323
340static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va, 324static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
341 int psize, int ssize, int local) 325 int psize, int ssize, int local)
342{ 326{
343 unsigned long want_v; 327 unsigned long want_v;
344 unsigned long lpar_rc; 328 unsigned long lpar_rc;
345 unsigned long dummy1, dummy2; 329 unsigned long dummy1, dummy2;
346 330
347 pr_devel(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", 331 pr_devel(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
348 slot, va, psize, local); 332 slot, vpn, psize, local);
349 333
350 want_v = hpte_encode_avpn(va, psize, ssize); 334 want_v = hpte_encode_avpn(vpn, psize, ssize);
351 lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2); 335 lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
352 if (lpar_rc == H_NOT_FOUND) 336 if (lpar_rc == H_NOT_FOUND)
353 return; 337 return;
@@ -358,15 +342,16 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
358static void pSeries_lpar_hpte_removebolted(unsigned long ea, 342static void pSeries_lpar_hpte_removebolted(unsigned long ea,
359 int psize, int ssize) 343 int psize, int ssize)
360{ 344{
361 unsigned long slot, vsid, va; 345 unsigned long vpn;
346 unsigned long slot, vsid;
362 347
363 vsid = get_kernel_vsid(ea, ssize); 348 vsid = get_kernel_vsid(ea, ssize);
364 va = hpt_va(ea, vsid, ssize); 349 vpn = hpt_vpn(ea, vsid, ssize);
365 350
366 slot = pSeries_lpar_hpte_find(va, psize, ssize); 351 slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
367 BUG_ON(slot == -1); 352 BUG_ON(slot == -1);
368 353
369 pSeries_lpar_hpte_invalidate(slot, va, psize, ssize, 0); 354 pSeries_lpar_hpte_invalidate(slot, vpn, psize, ssize, 0);
370} 355}
371 356
372/* Flag bits for H_BULK_REMOVE */ 357/* Flag bits for H_BULK_REMOVE */
@@ -382,12 +367,12 @@ static void pSeries_lpar_hpte_removebolted(unsigned long ea,
382 */ 367 */
383static void pSeries_lpar_flush_hash_range(unsigned long number, int local) 368static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
384{ 369{
370 unsigned long vpn;
385 unsigned long i, pix, rc; 371 unsigned long i, pix, rc;
386 unsigned long flags = 0; 372 unsigned long flags = 0;
387 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 373 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
388 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); 374 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
389 unsigned long param[9]; 375 unsigned long param[9];
390 unsigned long va;
391 unsigned long hash, index, shift, hidx, slot; 376 unsigned long hash, index, shift, hidx, slot;
392 real_pte_t pte; 377 real_pte_t pte;
393 int psize, ssize; 378 int psize, ssize;
@@ -399,21 +384,21 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
399 ssize = batch->ssize; 384 ssize = batch->ssize;
400 pix = 0; 385 pix = 0;
401 for (i = 0; i < number; i++) { 386 for (i = 0; i < number; i++) {
402 va = batch->vaddr[i]; 387 vpn = batch->vpn[i];
403 pte = batch->pte[i]; 388 pte = batch->pte[i];
404 pte_iterate_hashed_subpages(pte, psize, va, index, shift) { 389 pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
405 hash = hpt_hash(va, shift, ssize); 390 hash = hpt_hash(vpn, shift, ssize);
406 hidx = __rpte_to_hidx(pte, index); 391 hidx = __rpte_to_hidx(pte, index);
407 if (hidx & _PTEIDX_SECONDARY) 392 if (hidx & _PTEIDX_SECONDARY)
408 hash = ~hash; 393 hash = ~hash;
409 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 394 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
410 slot += hidx & _PTEIDX_GROUP_IX; 395 slot += hidx & _PTEIDX_GROUP_IX;
411 if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { 396 if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
412 pSeries_lpar_hpte_invalidate(slot, va, psize, 397 pSeries_lpar_hpte_invalidate(slot, vpn, psize,
413 ssize, local); 398 ssize, local);
414 } else { 399 } else {
415 param[pix] = HBR_REQUEST | HBR_AVPN | slot; 400 param[pix] = HBR_REQUEST | HBR_AVPN | slot;
416 param[pix+1] = hpte_encode_avpn(va, psize, 401 param[pix+1] = hpte_encode_avpn(vpn, psize,
417 ssize); 402 ssize);
418 pix += 2; 403 pix += 2;
419 if (pix == 8) { 404 if (pix == 8) {
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 109fdb75578..d19f4977c83 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -210,6 +210,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
210static struct device_node *find_pe_dn(struct pci_dev *dev, int *total) 210static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
211{ 211{
212 struct device_node *dn; 212 struct device_node *dn;
213 struct eeh_dev *edev;
213 214
214 /* Found our PE and assume 8 at that point. */ 215 /* Found our PE and assume 8 at that point. */
215 216
@@ -217,7 +218,10 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
217 if (!dn) 218 if (!dn)
218 return NULL; 219 return NULL;
219 220
220 dn = eeh_find_device_pe(dn); 221 /* Get the top level device in the PE */
222 edev = of_node_to_eeh_dev(dn);
223 edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list);
224 dn = eeh_dev_to_of_node(edev);
221 if (!dn) 225 if (!dn)
222 return NULL; 226 return NULL;
223 227
@@ -387,12 +391,13 @@ static int check_msix_entries(struct pci_dev *pdev)
387 return 0; 391 return 0;
388} 392}
389 393
390static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) 394static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
391{ 395{
392 struct pci_dn *pdn; 396 struct pci_dn *pdn;
393 int hwirq, virq, i, rc; 397 int hwirq, virq, i, rc;
394 struct msi_desc *entry; 398 struct msi_desc *entry;
395 struct msi_msg msg; 399 struct msi_msg msg;
400 int nvec = nvec_in;
396 401
397 pdn = get_pdn(pdev); 402 pdn = get_pdn(pdev);
398 if (!pdn) 403 if (!pdn)
@@ -402,10 +407,23 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
402 return -EINVAL; 407 return -EINVAL;
403 408
404 /* 409 /*
410 * Firmware currently refuses any non-power-of-two allocation, so
411 * we round up if the quota allows it (illustrated after this hunk).
412 */
413 if (type == PCI_CAP_ID_MSIX) {
414 int m = roundup_pow_of_two(nvec);
415 int quota = msi_quota_for_device(pdev, m);
416
417 if (quota >= m)
418 nvec = m;
419 }
420
421 /*
405 * Try the new more explicit firmware interface, if that fails fall 422 * Try the new more explicit firmware interface, if that fails fall
406 * back to the old interface. The old interface is known to never 423 * back to the old interface. The old interface is known to never
407 * return MSI-Xs. 424 * return MSI-Xs.
408 */ 425 */
426again:
409 if (type == PCI_CAP_ID_MSI) { 427 if (type == PCI_CAP_ID_MSI) {
410 rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec); 428 rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec);
411 429
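roundup_pow_of_two() (from linux/log2.h) returns the smallest power of two at or above its argument, so the rounding in the hunk above only takes effect when the device's quota covers the rounded request; if the enlarged allocation then fails, the code jumps back and retries with the caller's original count. A small illustration with made-up numbers:

/* Illustration only: a driver asks for 5 MSI-X vectors */
int nvec_in = 5;
int m       = roundup_pow_of_two(nvec_in);	/* m == 8 */
int quota   = 6;				/* hypothetical per-device quota */
int nvec    = (quota >= m) ? m : nvec_in;	/* 6 < 8, so nvec stays 5 */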
@@ -417,6 +435,10 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
417 rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec); 435 rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec);
418 436
419 if (rc != nvec) { 437 if (rc != nvec) {
438 if (nvec != nvec_in) {
439 nvec = nvec_in;
440 goto again;
441 }
420 pr_debug("rtas_msi: rtas_change_msi() failed\n"); 442 pr_debug("rtas_msi: rtas_change_msi() failed\n");
421 return rc; 443 return rc;
422 } 444 }
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 2c6ded29f73..56b864d777e 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -73,7 +73,7 @@ void __init pSeries_final_fixup(void)
73{ 73{
74 pSeries_request_regions(); 74 pSeries_request_regions();
75 75
76 pci_addr_cache_build(); 76 eeh_addr_cache_build();
77} 77}
78 78
79/* 79/*
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 3ccebc83dc0..261a577a3dd 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -65,27 +65,43 @@ pcibios_find_pci_bus(struct device_node *dn)
65EXPORT_SYMBOL_GPL(pcibios_find_pci_bus); 65EXPORT_SYMBOL_GPL(pcibios_find_pci_bus);
66 66
67/** 67/**
68 * pcibios_remove_pci_devices - remove all devices under this bus 68 * __pcibios_remove_pci_devices - remove all devices under this bus
69 * @bus: the indicated PCI bus
70 * @purge_pe: destroy the PE on removal of PCI devices
69 * 71 *
70 * Remove all of the PCI devices under this bus both from the 72 * Remove all of the PCI devices under this bus both from the
71 * linux pci device tree, and from the powerpc EEH address cache. 73 * linux pci device tree, and from the powerpc EEH address cache.
74 * By default, the corresponding PE will be destroyed during the
75 * normal PCI hotplug path. For PCI hotplug during EEH recovery,
76 * the corresponding PE won't be destroyed and deallocated.
72 */ 77 */
73void pcibios_remove_pci_devices(struct pci_bus *bus) 78void __pcibios_remove_pci_devices(struct pci_bus *bus, int purge_pe)
74{ 79{
75 struct pci_dev *dev, *tmp; 80 struct pci_dev *dev, *tmp;
76 struct pci_bus *child_bus; 81 struct pci_bus *child_bus;
77 82
78 /* First go down child busses */ 83 /* First go down child busses */
79 list_for_each_entry(child_bus, &bus->children, node) 84 list_for_each_entry(child_bus, &bus->children, node)
80 pcibios_remove_pci_devices(child_bus); 85 __pcibios_remove_pci_devices(child_bus, purge_pe);
81 86
82 pr_debug("PCI: Removing devices on bus %04x:%02x\n", 87 pr_debug("PCI: Removing devices on bus %04x:%02x\n",
83 pci_domain_nr(bus), bus->number); 88 pci_domain_nr(bus), bus->number);
84 list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) { 89 list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
85 pr_debug(" * Removing %s...\n", pci_name(dev)); 90 pr_debug(" * Removing %s...\n", pci_name(dev));
86 eeh_remove_bus_device(dev); 91 eeh_remove_bus_device(dev, purge_pe);
87 pci_stop_and_remove_bus_device(dev); 92 pci_stop_and_remove_bus_device(dev);
88 } 93 }
94}
95
96/**
97 * pcibios_remove_pci_devices - remove all devices under this bus
98 *
99 * Remove all of the PCI devices under this bus both from the
100 * linux pci device tree, and from the powerpc EEH address cache.
101 */
102void pcibios_remove_pci_devices(struct pci_bus *bus)
103{
104 __pcibios_remove_pci_devices(bus, 1);
89} 105}
90EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices); 106EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices);
91 107
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index 455760b1fe6..45d00e5fe14 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -33,13 +33,6 @@ static int max_idle_state = MAX_IDLE_STATE_COUNT - 1;
33static struct cpuidle_device __percpu *pseries_cpuidle_devices; 33static struct cpuidle_device __percpu *pseries_cpuidle_devices;
34static struct cpuidle_state *cpuidle_state_table; 34static struct cpuidle_state *cpuidle_state_table;
35 35
36void update_smt_snooze_delay(int snooze)
37{
38 struct cpuidle_driver *drv = cpuidle_get_driver();
39 if (drv)
40 drv->states[0].target_residency = snooze;
41}
42
43static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before) 36static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before)
44{ 37{
45 38
@@ -66,32 +59,22 @@ static int snooze_loop(struct cpuidle_device *dev,
 {
 	unsigned long in_purr;
 	ktime_t kt_before;
-	unsigned long start_snooze;
-	long snooze = drv->states[0].target_residency;
+	int cpu = dev->cpu;
 
 	idle_loop_prolog(&in_purr, &kt_before);
+	local_irq_enable();
+	set_thread_flag(TIF_POLLING_NRFLAG);
 
-	if (snooze) {
-		start_snooze = get_tb() + snooze * tb_ticks_per_usec;
-		local_irq_enable();
-		set_thread_flag(TIF_POLLING_NRFLAG);
-
-		while ((snooze < 0) || (get_tb() < start_snooze)) {
-			if (need_resched() || cpu_is_offline(dev->cpu))
-				goto out;
-			ppc64_runlatch_off();
-			HMT_low();
-			HMT_very_low();
-		}
-
-		HMT_medium();
-		clear_thread_flag(TIF_POLLING_NRFLAG);
-		smp_mb();
-		local_irq_disable();
+	while ((!need_resched()) && cpu_online(cpu)) {
+		ppc64_runlatch_off();
+		HMT_low();
+		HMT_very_low();
 	}
 
-out:
 	HMT_medium();
+	clear_thread_flag(TIF_POLLING_NRFLAG);
+	smp_mb();
+
 	dev->last_residency =
 		(int)idle_loop_epilog(in_purr, kt_before);
 	return index;
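The rewritten snooze_loop() drops the timed busy-wait on get_tb() entirely: interrupts are enabled and TIF_POLLING_NRFLAG is set up front, and the loop simply spins at low thread priority until the scheduler has work or the CPU goes offline. How long a CPU lingers here is now the cpuidle governor's decision, driven by the states' target_residency values rather than by deadline arithmetic inside the loop. A stand-alone sketch of the same loop shape, with stubs in place of the kernel primitives (need_work() and cpu_stays_online() stand in for need_resched() and cpu_online()):

    #include <stdbool.h>
    #include <stdio.h>

    static int polls;

    /* Stub for need_resched(): pretend work arrives after three polls. */
    static bool need_work(void) { return ++polls > 3; }
    /* Stub for cpu_online(). */
    static bool cpu_stays_online(int cpu) { return true; }

    static int snooze(int cpu)
    {
            /* kernel: local_irq_enable(); set_thread_flag(TIF_POLLING_NRFLAG); */
            while (!need_work() && cpu_stays_online(cpu)) {
                    /* kernel: ppc64_runlatch_off(); HMT_low(); HMT_very_low(); */
            }
            /* kernel: HMT_medium(); clear_thread_flag(...); smp_mb(); */
            return polls;
    }

    int main(void)
    {
            printf("left snooze after %d polls\n", snooze(0));
            return 0;
    }
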
@@ -172,8 +155,8 @@ static struct cpuidle_state dedicated_states[MAX_IDLE_STATE_COUNT] = {
 		.name = "CEDE",
 		.desc = "CEDE",
 		.flags = CPUIDLE_FLAG_TIME_VALID,
-		.exit_latency = 1,
-		.target_residency = 10,
+		.exit_latency = 10,
+		.target_residency = 100,
 		.enter = &dedicated_cede_loop },
 };
 
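The latency and residency bump for CEDE matters because a cpuidle governor only promotes a CPU into a state when the predicted idle period is at least the state's target_residency and the current QoS constraint tolerates its exit_latency; with CEDE at 10/100 us, short idle periods now stay in snooze. A simplified sketch of that selection rule (the real menu governor layers prediction and correction factors on top):

    /* Simplified cpuidle-style selection: pick the deepest state whose
     * exit latency fits the QoS limit and whose target residency fits
     * the predicted idle time. State 0 (snooze) is always allowed. */
    struct idle_state { int exit_latency_us; int target_residency_us; };

    static int pick_state(const struct idle_state *s, int nstates,
                          int predicted_idle_us, int latency_limit_us)
    {
            int i, best = 0;

            for (i = 1; i < nstates; i++) {
                    if (s[i].exit_latency_us > latency_limit_us)
                            break;
                    if (s[i].target_residency_us > predicted_idle_us)
                            break;
                    best = i;
            }
            return best;
    }
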
@@ -190,6 +173,23 @@ static struct cpuidle_state shared_states[MAX_IDLE_STATE_COUNT] = {
 		.enter = &shared_cede_loop },
 };
 
+void update_smt_snooze_delay(int cpu, int residency)
+{
+	struct cpuidle_driver *drv = cpuidle_get_driver();
+	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
+
+	if (cpuidle_state_table != dedicated_states)
+		return;
+
+	if (residency < 0) {
+		/* Disable the Nap state on that cpu */
+		if (dev)
+			dev->states_usage[1].disable = 1;
+	} else
+		if (drv)
+			drv->states[1].target_residency = residency;
+}
+
 static int pseries_cpuidle_add_cpu_notifier(struct notifier_block *n,
 			unsigned long action, void *hcpu)
 {
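update_smt_snooze_delay() is now per-CPU and acts on state 1 (CEDE in the dedicated-processor table) rather than retuning snooze: a negative residency disables that state for the given CPU through its states_usage[] entry, and any other value becomes the state's new target_residency. The hook is fed from the smt_snooze_delay sysfs attribute; a rough sketch of such a store path (hypothetical, the real handler lives in arch/powerpc/kernel/sysfs.c and its signature differs):

    #include <linux/kernel.h>

    /* Hypothetical sysfs store path feeding update_smt_snooze_delay();
     * illustrative only, not taken from this patch. */
    static ssize_t store_snooze_sketch(int cpu, const char *buf, size_t count)
    {
            long snooze;

            if (kstrtol(buf, 10, &snooze))
                    return -EINVAL;

            /* < 0 disables the CEDE state on this cpu, >= 0 retunes it */
            update_smt_snooze_delay(cpu, (int)snooze);
            return count;
    }
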
@@ -246,10 +246,6 @@ static int pseries_cpuidle_driver_init(void)
 		drv->states[drv->state_count] =	/* structure copy */
 			cpuidle_state_table[idle_state];
 
-		if (cpuidle_state_table == dedicated_states)
-			drv->states[drv->state_count].target_residency =
-				__get_cpu_var(smt_snooze_delay);
-
 		drv->state_count += 1;
 	}
 
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 51ecac920dd..e3cb7ae6165 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -388,10 +388,8 @@ static void __init pSeries_setup_arch(void)
 
 	/* Find and initialize PCI host bridges */
 	init_pci_config_tokens();
-	eeh_pseries_init();
 	find_and_init_phbs();
 	pSeries_reconfig_notifier_register(&pci_dn_reconfig_nb);
-	eeh_init();
 
 	pSeries_nvram_init();
 
@@ -416,16 +414,20 @@ static int __init pSeries_init_panel(void)
 }
 machine_arch_initcall(pseries, pSeries_init_panel);
 
-static int pseries_set_dabr(unsigned long dabr)
+static int pseries_set_dabr(unsigned long dabr, unsigned long dabrx)
 {
 	return plpar_hcall_norets(H_SET_DABR, dabr);
 }
 
-static int pseries_set_xdabr(unsigned long dabr)
+static int pseries_set_xdabr(unsigned long dabr, unsigned long dabrx)
 {
-	/* We want to catch accesses from kernel and userspace */
-	return plpar_hcall_norets(H_SET_XDABR, dabr,
-		H_DABRX_KERNEL | H_DABRX_USER);
+	/* Have to set at least one bit in the DABRX according to PAPR */
+	if (dabrx == 0 && dabr == 0)
+		dabrx = DABRX_USER;
+	/* PAPR says we can only set kernel and user bits */
+	dabrx &= DABRX_KERNEL | DABRX_USER;
+
+	return plpar_hcall_norets(H_SET_XDABR, dabr, dabrx);
 }
 
 #define CMO_CHARACTERISTICS_TOKEN 44
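pseries_set_xdabr() now sanitizes the caller-supplied DABRX (data address breakpoint extension) bits instead of hard-coding them: PAPR requires at least one bit to be set, and only the kernel and user translation bits may be passed to H_SET_XDABR. A stand-alone sketch of the same sanitization, with bit values assumed from arch/powerpc/include/asm/reg.h:

    #include <stdio.h>

    /* Bit values assumed from arch/powerpc/include/asm/reg.h. */
    #define DABRX_USER      (1UL << 0)
    #define DABRX_KERNEL    (1UL << 1)
    #define DABRX_HYP       (1UL << 2)      /* not settable via PAPR */

    static unsigned long sanitize_dabrx(unsigned long dabr, unsigned long dabrx)
    {
            if (dabrx == 0 && dabr == 0)
                    dabrx = DABRX_USER;     /* PAPR: at least one bit */
            return dabrx & (DABRX_KERNEL | DABRX_USER);
    }

    int main(void)
    {
            /* clearing the breakpoint: defaults to the user bit */
            printf("%#lx\n", sanitize_dabrx(0, 0));                             /* 0x1 */
            /* hypervisor bit is silently stripped */
            printf("%#lx\n", sanitize_dabrx(0x1000, DABRX_HYP | DABRX_KERNEL)); /* 0x2 */
            return 0;
    }
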
@@ -529,10 +531,10 @@ static void __init pSeries_init_early(void)
 	if (firmware_has_feature(FW_FEATURE_LPAR))
 		hvc_vio_init_early();
 #endif
-	if (firmware_has_feature(FW_FEATURE_DABR))
-		ppc_md.set_dabr = pseries_set_dabr;
-	else if (firmware_has_feature(FW_FEATURE_XDABR))
+	if (firmware_has_feature(FW_FEATURE_XDABR))
 		ppc_md.set_dabr = pseries_set_xdabr;
+	else if (firmware_has_feature(FW_FEATURE_DABR))
+		ppc_md.set_dabr = pseries_set_dabr;
 
 	pSeries_cmo_feature_init();
 	iommu_init_early_pSeries();