Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--  arch/powerpc/platforms/40x/Kconfig | 8
-rw-r--r--  arch/powerpc/platforms/40x/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/40x/hcu4.c | 61
-rw-r--r--  arch/powerpc/platforms/512x/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/82xx/km82xx.c | 4
-rw-r--r--  arch/powerpc/platforms/83xx/Kconfig | 9
-rw-r--r--  arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c | 58
-rw-r--r--  arch/powerpc/platforms/85xx/Kconfig | 30
-rw-r--r--  arch/powerpc/platforms/85xx/Makefile | 3
-rw-r--r--  arch/powerpc/platforms/85xx/p1022_ds.c | 11
-rw-r--r--  arch/powerpc/platforms/85xx/p2041_rdb.c (renamed from arch/powerpc/platforms/85xx/p2040_rdb.c) | 18
-rw-r--r--  arch/powerpc/platforms/85xx/p3060_qds.c | 77
-rw-r--r--  arch/powerpc/platforms/85xx/sbc8560.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c | 12
-rw-r--r--  arch/powerpc/platforms/86xx/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/Kconfig | 13
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 4
-rw-r--r--  arch/powerpc/platforms/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c | 21
-rw-r--r--  arch/powerpc/platforms/powernv/Kconfig | 16
-rw-r--r--  arch/powerpc/platforms/powernv/Makefile | 5
-rw-r--r--  arch/powerpc/platforms/powernv/opal-nvram.c | 88
-rw-r--r--  arch/powerpc/platforms/powernv/opal-rtc.c | 97
-rw-r--r--  arch/powerpc/platforms/powernv/opal-takeover.S | 140
-rw-r--r--  arch/powerpc/platforms/powernv/opal-wrappers.S | 101
-rw-r--r--  arch/powerpc/platforms/powernv/opal.c | 322
-rw-r--r--  arch/powerpc/platforms/powernv/pci-p5ioc2.c | 234
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c | 427
-rw-r--r--  arch/powerpc/platforms/powernv/pci.h | 48
-rw-r--r--  arch/powerpc/platforms/powernv/powernv.h | 16
-rw-r--r--  arch/powerpc/platforms/powernv/setup.c | 196
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c | 182
-rw-r--r--  arch/powerpc/platforms/ps3/Kconfig | 12
-rw-r--r--  arch/powerpc/platforms/ps3/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/ps3/gelic_udbg.c | 273
-rw-r--r--  arch/powerpc/platforms/ps3/system-bus.c | 7
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/pseries/dlpar.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/eeh.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c | 34
-rw-r--r--  arch/powerpc/platforms/pseries/nvram.c | 171
-rw-r--r--  arch/powerpc/platforms/wsp/Kconfig | 11
-rw-r--r--  arch/powerpc/platforms/wsp/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/wsp/ics.c | 48
-rw-r--r--  arch/powerpc/platforms/wsp/ics.h | 5
-rw-r--r--  arch/powerpc/platforms/wsp/msi.c | 102
-rw-r--r--  arch/powerpc/platforms/wsp/msi.h | 19
-rw-r--r--  arch/powerpc/platforms/wsp/psr2.c | 4
-rw-r--r--  arch/powerpc/platforms/wsp/wsp.h | 3
-rw-r--r--  arch/powerpc/platforms/wsp/wsp_pci.c | 1133
-rw-r--r--  arch/powerpc/platforms/wsp/wsp_pci.h | 268
51 files changed, 4180 insertions(+), 127 deletions(-)
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index b5d87067a58b..8f9c3e245cff 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -32,14 +32,6 @@ config EP405
32 help 32 help
33 This option enables support for the EP405/EP405PC boards. 33 This option enables support for the EP405/EP405PC boards.
34 34
35config HCU4
36 bool "Hcu4"
37 depends on 40x
38 default n
39 select 405GPR
40 help
41 This option enables support for the Nestal Maschinen HCU4 board.
42
43config HOTFOOT 35config HOTFOOT
44 bool "Hotfoot" 36 bool "Hotfoot"
45 depends on 40x 37 depends on 40x
diff --git a/arch/powerpc/platforms/40x/Makefile b/arch/powerpc/platforms/40x/Makefile
index 56e89004c468..88c22de0c850 100644
--- a/arch/powerpc/platforms/40x/Makefile
+++ b/arch/powerpc/platforms/40x/Makefile
@@ -1,4 +1,3 @@
1obj-$(CONFIG_HCU4) += hcu4.o
2obj-$(CONFIG_WALNUT) += walnut.o 1obj-$(CONFIG_WALNUT) += walnut.o
3obj-$(CONFIG_XILINX_VIRTEX_GENERIC_BOARD) += virtex.o 2obj-$(CONFIG_XILINX_VIRTEX_GENERIC_BOARD) += virtex.o
4obj-$(CONFIG_EP405) += ep405.o 3obj-$(CONFIG_EP405) += ep405.o
diff --git a/arch/powerpc/platforms/40x/hcu4.c b/arch/powerpc/platforms/40x/hcu4.c
deleted file mode 100644
index 60b2afecab75..000000000000
--- a/arch/powerpc/platforms/40x/hcu4.c
+++ /dev/null
@@ -1,61 +0,0 @@
1/*
2 * Architecture- / platform-specific boot-time initialization code for
3 * IBM PowerPC 4xx based boards. Adapted from original
4 * code by Gary Thomas, Cort Dougan <cort@fsmlabs.com>, and Dan Malek
5 * <dan@net4x.com>.
6 *
7 * Copyright(c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
8 *
9 * Rewritten and ported to the merged powerpc tree:
10 * Copyright 2007 IBM Corporation
11 * Josh Boyer <jwboyer@linux.vnet.ibm.com>
12 *
13 * 2002 (c) MontaVista, Software, Inc. This file is licensed under
14 * the terms of the GNU General Public License version 2. This program
15 * is licensed "as is" without any warranty of any kind, whether express
16 * or implied.
17 */
18
19#include <linux/init.h>
20#include <linux/of_platform.h>
21
22#include <asm/machdep.h>
23#include <asm/prom.h>
24#include <asm/udbg.h>
25#include <asm/time.h>
26#include <asm/uic.h>
27#include <asm/ppc4xx.h>
28
29static __initdata struct of_device_id hcu4_of_bus[] = {
30 { .compatible = "ibm,plb3", },
31 { .compatible = "ibm,opb", },
32 { .compatible = "ibm,ebc", },
33 {},
34};
35
36static int __init hcu4_device_probe(void)
37{
38 of_platform_bus_probe(NULL, hcu4_of_bus, NULL);
39 return 0;
40}
41machine_device_initcall(hcu4, hcu4_device_probe);
42
43static int __init hcu4_probe(void)
44{
45 unsigned long root = of_get_flat_dt_root();
46
47 if (!of_flat_dt_is_compatible(root, "netstal,hcu4"))
48 return 0;
49
50 return 1;
51}
52
53define_machine(hcu4) {
54 .name = "HCU4",
55 .probe = hcu4_probe,
56 .progress = udbg_progress,
57 .init_IRQ = uic_init_tree,
58 .get_irq = uic_get_irq,
59 .restart = ppc4xx_reset_system,
60 .calibrate_decr = generic_calibrate_decr,
61};
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
index 27b0651221d1..b3ebce1aec07 100644
--- a/arch/powerpc/platforms/512x/Kconfig
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -6,6 +6,7 @@ config PPC_MPC512x
6 select PPC_CLOCK 6 select PPC_CLOCK
7 select PPC_PCI_CHOICE 7 select PPC_PCI_CHOICE
8 select FSL_PCI if PCI 8 select FSL_PCI if PCI
9 select ARCH_WANT_OPTIONAL_GPIOLIB
9 10
10config MPC5121_ADS 11config MPC5121_ADS
11 bool "Freescale MPC5121E ADS" 12 bool "Freescale MPC5121E ADS"
diff --git a/arch/powerpc/platforms/82xx/km82xx.c b/arch/powerpc/platforms/82xx/km82xx.c
index 428c5e0a0e75..3661bcdc326a 100644
--- a/arch/powerpc/platforms/82xx/km82xx.c
+++ b/arch/powerpc/platforms/82xx/km82xx.c
@@ -49,6 +49,9 @@ struct cpm_pin {
49}; 49};
50 50
51static __initdata struct cpm_pin km82xx_pins[] = { 51static __initdata struct cpm_pin km82xx_pins[] = {
52 /* SMC1 */
53 {2, 4, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
54 {2, 5, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
52 55
53 /* SMC2 */ 56 /* SMC2 */
54 {0, 8, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, 57 {0, 8, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
@@ -137,6 +140,7 @@ static void __init init_ioports(void)
137 } 140 }
138 141
139 cpm2_smc_clk_setup(CPM_CLK_SMC2, CPM_BRG8); 142 cpm2_smc_clk_setup(CPM_CLK_SMC2, CPM_BRG8);
143 cpm2_smc_clk_setup(CPM_CLK_SMC1, CPM_BRG7);
140 cpm2_clk_setup(CPM_CLK_SCC1, CPM_CLK11, CPM_CLK_RX); 144 cpm2_clk_setup(CPM_CLK_SCC1, CPM_CLK11, CPM_CLK_RX);
141 cpm2_clk_setup(CPM_CLK_SCC1, CPM_CLK11, CPM_CLK_TX); 145 cpm2_clk_setup(CPM_CLK_SCC1, CPM_CLK11, CPM_CLK_TX);
142 cpm2_clk_setup(CPM_CLK_SCC3, CPM_CLK5, CPM_CLK_RTX); 146 cpm2_clk_setup(CPM_CLK_SCC3, CPM_CLK5, CPM_CLK_RTX);
diff --git a/arch/powerpc/platforms/83xx/Kconfig b/arch/powerpc/platforms/83xx/Kconfig
index 73f4135f3a1a..670a033264c0 100644
--- a/arch/powerpc/platforms/83xx/Kconfig
+++ b/arch/powerpc/platforms/83xx/Kconfig
@@ -114,18 +114,21 @@ config KMETER1
114 114
115endif 115endif
116 116
117# used for usb 117# used for usb & gpio
118config PPC_MPC831x 118config PPC_MPC831x
119 bool 119 bool
120 select ARCH_WANT_OPTIONAL_GPIOLIB
120 121
121# used for math-emu 122# used for math-emu
122config PPC_MPC832x 123config PPC_MPC832x
123 bool 124 bool
124 125
125# used for usb 126# used for usb & gpio
126config PPC_MPC834x 127config PPC_MPC834x
127 bool 128 bool
129 select ARCH_WANT_OPTIONAL_GPIOLIB
128 130
129# used for usb 131# used for usb & gpio
130config PPC_MPC837x 132config PPC_MPC837x
131 bool 133 bool
134 select ARCH_WANT_OPTIONAL_GPIOLIB
diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
index 70798ac911ef..ef6537b8ed33 100644
--- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
+++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
@@ -21,6 +21,8 @@
21#include <linux/of.h> 21#include <linux/of.h>
22#include <linux/of_gpio.h> 22#include <linux/of_gpio.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/kthread.h>
25#include <linux/reboot.h>
24#include <asm/prom.h> 26#include <asm/prom.h>
25#include <asm/machdep.h> 27#include <asm/machdep.h>
26 28
@@ -30,6 +32,7 @@
30 */ 32 */
31#define MCU_REG_CTRL 0x20 33#define MCU_REG_CTRL 0x20
32#define MCU_CTRL_POFF 0x40 34#define MCU_CTRL_POFF 0x40
35#define MCU_CTRL_BTN 0x80
33 36
34#define MCU_NUM_GPIO 2 37#define MCU_NUM_GPIO 2
35 38
@@ -42,13 +45,55 @@ struct mcu {
42 45
43static struct mcu *glob_mcu; 46static struct mcu *glob_mcu;
44 47
48struct task_struct *shutdown_thread;
49static int shutdown_thread_fn(void *data)
50{
51 int ret;
52 struct mcu *mcu = glob_mcu;
53
54 while (!kthread_should_stop()) {
55 ret = i2c_smbus_read_byte_data(mcu->client, MCU_REG_CTRL);
56 if (ret < 0)
57 pr_err("MCU status reg read failed.\n");
58 mcu->reg_ctrl = ret;
59
60
61 if (mcu->reg_ctrl & MCU_CTRL_BTN) {
62 i2c_smbus_write_byte_data(mcu->client, MCU_REG_CTRL,
63 mcu->reg_ctrl & ~MCU_CTRL_BTN);
64
65 ctrl_alt_del();
66 }
67
68 set_current_state(TASK_INTERRUPTIBLE);
69 schedule_timeout(HZ);
70 }
71
72 return 0;
73}
74
75static ssize_t show_status(struct device *d,
76 struct device_attribute *attr, char *buf)
77{
78 int ret;
79 struct mcu *mcu = glob_mcu;
80
81 ret = i2c_smbus_read_byte_data(mcu->client, MCU_REG_CTRL);
82 if (ret < 0)
83 return -ENODEV;
84 mcu->reg_ctrl = ret;
85
86 return sprintf(buf, "%02x\n", ret);
87}
88static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
89
45static void mcu_power_off(void) 90static void mcu_power_off(void)
46{ 91{
47 struct mcu *mcu = glob_mcu; 92 struct mcu *mcu = glob_mcu;
48 93
49 pr_info("Sending power-off request to the MCU...\n"); 94 pr_info("Sending power-off request to the MCU...\n");
50 mutex_lock(&mcu->lock); 95 mutex_lock(&mcu->lock);
51 i2c_smbus_write_byte_data(glob_mcu->client, MCU_REG_CTRL, 96 i2c_smbus_write_byte_data(mcu->client, MCU_REG_CTRL,
52 mcu->reg_ctrl | MCU_CTRL_POFF); 97 mcu->reg_ctrl | MCU_CTRL_POFF);
53 mutex_unlock(&mcu->lock); 98 mutex_unlock(&mcu->lock);
54} 99}
@@ -130,6 +175,13 @@ static int __devinit mcu_probe(struct i2c_client *client,
130 dev_info(&client->dev, "will provide power-off service\n"); 175 dev_info(&client->dev, "will provide power-off service\n");
131 } 176 }
132 177
178 if (device_create_file(&client->dev, &dev_attr_status))
179 dev_err(&client->dev,
180 "couldn't create device file for status\n");
181
182 shutdown_thread = kthread_run(shutdown_thread_fn, NULL,
183 "mcu-i2c-shdn");
184
133 return 0; 185 return 0;
134err: 186err:
135 kfree(mcu); 187 kfree(mcu);
@@ -141,6 +193,10 @@ static int __devexit mcu_remove(struct i2c_client *client)
141 struct mcu *mcu = i2c_get_clientdata(client); 193 struct mcu *mcu = i2c_get_clientdata(client);
142 int ret; 194 int ret;
143 195
196 kthread_stop(shutdown_thread);
197
198 device_remove_file(&client->dev, &dev_attr_status);
199
144 if (glob_mcu == mcu) { 200 if (glob_mcu == mcu) {
145 ppc_md.power_off = NULL; 201 ppc_md.power_off = NULL;
146 glob_mcu = NULL; 202 glob_mcu = NULL;
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 12f5932dadc9..45023e26aea3 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -171,17 +171,18 @@ config SBC8560
171 help 171 help
172 This option enables support for the Wind River SBC8560 board 172 This option enables support for the Wind River SBC8560 board
173 173
174config P2040_RDB 174config P2041_RDB
175 bool "Freescale P2040 RDB" 175 bool "Freescale P2041 RDB"
176 select DEFAULT_UIMAGE 176 select DEFAULT_UIMAGE
177 select PPC_E500MC 177 select PPC_E500MC
178 select PHYS_64BIT 178 select PHYS_64BIT
179 select SWIOTLB 179 select SWIOTLB
180 select MPC8xxx_GPIO 180 select ARCH_REQUIRE_GPIOLIB
181 select GPIO_MPC8XXX
181 select HAS_RAPIDIO 182 select HAS_RAPIDIO
182 select PPC_EPAPR_HV_PIC 183 select PPC_EPAPR_HV_PIC
183 help 184 help
184 This option enables support for the P2040 RDB board 185 This option enables support for the P2041 RDB board
185 186
186config P3041_DS 187config P3041_DS
187 bool "Freescale P3041 DS" 188 bool "Freescale P3041 DS"
@@ -189,19 +190,33 @@ config P3041_DS
189 select PPC_E500MC 190 select PPC_E500MC
190 select PHYS_64BIT 191 select PHYS_64BIT
191 select SWIOTLB 192 select SWIOTLB
192 select MPC8xxx_GPIO 193 select ARCH_REQUIRE_GPIOLIB
194 select GPIO_MPC8XXX
193 select HAS_RAPIDIO 195 select HAS_RAPIDIO
194 select PPC_EPAPR_HV_PIC 196 select PPC_EPAPR_HV_PIC
195 help 197 help
196 This option enables support for the P3041 DS board 198 This option enables support for the P3041 DS board
197 199
200config P3060_QDS
201 bool "Freescale P3060 QDS"
202 select DEFAULT_UIMAGE
203 select PPC_E500MC
204 select PHYS_64BIT
205 select SWIOTLB
206 select MPC8xxx_GPIO
207 select HAS_RAPIDIO
208 select PPC_EPAPR_HV_PIC
209 help
210 This option enables support for the P3060 QDS board
211
198config P4080_DS 212config P4080_DS
199 bool "Freescale P4080 DS" 213 bool "Freescale P4080 DS"
200 select DEFAULT_UIMAGE 214 select DEFAULT_UIMAGE
201 select PPC_E500MC 215 select PPC_E500MC
202 select PHYS_64BIT 216 select PHYS_64BIT
203 select SWIOTLB 217 select SWIOTLB
204 select MPC8xxx_GPIO 218 select ARCH_REQUIRE_GPIOLIB
219 select GPIO_MPC8XXX
205 select HAS_RAPIDIO 220 select HAS_RAPIDIO
206 select PPC_EPAPR_HV_PIC 221 select PPC_EPAPR_HV_PIC
207 help 222 help
@@ -216,7 +231,8 @@ config P5020_DS
216 select PPC_E500MC 231 select PPC_E500MC
217 select PHYS_64BIT 232 select PHYS_64BIT
218 select SWIOTLB 233 select SWIOTLB
219 select MPC8xxx_GPIO 234 select ARCH_REQUIRE_GPIOLIB
235 select GPIO_MPC8XXX
220 select HAS_RAPIDIO 236 select HAS_RAPIDIO
221 select PPC_EPAPR_HV_PIC 237 select PPC_EPAPR_HV_PIC
222 help 238 help
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index a971b32c5c0a..bc5acb95917a 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -13,8 +13,9 @@ obj-$(CONFIG_MPC85xx_RDB) += mpc85xx_rdb.o
13obj-$(CONFIG_P1010_RDB) += p1010rdb.o 13obj-$(CONFIG_P1010_RDB) += p1010rdb.o
14obj-$(CONFIG_P1022_DS) += p1022_ds.o 14obj-$(CONFIG_P1022_DS) += p1022_ds.o
15obj-$(CONFIG_P1023_RDS) += p1023_rds.o 15obj-$(CONFIG_P1023_RDS) += p1023_rds.o
16obj-$(CONFIG_P2040_RDB) += p2040_rdb.o corenet_ds.o 16obj-$(CONFIG_P2041_RDB) += p2041_rdb.o corenet_ds.o
17obj-$(CONFIG_P3041_DS) += p3041_ds.o corenet_ds.o 17obj-$(CONFIG_P3041_DS) += p3041_ds.o corenet_ds.o
18obj-$(CONFIG_P3060_QDS) += p3060_qds.o corenet_ds.o
18obj-$(CONFIG_P4080_DS) += p4080_ds.o corenet_ds.o 19obj-$(CONFIG_P4080_DS) += p4080_ds.o corenet_ds.o
19obj-$(CONFIG_P5020_DS) += p5020_ds.o corenet_ds.o 20obj-$(CONFIG_P5020_DS) += p5020_ds.o corenet_ds.o
20obj-$(CONFIG_STX_GP3) += stx_gp3.o 21obj-$(CONFIG_STX_GP3) += stx_gp3.o
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index c01c7277888c..fda15716fada 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -129,17 +129,20 @@ static void p1022ds_set_gamma_table(enum fsl_diu_monitor_port port,
129 */ 129 */
130static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port) 130static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port)
131{ 131{
132 struct device_node *pixis_node; 132 struct device_node *np;
133 void __iomem *pixis; 133 void __iomem *pixis;
134 u8 __iomem *brdcfg1; 134 u8 __iomem *brdcfg1;
135 135
136 pixis_node = of_find_compatible_node(NULL, NULL, "fsl,p1022ds-pixis"); 136 np = of_find_compatible_node(NULL, NULL, "fsl,p1022ds-fpga");
137 if (!pixis_node) { 137 if (!np)
138 /* older device trees used "fsl,p1022ds-pixis" */
139 np = of_find_compatible_node(NULL, NULL, "fsl,p1022ds-pixis");
140 if (!np) {
138 pr_err("p1022ds: missing ngPIXIS node\n"); 141 pr_err("p1022ds: missing ngPIXIS node\n");
139 return; 142 return;
140 } 143 }
141 144
142 pixis = of_iomap(pixis_node, 0); 145 pixis = of_iomap(np, 0);
143 if (!pixis) { 146 if (!pixis) {
144 pr_err("p1022ds: could not map ngPIXIS registers\n"); 147 pr_err("p1022ds: could not map ngPIXIS registers\n");
145 return; 148 return;
diff --git a/arch/powerpc/platforms/85xx/p2040_rdb.c b/arch/powerpc/platforms/85xx/p2041_rdb.c
index 32b56ac73dfb..eda6ed5683e1 100644
--- a/arch/powerpc/platforms/85xx/p2040_rdb.c
+++ b/arch/powerpc/platforms/85xx/p2041_rdb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * P2040 RDB Setup 2 * P2041 RDB Setup
3 * 3 *
4 * Copyright 2011 Freescale Semiconductor Inc. 4 * Copyright 2011 Freescale Semiconductor Inc.
5 * 5 *
@@ -35,18 +35,18 @@
35/* 35/*
36 * Called very early, device-tree isn't unflattened 36 * Called very early, device-tree isn't unflattened
37 */ 37 */
38static int __init p2040_rdb_probe(void) 38static int __init p2041_rdb_probe(void)
39{ 39{
40 unsigned long root = of_get_flat_dt_root(); 40 unsigned long root = of_get_flat_dt_root();
41#ifdef CONFIG_SMP 41#ifdef CONFIG_SMP
42 extern struct smp_ops_t smp_85xx_ops; 42 extern struct smp_ops_t smp_85xx_ops;
43#endif 43#endif
44 44
45 if (of_flat_dt_is_compatible(root, "fsl,P2040RDB")) 45 if (of_flat_dt_is_compatible(root, "fsl,P2041RDB"))
46 return 1; 46 return 1;
47 47
48 /* Check if we're running under the Freescale hypervisor */ 48 /* Check if we're running under the Freescale hypervisor */
49 if (of_flat_dt_is_compatible(root, "fsl,P2040RDB-hv")) { 49 if (of_flat_dt_is_compatible(root, "fsl,P2041RDB-hv")) {
50 ppc_md.init_IRQ = ehv_pic_init; 50 ppc_md.init_IRQ = ehv_pic_init;
51 ppc_md.get_irq = ehv_pic_get_irq; 51 ppc_md.get_irq = ehv_pic_get_irq;
52 ppc_md.restart = fsl_hv_restart; 52 ppc_md.restart = fsl_hv_restart;
@@ -66,9 +66,9 @@ static int __init p2040_rdb_probe(void)
66 return 0; 66 return 0;
67} 67}
68 68
69define_machine(p2040_rdb) { 69define_machine(p2041_rdb) {
70 .name = "P2040 RDB", 70 .name = "P2041 RDB",
71 .probe = p2040_rdb_probe, 71 .probe = p2041_rdb_probe,
72 .setup_arch = corenet_ds_setup_arch, 72 .setup_arch = corenet_ds_setup_arch,
73 .init_IRQ = corenet_ds_pic_init, 73 .init_IRQ = corenet_ds_pic_init,
74#ifdef CONFIG_PCI 74#ifdef CONFIG_PCI
@@ -81,8 +81,8 @@ define_machine(p2040_rdb) {
81 .power_save = e500_idle, 81 .power_save = e500_idle,
82}; 82};
83 83
84machine_device_initcall(p2040_rdb, corenet_ds_publish_devices); 84machine_device_initcall(p2041_rdb, corenet_ds_publish_devices);
85 85
86#ifdef CONFIG_SWIOTLB 86#ifdef CONFIG_SWIOTLB
87machine_arch_initcall(p2040_rdb, swiotlb_setup_bus_notifier); 87machine_arch_initcall(p2041_rdb, swiotlb_setup_bus_notifier);
88#endif 88#endif
diff --git a/arch/powerpc/platforms/85xx/p3060_qds.c b/arch/powerpc/platforms/85xx/p3060_qds.c
new file mode 100644
index 000000000000..01dcf44871e9
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/p3060_qds.c
@@ -0,0 +1,77 @@
1/*
2 * P3060 QDS Setup
3 *
4 * Copyright 2011 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/interrupt.h>
14#include <linux/phy.h>
15#include <asm/machdep.h>
16#include <asm/udbg.h>
17#include <asm/mpic.h>
18#include <linux/of_platform.h>
19#include <sysdev/fsl_soc.h>
20#include <sysdev/fsl_pci.h>
21#include <asm/ehv_pic.h>
22#include "corenet_ds.h"
23
24/*
25 * Called very early, device-tree isn't unflattened
26 */
27static int __init p3060_qds_probe(void)
28{
29 unsigned long root = of_get_flat_dt_root();
30#ifdef CONFIG_SMP
31 extern struct smp_ops_t smp_85xx_ops;
32#endif
33
34 if (of_flat_dt_is_compatible(root, "fsl,P3060QDS"))
35 return 1;
36
37 /* Check if we're running under the Freescale hypervisor */
38 if (of_flat_dt_is_compatible(root, "fsl,P3060QDS-hv")) {
39 ppc_md.init_IRQ = ehv_pic_init;
40 ppc_md.get_irq = ehv_pic_get_irq;
41 ppc_md.restart = fsl_hv_restart;
42 ppc_md.power_off = fsl_hv_halt;
43 ppc_md.halt = fsl_hv_halt;
44#ifdef CONFIG_SMP
45 /*
46 * Disable the timebase sync operations because we can't write
47 * to the timebase registers under the hypervisor.
48 */
49 smp_85xx_ops.give_timebase = NULL;
50 smp_85xx_ops.take_timebase = NULL;
51#endif
52 return 1;
53 }
54
55 return 0;
56}
57
58define_machine(p3060_qds) {
59 .name = "P3060 QDS",
60 .probe = p3060_qds_probe,
61 .setup_arch = corenet_ds_setup_arch,
62 .init_IRQ = corenet_ds_pic_init,
63#ifdef CONFIG_PCI
64 .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
65#endif
66 .get_irq = mpic_get_coreint_irq,
67 .restart = fsl_rstcr_restart,
68 .calibrate_decr = generic_calibrate_decr,
69 .progress = udbg_progress,
70 .power_save = e500_idle,
71};
72
73machine_device_initcall(p3060_qds, declare_of_platform_devices);
74
75#ifdef CONFIG_SWIOTLB
76machine_arch_initcall(p3060_qds, swiotlb_setup_bus_notifier);
77#endif
diff --git a/arch/powerpc/platforms/85xx/sbc8560.c b/arch/powerpc/platforms/85xx/sbc8560.c
index 09ced7221750..cebd786dc334 100644
--- a/arch/powerpc/platforms/85xx/sbc8560.c
+++ b/arch/powerpc/platforms/85xx/sbc8560.c
@@ -283,7 +283,7 @@ static int __init sbc8560_bdrstcr_init(void)
283 283
284 of_address_to_resource(np, 0, &res); 284 of_address_to_resource(np, 0, &res);
285 285
286 printk(KERN_INFO "sbc8560: Found BRSTCR at i/o 0x%x\n", res.start); 286 printk(KERN_INFO "sbc8560: Found BRSTCR at %pR\n", &res);
287 287
288 brstcr = ioremap(res.start, resource_size(&res)); 288 brstcr = ioremap(res.start, resource_size(&res));
289 if(!brstcr) 289 if(!brstcr)
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 5b9b901f6443..2df4785ffd4e 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -48,10 +48,11 @@ smp_85xx_kick_cpu(int nr)
48 const u64 *cpu_rel_addr; 48 const u64 *cpu_rel_addr;
49 __iomem u32 *bptr_vaddr; 49 __iomem u32 *bptr_vaddr;
50 struct device_node *np; 50 struct device_node *np;
51 int n = 0; 51 int n = 0, hw_cpu = get_hard_smp_processor_id(nr);
52 int ioremappable; 52 int ioremappable;
53 53
54 WARN_ON (nr < 0 || nr >= NR_CPUS); 54 WARN_ON(nr < 0 || nr >= NR_CPUS);
55 WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);
55 56
56 pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr); 57 pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);
57 58
@@ -79,7 +80,7 @@ smp_85xx_kick_cpu(int nr)
79 80
80 local_irq_save(flags); 81 local_irq_save(flags);
81 82
82 out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr); 83 out_be32(bptr_vaddr + BOOT_ENTRY_PIR, hw_cpu);
83#ifdef CONFIG_PPC32 84#ifdef CONFIG_PPC32
84 out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start)); 85 out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));
85 86
@@ -88,7 +89,7 @@ smp_85xx_kick_cpu(int nr)
88 (ulong)(bptr_vaddr + SIZE_BOOT_ENTRY)); 89 (ulong)(bptr_vaddr + SIZE_BOOT_ENTRY));
89 90
90 /* Wait a bit for the CPU to ack. */ 91 /* Wait a bit for the CPU to ack. */
91 while ((__secondary_hold_acknowledge != nr) && (++n < 1000)) 92 while ((__secondary_hold_acknowledge != hw_cpu) && (++n < 1000))
92 mdelay(1); 93 mdelay(1);
93#else 94#else
94 smp_generic_kick_cpu(nr); 95 smp_generic_kick_cpu(nr);
@@ -206,7 +207,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
206 if ( !timeout ) 207 if ( !timeout )
207 printk(KERN_ERR "Unable to bring down secondary cpu(s)"); 208 printk(KERN_ERR "Unable to bring down secondary cpu(s)");
208 209
209 for (i = 0; i < num_cpus; i++) 210 for_each_online_cpu(i)
210 { 211 {
211 if ( i == smp_processor_id() ) continue; 212 if ( i == smp_processor_id() ) continue;
212 mpic_reset_core(i); 213 mpic_reset_core(i);
@@ -243,6 +244,7 @@ void __init mpc85xx_smp_init(void)
243 * If left NULL, .message_pass defaults to 244 * If left NULL, .message_pass defaults to
244 * smp_muxed_ipi_message_pass 245 * smp_muxed_ipi_message_pass
245 */ 246 */
247 smp_85xx_ops.message_pass = NULL;
246 smp_85xx_ops.cause_ipi = doorbell_cause_ipi; 248 smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
247 } 249 }
248 250
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig
index a0b5638c5dc8..8d6599d54ea6 100644
--- a/arch/powerpc/platforms/86xx/Kconfig
+++ b/arch/powerpc/platforms/86xx/Kconfig
@@ -4,6 +4,7 @@ menuconfig PPC_86xx
4 depends on 6xx 4 depends on 6xx
5 select FSL_SOC 5 select FSL_SOC
6 select ALTIVEC 6 select ALTIVEC
7 select ARCH_WANT_OPTIONAL_GPIOLIB
7 help 8 help
8 The Freescale E600 SoCs have 74xx cores. 9 The Freescale E600 SoCs have 74xx cores.
9 10
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index b9ba86191aed..e4588721ef34 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -1,5 +1,6 @@
1menu "Platform support" 1menu "Platform support"
2 2
3source "arch/powerpc/platforms/powernv/Kconfig"
3source "arch/powerpc/platforms/pseries/Kconfig" 4source "arch/powerpc/platforms/pseries/Kconfig"
4source "arch/powerpc/platforms/iseries/Kconfig" 5source "arch/powerpc/platforms/iseries/Kconfig"
5source "arch/powerpc/platforms/chrp/Kconfig" 6source "arch/powerpc/platforms/chrp/Kconfig"
@@ -333,16 +334,6 @@ config OF_RTC
333 334
334source "arch/powerpc/sysdev/bestcomm/Kconfig" 335source "arch/powerpc/sysdev/bestcomm/Kconfig"
335 336
336config MPC8xxx_GPIO
337 bool "MPC512x/MPC8xxx GPIO support"
338 depends on PPC_MPC512x || PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || \
339 FSL_SOC_BOOKE || PPC_86xx
340 select GENERIC_GPIO
341 select ARCH_REQUIRE_GPIOLIB
342 help
343 Say Y here if you're going to use hardware that connects to the
344 MPC512x/831x/834x/837x/8572/8610 GPIOs.
345
346config SIMPLE_GPIO 337config SIMPLE_GPIO
347 bool "Support for simple, memory-mapped GPIO controllers" 338 bool "Support for simple, memory-mapped GPIO controllers"
348 depends on PPC 339 depends on PPC
@@ -355,7 +346,7 @@ config SIMPLE_GPIO
355 on-board peripherals. 346 on-board peripherals.
356 347
357config MCU_MPC8349EMITX 348config MCU_MPC8349EMITX
358 tristate "MPC8349E-mITX MCU driver" 349 bool "MPC8349E-mITX MCU driver"
359 depends on I2C && PPC_83xx 350 depends on I2C && PPC_83xx
360 select GENERIC_GPIO 351 select GENERIC_GPIO
361 select ARCH_REQUIRE_GPIOLIB 352 select ARCH_REQUIRE_GPIOLIB
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index e06e39589a09..a85990c886e9 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -69,6 +69,7 @@ config PPC_BOOK3S_64
69 bool "Server processors" 69 bool "Server processors"
70 select PPC_FPU 70 select PPC_FPU
71 select PPC_HAVE_PMU_SUPPORT 71 select PPC_HAVE_PMU_SUPPORT
72 select SYS_SUPPORTS_HUGETLBFS
72 73
73config PPC_BOOK3E_64 74config PPC_BOOK3E_64
74 bool "Embedded processors" 75 bool "Embedded processors"
@@ -173,6 +174,7 @@ config BOOKE
173config FSL_BOOKE 174config FSL_BOOKE
174 bool 175 bool
175 depends on (E200 || E500) && PPC32 176 depends on (E200 || E500) && PPC32
177 select SYS_SUPPORTS_HUGETLBFS if PHYS_64BIT
176 default y 178 default y
177 179
178# this is for common code between PPC32 & PPC64 FSL BOOKE 180# this is for common code between PPC32 & PPC64 FSL BOOKE
@@ -296,7 +298,7 @@ config PPC_BOOK3E_MMU
296 298
297config PPC_MM_SLICES 299config PPC_MM_SLICES
298 bool 300 bool
299 default y if HUGETLB_PAGE || (PPC_STD_MMU_64 && PPC_64K_PAGES) 301 default y if (PPC64 && HUGETLB_PAGE) || (PPC_STD_MMU_64 && PPC_64K_PAGES)
300 default n 302 default n
301 303
302config VIRT_CPU_ACCOUNTING 304config VIRT_CPU_ACCOUNTING
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index 73e2116cfeed..2635a22bade2 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_PPC_82xx) += 82xx/
14obj-$(CONFIG_PPC_83xx) += 83xx/ 14obj-$(CONFIG_PPC_83xx) += 83xx/
15obj-$(CONFIG_FSL_SOC_BOOKE) += 85xx/ 15obj-$(CONFIG_FSL_SOC_BOOKE) += 85xx/
16obj-$(CONFIG_PPC_86xx) += 86xx/ 16obj-$(CONFIG_PPC_86xx) += 86xx/
17obj-$(CONFIG_PPC_POWERNV) += powernv/
17obj-$(CONFIG_PPC_PSERIES) += pseries/ 18obj-$(CONFIG_PPC_PSERIES) += pseries/
18obj-$(CONFIG_PPC_ISERIES) += iseries/ 19obj-$(CONFIG_PPC_ISERIES) += iseries/
19obj-$(CONFIG_PPC_MAPLE) += maple/ 20obj-$(CONFIG_PPC_MAPLE) += maple/
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 26a067122a54..fc46fcac3921 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -1159,6 +1159,26 @@ static int __init setup_iommu_fixed(char *str)
1159} 1159}
1160__setup("iommu_fixed=", setup_iommu_fixed); 1160__setup("iommu_fixed=", setup_iommu_fixed);
1161 1161
1162static u64 cell_dma_get_required_mask(struct device *dev)
1163{
1164 struct dma_map_ops *dma_ops;
1165
1166 if (!dev->dma_mask)
1167 return 0;
1168
1169 if (!iommu_fixed_disabled &&
1170 cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR)
1171 return DMA_BIT_MASK(64);
1172
1173 dma_ops = get_dma_ops(dev);
1174 if (dma_ops->get_required_mask)
1175 return dma_ops->get_required_mask(dev);
1176
1177 WARN_ONCE(1, "no get_required_mask in %p ops", dma_ops);
1178
1179 return DMA_BIT_MASK(64);
1180}
1181
1162static int __init cell_iommu_init(void) 1182static int __init cell_iommu_init(void)
1163{ 1183{
1164 struct device_node *np; 1184 struct device_node *np;
@@ -1175,6 +1195,7 @@ static int __init cell_iommu_init(void)
1175 1195
1176 /* Setup various ppc_md. callbacks */ 1196 /* Setup various ppc_md. callbacks */
1177 ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup; 1197 ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
1198 ppc_md.dma_get_required_mask = cell_dma_get_required_mask;
1178 ppc_md.tce_build = tce_build_cell; 1199 ppc_md.tce_build = tce_build_cell;
1179 ppc_md.tce_free = tce_free_cell; 1200 ppc_md.tce_free = tce_free_cell;
1180 1201
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
new file mode 100644
index 000000000000..74fea5c21839
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -0,0 +1,16 @@
1config PPC_POWERNV
2 depends on PPC64 && PPC_BOOK3S
3 bool "IBM PowerNV (Non-Virtualized) platform support"
4 select PPC_NATIVE
5 select PPC_XICS
6 select PPC_ICP_NATIVE
7 select PPC_P7_NAP
8 select PPC_PCI_CHOICE if EMBEDDED
9 default y
10
11config PPC_POWERNV_RTAS
12 depends on PPC_POWERNV
13 bool "Support for RTAS based PowerNV platforms such as BML"
14 default y
15 select PPC_ICS_RTAS
16 select PPC_RTAS
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
new file mode 100644
index 000000000000..31853008b418
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -0,0 +1,5 @@
1obj-y += setup.o opal-takeover.o opal-wrappers.o opal.o
2obj-y += opal-rtc.o opal-nvram.o
3
4obj-$(CONFIG_SMP) += smp.o
5obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o
diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
new file mode 100644
index 000000000000..3f83e1ae26ac
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-nvram.c
@@ -0,0 +1,88 @@
1/*
2 * PowerNV nvram code.
3 *
4 * Copyright 2011 IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#define DEBUG
13
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/of.h>
17
18#include <asm/opal.h>
19#include <asm/machdep.h>
20
21static unsigned int nvram_size;
22
23static ssize_t opal_nvram_size(void)
24{
25 return nvram_size;
26}
27
28static ssize_t opal_nvram_read(char *buf, size_t count, loff_t *index)
29{
30 s64 rc;
31 int off;
32
33 if (*index >= nvram_size)
34 return 0;
35 off = *index;
36 if ((off + count) > nvram_size)
37 count = nvram_size - off;
38 rc = opal_read_nvram(__pa(buf), count, off);
39 if (rc != OPAL_SUCCESS)
40 return -EIO;
41 *index += count;
42 return count;
43}
44
45static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
46{
47 s64 rc = OPAL_BUSY;
48 int off;
49
50 if (*index >= nvram_size)
51 return 0;
52 off = *index;
53 if ((off + count) > nvram_size)
54 count = nvram_size - off;
55
56 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
57 rc = opal_write_nvram(__pa(buf), count, off);
58 if (rc == OPAL_BUSY_EVENT)
59 opal_poll_events(NULL);
60 }
61 *index += count;
62 return count;
63}
64
65void __init opal_nvram_init(void)
66{
67 struct device_node *np;
68 const u32 *nbytes_p;
69
70 np = of_find_compatible_node(NULL, NULL, "ibm,opal-nvram");
71 if (np == NULL)
72 return;
73
74 nbytes_p = of_get_property(np, "#bytes", NULL);
75 if (!nbytes_p) {
76 of_node_put(np);
77 return;
78 }
79 nvram_size = *nbytes_p;
80
81 printk(KERN_INFO "OPAL nvram setup, %u bytes\n", nvram_size);
82 of_node_put(np);
83
84 ppc_md.nvram_read = opal_nvram_read;
85 ppc_md.nvram_write = opal_nvram_write;
86 ppc_md.nvram_size = opal_nvram_size;
87}
88
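The busy-retry convention visible in opal_nvram_write() above (and in the RTC code that follows) is worth noting: an OPAL call may return OPAL_BUSY or OPAL_BUSY_EVENT, and the caller is expected to poll firmware events and retry. Below is a minimal sketch of that pattern, assuming the OPAL_* return codes and opal_poll_events() from asm/opal.h; opal_do_work() is a hypothetical stand-in for any opal_*() call and is not part of the patch.

#include <linux/errno.h>
#include <asm/opal.h>

/*
 * Illustrative sketch only: generic retry loop for an OPAL call that
 * can come back busy. opal_do_work() is hypothetical.
 */
static int opal_busy_retry_example(void)
{
	s64 rc = OPAL_BUSY;

	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_do_work();		/* hypothetical opal_*() call */
		if (rc == OPAL_BUSY_EVENT)
			opal_poll_events(NULL);	/* let firmware advance its event loop */
	}
	return rc == OPAL_SUCCESS ? 0 : -EIO;
}
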
diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c
new file mode 100644
index 000000000000..2aa7641aac9b
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-rtc.c
@@ -0,0 +1,97 @@
1/*
2 * PowerNV Real Time Clock.
3 *
4 * Copyright 2011 IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12
13#include <linux/kernel.h>
14#include <linux/time.h>
15#include <linux/bcd.h>
16#include <linux/rtc.h>
17#include <linux/delay.h>
18
19#include <asm/opal.h>
20#include <asm/firmware.h>
21
22static void opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm)
23{
24 tm->tm_year = ((bcd2bin(y_m_d >> 24) * 100) +
25 bcd2bin((y_m_d >> 16) & 0xff)) - 1900;
26 tm->tm_mon = bcd2bin((y_m_d >> 8) & 0xff) - 1;
27 tm->tm_mday = bcd2bin(y_m_d & 0xff);
28 tm->tm_hour = bcd2bin((h_m_s_ms >> 56) & 0xff);
29 tm->tm_min = bcd2bin((h_m_s_ms >> 48) & 0xff);
30 tm->tm_sec = bcd2bin((h_m_s_ms >> 40) & 0xff);
31
32 GregorianDay(tm);
33}
34
35unsigned long __init opal_get_boot_time(void)
36{
37 struct rtc_time tm;
38 u32 y_m_d;
39 u64 h_m_s_ms;
40 long rc = OPAL_BUSY;
41
42 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
43 rc = opal_rtc_read(&y_m_d, &h_m_s_ms);
44 if (rc == OPAL_BUSY_EVENT)
45 opal_poll_events(NULL);
46 else
47 mdelay(10);
48 }
49 if (rc != OPAL_SUCCESS)
50 return 0;
51 opal_to_tm(y_m_d, h_m_s_ms, &tm);
52 return mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
53 tm.tm_hour, tm.tm_min, tm.tm_sec);
54}
55
56void opal_get_rtc_time(struct rtc_time *tm)
57{
58 long rc = OPAL_BUSY;
59 u32 y_m_d;
60 u64 h_m_s_ms;
61
62 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
63 rc = opal_rtc_read(&y_m_d, &h_m_s_ms);
64 if (rc == OPAL_BUSY_EVENT)
65 opal_poll_events(NULL);
66 else
67 mdelay(10);
68 }
69 if (rc != OPAL_SUCCESS)
70 return;
71 opal_to_tm(y_m_d, h_m_s_ms, tm);
72}
73
74int opal_set_rtc_time(struct rtc_time *tm)
75{
76 long rc = OPAL_BUSY;
77 u32 y_m_d = 0;
78 u64 h_m_s_ms = 0;
79
80 y_m_d |= ((u32)bin2bcd((tm->tm_year + 1900) / 100)) << 24;
81 y_m_d |= ((u32)bin2bcd((tm->tm_year + 1900) % 100)) << 16;
82 y_m_d |= ((u32)bin2bcd((tm->tm_mon + 1))) << 8;
83 y_m_d |= ((u32)bin2bcd(tm->tm_mday));
84
85 h_m_s_ms |= ((u64)bin2bcd(tm->tm_hour)) << 56;
86 h_m_s_ms |= ((u64)bin2bcd(tm->tm_min)) << 48;
87 h_m_s_ms |= ((u64)bin2bcd(tm->tm_sec)) << 40;
88
89 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
90 rc = opal_rtc_write(y_m_d, h_m_s_ms);
91 if (rc == OPAL_BUSY_EVENT)
92 opal_poll_events(NULL);
93 else
94 mdelay(10);
95 }
96 return rc == OPAL_SUCCESS ? 0 : -EIO;
97}
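The RTC calls above exchange the date as BCD packed into a 32-bit y_m_d word (century, year, month, day, one byte each, most significant first) and the time in the top three bytes of a 64-bit h_m_s_ms word. The following self-contained illustration of the date packing mirrors what opal_set_rtc_time() does; it is a sketch, not kernel code, and bin2bcd() here is a local stand-in for the kernel helper of the same name.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's bin2bcd() helper (valid for 0-99). */
static uint8_t bin2bcd(unsigned int val)
{
	return ((val / 10) << 4) | (val % 10);
}

int main(void)
{
	unsigned int year = 2011, mon = 10, mday = 14;
	uint32_t y_m_d = 0;

	y_m_d |= (uint32_t)bin2bcd(year / 100) << 24;	/* century: 0x20 */
	y_m_d |= (uint32_t)bin2bcd(year % 100) << 16;	/* year:    0x11 */
	y_m_d |= (uint32_t)bin2bcd(mon) << 8;		/* month:   0x10 */
	y_m_d |= (uint32_t)bin2bcd(mday);		/* day:     0x14 */

	printf("y_m_d = 0x%08x\n", y_m_d);	/* prints 0x20111014 */
	return 0;
}
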
diff --git a/arch/powerpc/platforms/powernv/opal-takeover.S b/arch/powerpc/platforms/powernv/opal-takeover.S
new file mode 100644
index 000000000000..77b48b2b9309
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-takeover.S
@@ -0,0 +1,140 @@
1/*
2 * PowerNV OPAL takeover assembly code, for use by prom_init.c
3 *
4 * Copyright 2011 IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <asm/ppc_asm.h>
13#include <asm/hvcall.h>
14#include <asm/asm-offsets.h>
15#include <asm/opal.h>
16
17#define STK_PARAM(i) (48 + ((i)-3)*8)
18
19#define H_HAL_TAKEOVER 0x5124
20#define H_HAL_TAKEOVER_QUERY_MAGIC -1
21
22 .text
23_GLOBAL(opal_query_takeover)
24 mfcr r0
25 stw r0,8(r1)
26 std r3,STK_PARAM(r3)(r1)
27 std r4,STK_PARAM(r4)(r1)
28 li r3,H_HAL_TAKEOVER
29 li r4,H_HAL_TAKEOVER_QUERY_MAGIC
30 HVSC
31 ld r10,STK_PARAM(r3)(r1)
32 std r4,0(r10)
33 ld r10,STK_PARAM(r4)(r1)
34 std r5,0(r10)
35 lwz r0,8(r1)
36 mtcrf 0xff,r0
37 blr
38
39_GLOBAL(opal_do_takeover)
40 mfcr r0
41 stw r0,8(r1)
42 mflr r0
43 std r0,16(r1)
44 bl __opal_do_takeover
45 ld r0,16(r1)
46 mtlr r0
47 lwz r0,8(r1)
48 mtcrf 0xff,r0
49 blr
50
51__opal_do_takeover:
52 ld r4,0(r3)
53 ld r5,0x8(r3)
54 ld r6,0x10(r3)
55 ld r7,0x18(r3)
56 ld r8,0x20(r3)
57 ld r9,0x28(r3)
58 ld r10,0x30(r3)
59 ld r11,0x38(r3)
60 li r3,H_HAL_TAKEOVER
61 HVSC
62 blr
63
64 .globl opal_secondary_entry
65opal_secondary_entry:
66 mr r31,r3
67 mfmsr r11
68 li r12,(MSR_SF | MSR_ISF)@highest
69 sldi r12,r12,48
70 or r11,r11,r12
71 mtmsrd r11
72 isync
73 mfspr r4,SPRN_PIR
74 std r4,0(r3)
751: HMT_LOW
76 ld r4,8(r3)
77 cmpli cr0,r4,0
78 beq 1b
79 HMT_MEDIUM
801: addi r3,r31,16
81 bl __opal_do_takeover
82 b 1b
83
84_GLOBAL(opal_enter_rtas)
85 mflr r0
86 std r0,16(r1)
87 stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
88
89 /* Because PROM is running in 32b mode, it clobbers the high order half
90 * of all registers that it saves. We therefore save those registers
91 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
92 */
93 SAVE_GPR(2, r1)
94 SAVE_GPR(13, r1)
95 SAVE_8GPRS(14, r1)
96 SAVE_10GPRS(22, r1)
97 mfcr r10
98 mfmsr r11
99 std r10,_CCR(r1)
100 std r11,_MSR(r1)
101
102 /* Get the PROM entrypoint */
103 mtlr r5
104
105 /* Switch MSR to 32 bits mode
106 */
107 li r12,1
108 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
109 andc r11,r11,r12
110 li r12,1
111 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
112 andc r11,r11,r12
113 mtmsrd r11
114 isync
115
116 /* Enter RTAS here... */
117 blrl
118
119 /* Just make sure that r1 top 32 bits didn't get
120 * corrupt by OF
121 */
122 rldicl r1,r1,0,32
123
124 /* Restore the MSR (back to 64 bits) */
125 ld r0,_MSR(r1)
126 MTMSRD(r0)
127 isync
128
129 /* Restore other registers */
130 REST_GPR(2, r1)
131 REST_GPR(13, r1)
132 REST_8GPRS(14, r1)
133 REST_10GPRS(22, r1)
134 ld r4,_CCR(r1)
135 mtcr r4
136
137 addi r1,r1,PROM_FRAME_SIZE
138 ld r0,16(r1)
139 mtlr r0
140 blr
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
new file mode 100644
index 000000000000..4a3f46d8533e
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -0,0 +1,101 @@
1/*
2 * PowerNV OPAL API wrappers
3 *
4 * Copyright 2011 IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <asm/ppc_asm.h>
13#include <asm/hvcall.h>
14#include <asm/asm-offsets.h>
15#include <asm/opal.h>
16
17/* TODO:
18 *
19 * - Trace irqs in/off (needs saving/restoring all args, argh...)
20 * - Get r11 feed up by Dave so I can have better register usage
21 */
22#define OPAL_CALL(name, token) \
23 _GLOBAL(name); \
24 mflr r0; \
25 mfcr r12; \
26 std r0,16(r1); \
27 std r12,8(r1); \
28 std r1,PACAR1(r13); \
29 li r0,0; \
30 mfmsr r12; \
31 ori r0,r0,MSR_EE; \
32 std r12,PACASAVEDMSR(r13); \
33 andc r12,r12,r0; \
34 mtmsrd r12,1; \
35 LOAD_REG_ADDR(r0,.opal_return); \
36 mtlr r0; \
37 li r0,MSR_DR|MSR_IR; \
38 andc r12,r12,r0; \
39 li r0,token; \
40 mtspr SPRN_HSRR1,r12; \
41 LOAD_REG_ADDR(r11,opal); \
42 ld r12,8(r11); \
43 ld r2,0(r11); \
44 mtspr SPRN_HSRR0,r12; \
45 hrfid
46
47_STATIC(opal_return)
48 ld r2,PACATOC(r13);
49 ld r4,8(r1);
50 ld r5,16(r1);
51 ld r6,PACASAVEDMSR(r13);
52 mtspr SPRN_SRR0,r5;
53 mtspr SPRN_SRR1,r6;
54 mtcr r4;
55 rfid
56
57OPAL_CALL(opal_console_write, OPAL_CONSOLE_WRITE);
58OPAL_CALL(opal_console_read, OPAL_CONSOLE_READ);
59OPAL_CALL(opal_console_write_buffer_space, OPAL_CONSOLE_WRITE_BUFFER_SPACE);
60OPAL_CALL(opal_rtc_read, OPAL_RTC_READ);
61OPAL_CALL(opal_rtc_write, OPAL_RTC_WRITE);
62OPAL_CALL(opal_cec_power_down, OPAL_CEC_POWER_DOWN);
63OPAL_CALL(opal_cec_reboot, OPAL_CEC_REBOOT);
64OPAL_CALL(opal_read_nvram, OPAL_READ_NVRAM);
65OPAL_CALL(opal_write_nvram, OPAL_WRITE_NVRAM);
66OPAL_CALL(opal_handle_interrupt, OPAL_HANDLE_INTERRUPT);
67OPAL_CALL(opal_poll_events, OPAL_POLL_EVENTS);
68OPAL_CALL(opal_pci_set_hub_tce_memory, OPAL_PCI_SET_HUB_TCE_MEMORY);
69OPAL_CALL(opal_pci_set_phb_tce_memory, OPAL_PCI_SET_PHB_TCE_MEMORY);
70OPAL_CALL(opal_pci_config_read_byte, OPAL_PCI_CONFIG_READ_BYTE);
71OPAL_CALL(opal_pci_config_read_half_word, OPAL_PCI_CONFIG_READ_HALF_WORD);
72OPAL_CALL(opal_pci_config_read_word, OPAL_PCI_CONFIG_READ_WORD);
73OPAL_CALL(opal_pci_config_write_byte, OPAL_PCI_CONFIG_WRITE_BYTE);
74OPAL_CALL(opal_pci_config_write_half_word, OPAL_PCI_CONFIG_WRITE_HALF_WORD);
75OPAL_CALL(opal_pci_config_write_word, OPAL_PCI_CONFIG_WRITE_WORD);
76OPAL_CALL(opal_set_xive, OPAL_SET_XIVE);
77OPAL_CALL(opal_get_xive, OPAL_GET_XIVE);
78OPAL_CALL(opal_register_exception_handler, OPAL_REGISTER_OPAL_EXCEPTION_HANDLER);
79OPAL_CALL(opal_pci_eeh_freeze_status, OPAL_PCI_EEH_FREEZE_STATUS);
80OPAL_CALL(opal_pci_eeh_freeze_clear, OPAL_PCI_EEH_FREEZE_CLEAR);
81OPAL_CALL(opal_pci_shpc, OPAL_PCI_SHPC);
82OPAL_CALL(opal_pci_phb_mmio_enable, OPAL_PCI_PHB_MMIO_ENABLE);
83OPAL_CALL(opal_pci_set_phb_mem_window, OPAL_PCI_SET_PHB_MEM_WINDOW);
84OPAL_CALL(opal_pci_map_pe_mmio_window, OPAL_PCI_MAP_PE_MMIO_WINDOW);
85OPAL_CALL(opal_pci_set_phb_table_memory, OPAL_PCI_SET_PHB_TABLE_MEMORY);
86OPAL_CALL(opal_pci_set_pe, OPAL_PCI_SET_PE);
87OPAL_CALL(opal_pci_set_peltv, OPAL_PCI_SET_PELTV);
88OPAL_CALL(opal_pci_set_mve, OPAL_PCI_SET_MVE);
89OPAL_CALL(opal_pci_set_mve_enable, OPAL_PCI_SET_MVE_ENABLE);
90OPAL_CALL(opal_pci_get_xive_reissue, OPAL_PCI_GET_XIVE_REISSUE);
91OPAL_CALL(opal_pci_set_xive_reissue, OPAL_PCI_SET_XIVE_REISSUE);
92OPAL_CALL(opal_pci_set_xive_pe, OPAL_PCI_SET_XIVE_PE);
93OPAL_CALL(opal_get_xive_source, OPAL_GET_XIVE_SOURCE);
94OPAL_CALL(opal_get_msi_32, OPAL_GET_MSI_32);
95OPAL_CALL(opal_get_msi_64, OPAL_GET_MSI_64);
96OPAL_CALL(opal_start_cpu, OPAL_START_CPU);
97OPAL_CALL(opal_query_cpu_status, OPAL_QUERY_CPU_STATUS);
98OPAL_CALL(opal_write_oppanel, OPAL_WRITE_OPPANEL);
99OPAL_CALL(opal_pci_map_pe_dma_window, OPAL_PCI_MAP_PE_DMA_WINDOW);
100OPAL_CALL(opal_pci_map_pe_dma_window_real, OPAL_PCI_MAP_PE_DMA_WINDOW_REAL);
101OPAL_CALL(opal_pci_reset, OPAL_PCI_RESET);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
new file mode 100644
index 000000000000..aaa0dba49471
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -0,0 +1,322 @@
1/*
2 * PowerNV OPAL high level interfaces
3 *
4 * Copyright 2011 IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#undef DEBUG
13
14#include <linux/types.h>
15#include <linux/of.h>
16#include <linux/of_platform.h>
17#include <linux/interrupt.h>
18#include <asm/opal.h>
19#include <asm/firmware.h>
20
21#include "powernv.h"
22
23struct opal {
24 u64 base;
25 u64 entry;
26} opal;
27
28static struct device_node *opal_node;
29static DEFINE_SPINLOCK(opal_write_lock);
30extern u64 opal_mc_secondary_handler[];
31
32int __init early_init_dt_scan_opal(unsigned long node,
33 const char *uname, int depth, void *data)
34{
35 const void *basep, *entryp;
36 unsigned long basesz, entrysz;
37 u64 glue;
38
39 if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
40 return 0;
41
42 basep = of_get_flat_dt_prop(node, "opal-base-address", &basesz);
43 entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz);
44
45 if (!basep || !entryp)
46 return 1;
47
48 opal.base = of_read_number(basep, basesz/4);
49 opal.entry = of_read_number(entryp, entrysz/4);
50
51 pr_debug("OPAL Base = 0x%llx (basep=%p basesz=%ld)\n",
52 opal.base, basep, basesz);
53 pr_debug("OPAL Entry = 0x%llx (entryp=%p basesz=%ld)\n",
54 opal.entry, entryp, entrysz);
55
56 powerpc_firmware_features |= FW_FEATURE_OPAL;
57 if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
58 powerpc_firmware_features |= FW_FEATURE_OPALv2;
59 printk("OPAL V2 detected !\n");
60 } else {
61 printk("OPAL V1 detected !\n");
62 }
63
64 /* Hookup some exception handlers. We use the fwnmi area at 0x7000
65 * to provide the glue space to OPAL
66 */
67 glue = 0x7000;
68 opal_register_exception_handler(OPAL_MACHINE_CHECK_HANDLER,
69 __pa(opal_mc_secondary_handler[0]),
70 glue);
71 glue += 128;
72 opal_register_exception_handler(OPAL_HYPERVISOR_MAINTENANCE_HANDLER,
73 0, glue);
74 glue += 128;
75 opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
76
77 return 1;
78}
79
80int opal_get_chars(uint32_t vtermno, char *buf, int count)
81{
82 s64 len, rc;
83 u64 evt;
84
85 if (!opal.entry)
86 return -ENODEV;
87 opal_poll_events(&evt);
88 if ((evt & OPAL_EVENT_CONSOLE_INPUT) == 0)
89 return 0;
90 len = count;
91 rc = opal_console_read(vtermno, &len, buf);
92 if (rc == OPAL_SUCCESS)
93 return len;
94 return 0;
95}
96
97int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
98{
99 int written = 0;
100 s64 len, rc;
101 unsigned long flags;
102 u64 evt;
103
104 if (!opal.entry)
105 return -ENODEV;
106
107 /* We want put_chars to be atomic to avoid mangling of hvsi
108 * packets. To do that, we first test for room and return
109 * -EAGAIN if there isn't enough.
110 *
111 * Unfortunately, opal_console_write_buffer_space() doesn't
112 * appear to work on opal v1, so we just assume there is
113 * enough room and be done with it
114 */
115 spin_lock_irqsave(&opal_write_lock, flags);
116 if (firmware_has_feature(FW_FEATURE_OPALv2)) {
117 rc = opal_console_write_buffer_space(vtermno, &len);
118 if (rc || len < total_len) {
119 spin_unlock_irqrestore(&opal_write_lock, flags);
120 /* Closed -> drop characters */
121 if (rc)
122 return total_len;
123 opal_poll_events(&evt);
124 return -EAGAIN;
125 }
126 }
127
128 /* We still try to handle partial completions, though they
129 * should no longer happen.
130 */
131 rc = OPAL_BUSY;
132 while(total_len > 0 && (rc == OPAL_BUSY ||
133 rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
134 len = total_len;
135 rc = opal_console_write(vtermno, &len, data);
136 if (rc == OPAL_SUCCESS) {
137 total_len -= len;
138 data += len;
139 written += len;
140 }
141 /* This is a bit nasty but we need that for the console to
142 * flush when there aren't any interrupts. We will clean
143 * things a bit later to limit that to synchronous path
144 * such as the kernel console and xmon/udbg
145 */
146 do
147 opal_poll_events(&evt);
148 while(rc == OPAL_SUCCESS && (evt & OPAL_EVENT_CONSOLE_OUTPUT));
149 }
150 spin_unlock_irqrestore(&opal_write_lock, flags);
151 return written;
152}
153
154int opal_machine_check(struct pt_regs *regs)
155{
156 struct opal_machine_check_event *opal_evt = get_paca()->opal_mc_evt;
157 struct opal_machine_check_event evt;
158 const char *level, *sevstr, *subtype;
159 static const char *opal_mc_ue_types[] = {
160 "Indeterminate",
161 "Instruction fetch",
162 "Page table walk ifetch",
163 "Load/Store",
164 "Page table walk Load/Store",
165 };
166 static const char *opal_mc_slb_types[] = {
167 "Indeterminate",
168 "Parity",
169 "Multihit",
170 };
171 static const char *opal_mc_erat_types[] = {
172 "Indeterminate",
173 "Parity",
174 "Multihit",
175 };
176 static const char *opal_mc_tlb_types[] = {
177 "Indeterminate",
178 "Parity",
179 "Multihit",
180 };
181
182 /* Copy the event structure and release the original */
183 evt = *opal_evt;
184 opal_evt->in_use = 0;
185
186 /* Print things out */
187 if (evt.version != OpalMCE_V1) {
188 pr_err("Machine Check Exception, Unknown event version %d !\n",
189 evt.version);
190 return 0;
191 }
192 switch(evt.severity) {
193 case OpalMCE_SEV_NO_ERROR:
194 level = KERN_INFO;
195 sevstr = "Harmless";
196 break;
197 case OpalMCE_SEV_WARNING:
198 level = KERN_WARNING;
199 sevstr = "";
200 break;
201 case OpalMCE_SEV_ERROR_SYNC:
202 level = KERN_ERR;
203 sevstr = "Severe";
204 break;
205 case OpalMCE_SEV_FATAL:
206 default:
207 level = KERN_ERR;
208 sevstr = "Fatal";
209 break;
210 }
211
212 printk("%s%s Machine check interrupt [%s]\n", level, sevstr,
213 evt.disposition == OpalMCE_DISPOSITION_RECOVERED ?
214 "Recovered" : "[Not recovered");
215 printk("%s Initiator: %s\n", level,
216 evt.initiator == OpalMCE_INITIATOR_CPU ? "CPU" : "Unknown");
217 switch(evt.error_type) {
218 case OpalMCE_ERROR_TYPE_UE:
219 subtype = evt.u.ue_error.ue_error_type <
220 ARRAY_SIZE(opal_mc_ue_types) ?
221 opal_mc_ue_types[evt.u.ue_error.ue_error_type]
222 : "Unknown";
223 printk("%s Error type: UE [%s]\n", level, subtype);
224 if (evt.u.ue_error.effective_address_provided)
225 printk("%s Effective address: %016llx\n",
226 level, evt.u.ue_error.effective_address);
227 if (evt.u.ue_error.physical_address_provided)
228 printk("%s Physial address: %016llx\n",
229 level, evt.u.ue_error.physical_address);
230 break;
231 case OpalMCE_ERROR_TYPE_SLB:
232 subtype = evt.u.slb_error.slb_error_type <
233 ARRAY_SIZE(opal_mc_slb_types) ?
234 opal_mc_slb_types[evt.u.slb_error.slb_error_type]
235 : "Unknown";
236 printk("%s Error type: SLB [%s]\n", level, subtype);
237 if (evt.u.slb_error.effective_address_provided)
238 printk("%s Effective address: %016llx\n",
239 level, evt.u.slb_error.effective_address);
240 break;
241 case OpalMCE_ERROR_TYPE_ERAT:
242 subtype = evt.u.erat_error.erat_error_type <
243 ARRAY_SIZE(opal_mc_erat_types) ?
244 opal_mc_erat_types[evt.u.erat_error.erat_error_type]
245 : "Unknown";
246 printk("%s Error type: ERAT [%s]\n", level, subtype);
247 if (evt.u.erat_error.effective_address_provided)
248 printk("%s Effective address: %016llx\n",
249 level, evt.u.erat_error.effective_address);
250 break;
251 case OpalMCE_ERROR_TYPE_TLB:
252 subtype = evt.u.tlb_error.tlb_error_type <
253 ARRAY_SIZE(opal_mc_tlb_types) ?
254 opal_mc_tlb_types[evt.u.tlb_error.tlb_error_type]
255 : "Unknown";
256 printk("%s Error type: TLB [%s]\n", level, subtype);
257 if (evt.u.tlb_error.effective_address_provided)
258 printk("%s Effective address: %016llx\n",
259 level, evt.u.tlb_error.effective_address);
260 break;
261 default:
262 case OpalMCE_ERROR_TYPE_UNKNOWN:
263 printk("%s Error type: Unknown\n", level);
264 break;
265 }
266 return evt.severity == OpalMCE_SEV_FATAL ? 0 : 1;
267}
268
269static irqreturn_t opal_interrupt(int irq, void *data)
270{
271 uint64_t events;
272
273 opal_handle_interrupt(virq_to_hw(irq), &events);
274
275 /* XXX TODO: Do something with the events */
276
277 return IRQ_HANDLED;
278}
279
280static int __init opal_init(void)
281{
282 struct device_node *np, *consoles;
283 const u32 *irqs;
284 int rc, i, irqlen;
285
286 opal_node = of_find_node_by_path("/ibm,opal");
287 if (!opal_node) {
288 pr_warn("opal: Node not found\n");
289 return -ENODEV;
290 }
291 if (firmware_has_feature(FW_FEATURE_OPALv2))
292 consoles = of_find_node_by_path("/ibm,opal/consoles");
293 else
294 consoles = of_node_get(opal_node);
295
296 /* Register serial ports */
297 for_each_child_of_node(consoles, np) {
298 if (strcmp(np->name, "serial"))
299 continue;
300 of_platform_device_create(np, NULL, NULL);
301 }
302 of_node_put(consoles);
303
304 /* Find all OPAL interrupts and request them */
305 irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
306 pr_debug("opal: Found %d interrupts reserved for OPAL\n",
307 irqs ? (irqlen / 4) : 0);
308 for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) {
309 unsigned int hwirq = be32_to_cpup(irqs);
310 unsigned int irq = irq_create_mapping(NULL, hwirq);
311 if (irq == NO_IRQ) {
312 pr_warning("opal: Failed to map irq 0x%x\n", hwirq);
313 continue;
314 }
315 rc = request_irq(irq, opal_interrupt, 0, "opal", NULL);
316 if (rc)
317 pr_warning("opal: Error %d requesting irq %d"
318 " (0x%x)\n", rc, irq, hwirq);
319 }
320 return 0;
321}
322subsys_initcall(opal_init);
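As the comment in opal_put_chars() above explains, the write path checks for buffer space first and returns -EAGAIN when the firmware console cannot take the whole write, so a caller is expected to retry rather than assume a partial write happened. A hedged sketch of such a caller is shown below; example_console_write() is purely illustrative and is not the actual console driver glue.

#include <linux/errno.h>
#include <asm/opal.h>

/* Illustrative only: drain a buffer through opal_put_chars(), retrying on -EAGAIN. */
static void example_console_write(uint32_t vtermno, const char *s, int len)
{
	while (len > 0) {
		int n = opal_put_chars(vtermno, s, len);

		if (n == -EAGAIN)
			continue;	/* firmware buffer full; events were polled, try again */
		if (n <= 0)
			break;		/* closed or error: drop the remainder */
		s += n;
		len -= n;
	}
}
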
diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
new file mode 100644
index 000000000000..4c80f7c77d56
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
@@ -0,0 +1,234 @@
1/*
2 * Support PCI/PCIe on PowerNV platforms
3 *
4 * Currently supports only P5IOC2
5 *
6 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#include <linux/kernel.h>
15#include <linux/pci.h>
16#include <linux/delay.h>
17#include <linux/string.h>
18#include <linux/init.h>
19#include <linux/bootmem.h>
20#include <linux/irq.h>
21#include <linux/io.h>
22#include <linux/msi.h>
23
24#include <asm/sections.h>
25#include <asm/io.h>
26#include <asm/prom.h>
27#include <asm/pci-bridge.h>
28#include <asm/machdep.h>
29#include <asm/ppc-pci.h>
30#include <asm/opal.h>
31#include <asm/iommu.h>
32#include <asm/tce.h>
33#include <asm/abs_addr.h>
34
35#include "powernv.h"
36#include "pci.h"
37
38/* For now, use a fixed amount of TCE memory for each p5ioc2
39 * hub, 16M will do
40 */
41#define P5IOC2_TCE_MEMORY 0x01000000
42
43#ifdef CONFIG_PCI_MSI
44static int pnv_pci_p5ioc2_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
45 unsigned int hwirq, unsigned int is_64,
46 struct msi_msg *msg)
47{
48 if (WARN_ON(!is_64))
49 return -ENXIO;
50 msg->data = hwirq - phb->msi_base;
51 msg->address_hi = 0x10000000;
52 msg->address_lo = 0;
53
54 return 0;
55}
56
57static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb)
58{
59 unsigned int bmap_size;
60 const __be32 *prop = of_get_property(phb->hose->dn,
61 "ibm,opal-msi-ranges", NULL);
62 if (!prop)
63 return;
64
65 /* Don't do MSI's on p5ioc2 PCI-X are they are not properly
66 * verified in HW
67 */
68 if (of_device_is_compatible(phb->hose->dn, "ibm,p5ioc2-pcix"))
69 return;
70 phb->msi_base = be32_to_cpup(prop);
71 phb->msi_count = be32_to_cpup(prop + 1);
72 bmap_size = BITS_TO_LONGS(phb->msi_count) * sizeof(unsigned long);
73 phb->msi_map = zalloc_maybe_bootmem(bmap_size, GFP_KERNEL);
74 if (!phb->msi_map) {
75 pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
76 phb->hose->global_number);
77 return;
78 }
79 phb->msi_setup = pnv_pci_p5ioc2_msi_setup;
80 phb->msi32_support = 0;
81 pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
82 phb->msi_count, phb->msi_base);
83}
84#else
85static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb) { }
86#endif /* CONFIG_PCI_MSI */
87
88static void __devinit pnv_pci_p5ioc2_dma_dev_setup(struct pnv_phb *phb,
89 struct pci_dev *pdev)
90{
91 if (phb->p5ioc2.iommu_table.it_map == NULL)
92 iommu_init_table(&phb->p5ioc2.iommu_table, phb->hose->node);
93
94 set_iommu_table_base(&pdev->dev, &phb->p5ioc2.iommu_table);
95}
96
97static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np,
98 void *tce_mem, u64 tce_size)
99{
100 struct pnv_phb *phb;
101 const u64 *prop64;
102 u64 phb_id;
103 int64_t rc;
104 static int primary = 1;
105
106 pr_info(" Initializing p5ioc2 PHB %s\n", np->full_name);
107
108 prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
109 if (!prop64) {
110 pr_err(" Missing \"ibm,opal-phbid\" property !\n");
111 return;
112 }
113 phb_id = be64_to_cpup(prop64);
114 pr_devel(" PHB-ID : 0x%016llx\n", phb_id);
115 pr_devel(" TCE AT : 0x%016lx\n", __pa(tce_mem));
116 pr_devel(" TCE SZ : 0x%016llx\n", tce_size);
117
118 rc = opal_pci_set_phb_tce_memory(phb_id, __pa(tce_mem), tce_size);
119 if (rc != OPAL_SUCCESS) {
120 pr_err(" Failed to set TCE memory, OPAL error %lld\n", rc);
121 return;
122 }
123
124 phb = alloc_bootmem(sizeof(struct pnv_phb));
125 if (phb) {
126 memset(phb, 0, sizeof(struct pnv_phb));
127 phb->hose = pcibios_alloc_controller(np);
128 }
129 if (!phb || !phb->hose) {
130 pr_err(" Failed to allocate PCI controller\n");
131 return;
132 }
133
134 spin_lock_init(&phb->lock);
135 phb->hose->first_busno = 0;
136 phb->hose->last_busno = 0xff;
137 phb->hose->private_data = phb;
138 phb->opal_id = phb_id;
139 phb->type = PNV_PHB_P5IOC2;
140
141 phb->regs = of_iomap(np, 0);
142
143 if (phb->regs == NULL)
144 pr_err(" Failed to map registers !\n");
145 else {
146 pr_devel(" P_BUID = 0x%08x\n", in_be32(phb->regs + 0x100));
147 pr_devel(" P_IOSZ = 0x%08x\n", in_be32(phb->regs + 0x1b0));
148 pr_devel(" P_IO_ST = 0x%08x\n", in_be32(phb->regs + 0x1e0));
149 pr_devel(" P_MEM1_H = 0x%08x\n", in_be32(phb->regs + 0x1a0));
150 pr_devel(" P_MEM1_L = 0x%08x\n", in_be32(phb->regs + 0x190));
151 pr_devel(" P_MSZ1_L = 0x%08x\n", in_be32(phb->regs + 0x1c0));
152 pr_devel(" P_MEM_ST = 0x%08x\n", in_be32(phb->regs + 0x1d0));
153 pr_devel(" P_MEM2_H = 0x%08x\n", in_be32(phb->regs + 0x2c0));
154 pr_devel(" P_MEM2_L = 0x%08x\n", in_be32(phb->regs + 0x2b0));
155 pr_devel(" P_MSZ2_H = 0x%08x\n", in_be32(phb->regs + 0x2d0));
156 pr_devel(" P_MSZ2_L = 0x%08x\n", in_be32(phb->regs + 0x2e0));
157 }
158
159 /* Interpret the "ranges" property */
160 /* This also maps the I/O region and sets isa_io/mem_base */
161 pci_process_bridge_OF_ranges(phb->hose, np, primary);
162 primary = 0;
163
164 phb->hose->ops = &pnv_pci_ops;
165
166 /* Setup MSI support */
167 pnv_pci_init_p5ioc2_msis(phb);
168
169 /* Setup TCEs */
170 phb->dma_dev_setup = pnv_pci_p5ioc2_dma_dev_setup;
171 pnv_pci_setup_iommu_table(&phb->p5ioc2.iommu_table,
172 tce_mem, tce_size, 0);
173}
174
175void __init pnv_pci_init_p5ioc2_hub(struct device_node *np)
176{
177 struct device_node *phbn;
178 const u64 *prop64;
179 u64 hub_id;
180 void *tce_mem;
181 uint64_t tce_per_phb;
182 int64_t rc;
183 int phb_count = 0;
184
185 pr_info("Probing p5ioc2 IO-Hub %s\n", np->full_name);
186
187 prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
188 if (!prop64) {
189 pr_err(" Missing \"ibm,opal-hubid\" property !\n");
190 return;
191 }
192 hub_id = be64_to_cpup(prop64);
193 pr_info(" HUB-ID : 0x%016llx\n", hub_id);
194
195 /* Currently allocate 16M of TCE memory for every Hub
196 *
197 * XXX TODO: Make it chip local if possible
198 */
199 tce_mem = __alloc_bootmem(P5IOC2_TCE_MEMORY, P5IOC2_TCE_MEMORY,
200 __pa(MAX_DMA_ADDRESS));
201 if (!tce_mem) {
202 pr_err(" Failed to allocate TCE Memory !\n");
203 return;
204 }
205 pr_debug(" TCE : 0x%016lx..0x%016lx\n",
206 __pa(tce_mem), __pa(tce_mem) + P5IOC2_TCE_MEMORY - 1);
207 rc = opal_pci_set_hub_tce_memory(hub_id, __pa(tce_mem),
208 P5IOC2_TCE_MEMORY);
209 if (rc != OPAL_SUCCESS) {
210 pr_err(" Failed to allocate TCE memory, OPAL error %lld\n", rc);
211 return;
212 }
213
214 /* Count child PHBs */
215 for_each_child_of_node(np, phbn) {
216 if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") ||
217 of_device_is_compatible(phbn, "ibm,p5ioc2-pciex"))
218 phb_count++;
219 }
220
221 /* Calculate how much TCE space we can give per PHB */
222 tce_per_phb = __rounddown_pow_of_two(P5IOC2_TCE_MEMORY / phb_count);
223 pr_info(" Allocating %lld MB of TCE memory per PHB\n",
224 tce_per_phb >> 20);
225
226 /* Initialize PHBs */
227 for_each_child_of_node(np, phbn) {
228 if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") ||
229 of_device_is_compatible(phbn, "ibm,p5ioc2-pciex")) {
230 pnv_pci_init_p5ioc2_phb(phbn, tce_mem, tce_per_phb);
231 tce_mem += tce_per_phb;
232 }
233 }
234}
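
The TCE carve-up in pnv_pci_init_p5ioc2_hub() above divides the hub's fixed 16M pool evenly across its child PHBs and then rounds each share down to a power of two. A minimal user-space sketch of that arithmetic, assuming nothing beyond the constants visible above (the helper is a local stand-in for the kernel's __rounddown_pow_of_two()):

/* Illustrative only: how the per-PHB TCE carve-up above behaves for
 * different PHB counts.  rounddown_pow_of_two() is a local stand-in
 * for the kernel's __rounddown_pow_of_two().
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t rounddown_pow_of_two(uint64_t x)
{
	uint64_t r = 1;

	while (r * 2 <= x)
		r *= 2;
	return r;
}

int main(void)
{
	const uint64_t tce_pool = 0x01000000;	/* P5IOC2_TCE_MEMORY: 16M per hub */
	int phbs;

	for (phbs = 1; phbs <= 4; phbs++)
		printf("%d PHB(s): %llu MB each\n", phbs,
		       (unsigned long long)
		       (rounddown_pow_of_two(tce_pool / phbs) >> 20));
	return 0;	/* prints 16, 8, 4 and 4 MB respectively */
}

With three child PHBs, for example, each one ends up with a 4M window rather than an uneven 5.33M share.
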
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
new file mode 100644
index 000000000000..85bb66d7f933
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -0,0 +1,427 @@
1/*
2 * Support PCI/PCIe on PowerNV platforms
3 *
4 * Currently supports only P5IOC2
5 *
6 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#include <linux/kernel.h>
15#include <linux/pci.h>
16#include <linux/delay.h>
17#include <linux/string.h>
18#include <linux/init.h>
19#include <linux/bootmem.h>
20#include <linux/irq.h>
21#include <linux/io.h>
22#include <linux/msi.h>
23
24#include <asm/sections.h>
25#include <asm/io.h>
26#include <asm/prom.h>
27#include <asm/pci-bridge.h>
28#include <asm/machdep.h>
29#include <asm/ppc-pci.h>
30#include <asm/opal.h>
31#include <asm/iommu.h>
32#include <asm/tce.h>
33#include <asm/abs_addr.h>
34
35#include "powernv.h"
36#include "pci.h"
37
38/* Delay in usec */
39#define PCI_RESET_DELAY_US 3000000
40
41#define cfg_dbg(fmt...) do { } while(0)
42//#define cfg_dbg(fmt...) printk(fmt)
43
44#ifdef CONFIG_PCI_MSI
45static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type)
46{
47 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
48 struct pnv_phb *phb = hose->private_data;
49
50 return (phb && phb->msi_map) ? 0 : -ENODEV;
51}
52
53static unsigned int pnv_get_one_msi(struct pnv_phb *phb)
54{
55 unsigned int id;
56
57 spin_lock(&phb->lock);
58 id = find_next_zero_bit(phb->msi_map, phb->msi_count, phb->msi_next);
59 if (id >= phb->msi_count && phb->msi_next)
60 id = find_next_zero_bit(phb->msi_map, phb->msi_count, 0);
61 if (id >= phb->msi_count) {
62 spin_unlock(&phb->lock);
63 return 0;
64 }
65 __set_bit(id, phb->msi_map);
66 spin_unlock(&phb->lock);
67 return id + phb->msi_base;
68}
69
70static void pnv_put_msi(struct pnv_phb *phb, unsigned int hwirq)
71{
72 unsigned int id;
73
74 if (WARN_ON(hwirq < phb->msi_base ||
75 hwirq >= (phb->msi_base + phb->msi_count)))
76 return;
77 id = hwirq - phb->msi_base;
78 spin_lock(&phb->lock);
79 __clear_bit(id, phb->msi_map);
80 spin_unlock(&phb->lock);
81}
82
83static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
84{
85 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
86 struct pnv_phb *phb = hose->private_data;
87 struct msi_desc *entry;
88 struct msi_msg msg;
89 unsigned int hwirq, virq;
90 int rc;
91
92 if (WARN_ON(!phb))
93 return -ENODEV;
94
95 list_for_each_entry(entry, &pdev->msi_list, list) {
96 if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
97 pr_warn("%s: Supports only 64-bit MSIs\n",
98 pci_name(pdev));
99 return -ENXIO;
100 }
101 hwirq = pnv_get_one_msi(phb);
102 if (!hwirq) {
103 pr_warn("%s: Failed to find a free MSI\n",
104 pci_name(pdev));
105 return -ENOSPC;
106 }
107 virq = irq_create_mapping(NULL, hwirq);
108 if (virq == NO_IRQ) {
109 pr_warn("%s: Failed to map MSI to linux irq\n",
110 pci_name(pdev));
111 pnv_put_msi(phb, hwirq);
112 return -ENOMEM;
113 }
114 rc = phb->msi_setup(phb, pdev, hwirq, entry->msi_attrib.is_64,
115 &msg);
116 if (rc) {
117 pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
118 irq_dispose_mapping(virq);
119 pnv_put_msi(phb, hwirq);
120 return rc;
121 }
122 irq_set_msi_desc(virq, entry);
123 write_msi_msg(virq, &msg);
124 }
125 return 0;
126}
127
128static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
129{
130 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
131 struct pnv_phb *phb = hose->private_data;
132 struct msi_desc *entry;
133
134 if (WARN_ON(!phb))
135 return;
136
137 list_for_each_entry(entry, &pdev->msi_list, list) {
138 if (entry->irq == NO_IRQ)
139 continue;
140 irq_set_msi_desc(entry->irq, NULL);
141 pnv_put_msi(phb, virq_to_hw(entry->irq));
142 irq_dispose_mapping(entry->irq);
143 }
144}
145#endif /* CONFIG_PCI_MSI */
146
147static void pnv_pci_config_check_eeh(struct pnv_phb *phb, struct pci_bus *bus,
148 u32 bdfn)
149{
150 s64 rc;
151 u8 fstate;
152 u16 pcierr;
153 u32 pe_no;
154
155 /* Get PE# if we support IODA */
156 pe_no = phb->bdfn_to_pe ? phb->bdfn_to_pe(phb, bus, bdfn & 0xff) : 0;
157
158 /* Read freeze status */
159 rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no, &fstate, &pcierr,
160 NULL);
161 if (rc) {
162 pr_warning("PCI %d: Failed to read EEH status for PE#%d,"
163 " err %lld\n", phb->hose->global_number, pe_no, rc);
164 return;
165 }
166 cfg_dbg(" -> EEH check, bdfn=%04x PE%d fstate=%x\n",
167 bdfn, pe_no, fstate);
168 if (fstate != 0) {
169 rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
170 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
171 if (rc) {
172 pr_warning("PCI %d: Failed to clear EEH freeze state"
173 " for PE#%d, err %lld\n",
174 phb->hose->global_number, pe_no, rc);
175 }
176 }
177}
178
179static int pnv_pci_read_config(struct pci_bus *bus,
180 unsigned int devfn,
181 int where, int size, u32 *val)
182{
183 struct pci_controller *hose = pci_bus_to_host(bus);
184 struct pnv_phb *phb = hose->private_data;
185 u32 bdfn = (((uint64_t)bus->number) << 8) | devfn;
186 s64 rc;
187
188 if (hose == NULL)
189 return PCIBIOS_DEVICE_NOT_FOUND;
190
191 switch (size) {
192 case 1: {
193 u8 v8;
194 rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
195 *val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
196 break;
197 }
198 case 2: {
199 u16 v16;
200 rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
201 &v16);
202 *val = (rc == OPAL_SUCCESS) ? v16 : 0xffff;
203 break;
204 }
205 case 4: {
206 u32 v32;
207 rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
208 *val = (rc == OPAL_SUCCESS) ? v32 : 0xffffffff;
209 break;
210 }
211 default:
212 return PCIBIOS_FUNC_NOT_SUPPORTED;
213 }
214 cfg_dbg("pnv_pci_read_config bus: %x devfn: %x +%x/%x -> %08x\n",
215 bus->number, devfn, where, size, *val);
216
217 /* Check if the PHB got frozen due to an error (no response) */
218 pnv_pci_config_check_eeh(phb, bus, bdfn);
219
220 return PCIBIOS_SUCCESSFUL;
221}
222
223static int pnv_pci_write_config(struct pci_bus *bus,
224 unsigned int devfn,
225 int where, int size, u32 val)
226{
227 struct pci_controller *hose = pci_bus_to_host(bus);
228 struct pnv_phb *phb = hose->private_data;
229 u32 bdfn = (((uint64_t)bus->number) << 8) | devfn;
230
231 if (hose == NULL)
232 return PCIBIOS_DEVICE_NOT_FOUND;
233
234 cfg_dbg("pnv_pci_write_config bus: %x devfn: %x +%x/%x -> %08x\n",
235 bus->number, devfn, where, size, val);
236 switch (size) {
237 case 1:
238 opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
239 break;
240 case 2:
241 opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
242 break;
243 case 4:
244 opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
245 break;
246 default:
247 return PCIBIOS_FUNC_NOT_SUPPORTED;
248 }
249 /* Check if the PHB got frozen due to an error (no response) */
250 pnv_pci_config_check_eeh(phb, bus, bdfn);
251
252 return PCIBIOS_SUCCESSFUL;
253}
254
255struct pci_ops pnv_pci_ops = {
256 .read = pnv_pci_read_config,
257 .write = pnv_pci_write_config,
258};
259
260static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
261 unsigned long uaddr, enum dma_data_direction direction,
262 struct dma_attrs *attrs)
263{
264 u64 proto_tce;
265 u64 *tcep;
266 u64 rpn;
267
268 proto_tce = TCE_PCI_READ; // Read allowed
269
270 if (direction != DMA_TO_DEVICE)
271 proto_tce |= TCE_PCI_WRITE;
272
273 tcep = ((u64 *)tbl->it_base) + index;
274
275 while (npages--) {
276 /* can't move this out since we might cross LMB boundary */
277 rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
278 *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
279
280 uaddr += TCE_PAGE_SIZE;
281 tcep++;
282 }
283 return 0;
284}
285
286static void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
287{
288 u64 *tcep = ((u64 *)tbl->it_base) + index;
289
290 while (npages--)
291 *(tcep++) = 0;
292}
293
294void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
295 void *tce_mem, u64 tce_size,
296 u64 dma_offset)
297{
298 tbl->it_blocksize = 16;
299 tbl->it_base = (unsigned long)tce_mem;
300 tbl->it_offset = dma_offset >> IOMMU_PAGE_SHIFT;
301 tbl->it_index = 0;
302 tbl->it_size = tce_size >> 3;
303 tbl->it_busno = 0;
304 tbl->it_type = TCE_PCI;
305}
306
307static struct iommu_table * __devinit
308pnv_pci_setup_bml_iommu(struct pci_controller *hose)
309{
310 struct iommu_table *tbl;
311 const __be64 *basep;
312 const __be32 *sizep;
313
314 basep = of_get_property(hose->dn, "linux,tce-base", NULL);
315 sizep = of_get_property(hose->dn, "linux,tce-size", NULL);
316 if (basep == NULL || sizep == NULL) {
317 pr_err("PCI: %s has missing tce entries !\n", hose->dn->full_name);
318 return NULL;
319 }
320 tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, hose->node);
321 if (WARN_ON(!tbl))
322 return NULL;
323 pnv_pci_setup_iommu_table(tbl, __va(be64_to_cpup(basep)),
324 be32_to_cpup(sizep), 0);
325 iommu_init_table(tbl, hose->node);
326 return tbl;
327}
328
329static void __devinit pnv_pci_dma_fallback_setup(struct pci_controller *hose,
330 struct pci_dev *pdev)
331{
332 struct device_node *np = pci_bus_to_OF_node(hose->bus);
333 struct pci_dn *pdn;
334
335 if (np == NULL)
336 return;
337 pdn = PCI_DN(np);
338 if (!pdn->iommu_table)
339 pdn->iommu_table = pnv_pci_setup_bml_iommu(hose);
340 if (!pdn->iommu_table)
341 return;
342 set_iommu_table_base(&pdev->dev, pdn->iommu_table);
343}
344
345static void __devinit pnv_pci_dma_dev_setup(struct pci_dev *pdev)
346{
347 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
348 struct pnv_phb *phb = hose->private_data;
349
350 /* If we have no phb structure, try to setup a fallback based on
351 * the device-tree (RTAS PCI for example)
352 */
353 if (phb && phb->dma_dev_setup)
354 phb->dma_dev_setup(phb, pdev);
355 else
356 pnv_pci_dma_fallback_setup(hose, pdev);
357}
358
359static int pnv_pci_probe_mode(struct pci_bus *bus)
360{
361 struct pci_controller *hose = pci_bus_to_host(bus);
362 const __be64 *tstamp;
363 u64 now, target;
364
365
366 /* We hijack this as a way to ensure we have waited long
367 * enough since the reset was lifted on the PCI bus
368 */
369 if (bus != hose->bus)
370 return PCI_PROBE_NORMAL;
371 tstamp = of_get_property(hose->dn, "reset-clear-timestamp", NULL);
372 if (!tstamp || !*tstamp)
373 return PCI_PROBE_NORMAL;
374
375 now = mftb() / tb_ticks_per_usec;
376 target = (be64_to_cpup(tstamp) / tb_ticks_per_usec)
377 + PCI_RESET_DELAY_US;
378
379 pr_devel("pci %04d: Reset target: 0x%llx now: 0x%llx\n",
380 hose->global_number, target, now);
381
382 if (now < target)
383 msleep((target - now + 999) / 1000);
384
385 return PCI_PROBE_NORMAL;
386}
387
388void __init pnv_pci_init(void)
389{
390 struct device_node *np;
391
392 pci_set_flags(PCI_CAN_SKIP_ISA_ALIGN);
393
394 /* We do not want to just probe */
395 pci_probe_only = 0;
396
397 /* OPAL absent, try POPAL first then RTAS detection of PHBs */
398 if (!firmware_has_feature(FW_FEATURE_OPAL)) {
399#ifdef CONFIG_PPC_POWERNV_RTAS
400 init_pci_config_tokens();
401 find_and_init_phbs();
402#endif /* CONFIG_PPC_POWERNV_RTAS */
403 } else {
404 /* OPAL is here, do our normal stuff */
405
406 /* Look for p5ioc2 IO-Hubs */
407 for_each_compatible_node(np, NULL, "ibm,p5ioc2")
408 pnv_pci_init_p5ioc2_hub(np);
409 }
410
411 /* Setup the linkage between OF nodes and PHBs */
412 pci_devs_phb_init();
413
414 /* Configure IOMMU DMA hooks */
415 ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup;
416 ppc_md.tce_build = pnv_tce_build;
417 ppc_md.tce_free = pnv_tce_free;
418 ppc_md.pci_probe_mode = pnv_pci_probe_mode;
419 set_pci_dma_ops(&dma_iommu_ops);
420
421 /* Configure MSIs */
422#ifdef CONFIG_PCI_MSI
423 ppc_md.msi_check_device = pnv_msi_check_device;
424 ppc_md.setup_msi_irqs = pnv_setup_msi_irqs;
425 ppc_md.teardown_msi_irqs = pnv_teardown_msi_irqs;
426#endif
427}
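
pnv_get_one_msi() above scans the MSI bitmap from msi_next to the end and, if that turns up nothing, wraps around once from bit 0; when both passes fail it returns 0, which the caller treats as "no MSI available". A rough user-space model of that search, with plain loops standing in for find_next_zero_bit()/__set_bit() and with the phb->lock omitted:

/* Rough user-space model of the search in pnv_get_one_msi().  find_free()
 * stands in for find_next_zero_bit(); msi_next is started part-way in so
 * the wrap-around path is exercised.  Illustrative only.
 */
#include <stdio.h>
#include <stdbool.h>

#define MSI_COUNT	8

static bool msi_map[MSI_COUNT];
static unsigned int msi_next = 3;

static unsigned int find_free(unsigned int from)
{
	unsigned int i;

	for (i = from; i < MSI_COUNT; i++)
		if (!msi_map[i])
			return i;
	return MSI_COUNT;
}

static int get_one_msi(void)
{
	unsigned int id = find_free(msi_next);

	if (id >= MSI_COUNT && msi_next)	/* wrap around once */
		id = find_free(0);
	if (id >= MSI_COUNT)
		return -1;			/* pool exhausted */
	msi_map[id] = true;
	return id;
}

int main(void)
{
	int i;

	for (i = 0; i <= MSI_COUNT; i++)
		printf("allocated id %d\n", get_one_msi());
	return 0;	/* ids 3..7, then 0..2, then -1 */
}
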
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
new file mode 100644
index 000000000000..d4dbc4950936
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -0,0 +1,48 @@
1#ifndef __POWERNV_PCI_H
2#define __POWERNV_PCI_H
3
4struct pci_dn;
5
6enum pnv_phb_type {
7 PNV_PHB_P5IOC2,
8 PNV_PHB_IODA1,
9 PNV_PHB_IODA2,
10};
11
12struct pnv_phb {
13 struct pci_controller *hose;
14 enum pnv_phb_type type;
15 u64 opal_id;
16 void __iomem *regs;
17 spinlock_t lock;
18
19#ifdef CONFIG_PCI_MSI
20 unsigned long *msi_map;
21 unsigned int msi_base;
22 unsigned int msi_count;
23 unsigned int msi_next;
24 unsigned int msi32_support;
25#endif
26 int (*msi_setup)(struct pnv_phb *phb, struct pci_dev *dev,
27 unsigned int hwirq, unsigned int is_64,
28 struct msi_msg *msg);
29 void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
30 void (*fixup_phb)(struct pci_controller *hose);
31 u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
32
33 union {
34 struct {
35 struct iommu_table iommu_table;
36 } p5ioc2;
37 };
38};
39
40extern struct pci_ops pnv_pci_ops;
41
42extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
43 void *tce_mem, u64 tce_size,
44 u64 dma_offset);
45extern void pnv_pci_init_p5ioc2_hub(struct device_node *np);
46
47
48#endif /* __POWERNV_PCI_H */
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
new file mode 100644
index 000000000000..8a9df7f9667e
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -0,0 +1,16 @@
1#ifndef _POWERNV_H
2#define _POWERNV_H
3
4#ifdef CONFIG_SMP
5extern void pnv_smp_init(void);
6#else
7static inline void pnv_smp_init(void) { }
8#endif
9
10#ifdef CONFIG_PCI
11extern void pnv_pci_init(void);
12#else
13static inline void pnv_pci_init(void) { }
14#endif
15
16#endif /* _POWERNV_H */
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
new file mode 100644
index 000000000000..467bd4ac6824
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -0,0 +1,196 @@
1/*
2 * PowerNV setup code.
3 *
4 * Copyright 2011 IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#undef DEBUG
13
14#include <linux/cpu.h>
15#include <linux/errno.h>
16#include <linux/sched.h>
17#include <linux/kernel.h>
18#include <linux/tty.h>
19#include <linux/reboot.h>
20#include <linux/init.h>
21#include <linux/console.h>
22#include <linux/delay.h>
23#include <linux/irq.h>
24#include <linux/seq_file.h>
25#include <linux/of.h>
26#include <linux/interrupt.h>
27#include <linux/bug.h>
28
29#include <asm/machdep.h>
30#include <asm/firmware.h>
31#include <asm/xics.h>
32#include <asm/rtas.h>
33#include <asm/opal.h>
34#include <asm/xics.h>
35
36#include "powernv.h"
37
38static void __init pnv_setup_arch(void)
39{
40 /* Initialize SMP */
41 pnv_smp_init();
42
43 /* Setup PCI */
44 pnv_pci_init();
45
46 /* Setup RTC and NVRAM callbacks */
47 if (firmware_has_feature(FW_FEATURE_OPAL))
48 opal_nvram_init();
49
50 /* Enable NAP mode */
51 powersave_nap = 1;
52
53 /* XXX PMCS */
54}
55
56static void __init pnv_init_early(void)
57{
58#ifdef CONFIG_HVC_OPAL
59 if (firmware_has_feature(FW_FEATURE_OPAL))
60 hvc_opal_init_early();
61 else
62#endif
63 add_preferred_console("hvc", 0, NULL);
64}
65
66static void __init pnv_init_IRQ(void)
67{
68 xics_init();
69
70 WARN_ON(!ppc_md.get_irq);
71}
72
73static void pnv_show_cpuinfo(struct seq_file *m)
74{
75 struct device_node *root;
76 const char *model = "";
77
78 root = of_find_node_by_path("/");
79 if (root)
80 model = of_get_property(root, "model", NULL);
81 seq_printf(m, "machine\t\t: PowerNV %s\n", model);
82 if (firmware_has_feature(FW_FEATURE_OPALv2))
83 seq_printf(m, "firmware\t: OPAL v2\n");
84 else if (firmware_has_feature(FW_FEATURE_OPAL))
85 seq_printf(m, "firmware\t: OPAL v1\n");
86 else
87 seq_printf(m, "firmware\t: BML\n");
88 of_node_put(root);
89}
90
91static void __noreturn pnv_restart(char *cmd)
92{
93 long rc = OPAL_BUSY;
94
95 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
96 rc = opal_cec_reboot();
97 if (rc == OPAL_BUSY_EVENT)
98 opal_poll_events(NULL);
99 else
100 mdelay(10);
101 }
102 for (;;)
103 opal_poll_events(NULL);
104}
105
106static void __noreturn pnv_power_off(void)
107{
108 long rc = OPAL_BUSY;
109
110 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
111 rc = opal_cec_power_down(0);
112 if (rc == OPAL_BUSY_EVENT)
113 opal_poll_events(NULL);
114 else
115 mdelay(10);
116 }
117 for (;;)
118 opal_poll_events(NULL);
119}
120
121static void __noreturn pnv_halt(void)
122{
123 pnv_power_off();
124}
125
126static void pnv_progress(char *s, unsigned short hex)
127{
128}
129
130#ifdef CONFIG_KEXEC
131static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
132{
133 xics_kexec_teardown_cpu(secondary);
134}
135#endif /* CONFIG_KEXEC */
136
137static void __init pnv_setup_machdep_opal(void)
138{
139 ppc_md.get_boot_time = opal_get_boot_time;
140 ppc_md.get_rtc_time = opal_get_rtc_time;
141 ppc_md.set_rtc_time = opal_set_rtc_time;
142 ppc_md.restart = pnv_restart;
143 ppc_md.power_off = pnv_power_off;
144 ppc_md.halt = pnv_halt;
145 ppc_md.machine_check_exception = opal_machine_check;
146}
147
148#ifdef CONFIG_PPC_POWERNV_RTAS
149static void __init pnv_setup_machdep_rtas(void)
150{
151 if (rtas_token("get-time-of-day") != RTAS_UNKNOWN_SERVICE) {
152 ppc_md.get_boot_time = rtas_get_boot_time;
153 ppc_md.get_rtc_time = rtas_get_rtc_time;
154 ppc_md.set_rtc_time = rtas_set_rtc_time;
155 }
156 ppc_md.restart = rtas_restart;
157 ppc_md.power_off = rtas_power_off;
158 ppc_md.halt = rtas_halt;
159}
160#endif /* CONFIG_PPC_POWERNV_RTAS */
161
162static int __init pnv_probe(void)
163{
164 unsigned long root = of_get_flat_dt_root();
165
166 if (!of_flat_dt_is_compatible(root, "ibm,powernv"))
167 return 0;
168
169 hpte_init_native();
170
171 if (firmware_has_feature(FW_FEATURE_OPAL))
172 pnv_setup_machdep_opal();
173#ifdef CONFIG_PPC_POWERNV_RTAS
174 else if (rtas.base)
175 pnv_setup_machdep_rtas();
176#endif /* CONFIG_PPC_POWERNV_RTAS */
177
178 pr_debug("PowerNV detected !\n");
179
180 return 1;
181}
182
183define_machine(powernv) {
184 .name = "PowerNV",
185 .probe = pnv_probe,
186 .init_early = pnv_init_early,
187 .setup_arch = pnv_setup_arch,
188 .init_IRQ = pnv_init_IRQ,
189 .show_cpuinfo = pnv_show_cpuinfo,
190 .progress = pnv_progress,
191 .power_save = power7_idle,
192 .calibrate_decr = generic_calibrate_decr,
193#ifdef CONFIG_KEXEC
194 .kexec_cpu_down = pnv_kexec_cpu_down,
195#endif
196};
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
new file mode 100644
index 000000000000..e87736685243
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -0,0 +1,182 @@
1/*
2 * SMP support for PowerNV machines.
3 *
4 * Copyright 2011 IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/sched.h>
15#include <linux/smp.h>
16#include <linux/interrupt.h>
17#include <linux/delay.h>
18#include <linux/init.h>
19#include <linux/spinlock.h>
20#include <linux/cpu.h>
21
22#include <asm/irq.h>
23#include <asm/smp.h>
24#include <asm/paca.h>
25#include <asm/machdep.h>
26#include <asm/cputable.h>
27#include <asm/firmware.h>
28#include <asm/system.h>
29#include <asm/rtas.h>
30#include <asm/vdso_datapage.h>
31#include <asm/cputhreads.h>
32#include <asm/xics.h>
33#include <asm/opal.h>
34
35#include "powernv.h"
36
37#ifdef DEBUG
38#include <asm/udbg.h>
39#define DBG(fmt...) udbg_printf(fmt)
40#else
41#define DBG(fmt...)
42#endif
43
44static void __cpuinit pnv_smp_setup_cpu(int cpu)
45{
46 if (cpu != boot_cpuid)
47 xics_setup_cpu();
48}
49
50static int pnv_smp_cpu_bootable(unsigned int nr)
51{
52 /* Special case - we inhibit secondary thread startup
53 * during boot if the user requests it.
54 */
55 if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
56 if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
57 return 0;
58 if (smt_enabled_at_boot
59 && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
60 return 0;
61 }
62
63 return 1;
64}
65
66int __devinit pnv_smp_kick_cpu(int nr)
67{
68 unsigned int pcpu = get_hard_smp_processor_id(nr);
69 unsigned long start_here = __pa(*((unsigned long *)
70 generic_secondary_smp_init));
71 long rc;
72
73 BUG_ON(nr < 0 || nr >= NR_CPUS);
74
75	/* On OPAL v2 the CPUs are still spinning inside OPAL itself,
76 * get them back now
77 */
78 if (firmware_has_feature(FW_FEATURE_OPALv2)) {
79 pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu);
80 rc = opal_start_cpu(pcpu, start_here);
81 if (rc != OPAL_SUCCESS)
82 pr_warn("OPAL Error %ld starting CPU %d\n",
83 rc, nr);
84 }
85 return smp_generic_kick_cpu(nr);
86}
87
88#ifdef CONFIG_HOTPLUG_CPU
89
90static int pnv_smp_cpu_disable(void)
91{
92 int cpu = smp_processor_id();
93
94 /* This is identical to pSeries... might consolidate by
95 * moving migrate_irqs_away to a ppc_md with default to
96 * the generic fixup_irqs. --BenH.
97 */
98 set_cpu_online(cpu, false);
99 vdso_data->processorCount--;
100 if (cpu == boot_cpuid)
101 boot_cpuid = cpumask_any(cpu_online_mask);
102 xics_migrate_irqs_away();
103 return 0;
104}
105
106static void pnv_smp_cpu_kill_self(void)
107{
108 unsigned int cpu;
109
110 /* If powersave_nap is enabled, use NAP mode, else just
111 * spin aimlessly
112 */
113 if (!powersave_nap) {
114 generic_mach_cpu_die();
115 return;
116 }
117
118 /* Standard hot unplug procedure */
119 local_irq_disable();
120 idle_task_exit();
121 current->active_mm = NULL; /* for sanity */
122 cpu = smp_processor_id();
123 DBG("CPU%d offline\n", cpu);
124 generic_set_cpu_dead(cpu);
125 smp_wmb();
126
127 /* We don't want to take decrementer interrupts while we are offline,
128 * so clear LPCR:PECE1. We keep PECE2 enabled.
129 */
130 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
131 while (!generic_check_cpu_restart(cpu)) {
132 power7_idle();
133 if (!generic_check_cpu_restart(cpu)) {
134 DBG("CPU%d Unexpected exit while offline !\n", cpu);
135 /* We may be getting an IPI, so we re-enable
136 * interrupts to process it, it will be ignored
137 * since we aren't online (hopefully)
138 */
139 local_irq_enable();
140 local_irq_disable();
141 }
142 }
143 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
144 DBG("CPU%d coming online...\n", cpu);
145}
146
147#endif /* CONFIG_HOTPLUG_CPU */
148
149static struct smp_ops_t pnv_smp_ops = {
150 .message_pass = smp_muxed_ipi_message_pass,
151 .cause_ipi = NULL, /* Filled at runtime by xics_smp_probe() */
152 .probe = xics_smp_probe,
153 .kick_cpu = pnv_smp_kick_cpu,
154 .setup_cpu = pnv_smp_setup_cpu,
155 .cpu_bootable = pnv_smp_cpu_bootable,
156#ifdef CONFIG_HOTPLUG_CPU
157 .cpu_disable = pnv_smp_cpu_disable,
158 .cpu_die = generic_cpu_die,
159#endif /* CONFIG_HOTPLUG_CPU */
160};
161
162/* This is called very early during platform setup_arch */
163void __init pnv_smp_init(void)
164{
165 smp_ops = &pnv_smp_ops;
166
167 /* XXX We don't yet have a proper entry point from HAL, for
168 * now we rely on kexec-style entry from BML
169 */
170
171#ifdef CONFIG_PPC_RTAS
172 /* Non-lpar has additional take/give timebase */
173 if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
174 smp_ops->give_timebase = rtas_give_timebase;
175 smp_ops->take_timebase = rtas_take_timebase;
176 }
177#endif /* CONFIG_PPC_RTAS */
178
179#ifdef CONFIG_HOTPLUG_CPU
180 ppc_md.cpu_die = pnv_smp_cpu_kill_self;
181#endif
182}
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
index dfe316b161a9..476d9d9b2405 100644
--- a/arch/powerpc/platforms/ps3/Kconfig
+++ b/arch/powerpc/platforms/ps3/Kconfig
@@ -148,4 +148,16 @@ config PS3_LPM
148 profiling support of the Cell processor with programs like 148 profiling support of the Cell processor with programs like
149 oprofile and perfmon2, then say Y or M, otherwise say N. 149 oprofile and perfmon2, then say Y or M, otherwise say N.
150 150
151config PS3GELIC_UDBG
152 bool "PS3 udbg output via UDP broadcasts on Ethernet"
153 depends on PPC_PS3
154 help
155 Enables udbg early debugging output by sending broadcast UDP
156 via the Ethernet port (UDP port number 18194).
157
158 This driver uses a trivial implementation and is independent
159 from the main network driver.
160
161 If in doubt, say N here.
162
151endmenu 163endmenu
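
The help text above describes broadcast UDP output on port 18194, so any listener bound to that port on the local network will see the early debug log. A minimal host-side listener using plain POSIX sockets, purely illustrative and not part of this patch:

/* Minimal host-side listener for the PS3GELIC_UDBG broadcasts described
 * above (UDP port 18194).  Plain POSIX sockets; illustrative only.
 */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	char buf[1500];
	struct sockaddr_in sa;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&sa, 0, sizeof(sa));
	sa.sin_family = AF_INET;
	sa.sin_port = htons(18194);
	sa.sin_addr.s_addr = htonl(INADDR_ANY);

	if (fd < 0 || bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		perror("udbg listener");
		return 1;
	}
	for (;;) {
		ssize_t n = recv(fd, buf, sizeof(buf), 0);

		if (n > 0) {
			fwrite(buf, 1, n, stdout);
			fflush(stdout);
		}
	}
}

Depending on the netcat variant installed, something like nc -ul 18194 gives a comparable quick test.
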
diff --git a/arch/powerpc/platforms/ps3/Makefile b/arch/powerpc/platforms/ps3/Makefile
index ac1bdf844eca..02b9e636dab7 100644
--- a/arch/powerpc/platforms/ps3/Makefile
+++ b/arch/powerpc/platforms/ps3/Makefile
@@ -2,6 +2,7 @@ obj-y += setup.o mm.o time.o hvcall.o htab.o repository.o
2obj-y += interrupt.o exports.o os-area.o 2obj-y += interrupt.o exports.o os-area.o
3obj-y += system-bus.o 3obj-y += system-bus.o
4 4
5obj-$(CONFIG_PS3GELIC_UDBG) += gelic_udbg.o
5obj-$(CONFIG_SMP) += smp.o 6obj-$(CONFIG_SMP) += smp.o
6obj-$(CONFIG_SPU_BASE) += spu.o 7obj-$(CONFIG_SPU_BASE) += spu.o
7obj-y += device-init.o 8obj-y += device-init.o
diff --git a/arch/powerpc/platforms/ps3/gelic_udbg.c b/arch/powerpc/platforms/ps3/gelic_udbg.c
new file mode 100644
index 000000000000..20b46a19a48f
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/gelic_udbg.c
@@ -0,0 +1,273 @@
1/*
2 * udbg debug output routine via GELIC UDP broadcasts
3 *
4 * Copyright (C) 2007 Sony Computer Entertainment Inc.
5 * Copyright 2006, 2007 Sony Corporation
6 * Copyright (C) 2010 Hector Martin <hector@marcansoft.com>
7 * Copyright (C) 2011 Andre Heider <a.heider@gmail.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
13 *
14 */
15
16#include <asm/io.h>
17#include <asm/udbg.h>
18#include <asm/lv1call.h>
19
20#define GELIC_BUS_ID 1
21#define GELIC_DEVICE_ID 0
22#define GELIC_DEBUG_PORT 18194
23#define GELIC_MAX_MESSAGE_SIZE 1000
24
25#define GELIC_LV1_GET_MAC_ADDRESS 1
26#define GELIC_LV1_GET_VLAN_ID 4
27#define GELIC_LV1_VLAN_TX_ETHERNET_0 2
28
29#define GELIC_DESCR_DMA_STAT_MASK 0xf0000000
30#define GELIC_DESCR_DMA_CARDOWNED 0xa0000000
31
32#define GELIC_DESCR_TX_DMA_IKE 0x00080000
33#define GELIC_DESCR_TX_DMA_NO_CHKSUM 0x00000000
34#define GELIC_DESCR_TX_DMA_FRAME_TAIL 0x00040000
35
36#define GELIC_DESCR_DMA_CMD_NO_CHKSUM (GELIC_DESCR_DMA_CARDOWNED | \
37 GELIC_DESCR_TX_DMA_IKE | \
38 GELIC_DESCR_TX_DMA_NO_CHKSUM)
39
40static u64 bus_addr;
41
42struct gelic_descr {
43 /* as defined by the hardware */
44 __be32 buf_addr;
45 __be32 buf_size;
46 __be32 next_descr_addr;
47 __be32 dmac_cmd_status;
48 __be32 result_size;
49 __be32 valid_size; /* all zeroes for tx */
50 __be32 data_status;
51 __be32 data_error; /* all zeroes for tx */
52} __attribute__((aligned(32)));
53
54struct debug_block {
55 struct gelic_descr descr;
56 u8 pkt[1520];
57} __packed;
58
59struct ethhdr {
60 u8 dest[6];
61 u8 src[6];
62 u16 type;
63} __packed;
64
65struct vlantag {
66 u16 vlan;
67 u16 subtype;
68} __packed;
69
70struct iphdr {
71 u8 ver_len;
72 u8 dscp_ecn;
73 u16 total_length;
74 u16 ident;
75 u16 frag_off_flags;
76 u8 ttl;
77 u8 proto;
78 u16 checksum;
79 u32 src;
80 u32 dest;
81} __packed;
82
83struct udphdr {
84 u16 src;
85 u16 dest;
86 u16 len;
87 u16 checksum;
88} __packed;
89
90static __iomem struct ethhdr *h_eth;
91static __iomem struct vlantag *h_vlan;
92static __iomem struct iphdr *h_ip;
93static __iomem struct udphdr *h_udp;
94
95static __iomem char *pmsg;
96static __iomem char *pmsgc;
97
98static __iomem struct debug_block dbg __attribute__((aligned(32)));
99
100static int header_size;
101
102static void map_dma_mem(int bus_id, int dev_id, void *start, size_t len,
103 u64 *real_bus_addr)
104{
105 s64 result;
106 u64 real_addr = ((u64)start) & 0x0fffffffffffffffUL;
107 u64 real_end = real_addr + len;
108 u64 map_start = real_addr & ~0xfff;
109 u64 map_end = (real_end + 0xfff) & ~0xfff;
110 u64 bus_addr = 0;
111
112 u64 flags = 0xf800000000000000UL;
113
114 result = lv1_allocate_device_dma_region(bus_id, dev_id,
115 map_end - map_start, 12, 0,
116 &bus_addr);
117 if (result)
118 lv1_panic(0);
119
120 result = lv1_map_device_dma_region(bus_id, dev_id, map_start,
121 bus_addr, map_end - map_start,
122 flags);
123 if (result)
124 lv1_panic(0);
125
126 *real_bus_addr = bus_addr + real_addr - map_start;
127}
128
129static int unmap_dma_mem(int bus_id, int dev_id, u64 bus_addr, size_t len)
130{
131 s64 result;
132 u64 real_bus_addr;
133
134 real_bus_addr = bus_addr & ~0xfff;
135 len += bus_addr - real_bus_addr;
136 len = (len + 0xfff) & ~0xfff;
137
138 result = lv1_unmap_device_dma_region(bus_id, dev_id, real_bus_addr,
139 len);
140 if (result)
141 return result;
142
143 return lv1_free_device_dma_region(bus_id, dev_id, real_bus_addr);
144}
145
146static void gelic_debug_init(void)
147{
148 s64 result;
149 u64 v2;
150 u64 mac;
151 u64 vlan_id;
152
153 result = lv1_open_device(GELIC_BUS_ID, GELIC_DEVICE_ID, 0);
154 if (result)
155 lv1_panic(0);
156
157 map_dma_mem(GELIC_BUS_ID, GELIC_DEVICE_ID, &dbg, sizeof(dbg),
158 &bus_addr);
159
160 memset(&dbg, 0, sizeof(dbg));
161
162 dbg.descr.buf_addr = bus_addr + offsetof(struct debug_block, pkt);
163
164 wmb();
165
166 result = lv1_net_control(GELIC_BUS_ID, GELIC_DEVICE_ID,
167 GELIC_LV1_GET_MAC_ADDRESS, 0, 0, 0,
168 &mac, &v2);
169 if (result)
170 lv1_panic(0);
171
172 mac <<= 16;
173
174 h_eth = (struct ethhdr *)dbg.pkt;
175
176 memset(&h_eth->dest, 0xff, 6);
177 memcpy(&h_eth->src, &mac, 6);
178
179 header_size = sizeof(struct ethhdr);
180
181 result = lv1_net_control(GELIC_BUS_ID, GELIC_DEVICE_ID,
182 GELIC_LV1_GET_VLAN_ID,
183 GELIC_LV1_VLAN_TX_ETHERNET_0, 0, 0,
184 &vlan_id, &v2);
185 if (!result) {
186 h_eth->type = 0x8100;
187
188 header_size += sizeof(struct vlantag);
189 h_vlan = (struct vlantag *)(h_eth + 1);
190 h_vlan->vlan = vlan_id;
191 h_vlan->subtype = 0x0800;
192 h_ip = (struct iphdr *)(h_vlan + 1);
193 } else {
194 h_eth->type = 0x0800;
195 h_ip = (struct iphdr *)(h_eth + 1);
196 }
197
198 header_size += sizeof(struct iphdr);
199 h_ip->ver_len = 0x45;
200 h_ip->ttl = 10;
201 h_ip->proto = 0x11;
202 h_ip->src = 0x00000000;
203 h_ip->dest = 0xffffffff;
204
205 header_size += sizeof(struct udphdr);
206 h_udp = (struct udphdr *)(h_ip + 1);
207 h_udp->src = GELIC_DEBUG_PORT;
208 h_udp->dest = GELIC_DEBUG_PORT;
209
210 pmsgc = pmsg = (char *)(h_udp + 1);
211}
212
213static void gelic_debug_shutdown(void)
214{
215 if (bus_addr)
216 unmap_dma_mem(GELIC_BUS_ID, GELIC_DEVICE_ID,
217 bus_addr, sizeof(dbg));
218 lv1_close_device(GELIC_BUS_ID, GELIC_DEVICE_ID);
219}
220
221static void gelic_sendbuf(int msgsize)
222{
223 u16 *p;
224 u32 sum;
225 int i;
226
227 dbg.descr.buf_size = header_size + msgsize;
228 h_ip->total_length = msgsize + sizeof(struct udphdr) +
229 sizeof(struct iphdr);
230 h_udp->len = msgsize + sizeof(struct udphdr);
231
232 h_ip->checksum = 0;
233 sum = 0;
234 p = (u16 *)h_ip;
235 for (i = 0; i < 5; i++)
236 sum += *p++;
237 h_ip->checksum = ~(sum + (sum >> 16));
238
239 dbg.descr.dmac_cmd_status = GELIC_DESCR_DMA_CMD_NO_CHKSUM |
240 GELIC_DESCR_TX_DMA_FRAME_TAIL;
241 dbg.descr.result_size = 0;
242 dbg.descr.data_status = 0;
243
244 wmb();
245
246 lv1_net_start_tx_dma(GELIC_BUS_ID, GELIC_DEVICE_ID, bus_addr, 0);
247
248 while ((dbg.descr.dmac_cmd_status & GELIC_DESCR_DMA_STAT_MASK) ==
249 GELIC_DESCR_DMA_CARDOWNED)
250 cpu_relax();
251}
252
253static void ps3gelic_udbg_putc(char ch)
254{
255 *pmsgc++ = ch;
256 if (ch == '\n' || (pmsgc-pmsg) >= GELIC_MAX_MESSAGE_SIZE) {
257 gelic_sendbuf(pmsgc-pmsg);
258 pmsgc = pmsg;
259 }
260}
261
262void __init udbg_init_ps3gelic(void)
263{
264 gelic_debug_init();
265 udbg_putc = ps3gelic_udbg_putc;
266}
267
268void udbg_shutdown_ps3gelic(void)
269{
270 udbg_putc = NULL;
271 gelic_debug_shutdown();
272}
273EXPORT_SYMBOL(udbg_shutdown_ps3gelic);
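
Given the packed header structures above, the header_size accumulated in gelic_debug_init() works out to 14 bytes of Ethernet header, an optional 4-byte VLAN tag, 20 bytes of IPv4 header and 8 bytes of UDP header: 46 bytes on a VLAN-tagged port, 42 otherwise, leaving the remainder of the 1520-byte pkt[] for message text (capped at GELIC_MAX_MESSAGE_SIZE). A quick stand-alone check of those sizes using equivalent packed structs:

/* Stand-alone check of the on-wire header sizes used above.  The structs
 * mirror the packed definitions in gelic_udbg.c; illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

struct eth_hdr	{ uint8_t dest[6], src[6]; uint16_t type; } __attribute__((packed));
struct vlan_tag	{ uint16_t vlan, subtype; } __attribute__((packed));
struct ip_hdr	{ uint8_t ver_len, dscp_ecn; uint16_t total_length, ident,
		  frag_off_flags; uint8_t ttl, proto; uint16_t checksum;
		  uint32_t src, dest; } __attribute__((packed));
struct udp_hdr	{ uint16_t src, dest, len, checksum; } __attribute__((packed));

int main(void)
{
	size_t base = sizeof(struct eth_hdr) + sizeof(struct ip_hdr) +
		      sizeof(struct udp_hdr);

	printf("untagged headers   : %zu bytes\n", base);		/* 42 */
	printf("VLAN-tagged headers: %zu bytes\n",
	       base + sizeof(struct vlan_tag));				/* 46 */
	return 0;
}
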
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 23083c397528..688141c76e03 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -695,12 +695,18 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
695 return mask >= DMA_BIT_MASK(32); 695 return mask >= DMA_BIT_MASK(32);
696} 696}
697 697
698static u64 ps3_dma_get_required_mask(struct device *_dev)
699{
700 return DMA_BIT_MASK(32);
701}
702
698static struct dma_map_ops ps3_sb_dma_ops = { 703static struct dma_map_ops ps3_sb_dma_ops = {
699 .alloc_coherent = ps3_alloc_coherent, 704 .alloc_coherent = ps3_alloc_coherent,
700 .free_coherent = ps3_free_coherent, 705 .free_coherent = ps3_free_coherent,
701 .map_sg = ps3_sb_map_sg, 706 .map_sg = ps3_sb_map_sg,
702 .unmap_sg = ps3_sb_unmap_sg, 707 .unmap_sg = ps3_sb_unmap_sg,
703 .dma_supported = ps3_dma_supported, 708 .dma_supported = ps3_dma_supported,
709 .get_required_mask = ps3_dma_get_required_mask,
704 .map_page = ps3_sb_map_page, 710 .map_page = ps3_sb_map_page,
705 .unmap_page = ps3_unmap_page, 711 .unmap_page = ps3_unmap_page,
706}; 712};
@@ -711,6 +717,7 @@ static struct dma_map_ops ps3_ioc0_dma_ops = {
711 .map_sg = ps3_ioc0_map_sg, 717 .map_sg = ps3_ioc0_map_sg,
712 .unmap_sg = ps3_ioc0_unmap_sg, 718 .unmap_sg = ps3_ioc0_unmap_sg,
713 .dma_supported = ps3_dma_supported, 719 .dma_supported = ps3_dma_supported,
720 .get_required_mask = ps3_dma_get_required_mask,
714 .map_page = ps3_ioc0_map_page, 721 .map_page = ps3_ioc0_map_page,
715 .unmap_page = ps3_unmap_page, 722 .unmap_page = ps3_unmap_page,
716}; 723};
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 05cf4769b88c..c81f6bb9c10f 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -15,6 +15,7 @@ config PPC_PSERIES
15 select PPC_UDBG_16550 15 select PPC_UDBG_16550
16 select PPC_NATIVE 16 select PPC_NATIVE
17 select PPC_PCI_CHOICE if EXPERT 17 select PPC_PCI_CHOICE if EXPERT
18 select ZLIB_DEFLATE
18 default y 19 default y
19 20
20config PPC_SPLPAR 21config PPC_SPLPAR
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index e9be25bc571b..0f1b706506ed 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -112,6 +112,7 @@ void dlpar_free_cc_nodes(struct device_node *dn)
112 dlpar_free_one_cc_node(dn); 112 dlpar_free_one_cc_node(dn);
113} 113}
114 114
115#define COMPLETE 0
115#define NEXT_SIBLING 1 116#define NEXT_SIBLING 1
116#define NEXT_CHILD 2 117#define NEXT_CHILD 2
117#define NEXT_PROPERTY 3 118#define NEXT_PROPERTY 3
@@ -158,6 +159,9 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
158 spin_unlock(&rtas_data_buf_lock); 159 spin_unlock(&rtas_data_buf_lock);
159 160
160 switch (rc) { 161 switch (rc) {
162 case COMPLETE:
163 break;
164
161 case NEXT_SIBLING: 165 case NEXT_SIBLING:
162 dn = dlpar_parse_cc_node(ccwa); 166 dn = dlpar_parse_cc_node(ccwa);
163 if (!dn) 167 if (!dn)
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index ada6e07532ec..d42f37d8a440 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -1338,7 +1338,7 @@ static const struct file_operations proc_eeh_operations = {
1338static int __init eeh_init_proc(void) 1338static int __init eeh_init_proc(void)
1339{ 1339{
1340 if (machine_is(pseries)) 1340 if (machine_is(pseries))
1341 proc_create("ppc64/eeh", 0, NULL, &proc_eeh_operations); 1341 proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations);
1342 return 0; 1342 return 0;
1343} 1343}
1344__initcall(eeh_init_proc); 1344__initcall(eeh_init_proc);
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 01faab9456ca..5905a3b9f7e6 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -939,14 +939,14 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
939 if (ret) { 939 if (ret) {
940 dev_info(&dev->dev, "failed to map direct window for %s: %d\n", 940 dev_info(&dev->dev, "failed to map direct window for %s: %d\n",
941 dn->full_name, ret); 941 dn->full_name, ret);
942 goto out_clear_window; 942 goto out_free_window;
943 } 943 }
944 944
945 ret = prom_add_property(pdn, win64); 945 ret = prom_add_property(pdn, win64);
946 if (ret) { 946 if (ret) {
947 dev_err(&dev->dev, "unable to add dma window property for %s: %d", 947 dev_err(&dev->dev, "unable to add dma window property for %s: %d",
948 pdn->full_name, ret); 948 pdn->full_name, ret);
949 goto out_clear_window; 949 goto out_free_window;
950 } 950 }
951 951
952 window->device = pdn; 952 window->device = pdn;
@@ -958,6 +958,9 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
958 dma_addr = of_read_number(&create.addr_hi, 2); 958 dma_addr = of_read_number(&create.addr_hi, 2);
959 goto out_unlock; 959 goto out_unlock;
960 960
961out_free_window:
962 kfree(window);
963
961out_clear_window: 964out_clear_window:
962 remove_ddw(pdn); 965 remove_ddw(pdn);
963 966
@@ -1077,12 +1080,38 @@ check_mask:
1077 return 0; 1080 return 0;
1078} 1081}
1079 1082
1083static u64 dma_get_required_mask_pSeriesLP(struct device *dev)
1084{
1085 if (!dev->dma_mask)
1086 return 0;
1087
1088 if (!disable_ddw && dev_is_pci(dev)) {
1089 struct pci_dev *pdev = to_pci_dev(dev);
1090 struct device_node *dn;
1091
1092 dn = pci_device_to_OF_node(pdev);
1093
1094 /* search upwards for ibm,dma-window */
1095 for (; dn && PCI_DN(dn) && !PCI_DN(dn)->iommu_table;
1096 dn = dn->parent)
1097 if (of_get_property(dn, "ibm,dma-window", NULL))
1098 break;
1099 /* if there is a ibm,ddw-applicable property require 64 bits */
1100 if (dn && PCI_DN(dn) &&
1101 of_get_property(dn, "ibm,ddw-applicable", NULL))
1102 return DMA_BIT_MASK(64);
1103 }
1104
1105 return dma_iommu_ops.get_required_mask(dev);
1106}
1107
1080#else /* CONFIG_PCI */ 1108#else /* CONFIG_PCI */
1081#define pci_dma_bus_setup_pSeries NULL 1109#define pci_dma_bus_setup_pSeries NULL
1082#define pci_dma_dev_setup_pSeries NULL 1110#define pci_dma_dev_setup_pSeries NULL
1083#define pci_dma_bus_setup_pSeriesLP NULL 1111#define pci_dma_bus_setup_pSeriesLP NULL
1084#define pci_dma_dev_setup_pSeriesLP NULL 1112#define pci_dma_dev_setup_pSeriesLP NULL
1085#define dma_set_mask_pSeriesLP NULL 1113#define dma_set_mask_pSeriesLP NULL
1114#define dma_get_required_mask_pSeriesLP NULL
1086#endif /* !CONFIG_PCI */ 1115#endif /* !CONFIG_PCI */
1087 1116
1088static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action, 1117static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
@@ -1186,6 +1215,7 @@ void iommu_init_early_pSeries(void)
1186 ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP; 1215 ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
1187 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP; 1216 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
1188 ppc_md.dma_set_mask = dma_set_mask_pSeriesLP; 1217 ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
1218 ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP;
1189 } else { 1219 } else {
1190 ppc_md.tce_build = tce_build_pSeries; 1220 ppc_md.tce_build = tce_build_pSeries;
1191 ppc_md.tce_free = tce_free_pSeries; 1221 ppc_md.tce_free = tce_free_pSeries;
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 00cc3a094885..a76b22844d18 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -18,6 +18,8 @@
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/kmsg_dump.h> 20#include <linux/kmsg_dump.h>
21#include <linux/ctype.h>
22#include <linux/zlib.h>
21#include <asm/uaccess.h> 23#include <asm/uaccess.h>
22#include <asm/nvram.h> 24#include <asm/nvram.h>
23#include <asm/rtas.h> 25#include <asm/rtas.h>
@@ -78,8 +80,41 @@ static struct kmsg_dumper nvram_kmsg_dumper = {
78#define NVRAM_RTAS_READ_TIMEOUT 5 /* seconds */ 80#define NVRAM_RTAS_READ_TIMEOUT 5 /* seconds */
79static unsigned long last_unread_rtas_event; /* timestamp */ 81static unsigned long last_unread_rtas_event; /* timestamp */
80 82
81/* We preallocate oops_buf during init to avoid kmalloc during oops/panic. */ 83/*
82static char *oops_buf; 84 * For capturing and compressing an oops or panic report...
85
86 * big_oops_buf[] holds the uncompressed text we're capturing.
87 *
88 * oops_buf[] holds the compressed text, preceded by a prefix.
89 * The prefix is just a u16 holding the length of the compressed* text.
90 * (*Or uncompressed, if compression fails.) oops_buf[] gets written
91 * to NVRAM.
92 *
93 * oops_len points to the prefix. oops_data points to the compressed text.
94 *
95 * +- oops_buf
96 * | +- oops_data
97 * v v
98 * +------------+-----------------------------------------------+
99 * | length | text |
100 * | (2 bytes) | (oops_data_sz bytes) |
101 * +------------+-----------------------------------------------+
102 * ^
103 * +- oops_len
104 *
105 * We preallocate these buffers during init to avoid kmalloc during oops/panic.
106 */
107static size_t big_oops_buf_sz;
108static char *big_oops_buf, *oops_buf;
109static u16 *oops_len;
110static char *oops_data;
111static size_t oops_data_sz;
112
113/* Compression parameters */
114#define COMPR_LEVEL 6
115#define WINDOW_BITS 12
116#define MEM_LEVEL 4
117static struct z_stream_s stream;
83 118
84static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index) 119static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index)
85{ 120{
@@ -387,11 +422,44 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
387 sizeof(rtas_log_partition)); 422 sizeof(rtas_log_partition));
388 } 423 }
389 oops_buf = kmalloc(oops_log_partition.size, GFP_KERNEL); 424 oops_buf = kmalloc(oops_log_partition.size, GFP_KERNEL);
425 if (!oops_buf) {
426 pr_err("nvram: No memory for %s partition\n",
427 oops_log_partition.name);
428 return;
429 }
430 oops_len = (u16*) oops_buf;
431 oops_data = oops_buf + sizeof(u16);
432 oops_data_sz = oops_log_partition.size - sizeof(u16);
433
434 /*
435 * Figure compression (preceded by elimination of each line's <n>
436 * severity prefix) will reduce the oops/panic report to at most
437 * 45% of its original size.
438 */
439 big_oops_buf_sz = (oops_data_sz * 100) / 45;
440 big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
441 if (big_oops_buf) {
442 stream.workspace = kmalloc(zlib_deflate_workspacesize(
443 WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
444 if (!stream.workspace) {
445 pr_err("nvram: No memory for compression workspace; "
446 "skipping compression of %s partition data\n",
447 oops_log_partition.name);
448 kfree(big_oops_buf);
449 big_oops_buf = NULL;
450 }
451 } else {
452 pr_err("No memory for uncompressed %s data; "
453 "skipping compression\n", oops_log_partition.name);
454 stream.workspace = NULL;
455 }
456
390 rc = kmsg_dump_register(&nvram_kmsg_dumper); 457 rc = kmsg_dump_register(&nvram_kmsg_dumper);
391 if (rc != 0) { 458 if (rc != 0) {
392 pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc); 459 pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc);
393 kfree(oops_buf); 460 kfree(oops_buf);
394 return; 461 kfree(big_oops_buf);
462 kfree(stream.workspace);
395 } 463 }
396} 464}
397 465
@@ -473,7 +541,83 @@ static int clobbering_unread_rtas_event(void)
473 NVRAM_RTAS_READ_TIMEOUT); 541 NVRAM_RTAS_READ_TIMEOUT);
474} 542}
475 543
476/* our kmsg_dump callback */ 544/* Squeeze out each line's <n> severity prefix. */
545static size_t elide_severities(char *buf, size_t len)
546{
547 char *in, *out, *buf_end = buf + len;
548 /* Assume a <n> at the very beginning marks the start of a line. */
549 int newline = 1;
550
551 in = out = buf;
552 while (in < buf_end) {
553 if (newline && in+3 <= buf_end &&
554 *in == '<' && isdigit(in[1]) && in[2] == '>') {
555 in += 3;
556 newline = 0;
557 } else {
558 newline = (*in == '\n');
559 *out++ = *in++;
560 }
561 }
562 return out - buf;
563}
564
565/* Derived from logfs_compress() */
566static int nvram_compress(const void *in, void *out, size_t inlen,
567 size_t outlen)
568{
569 int err, ret;
570
571 ret = -EIO;
572 err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
573 MEM_LEVEL, Z_DEFAULT_STRATEGY);
574 if (err != Z_OK)
575 goto error;
576
577 stream.next_in = in;
578 stream.avail_in = inlen;
579 stream.total_in = 0;
580 stream.next_out = out;
581 stream.avail_out = outlen;
582 stream.total_out = 0;
583
584 err = zlib_deflate(&stream, Z_FINISH);
585 if (err != Z_STREAM_END)
586 goto error;
587
588 err = zlib_deflateEnd(&stream);
589 if (err != Z_OK)
590 goto error;
591
592 if (stream.total_out >= stream.total_in)
593 goto error;
594
595 ret = stream.total_out;
596error:
597 return ret;
598}
599
600/* Compress the text from big_oops_buf into oops_buf. */
601static int zip_oops(size_t text_len)
602{
603 int zipped_len = nvram_compress(big_oops_buf, oops_data, text_len,
604 oops_data_sz);
605 if (zipped_len < 0) {
606 pr_err("nvram: compression failed; returned %d\n", zipped_len);
607 pr_err("nvram: logging uncompressed oops/panic report\n");
608 return -1;
609 }
610 *oops_len = (u16) zipped_len;
611 return 0;
612}
613
614/*
615 * This is our kmsg_dump callback, called after an oops or panic report
616 * has been written to the printk buffer. We want to capture as much
617 * of the printk buffer as possible. First, capture as much as we can
618 * that we think will compress sufficiently to fit in the lnx,oops-log
619 * partition. If that's too much, go back and capture uncompressed text.
620 */
477static void oops_to_nvram(struct kmsg_dumper *dumper, 621static void oops_to_nvram(struct kmsg_dumper *dumper,
478 enum kmsg_dump_reason reason, 622 enum kmsg_dump_reason reason,
479 const char *old_msgs, unsigned long old_len, 623 const char *old_msgs, unsigned long old_len,
@@ -482,6 +626,8 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
482 static unsigned int oops_count = 0; 626 static unsigned int oops_count = 0;
483 static bool panicking = false; 627 static bool panicking = false;
484 size_t text_len; 628 size_t text_len;
629 unsigned int err_type = ERR_TYPE_KERNEL_PANIC_GZ;
630 int rc = -1;
485 631
486 switch (reason) { 632 switch (reason) {
487 case KMSG_DUMP_RESTART: 633 case KMSG_DUMP_RESTART:
@@ -509,8 +655,19 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
509 if (clobbering_unread_rtas_event()) 655 if (clobbering_unread_rtas_event())
510 return; 656 return;
511 657
512 text_len = capture_last_msgs(old_msgs, old_len, new_msgs, new_len, 658 if (big_oops_buf) {
513 oops_buf, oops_log_partition.size); 659 text_len = capture_last_msgs(old_msgs, old_len,
660 new_msgs, new_len, big_oops_buf, big_oops_buf_sz);
661 text_len = elide_severities(big_oops_buf, text_len);
662 rc = zip_oops(text_len);
663 }
664 if (rc != 0) {
665 text_len = capture_last_msgs(old_msgs, old_len,
666 new_msgs, new_len, oops_data, oops_data_sz);
667 err_type = ERR_TYPE_KERNEL_PANIC;
668 *oops_len = (u16) text_len;
669 }
670
514 (void) nvram_write_os_partition(&oops_log_partition, oops_buf, 671 (void) nvram_write_os_partition(&oops_log_partition, oops_buf,
515 (int) text_len, ERR_TYPE_KERNEL_PANIC, ++oops_count); 672 (int) (sizeof(*oops_len) + *oops_len), err_type, ++oops_count);
516} 673}
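
The sizing logic in nvram_init_oops_partition() above assumes that, once the <n> severity prefixes have been elided, zlib will shrink the captured text to at most 45% of its original size, so the capture buffer is scaled up by 100/45 relative to the space left after the u16 length prefix. A small worked example of that arithmetic; the 4KB partition size used here is an assumption for illustration only:

/* Worked example of the oops-log buffer sizing above.  The partition size
 * used here (4KB) is an assumption for illustration only.
 */
#include <stdio.h>

int main(void)
{
	size_t partition_size  = 4096;			/* hypothetical lnx,oops-log size */
	size_t oops_data_sz    = partition_size - 2;	/* minus the u16 length prefix */
	size_t big_oops_buf_sz = (oops_data_sz * 100) / 45;

	printf("compressed text area : %zu bytes\n", oops_data_sz);	/* 4094 */
	printf("uncompressed capture : %zu bytes\n", big_oops_buf_sz);	/* 9097 */
	return 0;
}
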
diff --git a/arch/powerpc/platforms/wsp/Kconfig b/arch/powerpc/platforms/wsp/Kconfig
index c3c48eb62cc1..f4fb837873fb 100644
--- a/arch/powerpc/platforms/wsp/Kconfig
+++ b/arch/powerpc/platforms/wsp/Kconfig
@@ -1,5 +1,12 @@
1config PPC_WSP 1config PPC_WSP
2 bool 2 bool
3 select PPC_A2
4 select PPC_SCOM
5 select PPC_XICS
6 select PPC_ICP_NATIVE
7 select PCI
8 select PPC_IO_WORKAROUNDS if PCI
9 select PPC_INDIRECT_PIO if PCI
3 default n 10 default n
4 11
5menu "WSP platform selection" 12menu "WSP platform selection"
@@ -7,13 +14,9 @@ menu "WSP platform selection"
7 14
8config PPC_PSR2 15config PPC_PSR2
9 bool "PSR-2 platform" 16 bool "PSR-2 platform"
10 select PPC_A2
11 select GENERIC_TBSYNC 17 select GENERIC_TBSYNC
12 select PPC_SCOM
13 select EPAPR_BOOT 18 select EPAPR_BOOT
14 select PPC_WSP 19 select PPC_WSP
15 select PPC_XICS
16 select PPC_ICP_NATIVE
17 default y 20 default y
18 21
19endmenu 22endmenu
diff --git a/arch/powerpc/platforms/wsp/Makefile b/arch/powerpc/platforms/wsp/Makefile
index 095be73d6cd4..a1486b436f02 100644
--- a/arch/powerpc/platforms/wsp/Makefile
+++ b/arch/powerpc/platforms/wsp/Makefile
@@ -4,3 +4,5 @@ obj-y += setup.o ics.o
4obj-$(CONFIG_PPC_PSR2) += psr2.o opb_pic.o 4obj-$(CONFIG_PPC_PSR2) += psr2.o opb_pic.o
5obj-$(CONFIG_PPC_WSP) += scom_wsp.o 5obj-$(CONFIG_PPC_WSP) += scom_wsp.o
6obj-$(CONFIG_SMP) += smp.o scom_smp.o 6obj-$(CONFIG_SMP) += smp.o scom_smp.o
7obj-$(CONFIG_PCI) += wsp_pci.o
8obj-$(CONFIG_PCI_MSI) += msi.o \ No newline at end of file
diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c
index e53bd9e7b125..576874392543 100644
--- a/arch/powerpc/platforms/wsp/ics.c
+++ b/arch/powerpc/platforms/wsp/ics.c
@@ -710,3 +710,51 @@ void __init wsp_init_irq(void)
710 /* We need to patch our irq chip's EOI to point to the right ICP */ 710 /* We need to patch our irq chip's EOI to point to the right ICP */
711 wsp_irq_chip.irq_eoi = icp_ops->eoi; 711 wsp_irq_chip.irq_eoi = icp_ops->eoi;
712} 712}
713
714#ifdef CONFIG_PCI_MSI
715static void wsp_ics_msi_unmask_irq(struct irq_data *d)
716{
717 wsp_chip_unmask_irq(d);
718 unmask_msi_irq(d);
719}
720
721static unsigned int wsp_ics_msi_startup(struct irq_data *d)
722{
723 wsp_ics_msi_unmask_irq(d);
724 return 0;
725}
726
727static void wsp_ics_msi_mask_irq(struct irq_data *d)
728{
729 mask_msi_irq(d);
730 wsp_chip_mask_irq(d);
731}
732
733/*
734 * we do it this way because we reassign default EOI handling in
735 * irq_init() above
736 */
737static void wsp_ics_eoi(struct irq_data *data)
738{
739 wsp_irq_chip.irq_eoi(data);
740}
741
742static struct irq_chip wsp_ics_msi = {
743 .name = "WSP ICS MSI",
744 .irq_startup = wsp_ics_msi_startup,
745 .irq_mask = wsp_ics_msi_mask_irq,
746 .irq_unmask = wsp_ics_msi_unmask_irq,
747 .irq_eoi = wsp_ics_eoi,
748 .irq_set_affinity = wsp_chip_set_affinity
749};
750
751void wsp_ics_set_msi_chip(unsigned int irq)
752{
753 irq_set_chip(irq, &wsp_ics_msi);
754}
755
756void wsp_ics_set_std_chip(unsigned int irq)
757{
758 irq_set_chip(irq, &wsp_irq_chip);
759}
760#endif /* CONFIG_PCI_MSI */
diff --git a/arch/powerpc/platforms/wsp/ics.h b/arch/powerpc/platforms/wsp/ics.h
index e34d53102640..07b644e0cf97 100644
--- a/arch/powerpc/platforms/wsp/ics.h
+++ b/arch/powerpc/platforms/wsp/ics.h
@@ -17,4 +17,9 @@ extern void wsp_init_irq(void);
17extern int wsp_ics_alloc_irq(struct device_node *dn, int num); 17extern int wsp_ics_alloc_irq(struct device_node *dn, int num);
18extern void wsp_ics_free_irq(struct device_node *dn, unsigned int irq); 18extern void wsp_ics_free_irq(struct device_node *dn, unsigned int irq);
19 19
20#ifdef CONFIG_PCI_MSI
21extern void wsp_ics_set_msi_chip(unsigned int irq);
22extern void wsp_ics_set_std_chip(unsigned int irq);
23#endif /* CONFIG_PCI_MSI */
24
20#endif /* __ICS_H */ 25#endif /* __ICS_H */
diff --git a/arch/powerpc/platforms/wsp/msi.c b/arch/powerpc/platforms/wsp/msi.c
new file mode 100644
index 000000000000..380882f27add
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/msi.c
@@ -0,0 +1,102 @@
1/*
2 * Copyright 2011 Michael Ellerman, IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/kernel.h>
11#include <linux/pci.h>
12#include <linux/msi.h>
13#include <linux/irq.h>
14#include <linux/interrupt.h>
15
16#include "msi.h"
17#include "ics.h"
18#include "wsp_pci.h"
19
20/* Magic addresses for 32 & 64-bit MSIs with hardcoded MVE 0 */
21#define MSI_ADDR_32 0xFFFF0000ul
22#define MSI_ADDR_64 0x1000000000000000ul
23
24int wsp_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
25{
26 struct pci_controller *phb;
27 struct msi_desc *entry;
28 struct msi_msg msg;
29 unsigned int virq;
30 int hwirq;
31
32 phb = pci_bus_to_host(dev->bus);
33 if (!phb)
34 return -ENOENT;
35
36 entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
37 if (entry->msi_attrib.is_64) {
38 msg.address_lo = 0;
39 msg.address_hi = MSI_ADDR_64 >> 32;
40 } else {
41 msg.address_lo = MSI_ADDR_32;
42 msg.address_hi = 0;
43 }
44
45 list_for_each_entry(entry, &dev->msi_list, list) {
46 hwirq = wsp_ics_alloc_irq(phb->dn, 1);
47 if (hwirq < 0) {
48 dev_warn(&dev->dev, "wsp_msi: hwirq alloc failed!\n");
49 return hwirq;
50 }
51
52 virq = irq_create_mapping(NULL, hwirq);
53 if (virq == NO_IRQ) {
54 dev_warn(&dev->dev, "wsp_msi: virq alloc failed!\n");
55 return -1;
56 }
57
58 dev_dbg(&dev->dev, "wsp_msi: allocated irq %#x/%#x\n",
59 hwirq, virq);
60
61 wsp_ics_set_msi_chip(virq);
62 irq_set_msi_desc(virq, entry);
63 msg.data = hwirq & XIVE_ADDR_MASK;
64 write_msi_msg(virq, &msg);
65 }
66
67 return 0;
68}
69
70void wsp_teardown_msi_irqs(struct pci_dev *dev)
71{
72 struct pci_controller *phb;
73 struct msi_desc *entry;
74 int hwirq;
75
76 phb = pci_bus_to_host(dev->bus);
77
78 dev_dbg(&dev->dev, "wsp_msi: tearing down msi irqs\n");
79
80 list_for_each_entry(entry, &dev->msi_list, list) {
81 if (entry->irq == NO_IRQ)
82 continue;
83
84 irq_set_msi_desc(entry->irq, NULL);
85 wsp_ics_set_std_chip(entry->irq);
86
87 hwirq = virq_to_hw(entry->irq);
88 /* In this order to avoid racing with irq_create_mapping() */
89 irq_dispose_mapping(entry->irq);
90 wsp_ics_free_irq(phb->dn, hwirq);
91 }
92}
93
94void wsp_setup_phb_msi(struct pci_controller *phb)
95{
96 /* Create a single MVE at offset 0 that matches everything */
97 out_be64(phb->cfg_data + PCIE_REG_IODA_ADDR, PCIE_REG_IODA_AD_TBL_MVT);
98 out_be64(phb->cfg_data + PCIE_REG_IODA_DATA0, 1ull << 63);
99
100 ppc_md.setup_msi_irqs = wsp_setup_msi_irqs;
101 ppc_md.teardown_msi_irqs = wsp_teardown_msi_irqs;
102}
diff --git a/arch/powerpc/platforms/wsp/msi.h b/arch/powerpc/platforms/wsp/msi.h
new file mode 100644
index 000000000000..0ab27b71b24d
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/msi.h
@@ -0,0 +1,19 @@
1/*
2 * Copyright 2011 Michael Ellerman, IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#ifndef __WSP_MSI_H
11#define __WSP_MSI_H
12
13#ifdef CONFIG_PCI_MSI
14extern void wsp_setup_phb_msi(struct pci_controller *phb);
15#else
16static inline void wsp_setup_phb_msi(struct pci_controller *phb) { }
17#endif
18
19#endif /* __WSP_MSI_H */
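For reference, a minimal sketch (not part of the patch) of how wsp_setup_msi_irqs() above composes the MSI message from a hardware irq number; wsp_compose_msi_msg() is a hypothetical helper name, and XIVE_ADDR_MASK is assumed to be provided by ics.h as used in msi.c:

/* Hypothetical helper mirroring the message setup in wsp_setup_msi_irqs() */
static void wsp_compose_msi_msg(struct msi_msg *msg, int hwirq, bool is_64bit)
{
	if (is_64bit) {
		/* 64-bit capable functions target the fixed 64-bit MVE 0 address */
		msg->address_lo = 0;
		msg->address_hi = MSI_ADDR_64 >> 32;
	} else {
		/* 32-bit only functions use the magic 32-bit window instead */
		msg->address_lo = MSI_ADDR_32;
		msg->address_hi = 0;
	}
	/* The data payload carries the hardware irq number for MVE 0 */
	msg->data = hwirq & XIVE_ADDR_MASK;
}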
diff --git a/arch/powerpc/platforms/wsp/psr2.c b/arch/powerpc/platforms/wsp/psr2.c
index 40f28916ff6c..166f2e4b4bee 100644
--- a/arch/powerpc/platforms/wsp/psr2.c
+++ b/arch/powerpc/platforms/wsp/psr2.c
@@ -63,6 +63,10 @@ static void __init psr2_setup_arch(void)
63#ifdef CONFIG_SMP 63#ifdef CONFIG_SMP
64 a2_setup_smp(); 64 a2_setup_smp();
65#endif 65#endif
66#ifdef CONFIG_PCI
67 wsp_setup_pci();
68#endif
69
66} 70}
67 71
68static int __init psr2_probe(void) 72static int __init psr2_probe(void)
diff --git a/arch/powerpc/platforms/wsp/wsp.h b/arch/powerpc/platforms/wsp/wsp.h
index 7c3e087fd2f2..33479818f62a 100644
--- a/arch/powerpc/platforms/wsp/wsp.h
+++ b/arch/powerpc/platforms/wsp/wsp.h
@@ -3,6 +3,9 @@
3 3
4#include <asm/wsp.h> 4#include <asm/wsp.h>
5 5
6/* Devtree compatible strings for major devices */
7#define PCIE_COMPATIBLE "ibm,wsp-pciex"
8
6extern void wsp_setup_pci(void); 9extern void wsp_setup_pci(void);
7extern void scom_init_wsp(void); 10extern void scom_init_wsp(void);
8 11
diff --git a/arch/powerpc/platforms/wsp/wsp_pci.c b/arch/powerpc/platforms/wsp/wsp_pci.c
new file mode 100644
index 000000000000..e0262cd0e2d3
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/wsp_pci.c
@@ -0,0 +1,1133 @@
1/*
2 * Copyright 2010 Ben Herrenschmidt, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#define DEBUG
11
12#include <linux/kernel.h>
13#include <linux/pci.h>
14#include <linux/delay.h>
15#include <linux/string.h>
16#include <linux/init.h>
17#include <linux/bootmem.h>
18#include <linux/irq.h>
19#include <linux/interrupt.h>
20#include <linux/debugfs.h>
21
22#include <asm/sections.h>
23#include <asm/io.h>
24#include <asm/prom.h>
25#include <asm/pci-bridge.h>
26#include <asm/machdep.h>
27#include <asm/ppc-pci.h>
28#include <asm/iommu.h>
29#include <asm/io-workarounds.h>
30
31#include "wsp.h"
32#include "wsp_pci.h"
33#include "msi.h"
34
35
36/* Max number of TVTs for one table. Only 32-bit tables can use
37 * multiple TVTs, so the max currently supported is 8 since each TVT
38 * covers at most 256M and only 2G of DMA space is supported
39 */
40#define MAX_TABLE_TVT_COUNT 8
41
42struct wsp_dma_table {
43 struct list_head link;
44 struct iommu_table table;
45 struct wsp_phb *phb;
46 struct page *tces[MAX_TABLE_TVT_COUNT];
47};
48
49/* We support DMA regions from 0...2G in 32bit space (no support for
50 * 64-bit DMA just yet). Each device gets a separate TCE table (TVT
51 * entry) with validation enabled (though not supported by SimiCS
52 * just yet).
53 *
54 * To simplify things, we divide this 2G space into N regions based
55 * on the constant below which could be turned into a tunable eventually
56 *
57 * We then assign dynamically those regions to devices as they show up.
58 *
59 * We use a bitmap as an allocator for these.
60 *
61 * Tables are allocated/created dynamically as devices are discovered,
62 * multiple TVT entries are used if needed
63 *
64 * When 64-bit DMA support is added we should simply use a separate set
65 * of larger regions (the HW supports 64 TVT entries). We can
66 * additionally create a bypass region in 64-bit space for performance,
67 * though that would have a cost in terms of security.
68 *
69 * If you set NUM_DMA32_REGIONS to 1, then a single table is shared
70 * for all devices and bus/dev/fn validation is disabled
71 *
72 * Note that a DMA32 region cannot be smaller than 256M so the max
73 * supported here for now is 8. We don't yet support sharing regions
74 * between multiple devices so the max number of devices supported
75 * is MAX_TABLE_TVT_COUNT.
76 */
77#define NUM_DMA32_REGIONS 1
78
79struct wsp_phb {
80 struct pci_controller *hose;
81
82 /* Lock controlling access to the list of dma tables.
83 * It does -not- protect against dma_* operations on
84 * those tables; those should be stopped before an entry
85 * is removed from the list.
86 *
87 * The lock is also used for error handling operations
88 */
89 spinlock_t lock;
90 struct list_head dma_tables;
91 unsigned long dma32_map;
92 unsigned long dma32_base;
93 unsigned int dma32_num_regions;
94 unsigned long dma32_region_size;
95
96 /* Debugfs stuff */
97 struct dentry *ddir;
98
99 struct list_head all;
100};
101static LIST_HEAD(wsp_phbs);
102
103//#define cfg_debug(fmt...) pr_debug(fmt)
104#define cfg_debug(fmt...)
105
106
107static int wsp_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
108 int offset, int len, u32 *val)
109{
110 struct pci_controller *hose;
111 int suboff;
112 u64 addr;
113
114 hose = pci_bus_to_host(bus);
115 if (hose == NULL)
116 return PCIBIOS_DEVICE_NOT_FOUND;
117 if (offset >= 0x1000)
118 return PCIBIOS_BAD_REGISTER_NUMBER;
119 addr = PCIE_REG_CA_ENABLE |
120 ((u64)bus->number) << PCIE_REG_CA_BUS_SHIFT |
121 ((u64)devfn) << PCIE_REG_CA_FUNC_SHIFT |
122 ((u64)offset & ~3) << PCIE_REG_CA_REG_SHIFT;
123 suboff = offset & 3;
124
125 /*
126 * Note: the caller has already checked that offset is
127 * suitably aligned and that len is 1, 2 or 4.
128 */
129
130 switch (len) {
131 case 1:
132 addr |= (0x8ul >> suboff) << PCIE_REG_CA_BE_SHIFT;
133 out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
134 *val = (in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA)
135 >> (suboff << 3)) & 0xff;
136 cfg_debug("read 1 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%02x\n",
137 bus->number, devfn >> 3, devfn & 7,
138 offset, suboff, addr, *val);
139 break;
140 case 2:
141 addr |= (0xcul >> suboff) << PCIE_REG_CA_BE_SHIFT;
142 out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
143 *val = (in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA)
144 >> (suboff << 3)) & 0xffff;
145 cfg_debug("read 2 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%04x\n",
146 bus->number, devfn >> 3, devfn & 7,
147 offset, suboff, addr, *val);
148 break;
149 default:
150 addr |= 0xful << PCIE_REG_CA_BE_SHIFT;
151 out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
152 *val = in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA);
153 cfg_debug("read 4 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%08x\n",
154 bus->number, devfn >> 3, devfn & 7,
155 offset, suboff, addr, *val);
156 break;
157 }
158 return PCIBIOS_SUCCESSFUL;
159}
160
161static int wsp_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
162 int offset, int len, u32 val)
163{
164 struct pci_controller *hose;
165 int suboff;
166 u64 addr;
167
168 hose = pci_bus_to_host(bus);
169 if (hose == NULL)
170 return PCIBIOS_DEVICE_NOT_FOUND;
171 if (offset >= 0x1000)
172 return PCIBIOS_BAD_REGISTER_NUMBER;
173 addr = PCIE_REG_CA_ENABLE |
174 ((u64)bus->number) << PCIE_REG_CA_BUS_SHIFT |
175 ((u64)devfn) << PCIE_REG_CA_FUNC_SHIFT |
176 ((u64)offset & ~3) << PCIE_REG_CA_REG_SHIFT;
177 suboff = offset & 3;
178
179 /*
180 * Note: the caller has already checked that offset is
181 * suitably aligned and that len is 1, 2 or 4.
182 */
183 switch (len) {
184 case 1:
185 addr |= (0x8ul >> suboff) << PCIE_REG_CA_BE_SHIFT;
186 val <<= suboff << 3;
187 out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
188 out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
189 cfg_debug("write 1 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%02x\n",
190 bus->number, devfn >> 3, devfn & 7,
191 offset, suboff, addr, val);
192 break;
193 case 2:
194 addr |= (0xcul >> suboff) << PCIE_REG_CA_BE_SHIFT;
195 val <<= suboff << 3;
196 out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
197 out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
198 cfg_debug("write 2 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%04x\n",
199 bus->number, devfn >> 3, devfn & 7,
200 offset, suboff, addr, val);
201 break;
202 default:
203 addr |= 0xful << PCIE_REG_CA_BE_SHIFT;
204 out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
205 out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
206 cfg_debug("write 4 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%08x\n",
207 bus->number, devfn >> 3, devfn & 7,
208 offset, suboff, addr, val);
209 break;
210 }
211 return PCIBIOS_SUCCESSFUL;
212}
213
214static struct pci_ops wsp_pcie_pci_ops =
215{
216 .read = wsp_pcie_read_config,
217 .write = wsp_pcie_write_config,
218};
219
220#define TCE_SHIFT 12
221#define TCE_PAGE_SIZE (1 << TCE_SHIFT)
222#define TCE_PCI_WRITE 0x2 /* write from PCI allowed */
223#define TCE_PCI_READ 0x1 /* read from PCI allowed */
224#define TCE_RPN_MASK 0x3fffffffffful /* 42-bit RPN (4K pages) */
225#define TCE_RPN_SHIFT 12
226
227//#define dma_debug(fmt...) pr_debug(fmt)
228#define dma_debug(fmt...)
229
230static int tce_build_wsp(struct iommu_table *tbl, long index, long npages,
231 unsigned long uaddr, enum dma_data_direction direction,
232 struct dma_attrs *attrs)
233{
234 struct wsp_dma_table *ptbl = container_of(tbl,
235 struct wsp_dma_table,
236 table);
237 u64 proto_tce;
238 u64 *tcep;
239 u64 rpn;
240
241 proto_tce = TCE_PCI_READ;
242#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
243 proto_tce |= TCE_PCI_WRITE;
244#else
245 if (direction != DMA_TO_DEVICE)
246 proto_tce |= TCE_PCI_WRITE;
247#endif
248
249 /* XXX Make this faster by factoring out the page address lookup
250 * within a TCE table
251 */
252 while (npages--) {
253 /* We don't use it->base as the table can be scattered */
254 tcep = (u64 *)page_address(ptbl->tces[index >> 16]);
255 tcep += (index & 0xffff);
256
257 /* can't move this out since we might cross LMB boundary */
258 rpn = __pa(uaddr) >> TCE_SHIFT;
259 *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
260
261 dma_debug("[DMA] TCE %p set to 0x%016llx (dma addr: 0x%lx)\n",
262 tcep, *tcep, (tbl->it_offset + index) << IOMMU_PAGE_SHIFT);
263
264 uaddr += TCE_PAGE_SIZE;
265 index++;
266 }
267 return 0;
268}
269
270static void tce_free_wsp(struct iommu_table *tbl, long index, long npages)
271{
272 struct wsp_dma_table *ptbl = container_of(tbl,
273 struct wsp_dma_table,
274 table);
275#ifndef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
276 struct pci_controller *hose = ptbl->phb->hose;
277#endif
278 u64 *tcep;
279
280 /* XXX Make this faster by factoring out the page address lookup
281 * within a TCE table. Also use line-kill option to kill multiple
282 * TCEs at once
283 */
284 while (npages--) {
285 /* We don't use it->base as the table can be scattered */
286 tcep = (u64 *)page_address(ptbl->tces[index >> 16]);
287 tcep += (index & 0xffff);
288 dma_debug("[DMA] TCE %p cleared\n", tcep);
289 *tcep = 0;
290#ifndef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
291 /* Don't write there since it would pollute other MMIO accesses */
292 out_be64(hose->cfg_data + PCIE_REG_TCE_KILL,
293 PCIE_REG_TCEKILL_SINGLE | PCIE_REG_TCEKILL_PS_4K |
294 (__pa(tcep) & PCIE_REG_TCEKILL_ADDR_MASK));
295#endif
296 index++;
297 }
298}
299
300static struct wsp_dma_table *wsp_pci_create_dma32_table(struct wsp_phb *phb,
301 unsigned int region,
302 struct pci_dev *validate)
303{
304 struct pci_controller *hose = phb->hose;
305 unsigned long size = phb->dma32_region_size;
306 unsigned long addr = phb->dma32_region_size * region + phb->dma32_base;
307 struct wsp_dma_table *tbl;
308 int tvts_per_table, i, tvt, nid;
309 unsigned long flags;
310
311 nid = of_node_to_nid(phb->hose->dn);
312
313 /* Calculate how many TVTs are needed */
314 tvts_per_table = size / 0x10000000;
315 if (tvts_per_table == 0)
316 tvts_per_table = 1;
317
318 /* Calculate the base TVT index. We know all tables have the same
319 * size so we just do a simple multiply here
320 */
321 tvt = region * tvts_per_table;
322
323 pr_debug(" Region : %d\n", region);
324 pr_debug(" DMA range : 0x%08lx..0x%08lx\n", addr, addr + size - 1);
325 pr_debug(" Number of TVTs : %d\n", tvts_per_table);
326 pr_debug(" Base TVT : %d\n", tvt);
327 pr_debug(" Node : %d\n", nid);
328
329 tbl = kzalloc_node(sizeof(struct wsp_dma_table), GFP_KERNEL, nid);
330 if (!tbl)
331 return ERR_PTR(-ENOMEM);
332 tbl->phb = phb;
333
334 /* Create as many TVTs as needed, each represents 256M at most */
335 for (i = 0; i < tvts_per_table; i++) {
336 u64 tvt_data1, tvt_data0;
337
338 /* Allocate table. We always use a 4K TCE size for now, so
339 * one table is always 8 * (256M / 4K) == 512K
340 */
341 tbl->tces[i] = alloc_pages_node(nid, GFP_KERNEL, get_order(0x80000));
342 if (tbl->tces[i] == NULL)
343 goto fail;
344 memset(page_address(tbl->tces[i]), 0, 0x80000);
345
346 pr_debug(" TCE table %d at : %p\n", i, page_address(tbl->tces[i]));
347
348 /* Table size. We currently set it to be the whole 256M region */
349 tvt_data0 = 2ull << IODA_TVT0_TCE_TABLE_SIZE_SHIFT;
350 /* IO page size set to 4K */
351 tvt_data1 = 1ull << IODA_TVT1_IO_PAGE_SIZE_SHIFT;
352 /* Shift in the address */
353 tvt_data0 |= __pa(page_address(tbl->tces[i])) << IODA_TVT0_TTA_SHIFT;
354
355 /* Validation stuff. We only do full bus/dev/fn validation for now;
356 * one day maybe we can group devices, but that isn't the case
357 * at the moment
358 */
359 if (validate) {
360 tvt_data0 |= IODA_TVT0_BUSNUM_VALID_MASK;
361 tvt_data0 |= validate->bus->number;
362 tvt_data1 |= IODA_TVT1_DEVNUM_VALID;
363 tvt_data1 |= ((u64)PCI_SLOT(validate->devfn))
364 << IODA_TVT1_DEVNUM_VALUE_SHIFT;
365 tvt_data1 |= IODA_TVT1_FUNCNUM_VALID;
366 tvt_data1 |= ((u64)PCI_FUNC(validate->devfn))
367 << IODA_TVT1_FUNCNUM_VALUE_SHIFT;
368 }
369
370 /* XX PE number is always 0 for now */
371
372 /* Program the values using the PHB lock */
373 spin_lock_irqsave(&phb->lock, flags);
374 out_be64(hose->cfg_data + PCIE_REG_IODA_ADDR,
375 (tvt + i) | PCIE_REG_IODA_AD_TBL_TVT);
376 out_be64(hose->cfg_data + PCIE_REG_IODA_DATA1, tvt_data1);
377 out_be64(hose->cfg_data + PCIE_REG_IODA_DATA0, tvt_data0);
378 spin_unlock_irqrestore(&phb->lock, flags);
379 }
380
381 /* Init bits and pieces */
382 tbl->table.it_blocksize = 16;
383 tbl->table.it_offset = addr >> IOMMU_PAGE_SHIFT;
384 tbl->table.it_size = size >> IOMMU_PAGE_SHIFT;
385
386 /*
387 * It's already blank but we clear it anyway.
388 * Consider an additional interface that makes clearing optional
389 */
390 iommu_init_table(&tbl->table, nid);
391
392 list_add(&tbl->link, &phb->dma_tables);
393 return tbl;
394
395 fail:
396 pr_debug(" Failed to allocate a 256M TCE table !\n");
397 for (i = 0; i < tvts_per_table; i++)
398 if (tbl->tces[i])
399 __free_pages(tbl->tces[i], get_order(0x80000));
400 kfree(tbl);
401 return ERR_PTR(-ENOMEM);
402}
403
404static void __devinit wsp_pci_dma_dev_setup(struct pci_dev *pdev)
405{
406 struct dev_archdata *archdata = &pdev->dev.archdata;
407 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
408 struct wsp_phb *phb = hose->private_data;
409 struct wsp_dma_table *table = NULL;
410 unsigned long flags;
411 int i;
412
413 /* Don't assign an iommu table to a bridge */
414 if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
415 return;
416
417 pr_debug("%s: Setting up DMA...\n", pci_name(pdev));
418
419 spin_lock_irqsave(&phb->lock, flags);
420
421 /* If only one region, check if it already exists */
422 if (phb->dma32_num_regions == 1) {
423 spin_unlock_irqrestore(&phb->lock, flags);
424 if (list_empty(&phb->dma_tables))
425 table = wsp_pci_create_dma32_table(phb, 0, NULL);
426 else
427 table = list_first_entry(&phb->dma_tables,
428 struct wsp_dma_table,
429 link);
430 } else {
431 /* else find a free region */
432 for (i = 0; i < phb->dma32_num_regions && !table; i++) {
433 if (__test_and_set_bit(i, &phb->dma32_map))
434 continue;
435 spin_unlock_irqrestore(&phb->lock, flags);
436 table = wsp_pci_create_dma32_table(phb, i, pdev);
437 }
438 }
439
440 /* Check if we got an error */
441 if (IS_ERR(table)) {
442 pr_err("%s: Failed to create DMA table, err %ld !\n",
443 pci_name(pdev), PTR_ERR(table));
444 return;
445 }
446
447 /* Or a valid table */
448 if (table) {
449 pr_info("%s: Setup iommu: 32-bit DMA region 0x%08lx..0x%08lx\n",
450 pci_name(pdev),
451 table->table.it_offset << IOMMU_PAGE_SHIFT,
452 (table->table.it_offset << IOMMU_PAGE_SHIFT)
453 + phb->dma32_region_size - 1);
454 archdata->dma_data.iommu_table_base = &table->table;
455 return;
456 }
457
458 /* Or no room */
459 spin_unlock_irqrestore(&phb->lock, flags);
460 pr_err("%s: Out of DMA space !\n", pci_name(pdev));
461}
462
463static void __init wsp_pcie_configure_hw(struct pci_controller *hose)
464{
465 u64 val;
466 int i;
467
468#define DUMP_REG(x) \
469 pr_debug("%-30s : 0x%016llx\n", #x, in_be64(hose->cfg_data + x))
470
471#ifdef CONFIG_WSP_DD1_WORKAROUND_BAD_PCIE_CLASS
472 /* WSP DD1 has a bogus class code by default in the PCI-E
473 * root complex's built-in P2P bridge */
474 val = in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1);
475 pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", val);
476 out_be64(hose->cfg_data + PCIE_REG_SYS_CFG1,
477 (val & ~PCIE_REG_SYS_CFG1_CLASS_CODE) | (PCI_CLASS_BRIDGE_PCI << 8));
478 pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1));
479#endif /* CONFIG_WSP_DD1_WORKAROUND_BAD_PCIE_CLASS */
480
481#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
482 /* XXX Disable TCE caching, it doesn't work on DD1 */
483 out_be64(hose->cfg_data + 0xe50,
484 in_be64(hose->cfg_data + 0xe50) | (3ull << 62));
485 printk("PCI-E DEBUG CONTROL 5 = 0x%llx\n", in_be64(hose->cfg_data + 0xe50));
486#endif
487
488 /* Configure M32A and IO. IO is hard wired to be 1M for now */
489 out_be64(hose->cfg_data + PCIE_REG_IO_BASE_ADDR, hose->io_base_phys);
490 out_be64(hose->cfg_data + PCIE_REG_IO_BASE_MASK,
491 (~(hose->io_resource.end - hose->io_resource.start)) &
492 0x3fffffff000ul);
493 out_be64(hose->cfg_data + PCIE_REG_IO_START_ADDR, 0 | 1);
494
495 out_be64(hose->cfg_data + PCIE_REG_M32A_BASE_ADDR,
496 hose->mem_resources[0].start);
497 printk("Want to write to M32A_BASE_MASK : 0x%llx\n",
498 (~(hose->mem_resources[0].end -
499 hose->mem_resources[0].start)) & 0x3ffffff0000ul);
500 out_be64(hose->cfg_data + PCIE_REG_M32A_BASE_MASK,
501 (~(hose->mem_resources[0].end -
502 hose->mem_resources[0].start)) & 0x3ffffff0000ul);
503 out_be64(hose->cfg_data + PCIE_REG_M32A_START_ADDR,
504 (hose->mem_resources[0].start - hose->pci_mem_offset) | 1);
505
506 /* Clear all TVT entries
507 *
508 * XX Might get TVT count from device-tree
509 */
510 for (i = 0; i < IODA_TVT_COUNT; i++) {
511 out_be64(hose->cfg_data + PCIE_REG_IODA_ADDR,
512 PCIE_REG_IODA_AD_TBL_TVT | i);
513 out_be64(hose->cfg_data + PCIE_REG_IODA_DATA1, 0);
514 out_be64(hose->cfg_data + PCIE_REG_IODA_DATA0, 0);
515 }
516
517 /* Kill the TCE cache */
518 out_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG,
519 in_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG) |
520 PCIE_REG_PHBC_64B_TCE_EN);
521
522 /* Enable 32 & 64-bit MSIs, IO space and M32A */
523 val = PCIE_REG_PHBC_32BIT_MSI_EN |
524 PCIE_REG_PHBC_IO_EN |
525 PCIE_REG_PHBC_64BIT_MSI_EN |
526 PCIE_REG_PHBC_M32A_EN;
527 if (iommu_is_off)
528 val |= PCIE_REG_PHBC_DMA_XLATE_BYPASS;
529 pr_debug("Will write config: 0x%llx\n", val);
530 out_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG, val);
531
532 /* Enable error reporting */
533 out_be64(hose->cfg_data + 0xe00,
534 in_be64(hose->cfg_data + 0xe00) | 0x0008000000000000ull);
535
536 /* Mask an error that's generated when doing config space probe
537 *
538 * XXX Maybe we should only mask it around config space cycles... that or
539 * ignore it when we know we had a config space cycle recently ?
540 */
541 out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS_MASK, 0x8000000000000000ull);
542 out_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS_MASK, 0x8000000000000000ull);
543
544 /* Enable UTL errors; for now, all of them go to UTL irq 1
545 *
546 * We similarly mask one UTL error apparently caused during normal
547 * probing. We also mask the link up error
548 */
549 out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_ERR_SEV, 0);
550 out_be64(hose->cfg_data + PCIE_UTL_RC_ERR_SEVERITY, 0);
551 out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_ERROR_SEV, 0);
552 out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_IRQ_EN, 0xffffffff00000000ull);
553 out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_IRQ_EN, 0xff5fffff00000000ull);
554 out_be64(hose->cfg_data + PCIE_UTL_EP_ERR_IRQ_EN, 0xffffffff00000000ull);
555
556 DUMP_REG(PCIE_REG_IO_BASE_ADDR);
557 DUMP_REG(PCIE_REG_IO_BASE_MASK);
558 DUMP_REG(PCIE_REG_IO_START_ADDR);
559 DUMP_REG(PCIE_REG_M32A_BASE_ADDR);
560 DUMP_REG(PCIE_REG_M32A_BASE_MASK);
561 DUMP_REG(PCIE_REG_M32A_START_ADDR);
562 DUMP_REG(PCIE_REG_M32B_BASE_ADDR);
563 DUMP_REG(PCIE_REG_M32B_BASE_MASK);
564 DUMP_REG(PCIE_REG_M32B_START_ADDR);
565 DUMP_REG(PCIE_REG_M64_BASE_ADDR);
566 DUMP_REG(PCIE_REG_M64_BASE_MASK);
567 DUMP_REG(PCIE_REG_M64_START_ADDR);
568 DUMP_REG(PCIE_REG_PHB_CONFIG);
569}
570
571static void wsp_pci_wait_io_idle(struct wsp_phb *phb, unsigned long port)
572{
573 u64 val;
574 int i;
575
576 for (i = 0; i < 10000; i++) {
577 val = in_be64(phb->hose->cfg_data + 0xe08);
578 if ((val & 0x1900000000000000ull) == 0x0100000000000000ull)
579 return;
580 udelay(1);
581 }
582 pr_warning("PCI IO timeout on domain %d port 0x%lx\n",
583 phb->hose->global_number, port);
584}
585
586#define DEF_PCI_AC_RET_pio(name, ret, at, al, aa) \
587static ret wsp_pci_##name at \
588{ \
589 struct iowa_bus *bus; \
590 struct wsp_phb *phb; \
591 unsigned long flags; \
592 ret rval; \
593 bus = iowa_pio_find_bus(aa); \
594 WARN_ON(!bus); \
595 phb = bus->private; \
596 spin_lock_irqsave(&phb->lock, flags); \
597 wsp_pci_wait_io_idle(phb, aa); \
598 rval = __do_##name al; \
599 spin_unlock_irqrestore(&phb->lock, flags); \
600 return rval; \
601}
602
603#define DEF_PCI_AC_NORET_pio(name, at, al, aa) \
604static void wsp_pci_##name at \
605{ \
606 struct iowa_bus *bus; \
607 struct wsp_phb *phb; \
608 unsigned long flags; \
609 bus = iowa_pio_find_bus(aa); \
610 WARN_ON(!bus); \
611 phb = bus->private; \
612 spin_lock_irqsave(&phb->lock, flags); \
613 wsp_pci_wait_io_idle(phb, aa); \
614 __do_##name al; \
615 spin_unlock_irqrestore(&phb->lock, flags); \
616}
617
618#define DEF_PCI_AC_RET_mem(name, ret, at, al, aa)
619#define DEF_PCI_AC_NORET_mem(name, at, al, aa)
620
621#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \
622 DEF_PCI_AC_RET_##space(name, ret, at, al, aa)
623
624#define DEF_PCI_AC_NORET(name, at, al, space, aa) \
625 DEF_PCI_AC_NORET_##space(name, at, al, aa) \
626
627
628#include <asm/io-defs.h>
629
630#undef DEF_PCI_AC_RET
631#undef DEF_PCI_AC_NORET
632
633static struct ppc_pci_io wsp_pci_iops = {
634 .inb = wsp_pci_inb,
635 .inw = wsp_pci_inw,
636 .inl = wsp_pci_inl,
637 .outb = wsp_pci_outb,
638 .outw = wsp_pci_outw,
639 .outl = wsp_pci_outl,
640 .insb = wsp_pci_insb,
641 .insw = wsp_pci_insw,
642 .insl = wsp_pci_insl,
643 .outsb = wsp_pci_outsb,
644 .outsw = wsp_pci_outsw,
645 .outsl = wsp_pci_outsl,
646};
647
648static int __init wsp_setup_one_phb(struct device_node *np)
649{
650 struct pci_controller *hose;
651 struct wsp_phb *phb;
652
653 pr_info("PCI: Setting up PCIe host bridge 0x%s\n", np->full_name);
654
655 phb = zalloc_maybe_bootmem(sizeof(struct wsp_phb), GFP_KERNEL);
656 if (!phb)
657 return -ENOMEM;
658 hose = pcibios_alloc_controller(np);
659 if (!hose) {
660 /* Can't really free the phb */
661 return -ENOMEM;
662 }
663 hose->private_data = phb;
664 phb->hose = hose;
665
666 INIT_LIST_HEAD(&phb->dma_tables);
667 spin_lock_init(&phb->lock);
668
669 /* XXX Use bus-range property ? */
670 hose->first_busno = 0;
671 hose->last_busno = 0xff;
672
673 /* We use cfg_data as the address for the whole bridge MMIO space
674 */
675 hose->cfg_data = of_iomap(hose->dn, 0);
676
677 pr_debug("PCIe registers mapped at 0x%p\n", hose->cfg_data);
678
679 /* Get the ranges of the device-tree */
680 pci_process_bridge_OF_ranges(hose, np, 0);
681
682 /* XXX Force re-assigning of everything for now */
683 pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC |
684 PCI_ENABLE_PROC_DOMAINS);
685 pci_probe_only = 0;
686
687 /* Calculate how the TCE space is divided */
688 phb->dma32_base = 0;
689 phb->dma32_num_regions = NUM_DMA32_REGIONS;
690 if (phb->dma32_num_regions > MAX_TABLE_TVT_COUNT) {
691 pr_warning("IOMMU: Clamped to %d DMA32 regions\n",
692 MAX_TABLE_TVT_COUNT);
693 phb->dma32_num_regions = MAX_TABLE_TVT_COUNT;
694 }
695 phb->dma32_region_size = 0x80000000 / phb->dma32_num_regions;
696
697 BUG_ON(!is_power_of_2(phb->dma32_region_size));
698
699 /* Setup config ops */
700 hose->ops = &wsp_pcie_pci_ops;
701
702 /* Configure the HW */
703 wsp_pcie_configure_hw(hose);
704
705 /* Instantiate IO workarounds */
706 iowa_register_bus(hose, &wsp_pci_iops, NULL, phb);
707#ifdef CONFIG_PCI_MSI
708 wsp_setup_phb_msi(hose);
709#endif
710
711 /* Add to global list */
712 list_add(&phb->all, &wsp_phbs);
713
714 return 0;
715}
716
717void __init wsp_setup_pci(void)
718{
719 struct device_node *np;
720 int rc;
721
722 /* Find host bridges */
723 for_each_compatible_node(np, "pciex", PCIE_COMPATIBLE) {
724 rc = wsp_setup_one_phb(np);
725 if (rc)
726 pr_err("Failed to setup PCIe bridge %s, rc=%d\n",
727 np->full_name, rc);
728 }
729
730 /* Establish device-tree linkage */
731 pci_devs_phb_init();
732
733 /* Set DMA ops to use TCEs */
734 if (iommu_is_off) {
735 pr_info("PCI-E: Disabled TCEs, using direct DMA\n");
736 set_pci_dma_ops(&dma_direct_ops);
737 } else {
738 ppc_md.pci_dma_dev_setup = wsp_pci_dma_dev_setup;
739 ppc_md.tce_build = tce_build_wsp;
740 ppc_md.tce_free = tce_free_wsp;
741 set_pci_dma_ops(&dma_iommu_ops);
742 }
743}
744
745#define err_debug(fmt...) pr_debug(fmt)
746//#define err_debug(fmt...)
747
748static int __init wsp_pci_get_err_irq_no_dt(struct device_node *np)
749{
750 const u32 *prop;
751 int hw_irq;
752
753 /* Ok, no interrupts property, let's try to find our child P2P */
754 np = of_get_next_child(np, NULL);
755 if (np == NULL)
756 return 0;
757
758 /* Grab its interrupt map */
759 prop = of_get_property(np, "interrupt-map", NULL);
760 if (prop == NULL)
761 return 0;
762
763 /* Grab one of the interrupts in there, keep the low 4 bits */
764 hw_irq = prop[5] & 0xf;
765
766 /* 0..4 for PHB 0 and 5..9 for PHB 1 */
767 if (hw_irq < 5)
768 hw_irq = 4;
769 else
770 hw_irq = 9;
771 hw_irq |= prop[5] & ~0xf;
772
773 err_debug("PCI: Using 0x%x as error IRQ for %s\n",
774 hw_irq, np->parent->full_name);
775 return irq_create_mapping(NULL, hw_irq);
776}
777
778static const struct {
779 u32 offset;
780 const char *name;
781} wsp_pci_regs[] = {
782#define DREG(x) { PCIE_REG_##x, #x }
783#define DUTL(x) { PCIE_UTL_##x, "UTL_" #x }
784 /* Architected registers except CONFIG_ and IODA
785 * to avoid side effects
786 */
787 DREG(DMA_CHAN_STATUS),
788 DREG(CPU_LOADSTORE_STATUS),
789 DREG(LOCK0),
790 DREG(LOCK1),
791 DREG(PHB_CONFIG),
792 DREG(IO_BASE_ADDR),
793 DREG(IO_BASE_MASK),
794 DREG(IO_START_ADDR),
795 DREG(M32A_BASE_ADDR),
796 DREG(M32A_BASE_MASK),
797 DREG(M32A_START_ADDR),
798 DREG(M32B_BASE_ADDR),
799 DREG(M32B_BASE_MASK),
800 DREG(M32B_START_ADDR),
801 DREG(M64_BASE_ADDR),
802 DREG(M64_BASE_MASK),
803 DREG(M64_START_ADDR),
804 DREG(TCE_KILL),
805 DREG(LOCK2),
806 DREG(PHB_GEN_CAP),
807 DREG(PHB_TCE_CAP),
808 DREG(PHB_IRQ_CAP),
809 DREG(PHB_EEH_CAP),
810 DREG(PAPR_ERR_INJ_CONTROL),
811 DREG(PAPR_ERR_INJ_ADDR),
812 DREG(PAPR_ERR_INJ_MASK),
813
814 /* UTL core regs */
815 DUTL(SYS_BUS_CONTROL),
816 DUTL(STATUS),
817 DUTL(SYS_BUS_AGENT_STATUS),
818 DUTL(SYS_BUS_AGENT_ERR_SEV),
819 DUTL(SYS_BUS_AGENT_IRQ_EN),
820 DUTL(SYS_BUS_BURST_SZ_CONF),
821 DUTL(REVISION_ID),
822 DUTL(OUT_POST_HDR_BUF_ALLOC),
823 DUTL(OUT_POST_DAT_BUF_ALLOC),
824 DUTL(IN_POST_HDR_BUF_ALLOC),
825 DUTL(IN_POST_DAT_BUF_ALLOC),
826 DUTL(OUT_NP_BUF_ALLOC),
827 DUTL(IN_NP_BUF_ALLOC),
828 DUTL(PCIE_TAGS_ALLOC),
829 DUTL(GBIF_READ_TAGS_ALLOC),
830
831 DUTL(PCIE_PORT_CONTROL),
832 DUTL(PCIE_PORT_STATUS),
833 DUTL(PCIE_PORT_ERROR_SEV),
834 DUTL(PCIE_PORT_IRQ_EN),
835 DUTL(RC_STATUS),
836 DUTL(RC_ERR_SEVERITY),
837 DUTL(RC_IRQ_EN),
838 DUTL(EP_STATUS),
839 DUTL(EP_ERR_SEVERITY),
840 DUTL(EP_ERR_IRQ_EN),
841 DUTL(PCI_PM_CTRL1),
842 DUTL(PCI_PM_CTRL2),
843
844 /* PCIe stack regs */
845 DREG(SYSTEM_CONFIG1),
846 DREG(SYSTEM_CONFIG2),
847 DREG(EP_SYSTEM_CONFIG),
848 DREG(EP_FLR),
849 DREG(EP_BAR_CONFIG),
850 DREG(LINK_CONFIG),
851 DREG(PM_CONFIG),
852 DREG(DLP_CONTROL),
853 DREG(DLP_STATUS),
854 DREG(ERR_REPORT_CONTROL),
855 DREG(SLOT_CONTROL1),
856 DREG(SLOT_CONTROL2),
857 DREG(UTL_CONFIG),
858 DREG(BUFFERS_CONFIG),
859 DREG(ERROR_INJECT),
860 DREG(SRIOV_CONFIG),
861 DREG(PF0_SRIOV_STATUS),
862 DREG(PF1_SRIOV_STATUS),
863 DREG(PORT_NUMBER),
864 DREG(POR_SYSTEM_CONFIG),
865
866 /* Internal logic regs */
867 DREG(PHB_VERSION),
868 DREG(RESET),
869 DREG(PHB_CONTROL),
870 DREG(PHB_TIMEOUT_CONTROL1),
871 DREG(PHB_QUIESCE_DMA),
872 DREG(PHB_DMA_READ_TAG_ACTV),
873 DREG(PHB_TCE_READ_TAG_ACTV),
874
875 /* FIR registers */
876 DREG(LEM_FIR_ACCUM),
877 DREG(LEM_FIR_AND_MASK),
878 DREG(LEM_FIR_OR_MASK),
879 DREG(LEM_ACTION0),
880 DREG(LEM_ACTION1),
881 DREG(LEM_ERROR_MASK),
882 DREG(LEM_ERROR_AND_MASK),
883 DREG(LEM_ERROR_OR_MASK),
884
885 /* Error traps registers */
886 DREG(PHB_ERR_STATUS),
887 DREG(PHB_ERR_STATUS),
888 DREG(PHB_ERR1_STATUS),
889 DREG(PHB_ERR_INJECT),
890 DREG(PHB_ERR_LEM_ENABLE),
891 DREG(PHB_ERR_IRQ_ENABLE),
892 DREG(PHB_ERR_FREEZE_ENABLE),
893 DREG(PHB_ERR_SIDE_ENABLE),
894 DREG(PHB_ERR_LOG_0),
895 DREG(PHB_ERR_LOG_1),
896 DREG(PHB_ERR_STATUS_MASK),
897 DREG(PHB_ERR1_STATUS_MASK),
898 DREG(MMIO_ERR_STATUS),
899 DREG(MMIO_ERR1_STATUS),
900 DREG(MMIO_ERR_INJECT),
901 DREG(MMIO_ERR_LEM_ENABLE),
902 DREG(MMIO_ERR_IRQ_ENABLE),
903 DREG(MMIO_ERR_FREEZE_ENABLE),
904 DREG(MMIO_ERR_SIDE_ENABLE),
905 DREG(MMIO_ERR_LOG_0),
906 DREG(MMIO_ERR_LOG_1),
907 DREG(MMIO_ERR_STATUS_MASK),
908 DREG(MMIO_ERR1_STATUS_MASK),
909 DREG(DMA_ERR_STATUS),
910 DREG(DMA_ERR1_STATUS),
911 DREG(DMA_ERR_INJECT),
912 DREG(DMA_ERR_LEM_ENABLE),
913 DREG(DMA_ERR_IRQ_ENABLE),
914 DREG(DMA_ERR_FREEZE_ENABLE),
915 DREG(DMA_ERR_SIDE_ENABLE),
916 DREG(DMA_ERR_LOG_0),
917 DREG(DMA_ERR_LOG_1),
918 DREG(DMA_ERR_STATUS_MASK),
919 DREG(DMA_ERR1_STATUS_MASK),
920
921 /* Debug and Trace registers */
922 DREG(PHB_DEBUG_CONTROL0),
923 DREG(PHB_DEBUG_STATUS0),
924 DREG(PHB_DEBUG_CONTROL1),
925 DREG(PHB_DEBUG_STATUS1),
926 DREG(PHB_DEBUG_CONTROL2),
927 DREG(PHB_DEBUG_STATUS2),
928 DREG(PHB_DEBUG_CONTROL3),
929 DREG(PHB_DEBUG_STATUS3),
930 DREG(PHB_DEBUG_CONTROL4),
931 DREG(PHB_DEBUG_STATUS4),
932 DREG(PHB_DEBUG_CONTROL5),
933 DREG(PHB_DEBUG_STATUS5),
934
935 /* Don't seem to exist ...
936 DREG(PHB_DEBUG_CONTROL6),
937 DREG(PHB_DEBUG_STATUS6),
938 */
939};
940
941static int wsp_pci_regs_show(struct seq_file *m, void *private)
942{
943 struct wsp_phb *phb = m->private;
944 struct pci_controller *hose = phb->hose;
945 int i;
946
947 for (i = 0; i < ARRAY_SIZE(wsp_pci_regs); i++) {
948 /* Skip write-only regs */
949 if (wsp_pci_regs[i].offset == 0xc08 ||
950 wsp_pci_regs[i].offset == 0xc10 ||
951 wsp_pci_regs[i].offset == 0xc38 ||
952 wsp_pci_regs[i].offset == 0xc40)
953 continue;
954 seq_printf(m, "0x%03x: 0x%016llx %s\n",
955 wsp_pci_regs[i].offset,
956 in_be64(hose->cfg_data + wsp_pci_regs[i].offset),
957 wsp_pci_regs[i].name);
958 }
959 return 0;
960}
961
962static int wsp_pci_regs_open(struct inode *inode, struct file *file)
963{
964 return single_open(file, wsp_pci_regs_show, inode->i_private);
965}
966
967static const struct file_operations wsp_pci_regs_fops = {
968 .open = wsp_pci_regs_open,
969 .read = seq_read,
970 .llseek = seq_lseek,
971 .release = single_release,
972};
973
974static int wsp_pci_reg_set(void *data, u64 val)
975{
976 out_be64((void __iomem *)data, val);
977 return 0;
978}
979
980static int wsp_pci_reg_get(void *data, u64 *val)
981{
982 *val = in_be64((void __iomem *)data);
983 return 0;
984}
985
986DEFINE_SIMPLE_ATTRIBUTE(wsp_pci_reg_fops, wsp_pci_reg_get, wsp_pci_reg_set, "0x%llx\n");
987
988static irqreturn_t wsp_pci_err_irq(int irq, void *dev_id)
989{
990 struct wsp_phb *phb = dev_id;
991 struct pci_controller *hose = phb->hose;
992 irqreturn_t handled = IRQ_NONE;
993 struct wsp_pcie_err_log_data ed;
994
995 pr_err("PCI: Error interrupt on %s (PHB %d)\n",
996 hose->dn->full_name, hose->global_number);
997 again:
998 memset(&ed, 0, sizeof(ed));
999
1000 /* Read and clear UTL errors */
1001 ed.utl_sys_err = in_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_STATUS);
1002 if (ed.utl_sys_err)
1003 out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_STATUS, ed.utl_sys_err);
1004 ed.utl_port_err = in_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_STATUS);
1005 if (ed.utl_port_err)
1006 out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_STATUS, ed.utl_port_err);
1007 ed.utl_rc_err = in_be64(hose->cfg_data + PCIE_UTL_RC_STATUS);
1008 if (ed.utl_rc_err)
1009 out_be64(hose->cfg_data + PCIE_UTL_RC_STATUS, ed.utl_rc_err);
1010
1011 /* Read and clear main trap errors */
1012 ed.phb_err = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_STATUS);
1013 if (ed.phb_err) {
1014 ed.phb_err1 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR1_STATUS);
1015 ed.phb_log0 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_LOG_0);
1016 ed.phb_log1 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_LOG_1);
1017 out_be64(hose->cfg_data + PCIE_REG_PHB_ERR1_STATUS, 0);
1018 out_be64(hose->cfg_data + PCIE_REG_PHB_ERR_STATUS, 0);
1019 }
1020 ed.mmio_err = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_STATUS);
1021 if (ed.mmio_err) {
1022 ed.mmio_err1 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR1_STATUS);
1023 ed.mmio_log0 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_LOG_0);
1024 ed.mmio_log1 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_LOG_1);
1025 out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR1_STATUS, 0);
1026 out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_STATUS, 0);
1027 }
1028 ed.dma_err = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS);
1029 if (ed.dma_err) {
1030 ed.dma_err1 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS);
1031 ed.dma_log0 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_LOG_0);
1032 ed.dma_log1 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_LOG_1);
1033 out_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS, 0);
1034 out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS, 0);
1035 }
1036
1037 /* Now print things out */
1038 if (ed.phb_err) {
1039 pr_err(" PHB Error Status : 0x%016llx\n", ed.phb_err);
1040 pr_err(" PHB First Error Status: 0x%016llx\n", ed.phb_err1);
1041 pr_err(" PHB Error Log 0 : 0x%016llx\n", ed.phb_log0);
1042 pr_err(" PHB Error Log 1 : 0x%016llx\n", ed.phb_log1);
1043 }
1044 if (ed.mmio_err) {
1045 pr_err(" MMIO Error Status : 0x%016llx\n", ed.mmio_err);
1046 pr_err(" MMIO First Error Status: 0x%016llx\n", ed.mmio_err1);
1047 pr_err(" MMIO Error Log 0 : 0x%016llx\n", ed.mmio_log0);
1048 pr_err(" MMIO Error Log 1 : 0x%016llx\n", ed.mmio_log1);
1049 }
1050 if (ed.dma_err) {
1051 pr_err(" DMA Error Status : 0x%016llx\n", ed.dma_err);
1052 pr_err(" DMA First Error Status: 0x%016llx\n", ed.dma_err1);
1053 pr_err(" DMA Error Log 0 : 0x%016llx\n", ed.dma_log0);
1054 pr_err(" DMA Error Log 1 : 0x%016llx\n", ed.dma_log1);
1055 }
1056 if (ed.utl_sys_err)
1057 pr_err(" UTL Sys Error Status : 0x%016llx\n", ed.utl_sys_err);
1058 if (ed.utl_port_err)
1059 pr_err(" UTL Port Error Status : 0x%016llx\n", ed.utl_port_err);
1060 if (ed.utl_rc_err)
1061 pr_err(" UTL RC Error Status : 0x%016llx\n", ed.utl_rc_err);
1062
1063 /* Interrupts are caused by the error traps. If we had any error there
1064 * we loop again in case the UTL buffered some new stuff between
1065 * going there and going to the traps
1066 */
1067 if (ed.dma_err || ed.mmio_err || ed.phb_err) {
1068 handled = IRQ_HANDLED;
1069 goto again;
1070 }
1071 return handled;
1072}
1073
1074static void __init wsp_setup_pci_err_reporting(struct wsp_phb *phb)
1075{
1076 struct pci_controller *hose = phb->hose;
1077 int err_irq, i, rc;
1078 char fname[16];
1079
1080 /* Create a debugfs file for that PHB */
1081 sprintf(fname, "phb%d", phb->hose->global_number);
1082 phb->ddir = debugfs_create_dir(fname, powerpc_debugfs_root);
1083
1084 /* Some useful debug output */
1085 if (phb->ddir) {
1086 struct dentry *d = debugfs_create_dir("regs", phb->ddir);
1087 char tmp[64];
1088
1089 for (i = 0; i < ARRAY_SIZE(wsp_pci_regs); i++) {
1090 sprintf(tmp, "%03x_%s", wsp_pci_regs[i].offset,
1091 wsp_pci_regs[i].name);
1092 debugfs_create_file(tmp, 0600, d,
1093 hose->cfg_data + wsp_pci_regs[i].offset,
1094 &wsp_pci_reg_fops);
1095 }
1096 debugfs_create_file("all_regs", 0600, phb->ddir, phb, &wsp_pci_regs_fops);
1097 }
1098
1099 /* Find the IRQ number for that PHB */
1100 err_irq = irq_of_parse_and_map(hose->dn, 0);
1101 if (err_irq == 0)
1102  /* XXX Error IRQ missing from device-tree */
1103 err_irq = wsp_pci_get_err_irq_no_dt(hose->dn);
1104 if (err_irq == 0) {
1105 pr_err("PCI: Failed to fetch error interrupt for %s\n",
1106 hose->dn->full_name);
1107 return;
1108 }
1109 /* Request it */
1110 rc = request_irq(err_irq, wsp_pci_err_irq, 0, "wsp_pci error", phb);
1111 if (rc) {
1112 pr_err("PCI: Failed to request interrupt for %s\n",
1113 hose->dn->full_name);
1114 }
1115 /* Enable interrupts for all errors for now */
1116 out_be64(hose->cfg_data + PCIE_REG_PHB_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
1117 out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
1118 out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
1119}
1120
1121/*
1122 * This is called later to hook up the error interrupt
1123 */
1124static int __init wsp_setup_pci_late(void)
1125{
1126 struct wsp_phb *phb;
1127
1128 list_for_each_entry(phb, &wsp_phbs, all)
1129 wsp_setup_pci_err_reporting(phb);
1130
1131 return 0;
1132}
1133arch_initcall(wsp_setup_pci_late);
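For reference, a minimal sketch (not part of the patch) of the TCE entry encoding that tce_build_wsp() above writes into the table; wsp_encode_tce() is a hypothetical helper built only from the TCE_* constants defined in this file:

/* Hypothetical helper: encode one 4K TCE entry the way tce_build_wsp() does */
static inline u64 wsp_encode_tce(unsigned long uaddr, bool allow_write)
{
	u64 rpn = __pa(uaddr) >> TCE_SHIFT;	/* real page number of the 4K page */
	u64 tce = TCE_PCI_READ;			/* reads from PCI are always allowed */

	if (allow_write)			/* writes only for non DMA_TO_DEVICE mappings */
		tce |= TCE_PCI_WRITE;
	return tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
}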
diff --git a/arch/powerpc/platforms/wsp/wsp_pci.h b/arch/powerpc/platforms/wsp/wsp_pci.h
new file mode 100644
index 000000000000..52e9bd95250d
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/wsp_pci.h
@@ -0,0 +1,268 @@
1/*
2 * Copyright 2010 Ben Herrenschmidt, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#ifndef __WSP_PCI_H
11#define __WSP_PCI_H
12
13/* Architected registers */
14#define PCIE_REG_DMA_CHAN_STATUS 0x110
15#define PCIE_REG_CPU_LOADSTORE_STATUS 0x120
16
17#define PCIE_REG_CONFIG_DATA 0x130
18#define PCIE_REG_LOCK0 0x138
19#define PCIE_REG_CONFIG_ADDRESS 0x140
20#define PCIE_REG_CA_ENABLE 0x8000000000000000ull
21#define PCIE_REG_CA_BUS_MASK 0x0ff0000000000000ull
22#define PCIE_REG_CA_BUS_SHIFT (20+32)
23#define PCIE_REG_CA_DEV_MASK 0x000f800000000000ull
24#define PCIE_REG_CA_DEV_SHIFT (15+32)
25#define PCIE_REG_CA_FUNC_MASK 0x0000700000000000ull
26#define PCIE_REG_CA_FUNC_SHIFT (12+32)
27#define PCIE_REG_CA_REG_MASK 0x00000fff00000000ull
28#define PCIE_REG_CA_REG_SHIFT ( 0+32)
29#define PCIE_REG_CA_BE_MASK 0x00000000f0000000ull
30#define PCIE_REG_CA_BE_SHIFT ( 28)
31#define PCIE_REG_LOCK1 0x148
32
33#define PCIE_REG_PHB_CONFIG 0x160
34#define PCIE_REG_PHBC_64B_TCE_EN 0x2000000000000000ull
35#define PCIE_REG_PHBC_MMIO_DMA_FREEZE_EN 0x1000000000000000ull
36#define PCIE_REG_PHBC_32BIT_MSI_EN 0x0080000000000000ull
37#define PCIE_REG_PHBC_M64_EN 0x0040000000000000ull
38#define PCIE_REG_PHBC_IO_EN 0x0008000000000000ull
39#define PCIE_REG_PHBC_64BIT_MSI_EN 0x0002000000000000ull
40#define PCIE_REG_PHBC_M32A_EN 0x0000800000000000ull
41#define PCIE_REG_PHBC_M32B_EN 0x0000400000000000ull
42#define PCIE_REG_PHBC_MSI_PE_VALIDATE 0x0000200000000000ull
43#define PCIE_REG_PHBC_DMA_XLATE_BYPASS 0x0000100000000000ull
44
45#define PCIE_REG_IO_BASE_ADDR 0x170
46#define PCIE_REG_IO_BASE_MASK 0x178
47#define PCIE_REG_IO_START_ADDR 0x180
48
49#define PCIE_REG_M32A_BASE_ADDR 0x190
50#define PCIE_REG_M32A_BASE_MASK 0x198
51#define PCIE_REG_M32A_START_ADDR 0x1a0
52
53#define PCIE_REG_M32B_BASE_ADDR 0x1b0
54#define PCIE_REG_M32B_BASE_MASK 0x1b8
55#define PCIE_REG_M32B_START_ADDR 0x1c0
56
57#define PCIE_REG_M64_BASE_ADDR 0x1e0
58#define PCIE_REG_M64_BASE_MASK 0x1e8
59#define PCIE_REG_M64_START_ADDR 0x1f0
60
61#define PCIE_REG_TCE_KILL 0x210
62#define PCIE_REG_TCEKILL_SINGLE 0x8000000000000000ull
63#define PCIE_REG_TCEKILL_ADDR_MASK 0x000003fffffffff8ull
64#define PCIE_REG_TCEKILL_PS_4K 0
65#define PCIE_REG_TCEKILL_PS_64K 1
66#define PCIE_REG_TCEKILL_PS_16M 2
67#define PCIE_REG_TCEKILL_PS_16G 3
68
69#define PCIE_REG_IODA_ADDR 0x220
70#define PCIE_REG_IODA_AD_AUTOINC 0x8000000000000000ull
71#define PCIE_REG_IODA_AD_TBL_MVT 0x0005000000000000ull
72#define PCIE_REG_IODA_AD_TBL_PELT 0x0006000000000000ull
73#define PCIE_REG_IODA_AD_TBL_PESTA 0x0007000000000000ull
74#define PCIE_REG_IODA_AD_TBL_PESTB 0x0008000000000000ull
75#define PCIE_REG_IODA_AD_TBL_TVT 0x0009000000000000ull
76#define PCIE_REG_IODA_AD_TBL_TCE 0x000a000000000000ull
77#define PCIE_REG_IODA_DATA0 0x228
78#define PCIE_REG_IODA_DATA1 0x230
79
80#define PCIE_REG_LOCK2 0x240
81
82#define PCIE_REG_PHB_GEN_CAP 0x250
83#define PCIE_REG_PHB_TCE_CAP 0x258
84#define PCIE_REG_PHB_IRQ_CAP 0x260
85#define PCIE_REG_PHB_EEH_CAP 0x268
86
87#define PCIE_REG_PAPR_ERR_INJ_CONTROL 0x2b0
88#define PCIE_REG_PAPR_ERR_INJ_ADDR 0x2b8
89#define PCIE_REG_PAPR_ERR_INJ_MASK 0x2c0
90
91
92#define PCIE_REG_SYS_CFG1 0x600
93#define PCIE_REG_SYS_CFG1_CLASS_CODE 0x0000000000ffffffull
94
95#define IODA_TVT0_TTA_MASK 0x000fffffffff0000ull
96#define IODA_TVT0_TTA_SHIFT 4
97#define IODA_TVT0_BUSNUM_VALID_MASK 0x000000000000e000ull
98#define IODA_TVT0_TCE_TABLE_SIZE_MASK 0x0000000000001f00ull
99#define IODA_TVT0_TCE_TABLE_SIZE_SHIFT 8
100#define IODA_TVT0_BUSNUM_VALUE_MASK 0x00000000000000ffull
101#define IODA_TVT0_BUSNUM_VALID_SHIFT 0
102#define IODA_TVT1_DEVNUM_VALID 0x2000000000000000ull
103#define IODA_TVT1_DEVNUM_VALUE_MASK 0x1f00000000000000ull
104#define IODA_TVT1_DEVNUM_VALUE_SHIFT 56
105#define IODA_TVT1_FUNCNUM_VALID 0x0008000000000000ull
106#define IODA_TVT1_FUNCNUM_VALUE_MASK 0x0007000000000000ull
107#define IODA_TVT1_FUNCNUM_VALUE_SHIFT 48
108#define IODA_TVT1_IO_PAGE_SIZE_MASK 0x00001f0000000000ull
109#define IODA_TVT1_IO_PAGE_SIZE_SHIFT 40
110#define IODA_TVT1_PE_NUMBER_MASK 0x000000000000003full
111#define IODA_TVT1_PE_NUMBER_SHIFT 0
112
113#define IODA_TVT_COUNT 64
114
115/* UTL Core registers */
116#define PCIE_UTL_SYS_BUS_CONTROL 0x400
117#define PCIE_UTL_STATUS 0x408
118#define PCIE_UTL_SYS_BUS_AGENT_STATUS 0x410
119#define PCIE_UTL_SYS_BUS_AGENT_ERR_SEV 0x418
120#define PCIE_UTL_SYS_BUS_AGENT_IRQ_EN 0x420
121#define PCIE_UTL_SYS_BUS_BURST_SZ_CONF 0x440
122#define PCIE_UTL_REVISION_ID 0x448
123
124#define PCIE_UTL_OUT_POST_HDR_BUF_ALLOC 0x4c0
125#define PCIE_UTL_OUT_POST_DAT_BUF_ALLOC 0x4d0
126#define PCIE_UTL_IN_POST_HDR_BUF_ALLOC 0x4e0
127#define PCIE_UTL_IN_POST_DAT_BUF_ALLOC 0x4f0
128#define PCIE_UTL_OUT_NP_BUF_ALLOC 0x500
129#define PCIE_UTL_IN_NP_BUF_ALLOC 0x510
130#define PCIE_UTL_PCIE_TAGS_ALLOC 0x520
131#define PCIE_UTL_GBIF_READ_TAGS_ALLOC 0x530
132
133#define PCIE_UTL_PCIE_PORT_CONTROL 0x540
134#define PCIE_UTL_PCIE_PORT_STATUS 0x548
135#define PCIE_UTL_PCIE_PORT_ERROR_SEV 0x550
136#define PCIE_UTL_PCIE_PORT_IRQ_EN 0x558
137#define PCIE_UTL_RC_STATUS 0x560
138#define PCIE_UTL_RC_ERR_SEVERITY 0x568
139#define PCIE_UTL_RC_IRQ_EN 0x570
140#define PCIE_UTL_EP_STATUS 0x578
141#define PCIE_UTL_EP_ERR_SEVERITY 0x580
142#define PCIE_UTL_EP_ERR_IRQ_EN 0x588
143
144#define PCIE_UTL_PCI_PM_CTRL1 0x590
145#define PCIE_UTL_PCI_PM_CTRL2 0x598
146
147/* PCIe stack registers */
148#define PCIE_REG_SYSTEM_CONFIG1 0x600
149#define PCIE_REG_SYSTEM_CONFIG2 0x608
150#define PCIE_REG_EP_SYSTEM_CONFIG 0x618
151#define PCIE_REG_EP_FLR 0x620
152#define PCIE_REG_EP_BAR_CONFIG 0x628
153#define PCIE_REG_LINK_CONFIG 0x630
154#define PCIE_REG_PM_CONFIG 0x640
155#define PCIE_REG_DLP_CONTROL 0x650
156#define PCIE_REG_DLP_STATUS 0x658
157#define PCIE_REG_ERR_REPORT_CONTROL 0x660
158#define PCIE_REG_SLOT_CONTROL1 0x670
159#define PCIE_REG_SLOT_CONTROL2 0x678
160#define PCIE_REG_UTL_CONFIG 0x680
161#define PCIE_REG_BUFFERS_CONFIG 0x690
162#define PCIE_REG_ERROR_INJECT 0x698
163#define PCIE_REG_SRIOV_CONFIG 0x6a0
164#define PCIE_REG_PF0_SRIOV_STATUS 0x6a8
165#define PCIE_REG_PF1_SRIOV_STATUS 0x6b0
166#define PCIE_REG_PORT_NUMBER 0x700
167#define PCIE_REG_POR_SYSTEM_CONFIG 0x708
168
169/* PHB internal logic registers */
170#define PCIE_REG_PHB_VERSION 0x800
171#define PCIE_REG_RESET 0x808
172#define PCIE_REG_PHB_CONTROL 0x810
173#define PCIE_REG_PHB_TIMEOUT_CONTROL1 0x878
174#define PCIE_REG_PHB_QUIESCE_DMA 0x888
175#define PCIE_REG_PHB_DMA_READ_TAG_ACTV 0x900
176#define PCIE_REG_PHB_TCE_READ_TAG_ACTV 0x908
177
178/* FIR registers */
179#define PCIE_REG_LEM_FIR_ACCUM 0xc00
180#define PCIE_REG_LEM_FIR_AND_MASK 0xc08
181#define PCIE_REG_LEM_FIR_OR_MASK 0xc10
182#define PCIE_REG_LEM_ACTION0 0xc18
183#define PCIE_REG_LEM_ACTION1 0xc20
184#define PCIE_REG_LEM_ERROR_MASK 0xc30
185#define PCIE_REG_LEM_ERROR_AND_MASK 0xc38
186#define PCIE_REG_LEM_ERROR_OR_MASK 0xc40
187
188/* PHB Error registers */
189#define PCIE_REG_PHB_ERR_STATUS 0xc80
190#define PCIE_REG_PHB_ERR1_STATUS 0xc88
191#define PCIE_REG_PHB_ERR_INJECT 0xc90
192#define PCIE_REG_PHB_ERR_LEM_ENABLE 0xc98
193#define PCIE_REG_PHB_ERR_IRQ_ENABLE 0xca0
194#define PCIE_REG_PHB_ERR_FREEZE_ENABLE 0xca8
195#define PCIE_REG_PHB_ERR_SIDE_ENABLE 0xcb8
196#define PCIE_REG_PHB_ERR_LOG_0 0xcc0
197#define PCIE_REG_PHB_ERR_LOG_1 0xcc8
198#define PCIE_REG_PHB_ERR_STATUS_MASK 0xcd0
199#define PCIE_REG_PHB_ERR1_STATUS_MASK 0xcd8
200
201#define PCIE_REG_MMIO_ERR_STATUS 0xd00
202#define PCIE_REG_MMIO_ERR1_STATUS 0xd08
203#define PCIE_REG_MMIO_ERR_INJECT 0xd10
204#define PCIE_REG_MMIO_ERR_LEM_ENABLE 0xd18
205#define PCIE_REG_MMIO_ERR_IRQ_ENABLE 0xd20
206#define PCIE_REG_MMIO_ERR_FREEZE_ENABLE 0xd28
207#define PCIE_REG_MMIO_ERR_SIDE_ENABLE 0xd38
208#define PCIE_REG_MMIO_ERR_LOG_0 0xd40
209#define PCIE_REG_MMIO_ERR_LOG_1 0xd48
210#define PCIE_REG_MMIO_ERR_STATUS_MASK 0xd50
211#define PCIE_REG_MMIO_ERR1_STATUS_MASK 0xd58
212
213#define PCIE_REG_DMA_ERR_STATUS 0xd80
214#define PCIE_REG_DMA_ERR1_STATUS 0xd88
215#define PCIE_REG_DMA_ERR_INJECT 0xd90
216#define PCIE_REG_DMA_ERR_LEM_ENABLE 0xd98
217#define PCIE_REG_DMA_ERR_IRQ_ENABLE 0xda0
218#define PCIE_REG_DMA_ERR_FREEZE_ENABLE 0xda8
219#define PCIE_REG_DMA_ERR_SIDE_ENABLE 0xdb8
220#define PCIE_REG_DMA_ERR_LOG_0 0xdc0
221#define PCIE_REG_DMA_ERR_LOG_1 0xdc8
222#define PCIE_REG_DMA_ERR_STATUS_MASK 0xdd0
223#define PCIE_REG_DMA_ERR1_STATUS_MASK 0xdd8
224
225/* Shortcuts for access to the above using the PHB definitions
226 * with an offset
227 */
228#define PCIE_REG_ERR_PHB_OFFSET 0x0
229#define PCIE_REG_ERR_MMIO_OFFSET 0x80
230#define PCIE_REG_ERR_DMA_OFFSET 0x100
231
232/* Debug and Trace registers */
233#define PCIE_REG_PHB_DEBUG_CONTROL0 0xe00
234#define PCIE_REG_PHB_DEBUG_STATUS0 0xe08
235#define PCIE_REG_PHB_DEBUG_CONTROL1 0xe10
236#define PCIE_REG_PHB_DEBUG_STATUS1 0xe18
237#define PCIE_REG_PHB_DEBUG_CONTROL2 0xe20
238#define PCIE_REG_PHB_DEBUG_STATUS2 0xe28
239#define PCIE_REG_PHB_DEBUG_CONTROL3 0xe30
240#define PCIE_REG_PHB_DEBUG_STATUS3 0xe38
241#define PCIE_REG_PHB_DEBUG_CONTROL4 0xe40
242#define PCIE_REG_PHB_DEBUG_STATUS4 0xe48
243#define PCIE_REG_PHB_DEBUG_CONTROL5 0xe50
244#define PCIE_REG_PHB_DEBUG_STATUS5 0xe58
245#define PCIE_REG_PHB_DEBUG_CONTROL6 0xe60
246#define PCIE_REG_PHB_DEBUG_STATUS6 0xe68
247
248/* Definition for PCIe errors */
249struct wsp_pcie_err_log_data {
250 __u64 phb_err;
251 __u64 phb_err1;
252 __u64 phb_log0;
253 __u64 phb_log1;
254 __u64 mmio_err;
255 __u64 mmio_err1;
256 __u64 mmio_log0;
257 __u64 mmio_log1;
258 __u64 dma_err;
259 __u64 dma_err1;
260 __u64 dma_log0;
261 __u64 dma_log1;
262 __u64 utl_sys_err;
263 __u64 utl_port_err;
264 __u64 utl_rc_err;
265 __u64 unused;
266};
267
268#endif /* __WSP_PCI_H */
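For reference, a minimal sketch (not part of the patch) showing how the PCIE_REG_CA_* fields above combine into the CONFIG_ADDRESS word, mirroring wsp_pcie_read_config()/wsp_pcie_write_config() in wsp_pci.c; wsp_encode_cfg_addr() is a hypothetical helper:

/* Hypothetical helper: build the config-space address word for a 1/2/4 byte cycle */
static inline u64 wsp_encode_cfg_addr(u8 bus, unsigned int devfn, int offset, int len)
{
	int suboff = offset & 3;
	/* Byte enables: 1 byte -> 0x8 >> suboff, 2 bytes -> 0xc >> suboff, 4 bytes -> 0xf */
	u64 be = (len == 1) ? (0x8ul >> suboff) :
		 (len == 2) ? (0xcul >> suboff) : 0xful;

	return PCIE_REG_CA_ENABLE |
	       ((u64)bus) << PCIE_REG_CA_BUS_SHIFT |
	       ((u64)devfn) << PCIE_REG_CA_FUNC_SHIFT |
	       ((u64)(offset & ~3)) << PCIE_REG_CA_REG_SHIFT |
	       be << PCIE_REG_CA_BE_SHIFT;
}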