diff options
Diffstat (limited to 'arch/powerpc/platforms')
173 files changed, 6345 insertions, 3097 deletions
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig index b72176434ebe..d733d7ca939c 100644 --- a/arch/powerpc/platforms/40x/Kconfig +++ b/arch/powerpc/platforms/40x/Kconfig | |||
@@ -57,6 +57,8 @@ config KILAUEA | |||
57 | select 405EX | 57 | select 405EX |
58 | select PPC40x_SIMPLE | 58 | select PPC40x_SIMPLE |
59 | select PPC4xx_PCI_EXPRESS | 59 | select PPC4xx_PCI_EXPRESS |
60 | select PCI_MSI | ||
61 | select PPC4xx_MSI | ||
60 | help | 62 | help |
61 | This option enables support for the AMCC PPC405EX evaluation board. | 63 | This option enables support for the AMCC PPC405EX evaluation board. |
62 | 64 | ||
diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c index 546bbc229d19..2521d93ef136 100644 --- a/arch/powerpc/platforms/40x/ppc40x_simple.c +++ b/arch/powerpc/platforms/40x/ppc40x_simple.c | |||
@@ -50,7 +50,7 @@ machine_device_initcall(ppc40x_simple, ppc40x_device_probe); | |||
50 | * Again, if your board needs to do things differently then create a | 50 | * Again, if your board needs to do things differently then create a |
51 | * board.c file for it rather than adding it to this list. | 51 | * board.c file for it rather than adding it to this list. |
52 | */ | 52 | */ |
53 | static char *board[] __initdata = { | 53 | static const char *board[] __initdata = { |
54 | "amcc,acadia", | 54 | "amcc,acadia", |
55 | "amcc,haleakala", | 55 | "amcc,haleakala", |
56 | "amcc,kilauea", | 56 | "amcc,kilauea", |
@@ -60,14 +60,9 @@ static char *board[] __initdata = { | |||
60 | 60 | ||
61 | static int __init ppc40x_probe(void) | 61 | static int __init ppc40x_probe(void) |
62 | { | 62 | { |
63 | unsigned long root = of_get_flat_dt_root(); | 63 | if (of_flat_dt_match(of_get_flat_dt_root(), board)) { |
64 | int i = 0; | 64 | ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC); |
65 | 65 | return 1; | |
66 | for (i = 0; i < ARRAY_SIZE(board); i++) { | ||
67 | if (of_flat_dt_is_compatible(root, board[i])) { | ||
68 | ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC); | ||
69 | return 1; | ||
70 | } | ||
71 | } | 66 | } |
72 | 67 | ||
73 | return 0; | 68 | return 0; |
diff --git a/arch/powerpc/platforms/44x/44x.h b/arch/powerpc/platforms/44x/44x.h index dbc4d2b4301a..63f703ecd23c 100644 --- a/arch/powerpc/platforms/44x/44x.h +++ b/arch/powerpc/platforms/44x/44x.h | |||
@@ -4,4 +4,8 @@ | |||
4 | extern u8 as1_readb(volatile u8 __iomem *addr); | 4 | extern u8 as1_readb(volatile u8 __iomem *addr); |
5 | extern void as1_writeb(u8 data, volatile u8 __iomem *addr); | 5 | extern void as1_writeb(u8 data, volatile u8 __iomem *addr); |
6 | 6 | ||
7 | #define GPIO0_OSRH 0xC | ||
8 | #define GPIO0_TSRH 0x14 | ||
9 | #define GPIO0_ISR1H 0x34 | ||
10 | |||
7 | #endif /* __POWERPC_PLATFORMS_44X_44X_H */ | 11 | #endif /* __POWERPC_PLATFORMS_44X_44X_H */ |
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index 69d668c072ae..e958b6f48ec2 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig | |||
@@ -17,6 +17,16 @@ config BAMBOO | |||
17 | help | 17 | help |
18 | This option enables support for the IBM PPC440EP evaluation board. | 18 | This option enables support for the IBM PPC440EP evaluation board. |
19 | 19 | ||
20 | config BLUESTONE | ||
21 | bool "Bluestone" | ||
22 | depends on 44x | ||
23 | default n | ||
24 | select PPC44x_SIMPLE | ||
25 | select APM821xx | ||
26 | select IBM_NEW_EMAC_RGMII | ||
27 | help | ||
28 | This option enables support for the APM APM821xx Evaluation board. | ||
29 | |||
20 | config EBONY | 30 | config EBONY |
21 | bool "Ebony" | 31 | bool "Ebony" |
22 | depends on 44x | 32 | depends on 44x |
@@ -64,6 +74,8 @@ config KATMAI | |||
64 | select 440SPe | 74 | select 440SPe |
65 | select PCI | 75 | select PCI |
66 | select PPC4xx_PCI_EXPRESS | 76 | select PPC4xx_PCI_EXPRESS |
77 | select PCI_MSI | ||
78 | select PCC4xx_MSI | ||
67 | help | 79 | help |
68 | This option enables support for the AMCC PPC440SPe evaluation board. | 80 | This option enables support for the AMCC PPC440SPe evaluation board. |
69 | 81 | ||
@@ -105,10 +117,11 @@ config CANYONLANDS | |||
105 | bool "Canyonlands" | 117 | bool "Canyonlands" |
106 | depends on 44x | 118 | depends on 44x |
107 | default n | 119 | default n |
108 | select PPC44x_SIMPLE | ||
109 | select 460EX | 120 | select 460EX |
110 | select PCI | 121 | select PCI |
111 | select PPC4xx_PCI_EXPRESS | 122 | select PPC4xx_PCI_EXPRESS |
123 | select PCI_MSI | ||
124 | select PPC4xx_MSI | ||
112 | select IBM_NEW_EMAC_RGMII | 125 | select IBM_NEW_EMAC_RGMII |
113 | select IBM_NEW_EMAC_ZMII | 126 | select IBM_NEW_EMAC_ZMII |
114 | help | 127 | help |
@@ -135,6 +148,8 @@ config REDWOOD | |||
135 | select 460SX | 148 | select 460SX |
136 | select PCI | 149 | select PCI |
137 | select PPC4xx_PCI_EXPRESS | 150 | select PPC4xx_PCI_EXPRESS |
151 | select PCI_MSI | ||
152 | select PPC4xx_MSI | ||
138 | help | 153 | help |
139 | This option enables support for the AMCC PPC460SX Redwood board. | 154 | This option enables support for the AMCC PPC460SX Redwood board. |
140 | 155 | ||
@@ -293,6 +308,12 @@ config 460SX | |||
293 | select IBM_NEW_EMAC_ZMII | 308 | select IBM_NEW_EMAC_ZMII |
294 | select IBM_NEW_EMAC_TAH | 309 | select IBM_NEW_EMAC_TAH |
295 | 310 | ||
311 | config APM821xx | ||
312 | bool | ||
313 | select PPC_FPU | ||
314 | select IBM_NEW_EMAC_EMAC4 | ||
315 | select IBM_NEW_EMAC_TAH | ||
316 | |||
296 | # 44x errata/workaround config symbols, selected by the CPU models above | 317 | # 44x errata/workaround config symbols, selected by the CPU models above |
297 | config IBM440EP_ERR42 | 318 | config IBM440EP_ERR42 |
298 | bool | 319 | bool |
diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile index 82ff326e0795..553db6007217 100644 --- a/arch/powerpc/platforms/44x/Makefile +++ b/arch/powerpc/platforms/44x/Makefile | |||
@@ -1,4 +1,7 @@ | |||
1 | obj-$(CONFIG_44x) := misc_44x.o idle.o | 1 | obj-$(CONFIG_44x) += misc_44x.o |
2 | ifneq ($(CONFIG_PPC4xx_CPM),y) | ||
3 | obj-$(CONFIG_44x) += idle.o | ||
4 | endif | ||
2 | obj-$(CONFIG_PPC44x_SIMPLE) += ppc44x_simple.o | 5 | obj-$(CONFIG_PPC44x_SIMPLE) += ppc44x_simple.o |
3 | obj-$(CONFIG_EBONY) += ebony.o | 6 | obj-$(CONFIG_EBONY) += ebony.o |
4 | obj-$(CONFIG_SAM440EP) += sam440ep.o | 7 | obj-$(CONFIG_SAM440EP) += sam440ep.o |
@@ -6,3 +9,4 @@ obj-$(CONFIG_WARP) += warp.o | |||
6 | obj-$(CONFIG_XILINX_VIRTEX_5_FXT) += virtex.o | 9 | obj-$(CONFIG_XILINX_VIRTEX_5_FXT) += virtex.o |
7 | obj-$(CONFIG_XILINX_ML510) += virtex_ml510.o | 10 | obj-$(CONFIG_XILINX_ML510) += virtex_ml510.o |
8 | obj-$(CONFIG_ISS4xx) += iss4xx.o | 11 | obj-$(CONFIG_ISS4xx) += iss4xx.o |
12 | obj-$(CONFIG_CANYONLANDS)+= canyonlands.o | ||
diff --git a/arch/powerpc/platforms/44x/canyonlands.c b/arch/powerpc/platforms/44x/canyonlands.c new file mode 100644 index 000000000000..afc5e8ea3775 --- /dev/null +++ b/arch/powerpc/platforms/44x/canyonlands.c | |||
@@ -0,0 +1,134 @@ | |||
1 | /* | ||
2 | * This contain platform specific code for APM PPC460EX based Canyonlands | ||
3 | * board. | ||
4 | * | ||
5 | * Copyright (c) 2010, Applied Micro Circuits Corporation | ||
6 | * Author: Rupjyoti Sarmah <rsarmah@apm.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License as | ||
10 | * published by the Free Software Foundation; either version 2 of | ||
11 | * the License, or (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, | ||
21 | * MA 02111-1307 USA | ||
22 | * | ||
23 | */ | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <asm/pci-bridge.h> | ||
27 | #include <asm/ppc4xx.h> | ||
28 | #include <asm/udbg.h> | ||
29 | #include <asm/uic.h> | ||
30 | #include <linux/of_platform.h> | ||
31 | #include <linux/delay.h> | ||
32 | #include "44x.h" | ||
33 | |||
34 | #define BCSR_USB_EN 0x11 | ||
35 | |||
36 | static __initdata struct of_device_id ppc460ex_of_bus[] = { | ||
37 | { .compatible = "ibm,plb4", }, | ||
38 | { .compatible = "ibm,opb", }, | ||
39 | { .compatible = "ibm,ebc", }, | ||
40 | { .compatible = "simple-bus", }, | ||
41 | {}, | ||
42 | }; | ||
43 | |||
44 | static int __init ppc460ex_device_probe(void) | ||
45 | { | ||
46 | of_platform_bus_probe(NULL, ppc460ex_of_bus, NULL); | ||
47 | |||
48 | return 0; | ||
49 | } | ||
50 | machine_device_initcall(canyonlands, ppc460ex_device_probe); | ||
51 | |||
52 | /* Using this code only for the Canyonlands board. */ | ||
53 | |||
54 | static int __init ppc460ex_probe(void) | ||
55 | { | ||
56 | unsigned long root = of_get_flat_dt_root(); | ||
57 | if (of_flat_dt_is_compatible(root, "amcc,canyonlands")) { | ||
58 | ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC); | ||
59 | return 1; | ||
60 | } | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | /* USB PHY fixup code on Canyonlands kit. */ | ||
65 | |||
66 | static int __init ppc460ex_canyonlands_fixup(void) | ||
67 | { | ||
68 | u8 __iomem *bcsr ; | ||
69 | void __iomem *vaddr; | ||
70 | struct device_node *np; | ||
71 | int ret = 0; | ||
72 | |||
73 | np = of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-bcsr"); | ||
74 | if (!np) { | ||
75 | printk(KERN_ERR "failed did not find amcc, ppc460ex bcsr node\n"); | ||
76 | return -ENODEV; | ||
77 | } | ||
78 | |||
79 | bcsr = of_iomap(np, 0); | ||
80 | of_node_put(np); | ||
81 | |||
82 | if (!bcsr) { | ||
83 | printk(KERN_CRIT "Could not remap bcsr\n"); | ||
84 | ret = -ENODEV; | ||
85 | goto err_bcsr; | ||
86 | } | ||
87 | |||
88 | np = of_find_compatible_node(NULL, NULL, "ibm,ppc4xx-gpio"); | ||
89 | if (!np) { | ||
90 | printk(KERN_ERR "failed did not find ibm,ppc4xx-gpio node\n"); | ||
91 | return -ENODEV; | ||
92 | } | ||
93 | |||
94 | vaddr = of_iomap(np, 0); | ||
95 | of_node_put(np); | ||
96 | |||
97 | if (!vaddr) { | ||
98 | printk(KERN_CRIT "Could not get gpio node address\n"); | ||
99 | ret = -ENODEV; | ||
100 | goto err_gpio; | ||
101 | } | ||
102 | /* Disable USB, through the BCSR7 bits */ | ||
103 | setbits8(&bcsr[7], BCSR_USB_EN); | ||
104 | |||
105 | /* Wait for a while after reset */ | ||
106 | msleep(100); | ||
107 | |||
108 | /* Enable USB here */ | ||
109 | clrbits8(&bcsr[7], BCSR_USB_EN); | ||
110 | |||
111 | /* | ||
112 | * Configure multiplexed gpio16 and gpio19 as alternate1 output | ||
113 | * source after USB reset. In this configuration gpio16 will be | ||
114 | * USB2HStop and gpio19 will be USB2DStop. For more details refer to | ||
115 | * table 34-7 of PPC460EX user manual. | ||
116 | */ | ||
117 | setbits32((vaddr + GPIO0_OSRH), 0x42000000); | ||
118 | setbits32((vaddr + GPIO0_TSRH), 0x42000000); | ||
119 | err_gpio: | ||
120 | iounmap(vaddr); | ||
121 | err_bcsr: | ||
122 | iounmap(bcsr); | ||
123 | return ret; | ||
124 | } | ||
125 | machine_device_initcall(canyonlands, ppc460ex_canyonlands_fixup); | ||
126 | define_machine(canyonlands) { | ||
127 | .name = "Canyonlands", | ||
128 | .probe = ppc460ex_probe, | ||
129 | .progress = udbg_progress, | ||
130 | .init_IRQ = uic_init_tree, | ||
131 | .get_irq = uic_get_irq, | ||
132 | .restart = ppc4xx_reset_system, | ||
133 | .calibrate_decr = generic_calibrate_decr, | ||
134 | }; | ||
diff --git a/arch/powerpc/platforms/44x/iss4xx.c b/arch/powerpc/platforms/44x/iss4xx.c index aa46e9d1e771..19395f18b1db 100644 --- a/arch/powerpc/platforms/44x/iss4xx.c +++ b/arch/powerpc/platforms/44x/iss4xx.c | |||
@@ -87,7 +87,7 @@ static void __cpuinit smp_iss4xx_setup_cpu(int cpu) | |||
87 | mpic_setup_this_cpu(); | 87 | mpic_setup_this_cpu(); |
88 | } | 88 | } |
89 | 89 | ||
90 | static void __cpuinit smp_iss4xx_kick_cpu(int cpu) | 90 | static int __cpuinit smp_iss4xx_kick_cpu(int cpu) |
91 | { | 91 | { |
92 | struct device_node *cpunode = of_get_cpu_node(cpu, NULL); | 92 | struct device_node *cpunode = of_get_cpu_node(cpu, NULL); |
93 | const u64 *spin_table_addr_prop; | 93 | const u64 *spin_table_addr_prop; |
@@ -104,7 +104,7 @@ static void __cpuinit smp_iss4xx_kick_cpu(int cpu) | |||
104 | NULL); | 104 | NULL); |
105 | if (spin_table_addr_prop == NULL) { | 105 | if (spin_table_addr_prop == NULL) { |
106 | pr_err("CPU%d: Can't start, missing cpu-release-addr !\n", cpu); | 106 | pr_err("CPU%d: Can't start, missing cpu-release-addr !\n", cpu); |
107 | return; | 107 | return -ENOENT; |
108 | } | 108 | } |
109 | 109 | ||
110 | /* Assume it's mapped as part of the linear mapping. This is a bit | 110 | /* Assume it's mapped as part of the linear mapping. This is a bit |
@@ -117,6 +117,8 @@ static void __cpuinit smp_iss4xx_kick_cpu(int cpu) | |||
117 | smp_wmb(); | 117 | smp_wmb(); |
118 | spin_table[1] = __pa(start_secondary_47x); | 118 | spin_table[1] = __pa(start_secondary_47x); |
119 | mb(); | 119 | mb(); |
120 | |||
121 | return 0; | ||
120 | } | 122 | } |
121 | 123 | ||
122 | static struct smp_ops_t iss_smp_ops = { | 124 | static struct smp_ops_t iss_smp_ops = { |
diff --git a/arch/powerpc/platforms/44x/ppc44x_simple.c b/arch/powerpc/platforms/44x/ppc44x_simple.c index 5f7a29d7f590..c81c19c0b3d4 100644 --- a/arch/powerpc/platforms/44x/ppc44x_simple.c +++ b/arch/powerpc/platforms/44x/ppc44x_simple.c | |||
@@ -52,7 +52,7 @@ machine_device_initcall(ppc44x_simple, ppc44x_device_probe); | |||
52 | static char *board[] __initdata = { | 52 | static char *board[] __initdata = { |
53 | "amcc,arches", | 53 | "amcc,arches", |
54 | "amcc,bamboo", | 54 | "amcc,bamboo", |
55 | "amcc,canyonlands", | 55 | "amcc,bluestone", |
56 | "amcc,glacier", | 56 | "amcc,glacier", |
57 | "ibm,ebony", | 57 | "ibm,ebony", |
58 | "amcc,eiger", | 58 | "amcc,eiger", |
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c index 4ecf4cf9a51b..9f09319352c0 100644 --- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c +++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c | |||
@@ -59,9 +59,9 @@ irq_to_pic_bit(unsigned int irq) | |||
59 | } | 59 | } |
60 | 60 | ||
61 | static void | 61 | static void |
62 | cpld_mask_irq(unsigned int irq) | 62 | cpld_mask_irq(struct irq_data *d) |
63 | { | 63 | { |
64 | unsigned int cpld_irq = (unsigned int)irq_map[irq].hwirq; | 64 | unsigned int cpld_irq = (unsigned int)irqd_to_hwirq(d); |
65 | void __iomem *pic_mask = irq_to_pic_mask(cpld_irq); | 65 | void __iomem *pic_mask = irq_to_pic_mask(cpld_irq); |
66 | 66 | ||
67 | out_8(pic_mask, | 67 | out_8(pic_mask, |
@@ -69,9 +69,9 @@ cpld_mask_irq(unsigned int irq) | |||
69 | } | 69 | } |
70 | 70 | ||
71 | static void | 71 | static void |
72 | cpld_unmask_irq(unsigned int irq) | 72 | cpld_unmask_irq(struct irq_data *d) |
73 | { | 73 | { |
74 | unsigned int cpld_irq = (unsigned int)irq_map[irq].hwirq; | 74 | unsigned int cpld_irq = (unsigned int)irqd_to_hwirq(d); |
75 | void __iomem *pic_mask = irq_to_pic_mask(cpld_irq); | 75 | void __iomem *pic_mask = irq_to_pic_mask(cpld_irq); |
76 | 76 | ||
77 | out_8(pic_mask, | 77 | out_8(pic_mask, |
@@ -80,9 +80,9 @@ cpld_unmask_irq(unsigned int irq) | |||
80 | 80 | ||
81 | static struct irq_chip cpld_pic = { | 81 | static struct irq_chip cpld_pic = { |
82 | .name = "CPLD PIC", | 82 | .name = "CPLD PIC", |
83 | .mask = cpld_mask_irq, | 83 | .irq_mask = cpld_mask_irq, |
84 | .ack = cpld_mask_irq, | 84 | .irq_ack = cpld_mask_irq, |
85 | .unmask = cpld_unmask_irq, | 85 | .irq_unmask = cpld_unmask_irq, |
86 | }; | 86 | }; |
87 | 87 | ||
88 | static int | 88 | static int |
@@ -97,7 +97,7 @@ cpld_pic_get_irq(int offset, u8 ignore, u8 __iomem *statusp, | |||
97 | status |= (ignore | mask); | 97 | status |= (ignore | mask); |
98 | 98 | ||
99 | if (status == 0xff) | 99 | if (status == 0xff) |
100 | return NO_IRQ_IGNORE; | 100 | return NO_IRQ; |
101 | 101 | ||
102 | cpld_irq = ffz(status) + offset; | 102 | cpld_irq = ffz(status) + offset; |
103 | 103 | ||
@@ -109,14 +109,14 @@ cpld_pic_cascade(unsigned int irq, struct irq_desc *desc) | |||
109 | { | 109 | { |
110 | irq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status, | 110 | irq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status, |
111 | &cpld_regs->pci_mask); | 111 | &cpld_regs->pci_mask); |
112 | if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) { | 112 | if (irq != NO_IRQ) { |
113 | generic_handle_irq(irq); | 113 | generic_handle_irq(irq); |
114 | return; | 114 | return; |
115 | } | 115 | } |
116 | 116 | ||
117 | irq = cpld_pic_get_irq(8, MISC_IGNORE, &cpld_regs->misc_status, | 117 | irq = cpld_pic_get_irq(8, MISC_IGNORE, &cpld_regs->misc_status, |
118 | &cpld_regs->misc_mask); | 118 | &cpld_regs->misc_mask); |
119 | if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) { | 119 | if (irq != NO_IRQ) { |
120 | generic_handle_irq(irq); | 120 | generic_handle_irq(irq); |
121 | return; | 121 | return; |
122 | } | 122 | } |
@@ -132,8 +132,8 @@ static int | |||
132 | cpld_pic_host_map(struct irq_host *h, unsigned int virq, | 132 | cpld_pic_host_map(struct irq_host *h, unsigned int virq, |
133 | irq_hw_number_t hw) | 133 | irq_hw_number_t hw) |
134 | { | 134 | { |
135 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 135 | irq_set_status_flags(virq, IRQ_LEVEL); |
136 | set_irq_chip_and_handler(virq, &cpld_pic, handle_level_irq); | 136 | irq_set_chip_and_handler(virq, &cpld_pic, handle_level_irq); |
137 | return 0; | 137 | return 0; |
138 | } | 138 | } |
139 | 139 | ||
@@ -198,7 +198,7 @@ mpc5121_ads_cpld_pic_init(void) | |||
198 | goto end; | 198 | goto end; |
199 | } | 199 | } |
200 | 200 | ||
201 | set_irq_chained_handler(cascade_irq, cpld_pic_cascade); | 201 | irq_set_chained_handler(cascade_irq, cpld_pic_cascade); |
202 | end: | 202 | end: |
203 | of_node_put(np); | 203 | of_node_put(np); |
204 | } | 204 | } |
diff --git a/arch/powerpc/platforms/512x/mpc5121_generic.c b/arch/powerpc/platforms/512x/mpc5121_generic.c index e487eb06ec6b..926731f1ff01 100644 --- a/arch/powerpc/platforms/512x/mpc5121_generic.c +++ b/arch/powerpc/platforms/512x/mpc5121_generic.c | |||
@@ -26,7 +26,7 @@ | |||
26 | /* | 26 | /* |
27 | * list of supported boards | 27 | * list of supported boards |
28 | */ | 28 | */ |
29 | static char *board[] __initdata = { | 29 | static const char *board[] __initdata = { |
30 | "prt,prtlvt", | 30 | "prt,prtlvt", |
31 | NULL | 31 | NULL |
32 | }; | 32 | }; |
@@ -36,16 +36,7 @@ static char *board[] __initdata = { | |||
36 | */ | 36 | */ |
37 | static int __init mpc5121_generic_probe(void) | 37 | static int __init mpc5121_generic_probe(void) |
38 | { | 38 | { |
39 | unsigned long node = of_get_flat_dt_root(); | 39 | return of_flat_dt_match(of_get_flat_dt_root(), board); |
40 | int i = 0; | ||
41 | |||
42 | while (board[i]) { | ||
43 | if (of_flat_dt_is_compatible(node, board[i])) | ||
44 | break; | ||
45 | i++; | ||
46 | } | ||
47 | |||
48 | return board[i] != NULL; | ||
49 | } | 40 | } |
50 | 41 | ||
51 | define_machine(mpc5121_generic) { | 42 | define_machine(mpc5121_generic) { |
diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c index de55bc0584b5..01ffa64d2aa7 100644 --- a/arch/powerpc/platforms/52xx/lite5200.c +++ b/arch/powerpc/platforms/52xx/lite5200.c | |||
@@ -172,20 +172,18 @@ static void __init lite5200_setup_arch(void) | |||
172 | mpc52xx_setup_pci(); | 172 | mpc52xx_setup_pci(); |
173 | } | 173 | } |
174 | 174 | ||
175 | static const char *board[] __initdata = { | ||
176 | "fsl,lite5200", | ||
177 | "fsl,lite5200b", | ||
178 | NULL, | ||
179 | }; | ||
180 | |||
175 | /* | 181 | /* |
176 | * Called very early, MMU is off, device-tree isn't unflattened | 182 | * Called very early, MMU is off, device-tree isn't unflattened |
177 | */ | 183 | */ |
178 | static int __init lite5200_probe(void) | 184 | static int __init lite5200_probe(void) |
179 | { | 185 | { |
180 | unsigned long node = of_get_flat_dt_root(); | 186 | return of_flat_dt_match(of_get_flat_dt_root(), board); |
181 | const char *model = of_get_flat_dt_prop(node, "model", NULL); | ||
182 | |||
183 | if (!of_flat_dt_is_compatible(node, "fsl,lite5200") && | ||
184 | !of_flat_dt_is_compatible(node, "fsl,lite5200b")) | ||
185 | return 0; | ||
186 | pr_debug("%s board found\n", model ? model : "unknown"); | ||
187 | |||
188 | return 1; | ||
189 | } | 187 | } |
190 | 188 | ||
191 | define_machine(lite5200) { | 189 | define_machine(lite5200) { |
diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c index 80234e5921f5..eda0fc2a3914 100644 --- a/arch/powerpc/platforms/52xx/lite5200_pm.c +++ b/arch/powerpc/platforms/52xx/lite5200_pm.c | |||
@@ -232,7 +232,7 @@ static void lite5200_pm_end(void) | |||
232 | lite5200_pm_target_state = PM_SUSPEND_ON; | 232 | lite5200_pm_target_state = PM_SUSPEND_ON; |
233 | } | 233 | } |
234 | 234 | ||
235 | static struct platform_suspend_ops lite5200_pm_ops = { | 235 | static const struct platform_suspend_ops lite5200_pm_ops = { |
236 | .valid = lite5200_pm_valid, | 236 | .valid = lite5200_pm_valid, |
237 | .begin = lite5200_pm_begin, | 237 | .begin = lite5200_pm_begin, |
238 | .prepare = lite5200_pm_prepare, | 238 | .prepare = lite5200_pm_prepare, |
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c index 0bac3a3dbecf..96f85e5e0cd3 100644 --- a/arch/powerpc/platforms/52xx/media5200.c +++ b/arch/powerpc/platforms/52xx/media5200.c | |||
@@ -49,45 +49,46 @@ struct media5200_irq { | |||
49 | }; | 49 | }; |
50 | struct media5200_irq media5200_irq; | 50 | struct media5200_irq media5200_irq; |
51 | 51 | ||
52 | static void media5200_irq_unmask(unsigned int virq) | 52 | static void media5200_irq_unmask(struct irq_data *d) |
53 | { | 53 | { |
54 | unsigned long flags; | 54 | unsigned long flags; |
55 | u32 val; | 55 | u32 val; |
56 | 56 | ||
57 | spin_lock_irqsave(&media5200_irq.lock, flags); | 57 | spin_lock_irqsave(&media5200_irq.lock, flags); |
58 | val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE); | 58 | val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE); |
59 | val |= 1 << (MEDIA5200_IRQ_SHIFT + irq_map[virq].hwirq); | 59 | val |= 1 << (MEDIA5200_IRQ_SHIFT + irqd_to_hwirq(d)); |
60 | out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val); | 60 | out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val); |
61 | spin_unlock_irqrestore(&media5200_irq.lock, flags); | 61 | spin_unlock_irqrestore(&media5200_irq.lock, flags); |
62 | } | 62 | } |
63 | 63 | ||
64 | static void media5200_irq_mask(unsigned int virq) | 64 | static void media5200_irq_mask(struct irq_data *d) |
65 | { | 65 | { |
66 | unsigned long flags; | 66 | unsigned long flags; |
67 | u32 val; | 67 | u32 val; |
68 | 68 | ||
69 | spin_lock_irqsave(&media5200_irq.lock, flags); | 69 | spin_lock_irqsave(&media5200_irq.lock, flags); |
70 | val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE); | 70 | val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE); |
71 | val &= ~(1 << (MEDIA5200_IRQ_SHIFT + irq_map[virq].hwirq)); | 71 | val &= ~(1 << (MEDIA5200_IRQ_SHIFT + irqd_to_hwirq(d))); |
72 | out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val); | 72 | out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val); |
73 | spin_unlock_irqrestore(&media5200_irq.lock, flags); | 73 | spin_unlock_irqrestore(&media5200_irq.lock, flags); |
74 | } | 74 | } |
75 | 75 | ||
76 | static struct irq_chip media5200_irq_chip = { | 76 | static struct irq_chip media5200_irq_chip = { |
77 | .name = "Media5200 FPGA", | 77 | .name = "Media5200 FPGA", |
78 | .unmask = media5200_irq_unmask, | 78 | .irq_unmask = media5200_irq_unmask, |
79 | .mask = media5200_irq_mask, | 79 | .irq_mask = media5200_irq_mask, |
80 | .mask_ack = media5200_irq_mask, | 80 | .irq_mask_ack = media5200_irq_mask, |
81 | }; | 81 | }; |
82 | 82 | ||
83 | void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) | 83 | void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) |
84 | { | 84 | { |
85 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
85 | int sub_virq, val; | 86 | int sub_virq, val; |
86 | u32 status, enable; | 87 | u32 status, enable; |
87 | 88 | ||
88 | /* Mask off the cascaded IRQ */ | 89 | /* Mask off the cascaded IRQ */ |
89 | raw_spin_lock(&desc->lock); | 90 | raw_spin_lock(&desc->lock); |
90 | desc->chip->mask(virq); | 91 | chip->irq_mask(&desc->irq_data); |
91 | raw_spin_unlock(&desc->lock); | 92 | raw_spin_unlock(&desc->lock); |
92 | 93 | ||
93 | /* Ask the FPGA for IRQ status. If 'val' is 0, then no irqs | 94 | /* Ask the FPGA for IRQ status. If 'val' is 0, then no irqs |
@@ -105,24 +106,19 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) | |||
105 | 106 | ||
106 | /* Processing done; can reenable the cascade now */ | 107 | /* Processing done; can reenable the cascade now */ |
107 | raw_spin_lock(&desc->lock); | 108 | raw_spin_lock(&desc->lock); |
108 | desc->chip->ack(virq); | 109 | chip->irq_ack(&desc->irq_data); |
109 | if (!(desc->status & IRQ_DISABLED)) | 110 | if (!irqd_irq_disabled(&desc->irq_data)) |
110 | desc->chip->unmask(virq); | 111 | chip->irq_unmask(&desc->irq_data); |
111 | raw_spin_unlock(&desc->lock); | 112 | raw_spin_unlock(&desc->lock); |
112 | } | 113 | } |
113 | 114 | ||
114 | static int media5200_irq_map(struct irq_host *h, unsigned int virq, | 115 | static int media5200_irq_map(struct irq_host *h, unsigned int virq, |
115 | irq_hw_number_t hw) | 116 | irq_hw_number_t hw) |
116 | { | 117 | { |
117 | struct irq_desc *desc = irq_to_desc(virq); | ||
118 | |||
119 | pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw); | 118 | pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw); |
120 | set_irq_chip_data(virq, &media5200_irq); | 119 | irq_set_chip_data(virq, &media5200_irq); |
121 | set_irq_chip_and_handler(virq, &media5200_irq_chip, handle_level_irq); | 120 | irq_set_chip_and_handler(virq, &media5200_irq_chip, handle_level_irq); |
122 | set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); | 121 | irq_set_status_flags(virq, IRQ_LEVEL); |
123 | desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); | ||
124 | desc->status |= IRQ_TYPE_LEVEL_LOW | IRQ_LEVEL; | ||
125 | |||
126 | return 0; | 122 | return 0; |
127 | } | 123 | } |
128 | 124 | ||
@@ -186,8 +182,8 @@ static void __init media5200_init_irq(void) | |||
186 | 182 | ||
187 | media5200_irq.irqhost->host_data = &media5200_irq; | 183 | media5200_irq.irqhost->host_data = &media5200_irq; |
188 | 184 | ||
189 | set_irq_data(cascade_virq, &media5200_irq); | 185 | irq_set_handler_data(cascade_virq, &media5200_irq); |
190 | set_irq_chained_handler(cascade_virq, media5200_irq_cascade); | 186 | irq_set_chained_handler(cascade_virq, media5200_irq_cascade); |
191 | 187 | ||
192 | return; | 188 | return; |
193 | 189 | ||
@@ -239,7 +235,7 @@ static void __init media5200_setup_arch(void) | |||
239 | } | 235 | } |
240 | 236 | ||
241 | /* list of the supported boards */ | 237 | /* list of the supported boards */ |
242 | static char *board[] __initdata = { | 238 | static const char *board[] __initdata = { |
243 | "fsl,media5200", | 239 | "fsl,media5200", |
244 | NULL | 240 | NULL |
245 | }; | 241 | }; |
@@ -249,16 +245,7 @@ static char *board[] __initdata = { | |||
249 | */ | 245 | */ |
250 | static int __init media5200_probe(void) | 246 | static int __init media5200_probe(void) |
251 | { | 247 | { |
252 | unsigned long node = of_get_flat_dt_root(); | 248 | return of_flat_dt_match(of_get_flat_dt_root(), board); |
253 | int i = 0; | ||
254 | |||
255 | while (board[i]) { | ||
256 | if (of_flat_dt_is_compatible(node, board[i])) | ||
257 | break; | ||
258 | i++; | ||
259 | } | ||
260 | |||
261 | return (board[i] != NULL); | ||
262 | } | 249 | } |
263 | 250 | ||
264 | define_machine(media5200_platform) { | 251 | define_machine(media5200_platform) { |
diff --git a/arch/powerpc/platforms/52xx/mpc5200_simple.c b/arch/powerpc/platforms/52xx/mpc5200_simple.c index d45be5b5ad49..e36d6e232ae6 100644 --- a/arch/powerpc/platforms/52xx/mpc5200_simple.c +++ b/arch/powerpc/platforms/52xx/mpc5200_simple.c | |||
@@ -49,7 +49,7 @@ static void __init mpc5200_simple_setup_arch(void) | |||
49 | } | 49 | } |
50 | 50 | ||
51 | /* list of the supported boards */ | 51 | /* list of the supported boards */ |
52 | static char *board[] __initdata = { | 52 | static const char *board[] __initdata = { |
53 | "intercontrol,digsy-mtc", | 53 | "intercontrol,digsy-mtc", |
54 | "manroland,mucmc52", | 54 | "manroland,mucmc52", |
55 | "manroland,uc101", | 55 | "manroland,uc101", |
@@ -66,16 +66,7 @@ static char *board[] __initdata = { | |||
66 | */ | 66 | */ |
67 | static int __init mpc5200_simple_probe(void) | 67 | static int __init mpc5200_simple_probe(void) |
68 | { | 68 | { |
69 | unsigned long node = of_get_flat_dt_root(); | 69 | return of_flat_dt_match(of_get_flat_dt_root(), board); |
70 | int i = 0; | ||
71 | |||
72 | while (board[i]) { | ||
73 | if (of_flat_dt_is_compatible(node, board[i])) | ||
74 | break; | ||
75 | i++; | ||
76 | } | ||
77 | |||
78 | return (board[i] != NULL); | ||
79 | } | 70 | } |
80 | 71 | ||
81 | define_machine(mpc5200_simple_platform) { | 72 | define_machine(mpc5200_simple_platform) { |
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c index 0dad9a935eb5..1757d1db4b51 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c | |||
@@ -147,8 +147,7 @@ mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) | |||
147 | return 0; | 147 | return 0; |
148 | } | 148 | } |
149 | 149 | ||
150 | static int __devinit mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev, | 150 | static int __devinit mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev) |
151 | const struct of_device_id *match) | ||
152 | { | 151 | { |
153 | struct mpc52xx_gpiochip *chip; | 152 | struct mpc52xx_gpiochip *chip; |
154 | struct mpc52xx_gpio_wkup __iomem *regs; | 153 | struct mpc52xx_gpio_wkup __iomem *regs; |
@@ -191,7 +190,7 @@ static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = { | |||
191 | {} | 190 | {} |
192 | }; | 191 | }; |
193 | 192 | ||
194 | static struct of_platform_driver mpc52xx_wkup_gpiochip_driver = { | 193 | static struct platform_driver mpc52xx_wkup_gpiochip_driver = { |
195 | .driver = { | 194 | .driver = { |
196 | .name = "gpio_wkup", | 195 | .name = "gpio_wkup", |
197 | .owner = THIS_MODULE, | 196 | .owner = THIS_MODULE, |
@@ -310,8 +309,7 @@ mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) | |||
310 | return 0; | 309 | return 0; |
311 | } | 310 | } |
312 | 311 | ||
313 | static int __devinit mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev, | 312 | static int __devinit mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev) |
314 | const struct of_device_id *match) | ||
315 | { | 313 | { |
316 | struct mpc52xx_gpiochip *chip; | 314 | struct mpc52xx_gpiochip *chip; |
317 | struct gpio_chip *gc; | 315 | struct gpio_chip *gc; |
@@ -349,7 +347,7 @@ static const struct of_device_id mpc52xx_simple_gpiochip_match[] = { | |||
349 | {} | 347 | {} |
350 | }; | 348 | }; |
351 | 349 | ||
352 | static struct of_platform_driver mpc52xx_simple_gpiochip_driver = { | 350 | static struct platform_driver mpc52xx_simple_gpiochip_driver = { |
353 | .driver = { | 351 | .driver = { |
354 | .name = "gpio", | 352 | .name = "gpio", |
355 | .owner = THIS_MODULE, | 353 | .owner = THIS_MODULE, |
@@ -361,10 +359,10 @@ static struct of_platform_driver mpc52xx_simple_gpiochip_driver = { | |||
361 | 359 | ||
362 | static int __init mpc52xx_gpio_init(void) | 360 | static int __init mpc52xx_gpio_init(void) |
363 | { | 361 | { |
364 | if (of_register_platform_driver(&mpc52xx_wkup_gpiochip_driver)) | 362 | if (platform_driver_register(&mpc52xx_wkup_gpiochip_driver)) |
365 | printk(KERN_ERR "Unable to register wakeup GPIO driver\n"); | 363 | printk(KERN_ERR "Unable to register wakeup GPIO driver\n"); |
366 | 364 | ||
367 | if (of_register_platform_driver(&mpc52xx_simple_gpiochip_driver)) | 365 | if (platform_driver_register(&mpc52xx_simple_gpiochip_driver)) |
368 | printk(KERN_ERR "Unable to register simple GPIO driver\n"); | 366 | printk(KERN_ERR "Unable to register simple GPIO driver\n"); |
369 | 367 | ||
370 | return 0; | 368 | return 0; |
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c index fea833e18ad5..6c39b9cc2fa3 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c | |||
@@ -63,6 +63,7 @@ | |||
63 | #include <linux/of_gpio.h> | 63 | #include <linux/of_gpio.h> |
64 | #include <linux/kernel.h> | 64 | #include <linux/kernel.h> |
65 | #include <linux/slab.h> | 65 | #include <linux/slab.h> |
66 | #include <linux/fs.h> | ||
66 | #include <linux/watchdog.h> | 67 | #include <linux/watchdog.h> |
67 | #include <linux/miscdevice.h> | 68 | #include <linux/miscdevice.h> |
68 | #include <linux/uaccess.h> | 69 | #include <linux/uaccess.h> |
@@ -134,9 +135,9 @@ DEFINE_MUTEX(mpc52xx_gpt_list_mutex); | |||
134 | * Cascaded interrupt controller hooks | 135 | * Cascaded interrupt controller hooks |
135 | */ | 136 | */ |
136 | 137 | ||
137 | static void mpc52xx_gpt_irq_unmask(unsigned int virq) | 138 | static void mpc52xx_gpt_irq_unmask(struct irq_data *d) |
138 | { | 139 | { |
139 | struct mpc52xx_gpt_priv *gpt = get_irq_chip_data(virq); | 140 | struct mpc52xx_gpt_priv *gpt = irq_data_get_irq_chip_data(d); |
140 | unsigned long flags; | 141 | unsigned long flags; |
141 | 142 | ||
142 | spin_lock_irqsave(&gpt->lock, flags); | 143 | spin_lock_irqsave(&gpt->lock, flags); |
@@ -144,9 +145,9 @@ static void mpc52xx_gpt_irq_unmask(unsigned int virq) | |||
144 | spin_unlock_irqrestore(&gpt->lock, flags); | 145 | spin_unlock_irqrestore(&gpt->lock, flags); |
145 | } | 146 | } |
146 | 147 | ||
147 | static void mpc52xx_gpt_irq_mask(unsigned int virq) | 148 | static void mpc52xx_gpt_irq_mask(struct irq_data *d) |
148 | { | 149 | { |
149 | struct mpc52xx_gpt_priv *gpt = get_irq_chip_data(virq); | 150 | struct mpc52xx_gpt_priv *gpt = irq_data_get_irq_chip_data(d); |
150 | unsigned long flags; | 151 | unsigned long flags; |
151 | 152 | ||
152 | spin_lock_irqsave(&gpt->lock, flags); | 153 | spin_lock_irqsave(&gpt->lock, flags); |
@@ -154,20 +155,20 @@ static void mpc52xx_gpt_irq_mask(unsigned int virq) | |||
154 | spin_unlock_irqrestore(&gpt->lock, flags); | 155 | spin_unlock_irqrestore(&gpt->lock, flags); |
155 | } | 156 | } |
156 | 157 | ||
157 | static void mpc52xx_gpt_irq_ack(unsigned int virq) | 158 | static void mpc52xx_gpt_irq_ack(struct irq_data *d) |
158 | { | 159 | { |
159 | struct mpc52xx_gpt_priv *gpt = get_irq_chip_data(virq); | 160 | struct mpc52xx_gpt_priv *gpt = irq_data_get_irq_chip_data(d); |
160 | 161 | ||
161 | out_be32(&gpt->regs->status, MPC52xx_GPT_STATUS_IRQMASK); | 162 | out_be32(&gpt->regs->status, MPC52xx_GPT_STATUS_IRQMASK); |
162 | } | 163 | } |
163 | 164 | ||
164 | static int mpc52xx_gpt_irq_set_type(unsigned int virq, unsigned int flow_type) | 165 | static int mpc52xx_gpt_irq_set_type(struct irq_data *d, unsigned int flow_type) |
165 | { | 166 | { |
166 | struct mpc52xx_gpt_priv *gpt = get_irq_chip_data(virq); | 167 | struct mpc52xx_gpt_priv *gpt = irq_data_get_irq_chip_data(d); |
167 | unsigned long flags; | 168 | unsigned long flags; |
168 | u32 reg; | 169 | u32 reg; |
169 | 170 | ||
170 | dev_dbg(gpt->dev, "%s: virq=%i type=%x\n", __func__, virq, flow_type); | 171 | dev_dbg(gpt->dev, "%s: virq=%i type=%x\n", __func__, d->irq, flow_type); |
171 | 172 | ||
172 | spin_lock_irqsave(&gpt->lock, flags); | 173 | spin_lock_irqsave(&gpt->lock, flags); |
173 | reg = in_be32(&gpt->regs->mode) & ~MPC52xx_GPT_MODE_ICT_MASK; | 174 | reg = in_be32(&gpt->regs->mode) & ~MPC52xx_GPT_MODE_ICT_MASK; |
@@ -183,15 +184,15 @@ static int mpc52xx_gpt_irq_set_type(unsigned int virq, unsigned int flow_type) | |||
183 | 184 | ||
184 | static struct irq_chip mpc52xx_gpt_irq_chip = { | 185 | static struct irq_chip mpc52xx_gpt_irq_chip = { |
185 | .name = "MPC52xx GPT", | 186 | .name = "MPC52xx GPT", |
186 | .unmask = mpc52xx_gpt_irq_unmask, | 187 | .irq_unmask = mpc52xx_gpt_irq_unmask, |
187 | .mask = mpc52xx_gpt_irq_mask, | 188 | .irq_mask = mpc52xx_gpt_irq_mask, |
188 | .ack = mpc52xx_gpt_irq_ack, | 189 | .irq_ack = mpc52xx_gpt_irq_ack, |
189 | .set_type = mpc52xx_gpt_irq_set_type, | 190 | .irq_set_type = mpc52xx_gpt_irq_set_type, |
190 | }; | 191 | }; |
191 | 192 | ||
192 | void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc) | 193 | void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc) |
193 | { | 194 | { |
194 | struct mpc52xx_gpt_priv *gpt = get_irq_data(virq); | 195 | struct mpc52xx_gpt_priv *gpt = irq_get_handler_data(virq); |
195 | int sub_virq; | 196 | int sub_virq; |
196 | u32 status; | 197 | u32 status; |
197 | 198 | ||
@@ -208,8 +209,8 @@ static int mpc52xx_gpt_irq_map(struct irq_host *h, unsigned int virq, | |||
208 | struct mpc52xx_gpt_priv *gpt = h->host_data; | 209 | struct mpc52xx_gpt_priv *gpt = h->host_data; |
209 | 210 | ||
210 | dev_dbg(gpt->dev, "%s: h=%p, virq=%i\n", __func__, h, virq); | 211 | dev_dbg(gpt->dev, "%s: h=%p, virq=%i\n", __func__, h, virq); |
211 | set_irq_chip_data(virq, gpt); | 212 | irq_set_chip_data(virq, gpt); |
212 | set_irq_chip_and_handler(virq, &mpc52xx_gpt_irq_chip, handle_edge_irq); | 213 | irq_set_chip_and_handler(virq, &mpc52xx_gpt_irq_chip, handle_edge_irq); |
213 | 214 | ||
214 | return 0; | 215 | return 0; |
215 | } | 216 | } |
@@ -258,8 +259,8 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node) | |||
258 | } | 259 | } |
259 | 260 | ||
260 | gpt->irqhost->host_data = gpt; | 261 | gpt->irqhost->host_data = gpt; |
261 | set_irq_data(cascade_virq, gpt); | 262 | irq_set_handler_data(cascade_virq, gpt); |
262 | set_irq_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade); | 263 | irq_set_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade); |
263 | 264 | ||
264 | /* If the GPT is currently disabled, then change it to be in Input | 265 | /* If the GPT is currently disabled, then change it to be in Input |
265 | * Capture mode. If the mode is non-zero, then the pin could be | 266 | * Capture mode. If the mode is non-zero, then the pin could be |
@@ -720,8 +721,7 @@ static inline int mpc52xx_gpt_wdt_setup(struct mpc52xx_gpt_priv *gpt, | |||
720 | /* --------------------------------------------------------------------- | 721 | /* --------------------------------------------------------------------- |
721 | * of_platform bus binding code | 722 | * of_platform bus binding code |
722 | */ | 723 | */ |
723 | static int __devinit mpc52xx_gpt_probe(struct platform_device *ofdev, | 724 | static int __devinit mpc52xx_gpt_probe(struct platform_device *ofdev) |
724 | const struct of_device_id *match) | ||
725 | { | 725 | { |
726 | struct mpc52xx_gpt_priv *gpt; | 726 | struct mpc52xx_gpt_priv *gpt; |
727 | 727 | ||
@@ -780,7 +780,7 @@ static const struct of_device_id mpc52xx_gpt_match[] = { | |||
780 | {} | 780 | {} |
781 | }; | 781 | }; |
782 | 782 | ||
783 | static struct of_platform_driver mpc52xx_gpt_driver = { | 783 | static struct platform_driver mpc52xx_gpt_driver = { |
784 | .driver = { | 784 | .driver = { |
785 | .name = "mpc52xx-gpt", | 785 | .name = "mpc52xx-gpt", |
786 | .owner = THIS_MODULE, | 786 | .owner = THIS_MODULE, |
@@ -792,10 +792,7 @@ static struct of_platform_driver mpc52xx_gpt_driver = { | |||
792 | 792 | ||
793 | static int __init mpc52xx_gpt_init(void) | 793 | static int __init mpc52xx_gpt_init(void) |
794 | { | 794 | { |
795 | if (of_register_platform_driver(&mpc52xx_gpt_driver)) | 795 | return platform_driver_register(&mpc52xx_gpt_driver); |
796 | pr_err("error registering MPC52xx GPT driver\n"); | ||
797 | |||
798 | return 0; | ||
799 | } | 796 | } |
800 | 797 | ||
801 | /* Make sure GPIOs and IRQs get set up before anyone tries to use them */ | 798 | /* Make sure GPIOs and IRQs get set up before anyone tries to use them */ |
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c index f4ac213c89c0..9940ce8a2d4e 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c | |||
@@ -57,7 +57,7 @@ struct mpc52xx_lpbfifo { | |||
57 | static struct mpc52xx_lpbfifo lpbfifo; | 57 | static struct mpc52xx_lpbfifo lpbfifo; |
58 | 58 | ||
59 | /** | 59 | /** |
60 | * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transfered | 60 | * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transferred |
61 | */ | 61 | */ |
62 | static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) | 62 | static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) |
63 | { | 63 | { |
@@ -179,7 +179,7 @@ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) | |||
179 | * | 179 | * |
180 | * On transmit, the dma completion irq triggers before the fifo completion | 180 | * On transmit, the dma completion irq triggers before the fifo completion |
181 | * triggers. Handle the dma completion here instead of the LPB FIFO Bestcomm | 181 | * triggers. Handle the dma completion here instead of the LPB FIFO Bestcomm |
182 | * task completion irq becuase everyting is not really done until the LPB FIFO | 182 | * task completion irq because everything is not really done until the LPB FIFO |
183 | * completion irq triggers. | 183 | * completion irq triggers. |
184 | * | 184 | * |
185 | * In other words: | 185 | * In other words: |
@@ -195,7 +195,7 @@ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) | |||
195 | * Exit conditions: | 195 | * Exit conditions: |
196 | * 1) Transfer aborted | 196 | * 1) Transfer aborted |
197 | * 2) FIFO complete without DMA; more data to do | 197 | * 2) FIFO complete without DMA; more data to do |
198 | * 3) FIFO complete without DMA; all data transfered | 198 | * 3) FIFO complete without DMA; all data transferred |
199 | * 4) FIFO complete using DMA | 199 | * 4) FIFO complete using DMA |
200 | * | 200 | * |
201 | * Condition 1 can occur regardless of whether or not DMA is used. | 201 | * Condition 1 can occur regardless of whether or not DMA is used. |
@@ -436,8 +436,7 @@ void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req) | |||
436 | } | 436 | } |
437 | EXPORT_SYMBOL(mpc52xx_lpbfifo_abort); | 437 | EXPORT_SYMBOL(mpc52xx_lpbfifo_abort); |
438 | 438 | ||
439 | static int __devinit mpc52xx_lpbfifo_probe(struct platform_device *op, | 439 | static int __devinit mpc52xx_lpbfifo_probe(struct platform_device *op) |
440 | const struct of_device_id *match) | ||
441 | { | 440 | { |
442 | struct resource res; | 441 | struct resource res; |
443 | int rc = -ENOMEM; | 442 | int rc = -ENOMEM; |
@@ -536,7 +535,7 @@ static struct of_device_id mpc52xx_lpbfifo_match[] __devinitconst = { | |||
536 | {}, | 535 | {}, |
537 | }; | 536 | }; |
538 | 537 | ||
539 | static struct of_platform_driver mpc52xx_lpbfifo_driver = { | 538 | static struct platform_driver mpc52xx_lpbfifo_driver = { |
540 | .driver = { | 539 | .driver = { |
541 | .name = "mpc52xx-lpbfifo", | 540 | .name = "mpc52xx-lpbfifo", |
542 | .owner = THIS_MODULE, | 541 | .owner = THIS_MODULE, |
@@ -551,14 +550,12 @@ static struct of_platform_driver mpc52xx_lpbfifo_driver = { | |||
551 | */ | 550 | */ |
552 | static int __init mpc52xx_lpbfifo_init(void) | 551 | static int __init mpc52xx_lpbfifo_init(void) |
553 | { | 552 | { |
554 | pr_debug("Registering LocalPlus bus FIFO driver\n"); | 553 | return platform_driver_register(&mpc52xx_lpbfifo_driver); |
555 | return of_register_platform_driver(&mpc52xx_lpbfifo_driver); | ||
556 | } | 554 | } |
557 | module_init(mpc52xx_lpbfifo_init); | 555 | module_init(mpc52xx_lpbfifo_init); |
558 | 556 | ||
559 | static void __exit mpc52xx_lpbfifo_exit(void) | 557 | static void __exit mpc52xx_lpbfifo_exit(void) |
560 | { | 558 | { |
561 | pr_debug("Unregistering LocalPlus bus FIFO driver\n"); | 559 | platform_driver_unregister(&mpc52xx_lpbfifo_driver); |
562 | of_unregister_platform_driver(&mpc52xx_lpbfifo_driver); | ||
563 | } | 560 | } |
564 | module_exit(mpc52xx_lpbfifo_exit); | 561 | module_exit(mpc52xx_lpbfifo_exit); |
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c index 4bf4bf7b063e..1a9a49570579 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c | |||
@@ -155,50 +155,32 @@ static inline void io_be_clrbit(u32 __iomem *addr, int bitno) | |||
155 | /* | 155 | /* |
156 | * IRQ[0-3] interrupt irq_chip | 156 | * IRQ[0-3] interrupt irq_chip |
157 | */ | 157 | */ |
158 | static void mpc52xx_extirq_mask(unsigned int virq) | 158 | static void mpc52xx_extirq_mask(struct irq_data *d) |
159 | { | 159 | { |
160 | int irq; | 160 | int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; |
161 | int l2irq; | ||
162 | |||
163 | irq = irq_map[virq].hwirq; | ||
164 | l2irq = irq & MPC52xx_IRQ_L2_MASK; | ||
165 | |||
166 | io_be_clrbit(&intr->ctrl, 11 - l2irq); | 161 | io_be_clrbit(&intr->ctrl, 11 - l2irq); |
167 | } | 162 | } |
168 | 163 | ||
169 | static void mpc52xx_extirq_unmask(unsigned int virq) | 164 | static void mpc52xx_extirq_unmask(struct irq_data *d) |
170 | { | 165 | { |
171 | int irq; | 166 | int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; |
172 | int l2irq; | ||
173 | |||
174 | irq = irq_map[virq].hwirq; | ||
175 | l2irq = irq & MPC52xx_IRQ_L2_MASK; | ||
176 | |||
177 | io_be_setbit(&intr->ctrl, 11 - l2irq); | 167 | io_be_setbit(&intr->ctrl, 11 - l2irq); |
178 | } | 168 | } |
179 | 169 | ||
180 | static void mpc52xx_extirq_ack(unsigned int virq) | 170 | static void mpc52xx_extirq_ack(struct irq_data *d) |
181 | { | 171 | { |
182 | int irq; | 172 | int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; |
183 | int l2irq; | ||
184 | |||
185 | irq = irq_map[virq].hwirq; | ||
186 | l2irq = irq & MPC52xx_IRQ_L2_MASK; | ||
187 | |||
188 | io_be_setbit(&intr->ctrl, 27-l2irq); | 173 | io_be_setbit(&intr->ctrl, 27-l2irq); |
189 | } | 174 | } |
190 | 175 | ||
191 | static int mpc52xx_extirq_set_type(unsigned int virq, unsigned int flow_type) | 176 | static int mpc52xx_extirq_set_type(struct irq_data *d, unsigned int flow_type) |
192 | { | 177 | { |
193 | u32 ctrl_reg, type; | 178 | u32 ctrl_reg, type; |
194 | int irq; | 179 | int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; |
195 | int l2irq; | ||
196 | void *handler = handle_level_irq; | 180 | void *handler = handle_level_irq; |
197 | 181 | ||
198 | irq = irq_map[virq].hwirq; | 182 | pr_debug("%s: irq=%x. l2=%d flow_type=%d\n", __func__, |
199 | l2irq = irq & MPC52xx_IRQ_L2_MASK; | 183 | (int) irqd_to_hwirq(d), l2irq, flow_type); |
200 | |||
201 | pr_debug("%s: irq=%x. l2=%d flow_type=%d\n", __func__, irq, l2irq, flow_type); | ||
202 | 184 | ||
203 | switch (flow_type) { | 185 | switch (flow_type) { |
204 | case IRQF_TRIGGER_HIGH: type = 0; break; | 186 | case IRQF_TRIGGER_HIGH: type = 0; break; |
@@ -214,132 +196,97 @@ static int mpc52xx_extirq_set_type(unsigned int virq, unsigned int flow_type) | |||
214 | ctrl_reg |= (type << (22 - (l2irq * 2))); | 196 | ctrl_reg |= (type << (22 - (l2irq * 2))); |
215 | out_be32(&intr->ctrl, ctrl_reg); | 197 | out_be32(&intr->ctrl, ctrl_reg); |
216 | 198 | ||
217 | __set_irq_handler_unlocked(virq, handler); | 199 | __irq_set_handler_locked(d->irq, handler); |
218 | 200 | ||
219 | return 0; | 201 | return 0; |
220 | } | 202 | } |
221 | 203 | ||
222 | static struct irq_chip mpc52xx_extirq_irqchip = { | 204 | static struct irq_chip mpc52xx_extirq_irqchip = { |
223 | .name = "MPC52xx External", | 205 | .name = "MPC52xx External", |
224 | .mask = mpc52xx_extirq_mask, | 206 | .irq_mask = mpc52xx_extirq_mask, |
225 | .unmask = mpc52xx_extirq_unmask, | 207 | .irq_unmask = mpc52xx_extirq_unmask, |
226 | .ack = mpc52xx_extirq_ack, | 208 | .irq_ack = mpc52xx_extirq_ack, |
227 | .set_type = mpc52xx_extirq_set_type, | 209 | .irq_set_type = mpc52xx_extirq_set_type, |
228 | }; | 210 | }; |
229 | 211 | ||
230 | /* | 212 | /* |
231 | * Main interrupt irq_chip | 213 | * Main interrupt irq_chip |
232 | */ | 214 | */ |
233 | static int mpc52xx_null_set_type(unsigned int virq, unsigned int flow_type) | 215 | static int mpc52xx_null_set_type(struct irq_data *d, unsigned int flow_type) |
234 | { | 216 | { |
235 | return 0; /* Do nothing so that the sense mask will get updated */ | 217 | return 0; /* Do nothing so that the sense mask will get updated */ |
236 | } | 218 | } |
237 | 219 | ||
238 | static void mpc52xx_main_mask(unsigned int virq) | 220 | static void mpc52xx_main_mask(struct irq_data *d) |
239 | { | 221 | { |
240 | int irq; | 222 | int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; |
241 | int l2irq; | ||
242 | |||
243 | irq = irq_map[virq].hwirq; | ||
244 | l2irq = irq & MPC52xx_IRQ_L2_MASK; | ||
245 | |||
246 | io_be_setbit(&intr->main_mask, 16 - l2irq); | 223 | io_be_setbit(&intr->main_mask, 16 - l2irq); |
247 | } | 224 | } |
248 | 225 | ||
249 | static void mpc52xx_main_unmask(unsigned int virq) | 226 | static void mpc52xx_main_unmask(struct irq_data *d) |
250 | { | 227 | { |
251 | int irq; | 228 | int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; |
252 | int l2irq; | ||
253 | |||
254 | irq = irq_map[virq].hwirq; | ||
255 | l2irq = irq & MPC52xx_IRQ_L2_MASK; | ||
256 | |||
257 | io_be_clrbit(&intr->main_mask, 16 - l2irq); | 229 | io_be_clrbit(&intr->main_mask, 16 - l2irq); |
258 | } | 230 | } |
259 | 231 | ||
260 | static struct irq_chip mpc52xx_main_irqchip = { | 232 | static struct irq_chip mpc52xx_main_irqchip = { |
261 | .name = "MPC52xx Main", | 233 | .name = "MPC52xx Main", |
262 | .mask = mpc52xx_main_mask, | 234 | .irq_mask = mpc52xx_main_mask, |
263 | .mask_ack = mpc52xx_main_mask, | 235 | .irq_mask_ack = mpc52xx_main_mask, |
264 | .unmask = mpc52xx_main_unmask, | 236 | .irq_unmask = mpc52xx_main_unmask, |
265 | .set_type = mpc52xx_null_set_type, | 237 | .irq_set_type = mpc52xx_null_set_type, |
266 | }; | 238 | }; |
267 | 239 | ||
268 | /* | 240 | /* |
269 | * Peripherals interrupt irq_chip | 241 | * Peripherals interrupt irq_chip |
270 | */ | 242 | */ |
271 | static void mpc52xx_periph_mask(unsigned int virq) | 243 | static void mpc52xx_periph_mask(struct irq_data *d) |
272 | { | 244 | { |
273 | int irq; | 245 | int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; |
274 | int l2irq; | ||
275 | |||
276 | irq = irq_map[virq].hwirq; | ||
277 | l2irq = irq & MPC52xx_IRQ_L2_MASK; | ||
278 | |||
279 | io_be_setbit(&intr->per_mask, 31 - l2irq); | 246 | io_be_setbit(&intr->per_mask, 31 - l2irq); |
280 | } | 247 | } |
281 | 248 | ||
282 | static void mpc52xx_periph_unmask(unsigned int virq) | 249 | static void mpc52xx_periph_unmask(struct irq_data *d) |
283 | { | 250 | { |
284 | int irq; | 251 | int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; |
285 | int l2irq; | ||
286 | |||
287 | irq = irq_map[virq].hwirq; | ||
288 | l2irq = irq & MPC52xx_IRQ_L2_MASK; | ||
289 | |||
290 | io_be_clrbit(&intr->per_mask, 31 - l2irq); | 252 | io_be_clrbit(&intr->per_mask, 31 - l2irq); |
291 | } | 253 | } |
292 | 254 | ||
293 | static struct irq_chip mpc52xx_periph_irqchip = { | 255 | static struct irq_chip mpc52xx_periph_irqchip = { |
294 | .name = "MPC52xx Peripherals", | 256 | .name = "MPC52xx Peripherals", |
295 | .mask = mpc52xx_periph_mask, | 257 | .irq_mask = mpc52xx_periph_mask, |
296 | .mask_ack = mpc52xx_periph_mask, | 258 | .irq_mask_ack = mpc52xx_periph_mask, |
297 | .unmask = mpc52xx_periph_unmask, | 259 | .irq_unmask = mpc52xx_periph_unmask, |
298 | .set_type = mpc52xx_null_set_type, | 260 | .irq_set_type = mpc52xx_null_set_type, |
299 | }; | 261 | }; |
300 | 262 | ||
301 | /* | 263 | /* |
302 | * SDMA interrupt irq_chip | 264 | * SDMA interrupt irq_chip |
303 | */ | 265 | */ |
304 | static void mpc52xx_sdma_mask(unsigned int virq) | 266 | static void mpc52xx_sdma_mask(struct irq_data *d) |
305 | { | 267 | { |
306 | int irq; | 268 | int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; |
307 | int l2irq; | ||
308 | |||
309 | irq = irq_map[virq].hwirq; | ||
310 | l2irq = irq & MPC52xx_IRQ_L2_MASK; | ||
311 | |||
312 | io_be_setbit(&sdma->IntMask, l2irq); | 269 | io_be_setbit(&sdma->IntMask, l2irq); |
313 | } | 270 | } |
314 | 271 | ||
315 | static void mpc52xx_sdma_unmask(unsigned int virq) | 272 | static void mpc52xx_sdma_unmask(struct irq_data *d) |
316 | { | 273 | { |
317 | int irq; | 274 | int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; |
318 | int l2irq; | ||
319 | |||
320 | irq = irq_map[virq].hwirq; | ||
321 | l2irq = irq & MPC52xx_IRQ_L2_MASK; | ||
322 | |||
323 | io_be_clrbit(&sdma->IntMask, l2irq); | 275 | io_be_clrbit(&sdma->IntMask, l2irq); |
324 | } | 276 | } |
325 | 277 | ||
326 | static void mpc52xx_sdma_ack(unsigned int virq) | 278 | static void mpc52xx_sdma_ack(struct irq_data *d) |
327 | { | 279 | { |
328 | int irq; | 280 | int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; |
329 | int l2irq; | ||
330 | |||
331 | irq = irq_map[virq].hwirq; | ||
332 | l2irq = irq & MPC52xx_IRQ_L2_MASK; | ||
333 | |||
334 | out_be32(&sdma->IntPend, 1 << l2irq); | 281 | out_be32(&sdma->IntPend, 1 << l2irq); |
335 | } | 282 | } |
336 | 283 | ||
337 | static struct irq_chip mpc52xx_sdma_irqchip = { | 284 | static struct irq_chip mpc52xx_sdma_irqchip = { |
338 | .name = "MPC52xx SDMA", | 285 | .name = "MPC52xx SDMA", |
339 | .mask = mpc52xx_sdma_mask, | 286 | .irq_mask = mpc52xx_sdma_mask, |
340 | .unmask = mpc52xx_sdma_unmask, | 287 | .irq_unmask = mpc52xx_sdma_unmask, |
341 | .ack = mpc52xx_sdma_ack, | 288 | .irq_ack = mpc52xx_sdma_ack, |
342 | .set_type = mpc52xx_null_set_type, | 289 | .irq_set_type = mpc52xx_null_set_type, |
343 | }; | 290 | }; |
344 | 291 | ||
345 | /** | 292 | /** |
@@ -414,7 +361,7 @@ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq, | |||
414 | else | 361 | else |
415 | hndlr = handle_level_irq; | 362 | hndlr = handle_level_irq; |
416 | 363 | ||
417 | set_irq_chip_and_handler(virq, &mpc52xx_extirq_irqchip, hndlr); | 364 | irq_set_chip_and_handler(virq, &mpc52xx_extirq_irqchip, hndlr); |
418 | pr_debug("%s: External IRQ%i virq=%x, hw=%x. type=%x\n", | 365 | pr_debug("%s: External IRQ%i virq=%x, hw=%x. type=%x\n", |
419 | __func__, l2irq, virq, (int)irq, type); | 366 | __func__, l2irq, virq, (int)irq, type); |
420 | return 0; | 367 | return 0; |
@@ -431,7 +378,7 @@ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq, | |||
431 | return -EINVAL; | 378 | return -EINVAL; |
432 | } | 379 | } |
433 | 380 | ||
434 | set_irq_chip_and_handler(virq, irqchip, handle_level_irq); | 381 | irq_set_chip_and_handler(virq, irqchip, handle_level_irq); |
435 | pr_debug("%s: virq=%x, l1=%i, l2=%i\n", __func__, virq, l1irq, l2irq); | 382 | pr_debug("%s: virq=%x, l1=%i, l2=%i\n", __func__, virq, l1irq, l2irq); |
436 | 383 | ||
437 | return 0; | 384 | return 0; |
@@ -512,7 +459,7 @@ void __init mpc52xx_init_irq(void) | |||
512 | /** | 459 | /** |
513 | * mpc52xx_get_irq - Get pending interrupt number hook function | 460 | * mpc52xx_get_irq - Get pending interrupt number hook function |
514 | * | 461 | * |
515 | * Called by the interupt handler to determine what IRQ handler needs to be | 462 | * Called by the interrupt handler to determine what IRQ handler needs to be |
516 | * executed. | 463 | * executed. |
517 | * | 464 | * |
518 | * Status of pending interrupts is determined by reading the encoded status | 465 | * Status of pending interrupts is determined by reading the encoded status |
@@ -539,7 +486,7 @@ void __init mpc52xx_init_irq(void) | |||
539 | unsigned int mpc52xx_get_irq(void) | 486 | unsigned int mpc52xx_get_irq(void) |
540 | { | 487 | { |
541 | u32 status; | 488 | u32 status; |
542 | int irq = NO_IRQ_IGNORE; | 489 | int irq; |
543 | 490 | ||
544 | status = in_be32(&intr->enc_status); | 491 | status = in_be32(&intr->enc_status); |
545 | if (status & 0x00000400) { /* critical */ | 492 | if (status & 0x00000400) { /* critical */ |
@@ -562,6 +509,8 @@ unsigned int mpc52xx_get_irq(void) | |||
562 | } else { | 509 | } else { |
563 | irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET); | 510 | irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET); |
564 | } | 511 | } |
512 | } else { | ||
513 | return NO_IRQ; | ||
565 | } | 514 | } |
566 | 515 | ||
567 | return irq_linear_revmap(mpc52xx_irqhost, irq); | 516 | return irq_linear_revmap(mpc52xx_irqhost, irq); |
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c index 568cef636275..8310e8b5b57f 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c | |||
@@ -186,7 +186,7 @@ void mpc52xx_pm_finish(void) | |||
186 | iounmap(mbar); | 186 | iounmap(mbar); |
187 | } | 187 | } |
188 | 188 | ||
189 | static struct platform_suspend_ops mpc52xx_pm_ops = { | 189 | static const struct platform_suspend_ops mpc52xx_pm_ops = { |
190 | .valid = mpc52xx_pm_valid, | 190 | .valid = mpc52xx_pm_valid, |
191 | .prepare = mpc52xx_pm_prepare, | 191 | .prepare = mpc52xx_pm_prepare, |
192 | .enter = mpc52xx_pm_enter, | 192 | .enter = mpc52xx_pm_enter, |
diff --git a/arch/powerpc/platforms/82xx/Makefile b/arch/powerpc/platforms/82xx/Makefile index d982793f4dbd..455fe21e37c4 100644 --- a/arch/powerpc/platforms/82xx/Makefile +++ b/arch/powerpc/platforms/82xx/Makefile | |||
@@ -6,4 +6,4 @@ obj-$(CONFIG_CPM2) += pq2.o | |||
6 | obj-$(CONFIG_PQ2_ADS_PCI_PIC) += pq2ads-pci-pic.o | 6 | obj-$(CONFIG_PQ2_ADS_PCI_PIC) += pq2ads-pci-pic.o |
7 | obj-$(CONFIG_PQ2FADS) += pq2fads.o | 7 | obj-$(CONFIG_PQ2FADS) += pq2fads.o |
8 | obj-$(CONFIG_EP8248E) += ep8248e.o | 8 | obj-$(CONFIG_EP8248E) += ep8248e.o |
9 | obj-$(CONFIG_MGCOGE) += mgcoge.o | 9 | obj-$(CONFIG_MGCOGE) += km82xx.o |
diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c index 1565e0446dc8..10ff526cd046 100644 --- a/arch/powerpc/platforms/82xx/ep8248e.c +++ b/arch/powerpc/platforms/82xx/ep8248e.c | |||
@@ -111,8 +111,7 @@ static struct mdiobb_ctrl ep8248e_mdio_ctrl = { | |||
111 | .ops = &ep8248e_mdio_ops, | 111 | .ops = &ep8248e_mdio_ops, |
112 | }; | 112 | }; |
113 | 113 | ||
114 | static int __devinit ep8248e_mdio_probe(struct platform_device *ofdev, | 114 | static int __devinit ep8248e_mdio_probe(struct platform_device *ofdev) |
115 | const struct of_device_id *match) | ||
116 | { | 115 | { |
117 | struct mii_bus *bus; | 116 | struct mii_bus *bus; |
118 | struct resource res; | 117 | struct resource res; |
@@ -167,7 +166,7 @@ static const struct of_device_id ep8248e_mdio_match[] = { | |||
167 | {}, | 166 | {}, |
168 | }; | 167 | }; |
169 | 168 | ||
170 | static struct of_platform_driver ep8248e_mdio_driver = { | 169 | static struct platform_driver ep8248e_mdio_driver = { |
171 | .driver = { | 170 | .driver = { |
172 | .name = "ep8248e-mdio-bitbang", | 171 | .name = "ep8248e-mdio-bitbang", |
173 | .owner = THIS_MODULE, | 172 | .owner = THIS_MODULE, |
@@ -308,7 +307,7 @@ static __initdata struct of_device_id of_bus_ids[] = { | |||
308 | static int __init declare_of_platform_devices(void) | 307 | static int __init declare_of_platform_devices(void) |
309 | { | 308 | { |
310 | of_platform_bus_probe(NULL, of_bus_ids, NULL); | 309 | of_platform_bus_probe(NULL, of_bus_ids, NULL); |
311 | of_register_platform_driver(&ep8248e_mdio_driver); | 310 | platform_driver_register(&ep8248e_mdio_driver); |
312 | 311 | ||
313 | return 0; | 312 | return 0; |
314 | } | 313 | } |
diff --git a/arch/powerpc/platforms/82xx/mgcoge.c b/arch/powerpc/platforms/82xx/km82xx.c index 7a5de9eb3c73..428c5e0a0e75 100644 --- a/arch/powerpc/platforms/82xx/mgcoge.c +++ b/arch/powerpc/platforms/82xx/km82xx.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Keymile mgcoge support | 2 | * Keymile km82xx support |
3 | * Copyright 2008 DENX Software Engineering GmbH | 3 | * Copyright 2008-2011 DENX Software Engineering GmbH |
4 | * Author: Heiko Schocher <hs@denx.de> | 4 | * Author: Heiko Schocher <hs@denx.de> |
5 | * | 5 | * |
6 | * based on code from: | 6 | * based on code from: |
@@ -31,9 +31,10 @@ | |||
31 | 31 | ||
32 | #include "pq2.h" | 32 | #include "pq2.h" |
33 | 33 | ||
34 | static void __init mgcoge_pic_init(void) | 34 | static void __init km82xx_pic_init(void) |
35 | { | 35 | { |
36 | struct device_node *np = of_find_compatible_node(NULL, NULL, "fsl,pq2-pic"); | 36 | struct device_node *np = of_find_compatible_node(NULL, NULL, |
37 | "fsl,pq2-pic"); | ||
37 | if (!np) { | 38 | if (!np) { |
38 | printk(KERN_ERR "PIC init: can not find cpm-pic node\n"); | 39 | printk(KERN_ERR "PIC init: can not find cpm-pic node\n"); |
39 | return; | 40 | return; |
@@ -47,12 +48,18 @@ struct cpm_pin { | |||
47 | int port, pin, flags; | 48 | int port, pin, flags; |
48 | }; | 49 | }; |
49 | 50 | ||
50 | static __initdata struct cpm_pin mgcoge_pins[] = { | 51 | static __initdata struct cpm_pin km82xx_pins[] = { |
51 | 52 | ||
52 | /* SMC2 */ | 53 | /* SMC2 */ |
53 | {0, 8, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, | 54 | {0, 8, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, |
54 | {0, 9, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, | 55 | {0, 9, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, |
55 | 56 | ||
57 | /* SCC1 */ | ||
58 | {2, 21, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, | ||
59 | {2, 15, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, | ||
60 | {3, 31, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, | ||
61 | {3, 30, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, | ||
62 | |||
56 | /* SCC4 */ | 63 | /* SCC4 */ |
57 | {2, 25, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, | 64 | {2, 25, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, |
58 | {2, 24, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, | 65 | {2, 24, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, |
@@ -107,30 +114,49 @@ static __initdata struct cpm_pin mgcoge_pins[] = { | |||
107 | {3, 14, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_OPENDRAIN}, | 114 | {3, 14, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_OPENDRAIN}, |
108 | {3, 15, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_OPENDRAIN}, | 115 | {3, 15, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_OPENDRAIN}, |
109 | #endif | 116 | #endif |
117 | |||
118 | /* USB */ | ||
119 | {0, 10, CPM_PIN_OUTPUT | CPM_PIN_GPIO}, /* FULL_SPEED */ | ||
120 | {0, 11, CPM_PIN_OUTPUT | CPM_PIN_GPIO}, /*/SLAVE */ | ||
121 | {2, 10, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* RXN */ | ||
122 | {2, 11, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* RXP */ | ||
123 | {2, 20, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, /* /OE */ | ||
124 | {2, 27, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* RXCLK */ | ||
125 | {3, 23, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, /* TXP */ | ||
126 | {3, 24, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, /* TXN */ | ||
127 | {3, 25, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* RXD */ | ||
110 | }; | 128 | }; |
111 | 129 | ||
112 | static void __init init_ioports(void) | 130 | static void __init init_ioports(void) |
113 | { | 131 | { |
114 | int i; | 132 | int i; |
115 | 133 | ||
116 | for (i = 0; i < ARRAY_SIZE(mgcoge_pins); i++) { | 134 | for (i = 0; i < ARRAY_SIZE(km82xx_pins); i++) { |
117 | const struct cpm_pin *pin = &mgcoge_pins[i]; | 135 | const struct cpm_pin *pin = &km82xx_pins[i]; |
118 | cpm2_set_pin(pin->port, pin->pin, pin->flags); | 136 | cpm2_set_pin(pin->port, pin->pin, pin->flags); |
119 | } | 137 | } |
120 | 138 | ||
121 | cpm2_smc_clk_setup(CPM_CLK_SMC2, CPM_BRG8); | 139 | cpm2_smc_clk_setup(CPM_CLK_SMC2, CPM_BRG8); |
140 | cpm2_clk_setup(CPM_CLK_SCC1, CPM_CLK11, CPM_CLK_RX); | ||
141 | cpm2_clk_setup(CPM_CLK_SCC1, CPM_CLK11, CPM_CLK_TX); | ||
142 | cpm2_clk_setup(CPM_CLK_SCC3, CPM_CLK5, CPM_CLK_RTX); | ||
122 | cpm2_clk_setup(CPM_CLK_SCC4, CPM_CLK7, CPM_CLK_RX); | 143 | cpm2_clk_setup(CPM_CLK_SCC4, CPM_CLK7, CPM_CLK_RX); |
123 | cpm2_clk_setup(CPM_CLK_SCC4, CPM_CLK8, CPM_CLK_TX); | 144 | cpm2_clk_setup(CPM_CLK_SCC4, CPM_CLK8, CPM_CLK_TX); |
124 | cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK10, CPM_CLK_RX); | 145 | cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK10, CPM_CLK_RX); |
125 | cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK9, CPM_CLK_TX); | 146 | cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK9, CPM_CLK_TX); |
126 | cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK13, CPM_CLK_RX); | 147 | cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK13, CPM_CLK_RX); |
127 | cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK14, CPM_CLK_TX); | 148 | cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK14, CPM_CLK_TX); |
149 | |||
150 | /* Force USB FULL SPEED bit to '1' */ | ||
151 | setbits32(&cpm2_immr->im_ioport.iop_pdata, 1 << (31 - 10)); | ||
152 | /* clear USB_SLAVE */ | ||
153 | clrbits32(&cpm2_immr->im_ioport.iop_pdata, 1 << (31 - 11)); | ||
128 | } | 154 | } |
129 | 155 | ||
130 | static void __init mgcoge_setup_arch(void) | 156 | static void __init km82xx_setup_arch(void) |
131 | { | 157 | { |
132 | if (ppc_md.progress) | 158 | if (ppc_md.progress) |
133 | ppc_md.progress("mgcoge_setup_arch()", 0); | 159 | ppc_md.progress("km82xx_setup_arch()", 0); |
134 | 160 | ||
135 | cpm2_reset(); | 161 | cpm2_reset(); |
136 | 162 | ||
@@ -142,7 +168,7 @@ static void __init mgcoge_setup_arch(void) | |||
142 | init_ioports(); | 168 | init_ioports(); |
143 | 169 | ||
144 | if (ppc_md.progress) | 170 | if (ppc_md.progress) |
145 | ppc_md.progress("mgcoge_setup_arch(), finish", 0); | 171 | ppc_md.progress("km82xx_setup_arch(), finish", 0); |
146 | } | 172 | } |
147 | 173 | ||
148 | static __initdata struct of_device_id of_bus_ids[] = { | 174 | static __initdata struct of_device_id of_bus_ids[] = { |
@@ -156,23 +182,23 @@ static int __init declare_of_platform_devices(void) | |||
156 | 182 | ||
157 | return 0; | 183 | return 0; |
158 | } | 184 | } |
159 | machine_device_initcall(mgcoge, declare_of_platform_devices); | 185 | machine_device_initcall(km82xx, declare_of_platform_devices); |
160 | 186 | ||
161 | /* | 187 | /* |
162 | * Called very early, device-tree isn't unflattened | 188 | * Called very early, device-tree isn't unflattened |
163 | */ | 189 | */ |
164 | static int __init mgcoge_probe(void) | 190 | static int __init km82xx_probe(void) |
165 | { | 191 | { |
166 | unsigned long root = of_get_flat_dt_root(); | 192 | unsigned long root = of_get_flat_dt_root(); |
167 | return of_flat_dt_is_compatible(root, "keymile,mgcoge"); | 193 | return of_flat_dt_is_compatible(root, "keymile,km82xx"); |
168 | } | 194 | } |
169 | 195 | ||
170 | define_machine(mgcoge) | 196 | define_machine(km82xx) |
171 | { | 197 | { |
172 | .name = "Keymile MGCOGE", | 198 | .name = "Keymile km82xx", |
173 | .probe = mgcoge_probe, | 199 | .probe = km82xx_probe, |
174 | .setup_arch = mgcoge_setup_arch, | 200 | .setup_arch = km82xx_setup_arch, |
175 | .init_IRQ = mgcoge_pic_init, | 201 | .init_IRQ = km82xx_pic_init, |
176 | .get_irq = cpm2_get_irq, | 202 | .get_irq = cpm2_get_irq, |
177 | .calibrate_decr = generic_calibrate_decr, | 203 | .calibrate_decr = generic_calibrate_decr, |
178 | .restart = pq2_restart, | 204 | .restart = pq2_restart, |
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c index 5a55d87d6bd6..8ccf9ed62fe2 100644 --- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c +++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c | |||
@@ -39,10 +39,10 @@ struct pq2ads_pci_pic { | |||
39 | 39 | ||
40 | #define NUM_IRQS 32 | 40 | #define NUM_IRQS 32 |
41 | 41 | ||
42 | static void pq2ads_pci_mask_irq(unsigned int virq) | 42 | static void pq2ads_pci_mask_irq(struct irq_data *d) |
43 | { | 43 | { |
44 | struct pq2ads_pci_pic *priv = get_irq_chip_data(virq); | 44 | struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d); |
45 | int irq = NUM_IRQS - virq_to_hw(virq) - 1; | 45 | int irq = NUM_IRQS - irqd_to_hwirq(d) - 1; |
46 | 46 | ||
47 | if (irq != -1) { | 47 | if (irq != -1) { |
48 | unsigned long flags; | 48 | unsigned long flags; |
@@ -55,10 +55,10 @@ static void pq2ads_pci_mask_irq(unsigned int virq) | |||
55 | } | 55 | } |
56 | } | 56 | } |
57 | 57 | ||
58 | static void pq2ads_pci_unmask_irq(unsigned int virq) | 58 | static void pq2ads_pci_unmask_irq(struct irq_data *d) |
59 | { | 59 | { |
60 | struct pq2ads_pci_pic *priv = get_irq_chip_data(virq); | 60 | struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d); |
61 | int irq = NUM_IRQS - virq_to_hw(virq) - 1; | 61 | int irq = NUM_IRQS - irqd_to_hwirq(d) - 1; |
62 | 62 | ||
63 | if (irq != -1) { | 63 | if (irq != -1) { |
64 | unsigned long flags; | 64 | unsigned long flags; |
@@ -71,18 +71,17 @@ static void pq2ads_pci_unmask_irq(unsigned int virq) | |||
71 | 71 | ||
72 | static struct irq_chip pq2ads_pci_ic = { | 72 | static struct irq_chip pq2ads_pci_ic = { |
73 | .name = "PQ2 ADS PCI", | 73 | .name = "PQ2 ADS PCI", |
74 | .end = pq2ads_pci_unmask_irq, | 74 | .irq_mask = pq2ads_pci_mask_irq, |
75 | .mask = pq2ads_pci_mask_irq, | 75 | .irq_mask_ack = pq2ads_pci_mask_irq, |
76 | .mask_ack = pq2ads_pci_mask_irq, | 76 | .irq_ack = pq2ads_pci_mask_irq, |
77 | .ack = pq2ads_pci_mask_irq, | 77 | .irq_unmask = pq2ads_pci_unmask_irq, |
78 | .unmask = pq2ads_pci_unmask_irq, | 78 | .irq_enable = pq2ads_pci_unmask_irq, |
79 | .enable = pq2ads_pci_unmask_irq, | 79 | .irq_disable = pq2ads_pci_mask_irq |
80 | .disable = pq2ads_pci_mask_irq | ||
81 | }; | 80 | }; |
82 | 81 | ||
83 | static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc) | 82 | static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc) |
84 | { | 83 | { |
85 | struct pq2ads_pci_pic *priv = desc->handler_data; | 84 | struct pq2ads_pci_pic *priv = irq_desc_get_handler_data(desc); |
86 | u32 stat, mask, pend; | 85 | u32 stat, mask, pend; |
87 | int bit; | 86 | int bit; |
88 | 87 | ||
@@ -107,22 +106,14 @@ static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc) | |||
107 | static int pci_pic_host_map(struct irq_host *h, unsigned int virq, | 106 | static int pci_pic_host_map(struct irq_host *h, unsigned int virq, |
108 | irq_hw_number_t hw) | 107 | irq_hw_number_t hw) |
109 | { | 108 | { |
110 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 109 | irq_set_status_flags(virq, IRQ_LEVEL); |
111 | set_irq_chip_data(virq, h->host_data); | 110 | irq_set_chip_data(virq, h->host_data); |
112 | set_irq_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq); | 111 | irq_set_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq); |
113 | return 0; | 112 | return 0; |
114 | } | 113 | } |
115 | 114 | ||
116 | static void pci_host_unmap(struct irq_host *h, unsigned int virq) | ||
117 | { | ||
118 | /* remove chip and handler */ | ||
119 | set_irq_chip_data(virq, NULL); | ||
120 | set_irq_chip(virq, NULL); | ||
121 | } | ||
122 | |||
123 | static struct irq_host_ops pci_pic_host_ops = { | 115 | static struct irq_host_ops pci_pic_host_ops = { |
124 | .map = pci_pic_host_map, | 116 | .map = pci_pic_host_map, |
125 | .unmap = pci_host_unmap, | ||
126 | }; | 117 | }; |
127 | 118 | ||
128 | int __init pq2ads_pci_init_irq(void) | 119 | int __init pq2ads_pci_init_irq(void) |
@@ -176,8 +167,8 @@ int __init pq2ads_pci_init_irq(void) | |||
176 | 167 | ||
177 | priv->host = host; | 168 | priv->host = host; |
178 | host->host_data = priv; | 169 | host->host_data = priv; |
179 | set_irq_data(irq, priv); | 170 | irq_set_handler_data(irq, priv); |
180 | set_irq_chained_handler(irq, pq2ads_pci_irq_demux); | 171 | irq_set_chained_handler(irq, pq2ads_pci_irq_demux); |
181 | 172 | ||
182 | of_node_put(np); | 173 | of_node_put(np); |
183 | return 0; | 174 | return 0; |
diff --git a/arch/powerpc/platforms/83xx/Kconfig b/arch/powerpc/platforms/83xx/Kconfig index 021763a32c2f..73f4135f3a1a 100644 --- a/arch/powerpc/platforms/83xx/Kconfig +++ b/arch/powerpc/platforms/83xx/Kconfig | |||
@@ -10,12 +10,12 @@ menuconfig PPC_83xx | |||
10 | if PPC_83xx | 10 | if PPC_83xx |
11 | 11 | ||
12 | config MPC830x_RDB | 12 | config MPC830x_RDB |
13 | bool "Freescale MPC830x RDB" | 13 | bool "Freescale MPC830x RDB and derivatives" |
14 | select DEFAULT_UIMAGE | 14 | select DEFAULT_UIMAGE |
15 | select PPC_MPC831x | 15 | select PPC_MPC831x |
16 | select FSL_GTM | 16 | select FSL_GTM |
17 | help | 17 | help |
18 | This option enables support for the MPC8308 RDB board. | 18 | This option enables support for the MPC8308 RDB and MPC8308 P1M boards. |
19 | 19 | ||
20 | config MPC831x_RDB | 20 | config MPC831x_RDB |
21 | bool "Freescale MPC831x RDB" | 21 | bool "Freescale MPC831x RDB" |
diff --git a/arch/powerpc/platforms/83xx/Makefile b/arch/powerpc/platforms/83xx/Makefile index 6e8bbbbcfdf8..ed95bfcbcbff 100644 --- a/arch/powerpc/platforms/83xx/Makefile +++ b/arch/powerpc/platforms/83xx/Makefile | |||
@@ -16,4 +16,4 @@ obj-$(CONFIG_MPC837x_MDS) += mpc837x_mds.o | |||
16 | obj-$(CONFIG_SBC834x) += sbc834x.o | 16 | obj-$(CONFIG_SBC834x) += sbc834x.o |
17 | obj-$(CONFIG_MPC837x_RDB) += mpc837x_rdb.o | 17 | obj-$(CONFIG_MPC837x_RDB) += mpc837x_rdb.o |
18 | obj-$(CONFIG_ASP834x) += asp834x.o | 18 | obj-$(CONFIG_ASP834x) += asp834x.o |
19 | obj-$(CONFIG_KMETER1) += kmeter1.o | 19 | obj-$(CONFIG_KMETER1) += km83xx.o |
diff --git a/arch/powerpc/platforms/83xx/kmeter1.c b/arch/powerpc/platforms/83xx/km83xx.c index 903acfd851ac..a2b9b9ef1240 100644 --- a/arch/powerpc/platforms/83xx/kmeter1.c +++ b/arch/powerpc/platforms/83xx/km83xx.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright 2008 DENX Software Engineering GmbH | 2 | * Copyright 2008-2011 DENX Software Engineering GmbH |
3 | * Author: Heiko Schocher <hs@denx.de> | 3 | * Author: Heiko Schocher <hs@denx.de> |
4 | * | 4 | * |
5 | * Description: | 5 | * Description: |
@@ -49,12 +49,12 @@ | |||
49 | * Setup the architecture | 49 | * Setup the architecture |
50 | * | 50 | * |
51 | */ | 51 | */ |
52 | static void __init kmeter1_setup_arch(void) | 52 | static void __init mpc83xx_km_setup_arch(void) |
53 | { | 53 | { |
54 | struct device_node *np; | 54 | struct device_node *np; |
55 | 55 | ||
56 | if (ppc_md.progress) | 56 | if (ppc_md.progress) |
57 | ppc_md.progress("kmeter1_setup_arch()", 0); | 57 | ppc_md.progress("kmpbec83xx_setup_arch()", 0); |
58 | 58 | ||
59 | #ifdef CONFIG_PCI | 59 | #ifdef CONFIG_PCI |
60 | for_each_compatible_node(np, "pci", "fsl,mpc8349-pci") | 60 | for_each_compatible_node(np, "pci", "fsl,mpc8349-pci") |
@@ -69,6 +69,9 @@ static void __init kmeter1_setup_arch(void) | |||
69 | par_io_init(np); | 69 | par_io_init(np); |
70 | of_node_put(np); | 70 | of_node_put(np); |
71 | 71 | ||
72 | for_each_node_by_name(np, "spi") | ||
73 | par_io_of_config(np); | ||
74 | |||
72 | for (np = NULL; (np = of_find_node_by_name(np, "ucc")) != NULL;) | 75 | for (np = NULL; (np = of_find_node_by_name(np, "ucc")) != NULL;) |
73 | par_io_of_config(np); | 76 | par_io_of_config(np); |
74 | } | 77 | } |
@@ -119,7 +122,7 @@ static void __init kmeter1_setup_arch(void) | |||
119 | #endif /* CONFIG_QUICC_ENGINE */ | 122 | #endif /* CONFIG_QUICC_ENGINE */ |
120 | } | 123 | } |
121 | 124 | ||
122 | static struct of_device_id kmeter_ids[] = { | 125 | static struct of_device_id kmpbec83xx_ids[] = { |
123 | { .type = "soc", }, | 126 | { .type = "soc", }, |
124 | { .compatible = "soc", }, | 127 | { .compatible = "soc", }, |
125 | { .compatible = "simple-bus", }, | 128 | { .compatible = "simple-bus", }, |
@@ -131,13 +134,13 @@ static struct of_device_id kmeter_ids[] = { | |||
131 | static int __init kmeter_declare_of_platform_devices(void) | 134 | static int __init kmeter_declare_of_platform_devices(void) |
132 | { | 135 | { |
133 | /* Publish the QE devices */ | 136 | /* Publish the QE devices */ |
134 | of_platform_bus_probe(NULL, kmeter_ids, NULL); | 137 | of_platform_bus_probe(NULL, kmpbec83xx_ids, NULL); |
135 | 138 | ||
136 | return 0; | 139 | return 0; |
137 | } | 140 | } |
138 | machine_device_initcall(kmeter1, kmeter_declare_of_platform_devices); | 141 | machine_device_initcall(mpc83xx_km, kmeter_declare_of_platform_devices); |
139 | 142 | ||
140 | static void __init kmeter1_init_IRQ(void) | 143 | static void __init mpc83xx_km_init_IRQ(void) |
141 | { | 144 | { |
142 | struct device_node *np; | 145 | struct device_node *np; |
143 | 146 | ||
@@ -168,21 +171,34 @@ static void __init kmeter1_init_IRQ(void) | |||
168 | #endif /* CONFIG_QUICC_ENGINE */ | 171 | #endif /* CONFIG_QUICC_ENGINE */ |
169 | } | 172 | } |
170 | 173 | ||
174 | /* list of the supported boards */ | ||
175 | static char *board[] __initdata = { | ||
176 | "Keymile,KMETER1", | ||
177 | "Keymile,kmpbec8321", | ||
178 | NULL | ||
179 | }; | ||
180 | |||
171 | /* | 181 | /* |
172 | * Called very early, MMU is off, device-tree isn't unflattened | 182 | * Called very early, MMU is off, device-tree isn't unflattened |
173 | */ | 183 | */ |
174 | static int __init kmeter1_probe(void) | 184 | static int __init mpc83xx_km_probe(void) |
175 | { | 185 | { |
176 | unsigned long root = of_get_flat_dt_root(); | 186 | unsigned long node = of_get_flat_dt_root(); |
187 | int i = 0; | ||
177 | 188 | ||
178 | return of_flat_dt_is_compatible(root, "keymile,KMETER1"); | 189 | while (board[i]) { |
190 | if (of_flat_dt_is_compatible(node, board[i])) | ||
191 | break; | ||
192 | i++; | ||
193 | } | ||
194 | return (board[i] != NULL); | ||
179 | } | 195 | } |
180 | 196 | ||
181 | define_machine(kmeter1) { | 197 | define_machine(mpc83xx_km) { |
182 | .name = "KMETER1", | 198 | .name = "mpc83xx-km-platform", |
183 | .probe = kmeter1_probe, | 199 | .probe = mpc83xx_km_probe, |
184 | .setup_arch = kmeter1_setup_arch, | 200 | .setup_arch = mpc83xx_km_setup_arch, |
185 | .init_IRQ = kmeter1_init_IRQ, | 201 | .init_IRQ = mpc83xx_km_init_IRQ, |
186 | .get_irq = ipic_get_irq, | 202 | .get_irq = ipic_get_irq, |
187 | .restart = mpc83xx_restart, | 203 | .restart = mpc83xx_restart, |
188 | .time_init = mpc83xx_time_init, | 204 | .time_init = mpc83xx_time_init, |
diff --git a/arch/powerpc/platforms/83xx/mpc830x_rdb.c b/arch/powerpc/platforms/83xx/mpc830x_rdb.c index ac102ee9abe8..d0c4e15b7794 100644 --- a/arch/powerpc/platforms/83xx/mpc830x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc830x_rdb.c | |||
@@ -57,15 +57,19 @@ static void __init mpc830x_rdb_init_IRQ(void) | |||
57 | ipic_set_default_priority(); | 57 | ipic_set_default_priority(); |
58 | } | 58 | } |
59 | 59 | ||
60 | static const char *board[] __initdata = { | ||
61 | "MPC8308RDB", | ||
62 | "fsl,mpc8308rdb", | ||
63 | "denx,mpc8308_p1m", | ||
64 | NULL | ||
65 | }; | ||
66 | |||
60 | /* | 67 | /* |
61 | * Called very early, MMU is off, device-tree isn't unflattened | 68 | * Called very early, MMU is off, device-tree isn't unflattened |
62 | */ | 69 | */ |
63 | static int __init mpc830x_rdb_probe(void) | 70 | static int __init mpc830x_rdb_probe(void) |
64 | { | 71 | { |
65 | unsigned long root = of_get_flat_dt_root(); | 72 | return of_flat_dt_match(of_get_flat_dt_root(), board); |
66 | |||
67 | return of_flat_dt_is_compatible(root, "MPC8308RDB") || | ||
68 | of_flat_dt_is_compatible(root, "fsl,mpc8308rdb"); | ||
69 | } | 73 | } |
70 | 74 | ||
71 | static struct of_device_id __initdata of_bus_ids[] = { | 75 | static struct of_device_id __initdata of_bus_ids[] = { |
diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c index ae525e4745d2..f859ead49a8d 100644 --- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c | |||
@@ -60,15 +60,18 @@ static void __init mpc831x_rdb_init_IRQ(void) | |||
60 | ipic_set_default_priority(); | 60 | ipic_set_default_priority(); |
61 | } | 61 | } |
62 | 62 | ||
63 | static const char *board[] __initdata = { | ||
64 | "MPC8313ERDB", | ||
65 | "fsl,mpc8315erdb", | ||
66 | NULL | ||
67 | }; | ||
68 | |||
63 | /* | 69 | /* |
64 | * Called very early, MMU is off, device-tree isn't unflattened | 70 | * Called very early, MMU is off, device-tree isn't unflattened |
65 | */ | 71 | */ |
66 | static int __init mpc831x_rdb_probe(void) | 72 | static int __init mpc831x_rdb_probe(void) |
67 | { | 73 | { |
68 | unsigned long root = of_get_flat_dt_root(); | 74 | return of_flat_dt_match(of_get_flat_dt_root(), board); |
69 | |||
70 | return of_flat_dt_is_compatible(root, "MPC8313ERDB") || | ||
71 | of_flat_dt_is_compatible(root, "fsl,mpc8315erdb"); | ||
72 | } | 75 | } |
73 | 76 | ||
74 | static struct of_device_id __initdata of_bus_ids[] = { | 77 | static struct of_device_id __initdata of_bus_ids[] = { |
diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c index 910caa6b5810..7bafbf2ec0f9 100644 --- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c | |||
@@ -101,17 +101,20 @@ static void __init mpc837x_rdb_init_IRQ(void) | |||
101 | ipic_set_default_priority(); | 101 | ipic_set_default_priority(); |
102 | } | 102 | } |
103 | 103 | ||
104 | static const char *board[] __initdata = { | ||
105 | "fsl,mpc8377rdb", | ||
106 | "fsl,mpc8378rdb", | ||
107 | "fsl,mpc8379rdb", | ||
108 | "fsl,mpc8377wlan", | ||
109 | NULL | ||
110 | }; | ||
111 | |||
104 | /* | 112 | /* |
105 | * Called very early, MMU is off, device-tree isn't unflattened | 113 | * Called very early, MMU is off, device-tree isn't unflattened |
106 | */ | 114 | */ |
107 | static int __init mpc837x_rdb_probe(void) | 115 | static int __init mpc837x_rdb_probe(void) |
108 | { | 116 | { |
109 | unsigned long root = of_get_flat_dt_root(); | 117 | return of_flat_dt_match(of_get_flat_dt_root(), board); |
110 | |||
111 | return of_flat_dt_is_compatible(root, "fsl,mpc8377rdb") || | ||
112 | of_flat_dt_is_compatible(root, "fsl,mpc8378rdb") || | ||
113 | of_flat_dt_is_compatible(root, "fsl,mpc8379rdb") || | ||
114 | of_flat_dt_is_compatible(root, "fsl,mpc8377wlan"); | ||
115 | } | 118 | } |
116 | 119 | ||
117 | define_machine(mpc837x_rdb) { | 120 | define_machine(mpc837x_rdb) { |
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h index 0fea8811d45b..82a434510d83 100644 --- a/arch/powerpc/platforms/83xx/mpc83xx.h +++ b/arch/powerpc/platforms/83xx/mpc83xx.h | |||
@@ -35,6 +35,8 @@ | |||
35 | 35 | ||
36 | /* system i/o configuration register high */ | 36 | /* system i/o configuration register high */ |
37 | #define MPC83XX_SICRH_OFFS 0x118 | 37 | #define MPC83XX_SICRH_OFFS 0x118 |
38 | #define MPC8308_SICRH_USB_MASK 0x000c0000 | ||
39 | #define MPC8308_SICRH_USB_ULPI 0x00040000 | ||
38 | #define MPC834X_SICRH_USB_UTMI 0x00020000 | 40 | #define MPC834X_SICRH_USB_UTMI 0x00020000 |
39 | #define MPC831X_SICRH_USB_MASK 0x000000e0 | 41 | #define MPC831X_SICRH_USB_MASK 0x000000e0 |
40 | #define MPC831X_SICRH_USB_ULPI 0x000000a0 | 42 | #define MPC831X_SICRH_USB_ULPI 0x000000a0 |
diff --git a/arch/powerpc/platforms/83xx/suspend-asm.S b/arch/powerpc/platforms/83xx/suspend-asm.S index 1930543c98d3..3d1ecd211776 100644 --- a/arch/powerpc/platforms/83xx/suspend-asm.S +++ b/arch/powerpc/platforms/83xx/suspend-asm.S | |||
@@ -231,7 +231,7 @@ _GLOBAL(mpc83xx_enter_deep_sleep) | |||
231 | ori r4, r4, 0x002a | 231 | ori r4, r4, 0x002a |
232 | mtspr SPRN_DBAT0L, r4 | 232 | mtspr SPRN_DBAT0L, r4 |
233 | lis r8, TMP_VIRT_IMMR@h | 233 | lis r8, TMP_VIRT_IMMR@h |
234 | ori r4, r8, 0x001e /* 1 MByte accessable from Kernel Space only */ | 234 | ori r4, r8, 0x001e /* 1 MByte accessible from Kernel Space only */ |
235 | mtspr SPRN_DBAT0U, r4 | 235 | mtspr SPRN_DBAT0U, r4 |
236 | isync | 236 | isync |
237 | 237 | ||
@@ -241,7 +241,7 @@ _GLOBAL(mpc83xx_enter_deep_sleep) | |||
241 | ori r4, r4, 0x002a | 241 | ori r4, r4, 0x002a |
242 | mtspr SPRN_DBAT1L, r4 | 242 | mtspr SPRN_DBAT1L, r4 |
243 | lis r9, (TMP_VIRT_IMMR + 0x01000000)@h | 243 | lis r9, (TMP_VIRT_IMMR + 0x01000000)@h |
244 | ori r4, r9, 0x001e /* 1 MByte accessable from Kernel Space only */ | 244 | ori r4, r9, 0x001e /* 1 MByte accessible from Kernel Space only */ |
245 | mtspr SPRN_DBAT1U, r4 | 245 | mtspr SPRN_DBAT1U, r4 |
246 | isync | 246 | isync |
247 | 247 | ||
@@ -253,7 +253,7 @@ _GLOBAL(mpc83xx_enter_deep_sleep) | |||
253 | li r4, 0x0002 | 253 | li r4, 0x0002 |
254 | mtspr SPRN_DBAT2L, r4 | 254 | mtspr SPRN_DBAT2L, r4 |
255 | lis r4, KERNELBASE@h | 255 | lis r4, KERNELBASE@h |
256 | ori r4, r4, 0x001e /* 1 MByte accessable from Kernel Space only */ | 256 | ori r4, r4, 0x001e /* 1 MByte accessible from Kernel Space only */ |
257 | mtspr SPRN_DBAT2U, r4 | 257 | mtspr SPRN_DBAT2U, r4 |
258 | isync | 258 | isync |
259 | 259 | ||
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c index 75ae77f1af6a..104faa8aa23c 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ b/arch/powerpc/platforms/83xx/suspend.c | |||
@@ -311,21 +311,28 @@ static int mpc83xx_is_pci_agent(void) | |||
311 | return ret; | 311 | return ret; |
312 | } | 312 | } |
313 | 313 | ||
314 | static struct platform_suspend_ops mpc83xx_suspend_ops = { | 314 | static const struct platform_suspend_ops mpc83xx_suspend_ops = { |
315 | .valid = mpc83xx_suspend_valid, | 315 | .valid = mpc83xx_suspend_valid, |
316 | .begin = mpc83xx_suspend_begin, | 316 | .begin = mpc83xx_suspend_begin, |
317 | .enter = mpc83xx_suspend_enter, | 317 | .enter = mpc83xx_suspend_enter, |
318 | .end = mpc83xx_suspend_end, | 318 | .end = mpc83xx_suspend_end, |
319 | }; | 319 | }; |
320 | 320 | ||
321 | static int pmc_probe(struct platform_device *ofdev, | 321 | static struct of_device_id pmc_match[]; |
322 | const struct of_device_id *match) | 322 | static int pmc_probe(struct platform_device *ofdev) |
323 | { | 323 | { |
324 | const struct of_device_id *match; | ||
324 | struct device_node *np = ofdev->dev.of_node; | 325 | struct device_node *np = ofdev->dev.of_node; |
325 | struct resource res; | 326 | struct resource res; |
326 | struct pmc_type *type = match->data; | 327 | struct pmc_type *type; |
327 | int ret = 0; | 328 | int ret = 0; |
328 | 329 | ||
330 | match = of_match_device(pmc_match, &ofdev->dev); | ||
331 | if (!match) | ||
332 | return -EINVAL; | ||
333 | |||
334 | type = match->data; | ||
335 | |||
329 | if (!of_device_is_available(np)) | 336 | if (!of_device_is_available(np)) |
330 | return -ENODEV; | 337 | return -ENODEV; |
331 | 338 | ||
@@ -422,7 +429,7 @@ static struct of_device_id pmc_match[] = { | |||
422 | {} | 429 | {} |
423 | }; | 430 | }; |
424 | 431 | ||
425 | static struct of_platform_driver pmc_driver = { | 432 | static struct platform_driver pmc_driver = { |
426 | .driver = { | 433 | .driver = { |
427 | .name = "mpc83xx-pmc", | 434 | .name = "mpc83xx-pmc", |
428 | .owner = THIS_MODULE, | 435 | .owner = THIS_MODULE, |
@@ -434,7 +441,7 @@ static struct of_platform_driver pmc_driver = { | |||
434 | 441 | ||
435 | static int pmc_init(void) | 442 | static int pmc_init(void) |
436 | { | 443 | { |
437 | return of_register_platform_driver(&pmc_driver); | 444 | return platform_driver_register(&pmc_driver); |
438 | } | 445 | } |
439 | 446 | ||
440 | module_init(pmc_init); | 447 | module_init(pmc_init); |
diff --git a/arch/powerpc/platforms/83xx/usb.c b/arch/powerpc/platforms/83xx/usb.c index 3ba4bb7d41bb..2c64164722d0 100644 --- a/arch/powerpc/platforms/83xx/usb.c +++ b/arch/powerpc/platforms/83xx/usb.c | |||
@@ -127,7 +127,8 @@ int mpc831x_usb_cfg(void) | |||
127 | 127 | ||
128 | /* Configure clock */ | 128 | /* Configure clock */ |
129 | immr_node = of_get_parent(np); | 129 | immr_node = of_get_parent(np); |
130 | if (immr_node && of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) | 130 | if (immr_node && (of_device_is_compatible(immr_node, "fsl,mpc8315-immr") || |
131 | of_device_is_compatible(immr_node, "fsl,mpc8308-immr"))) | ||
131 | clrsetbits_be32(immap + MPC83XX_SCCR_OFFS, | 132 | clrsetbits_be32(immap + MPC83XX_SCCR_OFFS, |
132 | MPC8315_SCCR_USB_MASK, | 133 | MPC8315_SCCR_USB_MASK, |
133 | MPC8315_SCCR_USB_DRCM_01); | 134 | MPC8315_SCCR_USB_DRCM_01); |
@@ -138,7 +139,11 @@ int mpc831x_usb_cfg(void) | |||
138 | 139 | ||
139 | /* Configure pin mux for ULPI. There is no pin mux for UTMI */ | 140 | /* Configure pin mux for ULPI. There is no pin mux for UTMI */ |
140 | if (prop && !strcmp(prop, "ulpi")) { | 141 | if (prop && !strcmp(prop, "ulpi")) { |
141 | if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) { | 142 | if (of_device_is_compatible(immr_node, "fsl,mpc8308-immr")) { |
143 | clrsetbits_be32(immap + MPC83XX_SICRH_OFFS, | ||
144 | MPC8308_SICRH_USB_MASK, | ||
145 | MPC8308_SICRH_USB_ULPI); | ||
146 | } else if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) { | ||
142 | clrsetbits_be32(immap + MPC83XX_SICRL_OFFS, | 147 | clrsetbits_be32(immap + MPC83XX_SICRL_OFFS, |
143 | MPC8315_SICRL_USB_MASK, | 148 | MPC8315_SICRL_USB_MASK, |
144 | MPC8315_SICRL_USB_ULPI); | 149 | MPC8315_SICRL_USB_ULPI); |
@@ -173,6 +178,9 @@ int mpc831x_usb_cfg(void) | |||
173 | !strcmp(prop, "utmi"))) { | 178 | !strcmp(prop, "utmi"))) { |
174 | u32 refsel; | 179 | u32 refsel; |
175 | 180 | ||
181 | if (of_device_is_compatible(immr_node, "fsl,mpc8308-immr")) | ||
182 | goto out; | ||
183 | |||
176 | if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) | 184 | if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) |
177 | refsel = CONTROL_REFSEL_24MHZ; | 185 | refsel = CONTROL_REFSEL_24MHZ; |
178 | else | 186 | else |
@@ -186,9 +194,11 @@ int mpc831x_usb_cfg(void) | |||
186 | temp = CONTROL_PHY_CLK_SEL_ULPI; | 194 | temp = CONTROL_PHY_CLK_SEL_ULPI; |
187 | #ifdef CONFIG_USB_OTG | 195 | #ifdef CONFIG_USB_OTG |
188 | /* Set OTG_PORT */ | 196 | /* Set OTG_PORT */ |
189 | dr_mode = of_get_property(np, "dr_mode", NULL); | 197 | if (!of_device_is_compatible(immr_node, "fsl,mpc8308-immr")) { |
190 | if (dr_mode && !strcmp(dr_mode, "otg")) | 198 | dr_mode = of_get_property(np, "dr_mode", NULL); |
191 | temp |= CONTROL_OTG_PORT; | 199 | if (dr_mode && !strcmp(dr_mode, "otg")) |
200 | temp |= CONTROL_OTG_PORT; | ||
201 | } | ||
192 | #endif /* CONFIG_USB_OTG */ | 202 | #endif /* CONFIG_USB_OTG */ |
193 | out_be32(usb_regs + FSL_USB2_CONTROL_OFFS, temp); | 203 | out_be32(usb_regs + FSL_USB2_CONTROL_OFFS, temp); |
194 | } else { | 204 | } else { |
@@ -196,6 +206,7 @@ int mpc831x_usb_cfg(void) | |||
196 | ret = -EINVAL; | 206 | ret = -EINVAL; |
197 | } | 207 | } |
198 | 208 | ||
209 | out: | ||
199 | iounmap(usb_regs); | 210 | iounmap(usb_regs); |
200 | of_node_put(np); | 211 | of_node_put(np); |
201 | return ret; | 212 | return ret; |
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig index bea1f5905ad4..b6976e1726e4 100644 --- a/arch/powerpc/platforms/85xx/Kconfig +++ b/arch/powerpc/platforms/85xx/Kconfig | |||
@@ -11,6 +11,8 @@ menuconfig FSL_SOC_BOOKE | |||
11 | 11 | ||
12 | if FSL_SOC_BOOKE | 12 | if FSL_SOC_BOOKE |
13 | 13 | ||
14 | if PPC32 | ||
15 | |||
14 | config MPC8540_ADS | 16 | config MPC8540_ADS |
15 | bool "Freescale MPC8540 ADS" | 17 | bool "Freescale MPC8540 ADS" |
16 | select DEFAULT_UIMAGE | 18 | select DEFAULT_UIMAGE |
@@ -153,10 +155,20 @@ config SBC8560 | |||
153 | help | 155 | help |
154 | This option enables support for the Wind River SBC8560 board | 156 | This option enables support for the Wind River SBC8560 board |
155 | 157 | ||
158 | config P3041_DS | ||
159 | bool "Freescale P3041 DS" | ||
160 | select DEFAULT_UIMAGE | ||
161 | select PPC_E500MC | ||
162 | select PHYS_64BIT | ||
163 | select SWIOTLB | ||
164 | select MPC8xxx_GPIO | ||
165 | select HAS_RAPIDIO | ||
166 | help | ||
167 | This option enables support for the P3041 DS board | ||
168 | |||
156 | config P4080_DS | 169 | config P4080_DS |
157 | bool "Freescale P4080 DS" | 170 | bool "Freescale P4080 DS" |
158 | select DEFAULT_UIMAGE | 171 | select DEFAULT_UIMAGE |
159 | select PPC_FSL_BOOK3E | ||
160 | select PPC_E500MC | 172 | select PPC_E500MC |
161 | select PHYS_64BIT | 173 | select PHYS_64BIT |
162 | select SWIOTLB | 174 | select SWIOTLB |
@@ -165,6 +177,20 @@ config P4080_DS | |||
165 | help | 177 | help |
166 | This option enables support for the P4080 DS board | 178 | This option enables support for the P4080 DS board |
167 | 179 | ||
180 | endif # PPC32 | ||
181 | |||
182 | config P5020_DS | ||
183 | bool "Freescale P5020 DS" | ||
184 | select DEFAULT_UIMAGE | ||
185 | select E500 | ||
186 | select PPC_E500MC | ||
187 | select PHYS_64BIT | ||
188 | select SWIOTLB | ||
189 | select MPC8xxx_GPIO | ||
190 | select HAS_RAPIDIO | ||
191 | help | ||
192 | This option enables support for the P5020 DS board | ||
193 | |||
168 | endif # FSL_SOC_BOOKE | 194 | endif # FSL_SOC_BOOKE |
169 | 195 | ||
170 | config TQM85xx | 196 | config TQM85xx |
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile index a2ec3f8f4d06..dd70db77d63e 100644 --- a/arch/powerpc/platforms/85xx/Makefile +++ b/arch/powerpc/platforms/85xx/Makefile | |||
@@ -11,7 +11,9 @@ obj-$(CONFIG_MPC85xx_DS) += mpc85xx_ds.o | |||
11 | obj-$(CONFIG_MPC85xx_MDS) += mpc85xx_mds.o | 11 | obj-$(CONFIG_MPC85xx_MDS) += mpc85xx_mds.o |
12 | obj-$(CONFIG_MPC85xx_RDB) += mpc85xx_rdb.o | 12 | obj-$(CONFIG_MPC85xx_RDB) += mpc85xx_rdb.o |
13 | obj-$(CONFIG_P1022_DS) += p1022_ds.o | 13 | obj-$(CONFIG_P1022_DS) += p1022_ds.o |
14 | obj-$(CONFIG_P3041_DS) += p3041_ds.o corenet_ds.o | ||
14 | obj-$(CONFIG_P4080_DS) += p4080_ds.o corenet_ds.o | 15 | obj-$(CONFIG_P4080_DS) += p4080_ds.o corenet_ds.o |
16 | obj-$(CONFIG_P5020_DS) += p5020_ds.o corenet_ds.o | ||
15 | obj-$(CONFIG_STX_GP3) += stx_gp3.o | 17 | obj-$(CONFIG_STX_GP3) += stx_gp3.o |
16 | obj-$(CONFIG_TQM85xx) += tqm85xx.o | 18 | obj-$(CONFIG_TQM85xx) += tqm85xx.o |
17 | obj-$(CONFIG_SBC8560) += sbc8560.o | 19 | obj-$(CONFIG_SBC8560) += sbc8560.o |
diff --git a/arch/powerpc/platforms/85xx/ksi8560.c b/arch/powerpc/platforms/85xx/ksi8560.c index f4d36b5a2e00..c46f9359be15 100644 --- a/arch/powerpc/platforms/85xx/ksi8560.c +++ b/arch/powerpc/platforms/85xx/ksi8560.c | |||
@@ -56,12 +56,13 @@ static void machine_restart(char *cmd) | |||
56 | 56 | ||
57 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) | 57 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) |
58 | { | 58 | { |
59 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
59 | int cascade_irq; | 60 | int cascade_irq; |
60 | 61 | ||
61 | while ((cascade_irq = cpm2_get_irq()) >= 0) | 62 | while ((cascade_irq = cpm2_get_irq()) >= 0) |
62 | generic_handle_irq(cascade_irq); | 63 | generic_handle_irq(cascade_irq); |
63 | 64 | ||
64 | desc->chip->eoi(irq); | 65 | chip->irq_eoi(&desc->irq_data); |
65 | } | 66 | } |
66 | 67 | ||
67 | static void __init ksi8560_pic_init(void) | 68 | static void __init ksi8560_pic_init(void) |
@@ -105,7 +106,7 @@ static void __init ksi8560_pic_init(void) | |||
105 | 106 | ||
106 | cpm2_pic_init(np); | 107 | cpm2_pic_init(np); |
107 | of_node_put(np); | 108 | of_node_put(np); |
108 | set_irq_chained_handler(irq, cpm2_cascade); | 109 | irq_set_chained_handler(irq, cpm2_cascade); |
109 | #endif | 110 | #endif |
110 | } | 111 | } |
111 | 112 | ||
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c index 9438a892afc4..3b2c9bb66199 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c | |||
@@ -50,12 +50,13 @@ static int mpc85xx_exclude_device(struct pci_controller *hose, | |||
50 | 50 | ||
51 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) | 51 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) |
52 | { | 52 | { |
53 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
53 | int cascade_irq; | 54 | int cascade_irq; |
54 | 55 | ||
55 | while ((cascade_irq = cpm2_get_irq()) >= 0) | 56 | while ((cascade_irq = cpm2_get_irq()) >= 0) |
56 | generic_handle_irq(cascade_irq); | 57 | generic_handle_irq(cascade_irq); |
57 | 58 | ||
58 | desc->chip->eoi(irq); | 59 | chip->irq_eoi(&desc->irq_data); |
59 | } | 60 | } |
60 | 61 | ||
61 | #endif /* CONFIG_CPM2 */ | 62 | #endif /* CONFIG_CPM2 */ |
@@ -100,7 +101,7 @@ static void __init mpc85xx_ads_pic_init(void) | |||
100 | 101 | ||
101 | cpm2_pic_init(np); | 102 | cpm2_pic_init(np); |
102 | of_node_put(np); | 103 | of_node_put(np); |
103 | set_irq_chained_handler(irq, cpm2_cascade); | 104 | irq_set_chained_handler(irq, cpm2_cascade); |
104 | #endif | 105 | #endif |
105 | } | 106 | } |
106 | 107 | ||
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c index 458d91fba91d..6299a2a51ae8 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c | |||
@@ -255,7 +255,7 @@ static int mpc85xx_cds_8259_attach(void) | |||
255 | } | 255 | } |
256 | 256 | ||
257 | /* Success. Connect our low-level cascade handler. */ | 257 | /* Success. Connect our low-level cascade handler. */ |
258 | set_irq_handler(cascade_irq, mpc85xx_8259_cascade_handler); | 258 | irq_set_handler(cascade_irq, mpc85xx_8259_cascade_handler); |
259 | 259 | ||
260 | return 0; | 260 | return 0; |
261 | } | 261 | } |
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c index 8190bc25bf27..c7b97f70312e 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c | |||
@@ -47,12 +47,13 @@ | |||
47 | #ifdef CONFIG_PPC_I8259 | 47 | #ifdef CONFIG_PPC_I8259 |
48 | static void mpc85xx_8259_cascade(unsigned int irq, struct irq_desc *desc) | 48 | static void mpc85xx_8259_cascade(unsigned int irq, struct irq_desc *desc) |
49 | { | 49 | { |
50 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
50 | unsigned int cascade_irq = i8259_irq(); | 51 | unsigned int cascade_irq = i8259_irq(); |
51 | 52 | ||
52 | if (cascade_irq != NO_IRQ) { | 53 | if (cascade_irq != NO_IRQ) { |
53 | generic_handle_irq(cascade_irq); | 54 | generic_handle_irq(cascade_irq); |
54 | } | 55 | } |
55 | desc->chip->eoi(irq); | 56 | chip->irq_eoi(&desc->irq_data); |
56 | } | 57 | } |
57 | #endif /* CONFIG_PPC_I8259 */ | 58 | #endif /* CONFIG_PPC_I8259 */ |
58 | 59 | ||
@@ -121,7 +122,7 @@ void __init mpc85xx_ds_pic_init(void) | |||
121 | i8259_init(cascade_node, 0); | 122 | i8259_init(cascade_node, 0); |
122 | of_node_put(cascade_node); | 123 | of_node_put(cascade_node); |
123 | 124 | ||
124 | set_irq_chained_handler(cascade_irq, mpc85xx_8259_cascade); | 125 | irq_set_chained_handler(cascade_irq, mpc85xx_8259_cascade); |
125 | #endif /* CONFIG_PPC_I8259 */ | 126 | #endif /* CONFIG_PPC_I8259 */ |
126 | } | 127 | } |
127 | 128 | ||
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c index aa34cac4eb5c..747d1ee661fd 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c | |||
@@ -309,7 +309,7 @@ static void __init mpc85xx_mds_qe_init(void) | |||
309 | /* P1021 has pins muxed for QE and other functions. To | 309 | /* P1021 has pins muxed for QE and other functions. To |
310 | * enable QE UEC mode, we need to set bit QE0 for UCC1 | 310 | * enable QE UEC mode, we need to set bit QE0 for UCC1 |
311 | * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9 | 311 | * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9 |
312 | * and QE12 for QE MII management singals in PMUXCR | 312 | * and QE12 for QE MII management signals in PMUXCR |
313 | * register. | 313 | * register. |
314 | */ | 314 | */ |
315 | setbits32(pmuxcr, MPC85xx_PMUXCR_QE0 | | 315 | setbits32(pmuxcr, MPC85xx_PMUXCR_QE0 | |
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c index 34e00902ce86..7eb5c40c069f 100644 --- a/arch/powerpc/platforms/85xx/p1022_ds.c +++ b/arch/powerpc/platforms/85xx/p1022_ds.c | |||
@@ -8,7 +8,6 @@ | |||
8 | * Copyright 2010 Freescale Semiconductor, Inc. | 8 | * Copyright 2010 Freescale Semiconductor, Inc. |
9 | * | 9 | * |
10 | * This file is taken from the Freescale P1022DS BSP, with modifications: | 10 | * This file is taken from the Freescale P1022DS BSP, with modifications: |
11 | * 1) No DIU support (pending rewrite of DIU code) | ||
12 | * 2) No AMP support | 11 | * 2) No AMP support |
13 | * 3) No PCI endpoint support | 12 | * 3) No PCI endpoint support |
14 | * | 13 | * |
@@ -20,12 +19,211 @@ | |||
20 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
21 | #include <linux/of_platform.h> | 20 | #include <linux/of_platform.h> |
22 | #include <linux/memblock.h> | 21 | #include <linux/memblock.h> |
23 | 22 | #include <asm/div64.h> | |
24 | #include <asm/mpic.h> | 23 | #include <asm/mpic.h> |
25 | #include <asm/swiotlb.h> | 24 | #include <asm/swiotlb.h> |
26 | 25 | ||
27 | #include <sysdev/fsl_soc.h> | 26 | #include <sysdev/fsl_soc.h> |
28 | #include <sysdev/fsl_pci.h> | 27 | #include <sysdev/fsl_pci.h> |
28 | #include <asm/fsl_guts.h> | ||
29 | |||
30 | #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) | ||
31 | |||
32 | /* | ||
33 | * Board-specific initialization of the DIU. This code should probably be | ||
34 | * executed when the DIU is opened, rather than in arch code, but the DIU | ||
35 | * driver does not have a mechanism for this (yet). | ||
36 | * | ||
37 | * This is especially problematic on the P1022DS because the local bus (eLBC) | ||
38 | * and the DIU video signals share the same pins, which means that enabling the | ||
39 | * DIU will disable access to NOR flash. | ||
40 | */ | ||
41 | |||
42 | /* DIU Pixel Clock bits of the CLKDVDR Global Utilities register */ | ||
43 | #define CLKDVDR_PXCKEN 0x80000000 | ||
44 | #define CLKDVDR_PXCKINV 0x10000000 | ||
45 | #define CLKDVDR_PXCKDLY 0x06000000 | ||
46 | #define CLKDVDR_PXCLK_MASK 0x00FF0000 | ||
47 | |||
48 | /* Some ngPIXIS register definitions */ | ||
49 | #define PX_BRDCFG1_DVIEN 0x80 | ||
50 | #define PX_BRDCFG1_DFPEN 0x40 | ||
51 | #define PX_BRDCFG1_BACKLIGHT 0x20 | ||
52 | #define PX_BRDCFG1_DDCEN 0x10 | ||
53 | |||
54 | /* | ||
55 | * DIU Area Descriptor | ||
56 | * | ||
57 | * Note that we need to byte-swap the value before it's written to the AD | ||
58 | * register. So even though the registers don't look like they're in the same | ||
59 | * bit positions as they are on the MPC8610, the same value is written to the | ||
60 | * AD register on the MPC8610 and on the P1022. | ||
61 | */ | ||
62 | #define AD_BYTE_F 0x10000000 | ||
63 | #define AD_ALPHA_C_MASK 0x0E000000 | ||
64 | #define AD_ALPHA_C_SHIFT 25 | ||
65 | #define AD_BLUE_C_MASK 0x01800000 | ||
66 | #define AD_BLUE_C_SHIFT 23 | ||
67 | #define AD_GREEN_C_MASK 0x00600000 | ||
68 | #define AD_GREEN_C_SHIFT 21 | ||
69 | #define AD_RED_C_MASK 0x00180000 | ||
70 | #define AD_RED_C_SHIFT 19 | ||
71 | #define AD_PALETTE 0x00040000 | ||
72 | #define AD_PIXEL_S_MASK 0x00030000 | ||
73 | #define AD_PIXEL_S_SHIFT 16 | ||
74 | #define AD_COMP_3_MASK 0x0000F000 | ||
75 | #define AD_COMP_3_SHIFT 12 | ||
76 | #define AD_COMP_2_MASK 0x00000F00 | ||
77 | #define AD_COMP_2_SHIFT 8 | ||
78 | #define AD_COMP_1_MASK 0x000000F0 | ||
79 | #define AD_COMP_1_SHIFT 4 | ||
80 | #define AD_COMP_0_MASK 0x0000000F | ||
81 | #define AD_COMP_0_SHIFT 0 | ||
82 | |||
83 | #define MAKE_AD(alpha, red, blue, green, size, c0, c1, c2, c3) \ | ||
84 | cpu_to_le32(AD_BYTE_F | (alpha << AD_ALPHA_C_SHIFT) | \ | ||
85 | (blue << AD_BLUE_C_SHIFT) | (green << AD_GREEN_C_SHIFT) | \ | ||
86 | (red << AD_RED_C_SHIFT) | (c3 << AD_COMP_3_SHIFT) | \ | ||
87 | (c2 << AD_COMP_2_SHIFT) | (c1 << AD_COMP_1_SHIFT) | \ | ||
88 | (c0 << AD_COMP_0_SHIFT) | (size << AD_PIXEL_S_SHIFT)) | ||
89 | |||
90 | /** | ||
91 | * p1022ds_get_pixel_format: return the Area Descriptor for a given pixel depth | ||
92 | * | ||
93 | * The Area Descriptor is a 32-bit value that determine which bits in each | ||
94 | * pixel are to be used for each color. | ||
95 | */ | ||
96 | static unsigned int p1022ds_get_pixel_format(unsigned int bits_per_pixel, | ||
97 | int monitor_port) | ||
98 | { | ||
99 | switch (bits_per_pixel) { | ||
100 | case 32: | ||
101 | /* 0x88883316 */ | ||
102 | return MAKE_AD(3, 2, 0, 1, 3, 8, 8, 8, 8); | ||
103 | case 24: | ||
104 | /* 0x88082219 */ | ||
105 | return MAKE_AD(4, 0, 1, 2, 2, 0, 8, 8, 8); | ||
106 | case 16: | ||
107 | /* 0x65053118 */ | ||
108 | return MAKE_AD(4, 2, 1, 0, 1, 5, 6, 5, 0); | ||
109 | default: | ||
110 | pr_err("fsl-diu: unsupported pixel depth %u\n", bits_per_pixel); | ||
111 | return 0; | ||
112 | } | ||
113 | } | ||
114 | |||
115 | /** | ||
116 | * p1022ds_set_gamma_table: update the gamma table, if necessary | ||
117 | * | ||
118 | * On some boards, the gamma table for some ports may need to be modified. | ||
119 | * This is not the case on the P1022DS, so we do nothing. | ||
120 | */ | ||
121 | static void p1022ds_set_gamma_table(int monitor_port, char *gamma_table_base) | ||
122 | { | ||
123 | } | ||
124 | |||
125 | /** | ||
126 | * p1022ds_set_monitor_port: switch the output to a different monitor port | ||
127 | * | ||
128 | */ | ||
129 | static void p1022ds_set_monitor_port(int monitor_port) | ||
130 | { | ||
131 | struct device_node *pixis_node; | ||
132 | u8 __iomem *brdcfg1; | ||
133 | |||
134 | pixis_node = of_find_compatible_node(NULL, NULL, "fsl,p1022ds-pixis"); | ||
135 | if (!pixis_node) { | ||
136 | pr_err("p1022ds: missing ngPIXIS node\n"); | ||
137 | return; | ||
138 | } | ||
139 | |||
140 | brdcfg1 = of_iomap(pixis_node, 0); | ||
141 | if (!brdcfg1) { | ||
142 | pr_err("p1022ds: could not map ngPIXIS registers\n"); | ||
143 | return; | ||
144 | } | ||
145 | brdcfg1 += 9; /* BRDCFG1 is at offset 9 in the ngPIXIS */ | ||
146 | |||
147 | switch (monitor_port) { | ||
148 | case 0: /* DVI */ | ||
149 | /* Enable the DVI port, disable the DFP and the backlight */ | ||
150 | clrsetbits_8(brdcfg1, PX_BRDCFG1_DFPEN | PX_BRDCFG1_BACKLIGHT, | ||
151 | PX_BRDCFG1_DVIEN); | ||
152 | break; | ||
153 | case 1: /* Single link LVDS */ | ||
154 | /* Enable the DFP port, disable the DVI and the backlight */ | ||
155 | clrsetbits_8(brdcfg1, PX_BRDCFG1_DVIEN | PX_BRDCFG1_BACKLIGHT, | ||
156 | PX_BRDCFG1_DFPEN); | ||
157 | break; | ||
158 | default: | ||
159 | pr_err("p1022ds: unsupported monitor port %i\n", monitor_port); | ||
160 | } | ||
161 | } | ||
162 | |||
163 | /** | ||
164 | * p1022ds_set_pixel_clock: program the DIU's clock | ||
165 | * | ||
166 | * @pixclock: the wavelength, in picoseconds, of the clock | ||
167 | */ | ||
168 | void p1022ds_set_pixel_clock(unsigned int pixclock) | ||
169 | { | ||
170 | struct device_node *guts_np = NULL; | ||
171 | struct ccsr_guts_85xx __iomem *guts; | ||
172 | unsigned long freq; | ||
173 | u64 temp; | ||
174 | u32 pxclk; | ||
175 | |||
176 | /* Map the global utilities registers. */ | ||
177 | guts_np = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts"); | ||
178 | if (!guts_np) { | ||
179 | pr_err("p1022ds: missing global utilties device node\n"); | ||
180 | return; | ||
181 | } | ||
182 | |||
183 | guts = of_iomap(guts_np, 0); | ||
184 | of_node_put(guts_np); | ||
185 | if (!guts) { | ||
186 | pr_err("p1022ds: could not map global utilties device\n"); | ||
187 | return; | ||
188 | } | ||
189 | |||
190 | /* Convert pixclock from a wavelength to a frequency */ | ||
191 | temp = 1000000000000ULL; | ||
192 | do_div(temp, pixclock); | ||
193 | freq = temp; | ||
194 | |||
195 | /* pixclk is the ratio of the platform clock to the pixel clock */ | ||
196 | pxclk = DIV_ROUND_CLOSEST(fsl_get_sys_freq(), freq); | ||
197 | |||
198 | /* Disable the pixel clock, and set it to non-inverted and no delay */ | ||
199 | clrbits32(&guts->clkdvdr, | ||
200 | CLKDVDR_PXCKEN | CLKDVDR_PXCKDLY | CLKDVDR_PXCLK_MASK); | ||
201 | |||
202 | /* Enable the clock and set the pxclk */ | ||
203 | setbits32(&guts->clkdvdr, CLKDVDR_PXCKEN | (pxclk << 16)); | ||
204 | } | ||
205 | |||
206 | /** | ||
207 | * p1022ds_show_monitor_port: show the current monitor | ||
208 | * | ||
209 | * This function returns a string indicating whether the current monitor is | ||
210 | * set to DVI or LVDS. | ||
211 | */ | ||
212 | ssize_t p1022ds_show_monitor_port(int monitor_port, char *buf) | ||
213 | { | ||
214 | return sprintf(buf, "%c0 - DVI\n%c1 - Single link LVDS\n", | ||
215 | monitor_port == 0 ? '*' : ' ', monitor_port == 1 ? '*' : ' '); | ||
216 | } | ||
217 | |||
218 | /** | ||
219 | * p1022ds_set_sysfs_monitor_port: set the monitor port for sysfs | ||
220 | */ | ||
221 | int p1022ds_set_sysfs_monitor_port(int val) | ||
222 | { | ||
223 | return val < 2 ? val : 0; | ||
224 | } | ||
225 | |||
226 | #endif | ||
29 | 227 | ||
30 | void __init p1022_ds_pic_init(void) | 228 | void __init p1022_ds_pic_init(void) |
31 | { | 229 | { |
@@ -92,6 +290,15 @@ static void __init p1022_ds_setup_arch(void) | |||
92 | } | 290 | } |
93 | #endif | 291 | #endif |
94 | 292 | ||
293 | #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) | ||
294 | diu_ops.get_pixel_format = p1022ds_get_pixel_format; | ||
295 | diu_ops.set_gamma_table = p1022ds_set_gamma_table; | ||
296 | diu_ops.set_monitor_port = p1022ds_set_monitor_port; | ||
297 | diu_ops.set_pixel_clock = p1022ds_set_pixel_clock; | ||
298 | diu_ops.show_monitor_port = p1022ds_show_monitor_port; | ||
299 | diu_ops.set_sysfs_monitor_port = p1022ds_set_sysfs_monitor_port; | ||
300 | #endif | ||
301 | |||
95 | #ifdef CONFIG_SMP | 302 | #ifdef CONFIG_SMP |
96 | mpc85xx_smp_init(); | 303 | mpc85xx_smp_init(); |
97 | #endif | 304 | #endif |
@@ -112,6 +319,8 @@ static struct of_device_id __initdata p1022_ds_ids[] = { | |||
112 | { .compatible = "soc", }, | 319 | { .compatible = "soc", }, |
113 | { .compatible = "simple-bus", }, | 320 | { .compatible = "simple-bus", }, |
114 | { .compatible = "gianfar", }, | 321 | { .compatible = "gianfar", }, |
322 | /* So that the DMA channel nodes can be probed individually: */ | ||
323 | { .compatible = "fsl,eloplus-dma", }, | ||
115 | {}, | 324 | {}, |
116 | }; | 325 | }; |
117 | 326 | ||
diff --git a/arch/powerpc/platforms/85xx/p3041_ds.c b/arch/powerpc/platforms/85xx/p3041_ds.c new file mode 100644 index 000000000000..0ed52e18298c --- /dev/null +++ b/arch/powerpc/platforms/85xx/p3041_ds.c | |||
@@ -0,0 +1,64 @@ | |||
1 | /* | ||
2 | * P3041 DS Setup | ||
3 | * | ||
4 | * Maintained by Kumar Gala (see MAINTAINERS for contact information) | ||
5 | * | ||
6 | * Copyright 2009-2010 Freescale Semiconductor Inc. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/kdev_t.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/phy.h> | ||
20 | |||
21 | #include <asm/system.h> | ||
22 | #include <asm/time.h> | ||
23 | #include <asm/machdep.h> | ||
24 | #include <asm/pci-bridge.h> | ||
25 | #include <mm/mmu_decl.h> | ||
26 | #include <asm/prom.h> | ||
27 | #include <asm/udbg.h> | ||
28 | #include <asm/mpic.h> | ||
29 | |||
30 | #include <linux/of_platform.h> | ||
31 | #include <sysdev/fsl_soc.h> | ||
32 | #include <sysdev/fsl_pci.h> | ||
33 | |||
34 | #include "corenet_ds.h" | ||
35 | |||
36 | /* | ||
37 | * Called very early, device-tree isn't unflattened | ||
38 | */ | ||
39 | static int __init p3041_ds_probe(void) | ||
40 | { | ||
41 | unsigned long root = of_get_flat_dt_root(); | ||
42 | |||
43 | return of_flat_dt_is_compatible(root, "fsl,P3041DS"); | ||
44 | } | ||
45 | |||
46 | define_machine(p3041_ds) { | ||
47 | .name = "P3041 DS", | ||
48 | .probe = p3041_ds_probe, | ||
49 | .setup_arch = corenet_ds_setup_arch, | ||
50 | .init_IRQ = corenet_ds_pic_init, | ||
51 | #ifdef CONFIG_PCI | ||
52 | .pcibios_fixup_bus = fsl_pcibios_fixup_bus, | ||
53 | #endif | ||
54 | .get_irq = mpic_get_coreint_irq, | ||
55 | .restart = fsl_rstcr_restart, | ||
56 | .calibrate_decr = generic_calibrate_decr, | ||
57 | .progress = udbg_progress, | ||
58 | }; | ||
59 | |||
60 | machine_device_initcall(p3041_ds, corenet_ds_publish_devices); | ||
61 | |||
62 | #ifdef CONFIG_SWIOTLB | ||
63 | machine_arch_initcall(p3041_ds, swiotlb_setup_bus_notifier); | ||
64 | #endif | ||
diff --git a/arch/powerpc/platforms/85xx/p5020_ds.c b/arch/powerpc/platforms/85xx/p5020_ds.c new file mode 100644 index 000000000000..7467b712ee00 --- /dev/null +++ b/arch/powerpc/platforms/85xx/p5020_ds.c | |||
@@ -0,0 +1,69 @@ | |||
1 | /* | ||
2 | * P5020 DS Setup | ||
3 | * | ||
4 | * Maintained by Kumar Gala (see MAINTAINERS for contact information) | ||
5 | * | ||
6 | * Copyright 2009-2010 Freescale Semiconductor Inc. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/kdev_t.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/phy.h> | ||
20 | |||
21 | #include <asm/system.h> | ||
22 | #include <asm/time.h> | ||
23 | #include <asm/machdep.h> | ||
24 | #include <asm/pci-bridge.h> | ||
25 | #include <mm/mmu_decl.h> | ||
26 | #include <asm/prom.h> | ||
27 | #include <asm/udbg.h> | ||
28 | #include <asm/mpic.h> | ||
29 | |||
30 | #include <linux/of_platform.h> | ||
31 | #include <sysdev/fsl_soc.h> | ||
32 | #include <sysdev/fsl_pci.h> | ||
33 | |||
34 | #include "corenet_ds.h" | ||
35 | |||
36 | /* | ||
37 | * Called very early, device-tree isn't unflattened | ||
38 | */ | ||
39 | static int __init p5020_ds_probe(void) | ||
40 | { | ||
41 | unsigned long root = of_get_flat_dt_root(); | ||
42 | |||
43 | return of_flat_dt_is_compatible(root, "fsl,P5020DS"); | ||
44 | } | ||
45 | |||
46 | define_machine(p5020_ds) { | ||
47 | .name = "P5020 DS", | ||
48 | .probe = p5020_ds_probe, | ||
49 | .setup_arch = corenet_ds_setup_arch, | ||
50 | .init_IRQ = corenet_ds_pic_init, | ||
51 | #ifdef CONFIG_PCI | ||
52 | .pcibios_fixup_bus = fsl_pcibios_fixup_bus, | ||
53 | #endif | ||
54 | /* coreint doesn't play nice with lazy EE, use legacy mpic for now */ | ||
55 | #ifdef CONFIG_PPC64 | ||
56 | .get_irq = mpic_get_irq, | ||
57 | #else | ||
58 | .get_irq = mpic_get_coreint_irq, | ||
59 | #endif | ||
60 | .restart = fsl_rstcr_restart, | ||
61 | .calibrate_decr = generic_calibrate_decr, | ||
62 | .progress = udbg_progress, | ||
63 | }; | ||
64 | |||
65 | machine_device_initcall(p5020_ds, corenet_ds_publish_devices); | ||
66 | |||
67 | #ifdef CONFIG_SWIOTLB | ||
68 | machine_arch_initcall(p5020_ds, swiotlb_setup_bus_notifier); | ||
69 | #endif | ||
diff --git a/arch/powerpc/platforms/85xx/sbc8560.c b/arch/powerpc/platforms/85xx/sbc8560.c index a5ad1c7794bf..d2dfd465fbf6 100644 --- a/arch/powerpc/platforms/85xx/sbc8560.c +++ b/arch/powerpc/platforms/85xx/sbc8560.c | |||
@@ -41,12 +41,13 @@ | |||
41 | 41 | ||
42 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) | 42 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) |
43 | { | 43 | { |
44 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
44 | int cascade_irq; | 45 | int cascade_irq; |
45 | 46 | ||
46 | while ((cascade_irq = cpm2_get_irq()) >= 0) | 47 | while ((cascade_irq = cpm2_get_irq()) >= 0) |
47 | generic_handle_irq(cascade_irq); | 48 | generic_handle_irq(cascade_irq); |
48 | 49 | ||
49 | desc->chip->eoi(irq); | 50 | chip->irq_eoi(&desc->irq_data); |
50 | } | 51 | } |
51 | 52 | ||
52 | #endif /* CONFIG_CPM2 */ | 53 | #endif /* CONFIG_CPM2 */ |
@@ -91,7 +92,7 @@ static void __init sbc8560_pic_init(void) | |||
91 | 92 | ||
92 | cpm2_pic_init(np); | 93 | cpm2_pic_init(np); |
93 | of_node_put(np); | 94 | of_node_put(np); |
94 | set_irq_chained_handler(irq, cpm2_cascade); | 95 | irq_set_chained_handler(irq, cpm2_cascade); |
95 | #endif | 96 | #endif |
96 | } | 97 | } |
97 | 98 | ||
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c index a6b106557be4..d6a93a10c0f5 100644 --- a/arch/powerpc/platforms/85xx/smp.c +++ b/arch/powerpc/platforms/85xx/smp.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
17 | #include <linux/of.h> | 17 | #include <linux/of.h> |
18 | #include <linux/kexec.h> | 18 | #include <linux/kexec.h> |
19 | #include <linux/highmem.h> | ||
19 | 20 | ||
20 | #include <asm/machdep.h> | 21 | #include <asm/machdep.h> |
21 | #include <asm/pgtable.h> | 22 | #include <asm/pgtable.h> |
@@ -40,7 +41,7 @@ extern void __early_start(void); | |||
40 | #define NUM_BOOT_ENTRY 8 | 41 | #define NUM_BOOT_ENTRY 8 |
41 | #define SIZE_BOOT_ENTRY (NUM_BOOT_ENTRY * sizeof(u32)) | 42 | #define SIZE_BOOT_ENTRY (NUM_BOOT_ENTRY * sizeof(u32)) |
42 | 43 | ||
43 | static void __init | 44 | static int __init |
44 | smp_85xx_kick_cpu(int nr) | 45 | smp_85xx_kick_cpu(int nr) |
45 | { | 46 | { |
46 | unsigned long flags; | 47 | unsigned long flags; |
@@ -59,7 +60,7 @@ smp_85xx_kick_cpu(int nr) | |||
59 | 60 | ||
60 | if (cpu_rel_addr == NULL) { | 61 | if (cpu_rel_addr == NULL) { |
61 | printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr); | 62 | printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr); |
62 | return; | 63 | return -ENOENT; |
63 | } | 64 | } |
64 | 65 | ||
65 | /* | 66 | /* |
@@ -79,6 +80,7 @@ smp_85xx_kick_cpu(int nr) | |||
79 | local_irq_save(flags); | 80 | local_irq_save(flags); |
80 | 81 | ||
81 | out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr); | 82 | out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr); |
83 | #ifdef CONFIG_PPC32 | ||
82 | out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start)); | 84 | out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start)); |
83 | 85 | ||
84 | if (!ioremappable) | 86 | if (!ioremappable) |
@@ -88,6 +90,16 @@ smp_85xx_kick_cpu(int nr) | |||
88 | /* Wait a bit for the CPU to ack. */ | 90 | /* Wait a bit for the CPU to ack. */ |
89 | while ((__secondary_hold_acknowledge != nr) && (++n < 1000)) | 91 | while ((__secondary_hold_acknowledge != nr) && (++n < 1000)) |
90 | mdelay(1); | 92 | mdelay(1); |
93 | #else | ||
94 | smp_generic_kick_cpu(nr); | ||
95 | |||
96 | out_be64((u64 *)(bptr_vaddr + BOOT_ENTRY_ADDR_UPPER), | ||
97 | __pa((u64)*((unsigned long long *) generic_secondary_smp_init))); | ||
98 | |||
99 | if (!ioremappable) | ||
100 | flush_dcache_range((ulong)bptr_vaddr, | ||
101 | (ulong)(bptr_vaddr + SIZE_BOOT_ENTRY)); | ||
102 | #endif | ||
91 | 103 | ||
92 | local_irq_restore(flags); | 104 | local_irq_restore(flags); |
93 | 105 | ||
@@ -95,6 +107,8 @@ smp_85xx_kick_cpu(int nr) | |||
95 | iounmap(bptr_vaddr); | 107 | iounmap(bptr_vaddr); |
96 | 108 | ||
97 | pr_debug("waited %d msecs for CPU #%d.\n", n, nr); | 109 | pr_debug("waited %d msecs for CPU #%d.\n", n, nr); |
110 | |||
111 | return 0; | ||
98 | } | 112 | } |
99 | 113 | ||
100 | static void __init | 114 | static void __init |
@@ -114,19 +128,15 @@ struct smp_ops_t smp_85xx_ops = { | |||
114 | }; | 128 | }; |
115 | 129 | ||
116 | #ifdef CONFIG_KEXEC | 130 | #ifdef CONFIG_KEXEC |
117 | static int kexec_down_cpus = 0; | 131 | atomic_t kexec_down_cpus = ATOMIC_INIT(0); |
118 | 132 | ||
119 | void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary) | 133 | void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary) |
120 | { | 134 | { |
121 | mpic_teardown_this_cpu(1); | 135 | local_irq_disable(); |
122 | |||
123 | /* When crashing, this gets called on all CPU's we only | ||
124 | * take down the non-boot cpus */ | ||
125 | if (smp_processor_id() != boot_cpuid) | ||
126 | { | ||
127 | local_irq_disable(); | ||
128 | kexec_down_cpus++; | ||
129 | 136 | ||
137 | if (secondary) { | ||
138 | atomic_inc(&kexec_down_cpus); | ||
139 | /* loop forever */ | ||
130 | while (1); | 140 | while (1); |
131 | } | 141 | } |
132 | } | 142 | } |
@@ -137,16 +147,65 @@ static void mpc85xx_smp_kexec_down(void *arg) | |||
137 | ppc_md.kexec_cpu_down(0,1); | 147 | ppc_md.kexec_cpu_down(0,1); |
138 | } | 148 | } |
139 | 149 | ||
140 | static void mpc85xx_smp_machine_kexec(struct kimage *image) | 150 | static void map_and_flush(unsigned long paddr) |
141 | { | 151 | { |
142 | int timeout = 2000; | 152 | struct page *page = pfn_to_page(paddr >> PAGE_SHIFT); |
153 | unsigned long kaddr = (unsigned long)kmap(page); | ||
154 | |||
155 | flush_dcache_range(kaddr, kaddr + PAGE_SIZE); | ||
156 | kunmap(page); | ||
157 | } | ||
158 | |||
159 | /** | ||
160 | * Before we reset the other cores, we need to flush relevant cache | ||
161 | * out to memory so we don't get anything corrupted, some of these flushes | ||
162 | * are performed out of an overabundance of caution as interrupts are not | ||
163 | * disabled yet and we can switch cores | ||
164 | */ | ||
165 | static void mpc85xx_smp_flush_dcache_kexec(struct kimage *image) | ||
166 | { | ||
167 | kimage_entry_t *ptr, entry; | ||
168 | unsigned long paddr; | ||
143 | int i; | 169 | int i; |
144 | 170 | ||
145 | set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid)); | 171 | if (image->type == KEXEC_TYPE_DEFAULT) { |
172 | /* normal kexec images are stored in temporary pages */ | ||
173 | for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); | ||
174 | ptr = (entry & IND_INDIRECTION) ? | ||
175 | phys_to_virt(entry & PAGE_MASK) : ptr + 1) { | ||
176 | if (!(entry & IND_DESTINATION)) { | ||
177 | map_and_flush(entry); | ||
178 | } | ||
179 | } | ||
180 | /* flush out last IND_DONE page */ | ||
181 | map_and_flush(entry); | ||
182 | } else { | ||
183 | /* crash type kexec images are copied to the crash region */ | ||
184 | for (i = 0; i < image->nr_segments; i++) { | ||
185 | struct kexec_segment *seg = &image->segment[i]; | ||
186 | for (paddr = seg->mem; paddr < seg->mem + seg->memsz; | ||
187 | paddr += PAGE_SIZE) { | ||
188 | map_and_flush(paddr); | ||
189 | } | ||
190 | } | ||
191 | } | ||
192 | |||
193 | /* also flush the kimage struct to be passed in as well */ | ||
194 | flush_dcache_range((unsigned long)image, | ||
195 | (unsigned long)image + sizeof(*image)); | ||
196 | } | ||
197 | |||
198 | static void mpc85xx_smp_machine_kexec(struct kimage *image) | ||
199 | { | ||
200 | int timeout = INT_MAX; | ||
201 | int i, num_cpus = num_present_cpus(); | ||
202 | |||
203 | mpc85xx_smp_flush_dcache_kexec(image); | ||
146 | 204 | ||
147 | smp_call_function(mpc85xx_smp_kexec_down, NULL, 0); | 205 | if (image->type == KEXEC_TYPE_DEFAULT) |
206 | smp_call_function(mpc85xx_smp_kexec_down, NULL, 0); | ||
148 | 207 | ||
149 | while ( (kexec_down_cpus != (num_online_cpus() - 1)) && | 208 | while ( (atomic_read(&kexec_down_cpus) != (num_cpus - 1)) && |
150 | ( timeout > 0 ) ) | 209 | ( timeout > 0 ) ) |
151 | { | 210 | { |
152 | timeout--; | 211 | timeout--; |
@@ -155,7 +214,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image) | |||
155 | if ( !timeout ) | 214 | if ( !timeout ) |
156 | printk(KERN_ERR "Unable to bring down secondary cpu(s)"); | 215 | printk(KERN_ERR "Unable to bring down secondary cpu(s)"); |
157 | 216 | ||
158 | for (i = 0; i < num_present_cpus(); i++) | 217 | for (i = 0; i < num_cpus; i++) |
159 | { | 218 | { |
160 | if ( i == smp_processor_id() ) continue; | 219 | if ( i == smp_processor_id() ) continue; |
161 | mpic_reset_core(i); | 220 | mpic_reset_core(i); |
@@ -176,8 +235,10 @@ void __init mpc85xx_smp_init(void) | |||
176 | smp_85xx_ops.message_pass = smp_mpic_message_pass; | 235 | smp_85xx_ops.message_pass = smp_mpic_message_pass; |
177 | } | 236 | } |
178 | 237 | ||
179 | if (cpu_has_feature(CPU_FTR_DBELL)) | 238 | if (cpu_has_feature(CPU_FTR_DBELL)) { |
180 | smp_85xx_ops.message_pass = doorbell_message_pass; | 239 | smp_85xx_ops.message_pass = smp_muxed_ipi_message_pass; |
240 | smp_85xx_ops.cause_ipi = doorbell_cause_ipi; | ||
241 | } | ||
181 | 242 | ||
182 | BUG_ON(!smp_85xx_ops.message_pass); | 243 | BUG_ON(!smp_85xx_ops.message_pass); |
183 | 244 | ||
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c index d48527ffc425..12cb9bb2cc68 100644 --- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c +++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c | |||
@@ -48,8 +48,6 @@ static struct socrates_fpga_irq_info fpga_irqs[SOCRATES_FPGA_NUM_IRQS] = { | |||
48 | [8] = {0, IRQ_TYPE_LEVEL_HIGH}, | 48 | [8] = {0, IRQ_TYPE_LEVEL_HIGH}, |
49 | }; | 49 | }; |
50 | 50 | ||
51 | #define socrates_fpga_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) | ||
52 | |||
53 | static DEFINE_RAW_SPINLOCK(socrates_fpga_pic_lock); | 51 | static DEFINE_RAW_SPINLOCK(socrates_fpga_pic_lock); |
54 | 52 | ||
55 | static void __iomem *socrates_fpga_pic_iobase; | 53 | static void __iomem *socrates_fpga_pic_iobase; |
@@ -93,6 +91,7 @@ static inline unsigned int socrates_fpga_pic_get_irq(unsigned int irq) | |||
93 | 91 | ||
94 | void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc) | 92 | void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc) |
95 | { | 93 | { |
94 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
96 | unsigned int cascade_irq; | 95 | unsigned int cascade_irq; |
97 | 96 | ||
98 | /* | 97 | /* |
@@ -103,18 +102,15 @@ void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc) | |||
103 | 102 | ||
104 | if (cascade_irq != NO_IRQ) | 103 | if (cascade_irq != NO_IRQ) |
105 | generic_handle_irq(cascade_irq); | 104 | generic_handle_irq(cascade_irq); |
106 | desc->chip->eoi(irq); | 105 | chip->irq_eoi(&desc->irq_data); |
107 | |||
108 | } | 106 | } |
109 | 107 | ||
110 | static void socrates_fpga_pic_ack(unsigned int virq) | 108 | static void socrates_fpga_pic_ack(struct irq_data *d) |
111 | { | 109 | { |
112 | unsigned long flags; | 110 | unsigned long flags; |
113 | unsigned int hwirq, irq_line; | 111 | unsigned int irq_line, hwirq = irqd_to_hwirq(d); |
114 | uint32_t mask; | 112 | uint32_t mask; |
115 | 113 | ||
116 | hwirq = socrates_fpga_irq_to_hw(virq); | ||
117 | |||
118 | irq_line = fpga_irqs[hwirq].irq_line; | 114 | irq_line = fpga_irqs[hwirq].irq_line; |
119 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); | 115 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); |
120 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) | 116 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) |
@@ -124,15 +120,13 @@ static void socrates_fpga_pic_ack(unsigned int virq) | |||
124 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); | 120 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); |
125 | } | 121 | } |
126 | 122 | ||
127 | static void socrates_fpga_pic_mask(unsigned int virq) | 123 | static void socrates_fpga_pic_mask(struct irq_data *d) |
128 | { | 124 | { |
129 | unsigned long flags; | 125 | unsigned long flags; |
130 | unsigned int hwirq; | 126 | unsigned int hwirq = irqd_to_hwirq(d); |
131 | int irq_line; | 127 | int irq_line; |
132 | u32 mask; | 128 | u32 mask; |
133 | 129 | ||
134 | hwirq = socrates_fpga_irq_to_hw(virq); | ||
135 | |||
136 | irq_line = fpga_irqs[hwirq].irq_line; | 130 | irq_line = fpga_irqs[hwirq].irq_line; |
137 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); | 131 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); |
138 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) | 132 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) |
@@ -142,15 +136,13 @@ static void socrates_fpga_pic_mask(unsigned int virq) | |||
142 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); | 136 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); |
143 | } | 137 | } |
144 | 138 | ||
145 | static void socrates_fpga_pic_mask_ack(unsigned int virq) | 139 | static void socrates_fpga_pic_mask_ack(struct irq_data *d) |
146 | { | 140 | { |
147 | unsigned long flags; | 141 | unsigned long flags; |
148 | unsigned int hwirq; | 142 | unsigned int hwirq = irqd_to_hwirq(d); |
149 | int irq_line; | 143 | int irq_line; |
150 | u32 mask; | 144 | u32 mask; |
151 | 145 | ||
152 | hwirq = socrates_fpga_irq_to_hw(virq); | ||
153 | |||
154 | irq_line = fpga_irqs[hwirq].irq_line; | 146 | irq_line = fpga_irqs[hwirq].irq_line; |
155 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); | 147 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); |
156 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) | 148 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) |
@@ -161,15 +153,13 @@ static void socrates_fpga_pic_mask_ack(unsigned int virq) | |||
161 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); | 153 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); |
162 | } | 154 | } |
163 | 155 | ||
164 | static void socrates_fpga_pic_unmask(unsigned int virq) | 156 | static void socrates_fpga_pic_unmask(struct irq_data *d) |
165 | { | 157 | { |
166 | unsigned long flags; | 158 | unsigned long flags; |
167 | unsigned int hwirq; | 159 | unsigned int hwirq = irqd_to_hwirq(d); |
168 | int irq_line; | 160 | int irq_line; |
169 | u32 mask; | 161 | u32 mask; |
170 | 162 | ||
171 | hwirq = socrates_fpga_irq_to_hw(virq); | ||
172 | |||
173 | irq_line = fpga_irqs[hwirq].irq_line; | 163 | irq_line = fpga_irqs[hwirq].irq_line; |
174 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); | 164 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); |
175 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) | 165 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) |
@@ -179,15 +169,13 @@ static void socrates_fpga_pic_unmask(unsigned int virq) | |||
179 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); | 169 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); |
180 | } | 170 | } |
181 | 171 | ||
182 | static void socrates_fpga_pic_eoi(unsigned int virq) | 172 | static void socrates_fpga_pic_eoi(struct irq_data *d) |
183 | { | 173 | { |
184 | unsigned long flags; | 174 | unsigned long flags; |
185 | unsigned int hwirq; | 175 | unsigned int hwirq = irqd_to_hwirq(d); |
186 | int irq_line; | 176 | int irq_line; |
187 | u32 mask; | 177 | u32 mask; |
188 | 178 | ||
189 | hwirq = socrates_fpga_irq_to_hw(virq); | ||
190 | |||
191 | irq_line = fpga_irqs[hwirq].irq_line; | 179 | irq_line = fpga_irqs[hwirq].irq_line; |
192 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); | 180 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); |
193 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) | 181 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) |
@@ -197,16 +185,14 @@ static void socrates_fpga_pic_eoi(unsigned int virq) | |||
197 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); | 185 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); |
198 | } | 186 | } |
199 | 187 | ||
200 | static int socrates_fpga_pic_set_type(unsigned int virq, | 188 | static int socrates_fpga_pic_set_type(struct irq_data *d, |
201 | unsigned int flow_type) | 189 | unsigned int flow_type) |
202 | { | 190 | { |
203 | unsigned long flags; | 191 | unsigned long flags; |
204 | unsigned int hwirq; | 192 | unsigned int hwirq = irqd_to_hwirq(d); |
205 | int polarity; | 193 | int polarity; |
206 | u32 mask; | 194 | u32 mask; |
207 | 195 | ||
208 | hwirq = socrates_fpga_irq_to_hw(virq); | ||
209 | |||
210 | if (fpga_irqs[hwirq].type != IRQ_TYPE_NONE) | 196 | if (fpga_irqs[hwirq].type != IRQ_TYPE_NONE) |
211 | return -EINVAL; | 197 | return -EINVAL; |
212 | 198 | ||
@@ -233,21 +219,21 @@ static int socrates_fpga_pic_set_type(unsigned int virq, | |||
233 | 219 | ||
234 | static struct irq_chip socrates_fpga_pic_chip = { | 220 | static struct irq_chip socrates_fpga_pic_chip = { |
235 | .name = "FPGA-PIC", | 221 | .name = "FPGA-PIC", |
236 | .ack = socrates_fpga_pic_ack, | 222 | .irq_ack = socrates_fpga_pic_ack, |
237 | .mask = socrates_fpga_pic_mask, | 223 | .irq_mask = socrates_fpga_pic_mask, |
238 | .mask_ack = socrates_fpga_pic_mask_ack, | 224 | .irq_mask_ack = socrates_fpga_pic_mask_ack, |
239 | .unmask = socrates_fpga_pic_unmask, | 225 | .irq_unmask = socrates_fpga_pic_unmask, |
240 | .eoi = socrates_fpga_pic_eoi, | 226 | .irq_eoi = socrates_fpga_pic_eoi, |
241 | .set_type = socrates_fpga_pic_set_type, | 227 | .irq_set_type = socrates_fpga_pic_set_type, |
242 | }; | 228 | }; |
243 | 229 | ||
244 | static int socrates_fpga_pic_host_map(struct irq_host *h, unsigned int virq, | 230 | static int socrates_fpga_pic_host_map(struct irq_host *h, unsigned int virq, |
245 | irq_hw_number_t hwirq) | 231 | irq_hw_number_t hwirq) |
246 | { | 232 | { |
247 | /* All interrupts are LEVEL sensitive */ | 233 | /* All interrupts are LEVEL sensitive */ |
248 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 234 | irq_set_status_flags(virq, IRQ_LEVEL); |
249 | set_irq_chip_and_handler(virq, &socrates_fpga_pic_chip, | 235 | irq_set_chip_and_handler(virq, &socrates_fpga_pic_chip, |
250 | handle_fasteoi_irq); | 236 | handle_fasteoi_irq); |
251 | 237 | ||
252 | return 0; | 238 | return 0; |
253 | } | 239 | } |
@@ -308,8 +294,8 @@ void socrates_fpga_pic_init(struct device_node *pic) | |||
308 | pr_warning("FPGA PIC: can't get irq%d.\n", i); | 294 | pr_warning("FPGA PIC: can't get irq%d.\n", i); |
309 | continue; | 295 | continue; |
310 | } | 296 | } |
311 | set_irq_chained_handler(socrates_fpga_irqs[i], | 297 | irq_set_chained_handler(socrates_fpga_irqs[i], |
312 | socrates_fpga_pic_cascade); | 298 | socrates_fpga_pic_cascade); |
313 | } | 299 | } |
314 | 300 | ||
315 | socrates_fpga_pic_iobase = of_iomap(pic, 0); | 301 | socrates_fpga_pic_iobase = of_iomap(pic, 0); |
diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c index bc33d1859ae7..5387e9f06bdb 100644 --- a/arch/powerpc/platforms/85xx/stx_gp3.c +++ b/arch/powerpc/platforms/85xx/stx_gp3.c | |||
@@ -46,12 +46,13 @@ | |||
46 | 46 | ||
47 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) | 47 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) |
48 | { | 48 | { |
49 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
49 | int cascade_irq; | 50 | int cascade_irq; |
50 | 51 | ||
51 | while ((cascade_irq = cpm2_get_irq()) >= 0) | 52 | while ((cascade_irq = cpm2_get_irq()) >= 0) |
52 | generic_handle_irq(cascade_irq); | 53 | generic_handle_irq(cascade_irq); |
53 | 54 | ||
54 | desc->chip->eoi(irq); | 55 | chip->irq_eoi(&desc->irq_data); |
55 | } | 56 | } |
56 | #endif /* CONFIG_CPM2 */ | 57 | #endif /* CONFIG_CPM2 */ |
57 | 58 | ||
@@ -101,7 +102,7 @@ static void __init stx_gp3_pic_init(void) | |||
101 | 102 | ||
102 | cpm2_pic_init(np); | 103 | cpm2_pic_init(np); |
103 | of_node_put(np); | 104 | of_node_put(np); |
104 | set_irq_chained_handler(irq, cpm2_cascade); | 105 | irq_set_chained_handler(irq, cpm2_cascade); |
105 | #endif | 106 | #endif |
106 | } | 107 | } |
107 | 108 | ||
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c index 8f29bbce5360..325de772725a 100644 --- a/arch/powerpc/platforms/85xx/tqm85xx.c +++ b/arch/powerpc/platforms/85xx/tqm85xx.c | |||
@@ -44,12 +44,13 @@ | |||
44 | 44 | ||
45 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) | 45 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) |
46 | { | 46 | { |
47 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
47 | int cascade_irq; | 48 | int cascade_irq; |
48 | 49 | ||
49 | while ((cascade_irq = cpm2_get_irq()) >= 0) | 50 | while ((cascade_irq = cpm2_get_irq()) >= 0) |
50 | generic_handle_irq(cascade_irq); | 51 | generic_handle_irq(cascade_irq); |
51 | 52 | ||
52 | desc->chip->eoi(irq); | 53 | chip->irq_eoi(&desc->irq_data); |
53 | } | 54 | } |
54 | #endif /* CONFIG_CPM2 */ | 55 | #endif /* CONFIG_CPM2 */ |
55 | 56 | ||
@@ -99,7 +100,7 @@ static void __init tqm85xx_pic_init(void) | |||
99 | 100 | ||
100 | cpm2_pic_init(np); | 101 | cpm2_pic_init(np); |
101 | of_node_put(np); | 102 | of_node_put(np); |
102 | set_irq_chained_handler(irq, cpm2_cascade); | 103 | irq_set_chained_handler(irq, cpm2_cascade); |
103 | #endif | 104 | #endif |
104 | } | 105 | } |
105 | 106 | ||
@@ -186,21 +187,21 @@ static int __init declare_of_platform_devices(void) | |||
186 | } | 187 | } |
187 | machine_device_initcall(tqm85xx, declare_of_platform_devices); | 188 | machine_device_initcall(tqm85xx, declare_of_platform_devices); |
188 | 189 | ||
190 | static const char *board[] __initdata = { | ||
191 | "tqc,tqm8540", | ||
192 | "tqc,tqm8541", | ||
193 | "tqc,tqm8548", | ||
194 | "tqc,tqm8555", | ||
195 | "tqc,tqm8560", | ||
196 | NULL | ||
197 | }; | ||
198 | |||
189 | /* | 199 | /* |
190 | * Called very early, device-tree isn't unflattened | 200 | * Called very early, device-tree isn't unflattened |
191 | */ | 201 | */ |
192 | static int __init tqm85xx_probe(void) | 202 | static int __init tqm85xx_probe(void) |
193 | { | 203 | { |
194 | unsigned long root = of_get_flat_dt_root(); | 204 | return of_flat_dt_match(of_get_flat_dt_root(), board); |
195 | |||
196 | if ((of_flat_dt_is_compatible(root, "tqc,tqm8540")) || | ||
197 | (of_flat_dt_is_compatible(root, "tqc,tqm8541")) || | ||
198 | (of_flat_dt_is_compatible(root, "tqc,tqm8548")) || | ||
199 | (of_flat_dt_is_compatible(root, "tqc,tqm8555")) || | ||
200 | (of_flat_dt_is_compatible(root, "tqc,tqm8560"))) | ||
201 | return 1; | ||
202 | |||
203 | return 0; | ||
204 | } | 205 | } |
205 | 206 | ||
206 | define_machine(tqm85xx) { | 207 | define_machine(tqm85xx) { |
diff --git a/arch/powerpc/platforms/86xx/gef_pic.c b/arch/powerpc/platforms/86xx/gef_pic.c index 6df9e2561c06..94594e58594c 100644 --- a/arch/powerpc/platforms/86xx/gef_pic.c +++ b/arch/powerpc/platforms/86xx/gef_pic.c | |||
@@ -46,8 +46,6 @@ | |||
46 | #define GEF_PIC_CPU0_MCP_MASK GEF_PIC_MCP_MASK(0) | 46 | #define GEF_PIC_CPU0_MCP_MASK GEF_PIC_MCP_MASK(0) |
47 | #define GEF_PIC_CPU1_MCP_MASK GEF_PIC_MCP_MASK(1) | 47 | #define GEF_PIC_CPU1_MCP_MASK GEF_PIC_MCP_MASK(1) |
48 | 48 | ||
49 | #define gef_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) | ||
50 | |||
51 | 49 | ||
52 | static DEFINE_RAW_SPINLOCK(gef_pic_lock); | 50 | static DEFINE_RAW_SPINLOCK(gef_pic_lock); |
53 | 51 | ||
@@ -95,6 +93,7 @@ static int gef_pic_cascade_irq; | |||
95 | 93 | ||
96 | void gef_pic_cascade(unsigned int irq, struct irq_desc *desc) | 94 | void gef_pic_cascade(unsigned int irq, struct irq_desc *desc) |
97 | { | 95 | { |
96 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
98 | unsigned int cascade_irq; | 97 | unsigned int cascade_irq; |
99 | 98 | ||
100 | /* | 99 | /* |
@@ -106,18 +105,15 @@ void gef_pic_cascade(unsigned int irq, struct irq_desc *desc) | |||
106 | if (cascade_irq != NO_IRQ) | 105 | if (cascade_irq != NO_IRQ) |
107 | generic_handle_irq(cascade_irq); | 106 | generic_handle_irq(cascade_irq); |
108 | 107 | ||
109 | desc->chip->eoi(irq); | 108 | chip->irq_eoi(&desc->irq_data); |
110 | |||
111 | } | 109 | } |
112 | 110 | ||
113 | static void gef_pic_mask(unsigned int virq) | 111 | static void gef_pic_mask(struct irq_data *d) |
114 | { | 112 | { |
115 | unsigned long flags; | 113 | unsigned long flags; |
116 | unsigned int hwirq; | 114 | unsigned int hwirq = irqd_to_hwirq(d); |
117 | u32 mask; | 115 | u32 mask; |
118 | 116 | ||
119 | hwirq = gef_irq_to_hw(virq); | ||
120 | |||
121 | raw_spin_lock_irqsave(&gef_pic_lock, flags); | 117 | raw_spin_lock_irqsave(&gef_pic_lock, flags); |
122 | mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0)); | 118 | mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0)); |
123 | mask &= ~(1 << hwirq); | 119 | mask &= ~(1 << hwirq); |
@@ -125,22 +121,20 @@ static void gef_pic_mask(unsigned int virq) | |||
125 | raw_spin_unlock_irqrestore(&gef_pic_lock, flags); | 121 | raw_spin_unlock_irqrestore(&gef_pic_lock, flags); |
126 | } | 122 | } |
127 | 123 | ||
128 | static void gef_pic_mask_ack(unsigned int virq) | 124 | static void gef_pic_mask_ack(struct irq_data *d) |
129 | { | 125 | { |
130 | /* Don't think we actually have to do anything to ack an interrupt, | 126 | /* Don't think we actually have to do anything to ack an interrupt, |
131 | * we just need to clear down the devices interrupt and it will go away | 127 | * we just need to clear down the devices interrupt and it will go away |
132 | */ | 128 | */ |
133 | gef_pic_mask(virq); | 129 | gef_pic_mask(d); |
134 | } | 130 | } |
135 | 131 | ||
136 | static void gef_pic_unmask(unsigned int virq) | 132 | static void gef_pic_unmask(struct irq_data *d) |
137 | { | 133 | { |
138 | unsigned long flags; | 134 | unsigned long flags; |
139 | unsigned int hwirq; | 135 | unsigned int hwirq = irqd_to_hwirq(d); |
140 | u32 mask; | 136 | u32 mask; |
141 | 137 | ||
142 | hwirq = gef_irq_to_hw(virq); | ||
143 | |||
144 | raw_spin_lock_irqsave(&gef_pic_lock, flags); | 138 | raw_spin_lock_irqsave(&gef_pic_lock, flags); |
145 | mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0)); | 139 | mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0)); |
146 | mask |= (1 << hwirq); | 140 | mask |= (1 << hwirq); |
@@ -150,9 +144,9 @@ static void gef_pic_unmask(unsigned int virq) | |||
150 | 144 | ||
151 | static struct irq_chip gef_pic_chip = { | 145 | static struct irq_chip gef_pic_chip = { |
152 | .name = "gefp", | 146 | .name = "gefp", |
153 | .mask = gef_pic_mask, | 147 | .irq_mask = gef_pic_mask, |
154 | .mask_ack = gef_pic_mask_ack, | 148 | .irq_mask_ack = gef_pic_mask_ack, |
155 | .unmask = gef_pic_unmask, | 149 | .irq_unmask = gef_pic_unmask, |
156 | }; | 150 | }; |
157 | 151 | ||
158 | 152 | ||
@@ -163,8 +157,8 @@ static int gef_pic_host_map(struct irq_host *h, unsigned int virq, | |||
163 | irq_hw_number_t hwirq) | 157 | irq_hw_number_t hwirq) |
164 | { | 158 | { |
165 | /* All interrupts are LEVEL sensitive */ | 159 | /* All interrupts are LEVEL sensitive */ |
166 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 160 | irq_set_status_flags(virq, IRQ_LEVEL); |
167 | set_irq_chip_and_handler(virq, &gef_pic_chip, handle_level_irq); | 161 | irq_set_chip_and_handler(virq, &gef_pic_chip, handle_level_irq); |
168 | 162 | ||
169 | return 0; | 163 | return 0; |
170 | } | 164 | } |
@@ -225,7 +219,7 @@ void __init gef_pic_init(struct device_node *np) | |||
225 | return; | 219 | return; |
226 | 220 | ||
227 | /* Chain with parent controller */ | 221 | /* Chain with parent controller */ |
228 | set_irq_chained_handler(gef_pic_cascade_irq, gef_pic_cascade); | 222 | irq_set_chained_handler(gef_pic_cascade_irq, gef_pic_cascade); |
229 | } | 223 | } |
230 | 224 | ||
231 | /* | 225 | /* |
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c index 018cc67be426..a896511690c2 100644 --- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c +++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c | |||
@@ -66,7 +66,7 @@ static void __init mpc8610_suspend_init(void) | |||
66 | return; | 66 | return; |
67 | } | 67 | } |
68 | 68 | ||
69 | ret = request_irq(irq, mpc8610_sw9_irq, 0, "sw9/wakeup", NULL); | 69 | ret = request_irq(irq, mpc8610_sw9_irq, 0, "sw9:wakeup", NULL); |
70 | if (ret) { | 70 | if (ret) { |
71 | pr_err("%s: can't request pixis event IRQ: %d\n", | 71 | pr_err("%s: can't request pixis event IRQ: %d\n", |
72 | __func__, ret); | 72 | __func__, ret); |
@@ -105,45 +105,77 @@ machine_device_initcall(mpc86xx_hpcd, mpc8610_declare_of_platform_devices); | |||
105 | 105 | ||
106 | #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) | 106 | #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) |
107 | 107 | ||
108 | static u32 get_busfreq(void) | 108 | /* |
109 | { | 109 | * DIU Area Descriptor |
110 | struct device_node *node; | 110 | * |
111 | 111 | * The MPC8610 reference manual shows the bits of the AD register in | |
112 | u32 fs_busfreq = 0; | 112 | * little-endian order, which causes the BLUE_C field to be split into two |
113 | node = of_find_node_by_type(NULL, "cpu"); | 113 | * parts. To simplify the definition of the MAKE_AD() macro, we define the |
114 | if (node) { | 114 | * fields in big-endian order and byte-swap the result. |
115 | unsigned int size; | 115 | * |
116 | const unsigned int *prop = | 116 | * So even though the registers don't look like they're in the |
117 | of_get_property(node, "bus-frequency", &size); | 117 | * same bit positions as they are on the P1022, the same value is written to |
118 | if (prop) | 118 | * the AD register on the MPC8610 and on the P1022. |
119 | fs_busfreq = *prop; | 119 | */ |
120 | of_node_put(node); | 120 | #define AD_BYTE_F 0x10000000 |
121 | }; | 121 | #define AD_ALPHA_C_MASK 0x0E000000 |
122 | return fs_busfreq; | 122 | #define AD_ALPHA_C_SHIFT 25 |
123 | } | 123 | #define AD_BLUE_C_MASK 0x01800000 |
124 | #define AD_BLUE_C_SHIFT 23 | ||
125 | #define AD_GREEN_C_MASK 0x00600000 | ||
126 | #define AD_GREEN_C_SHIFT 21 | ||
127 | #define AD_RED_C_MASK 0x00180000 | ||
128 | #define AD_RED_C_SHIFT 19 | ||
129 | #define AD_PALETTE 0x00040000 | ||
130 | #define AD_PIXEL_S_MASK 0x00030000 | ||
131 | #define AD_PIXEL_S_SHIFT 16 | ||
132 | #define AD_COMP_3_MASK 0x0000F000 | ||
133 | #define AD_COMP_3_SHIFT 12 | ||
134 | #define AD_COMP_2_MASK 0x00000F00 | ||
135 | #define AD_COMP_2_SHIFT 8 | ||
136 | #define AD_COMP_1_MASK 0x000000F0 | ||
137 | #define AD_COMP_1_SHIFT 4 | ||
138 | #define AD_COMP_0_MASK 0x0000000F | ||
139 | #define AD_COMP_0_SHIFT 0 | ||
140 | |||
141 | #define MAKE_AD(alpha, red, blue, green, size, c0, c1, c2, c3) \ | ||
142 | cpu_to_le32(AD_BYTE_F | (alpha << AD_ALPHA_C_SHIFT) | \ | ||
143 | (blue << AD_BLUE_C_SHIFT) | (green << AD_GREEN_C_SHIFT) | \ | ||
144 | (red << AD_RED_C_SHIFT) | (c3 << AD_COMP_3_SHIFT) | \ | ||
145 | (c2 << AD_COMP_2_SHIFT) | (c1 << AD_COMP_1_SHIFT) | \ | ||
146 | (c0 << AD_COMP_0_SHIFT) | (size << AD_PIXEL_S_SHIFT)) | ||
124 | 147 | ||
125 | unsigned int mpc8610hpcd_get_pixel_format(unsigned int bits_per_pixel, | 148 | unsigned int mpc8610hpcd_get_pixel_format(unsigned int bits_per_pixel, |
126 | int monitor_port) | 149 | int monitor_port) |
127 | { | 150 | { |
128 | static const unsigned long pixelformat[][3] = { | 151 | static const unsigned long pixelformat[][3] = { |
129 | {0x88882317, 0x88083218, 0x65052119}, | 152 | { |
130 | {0x88883316, 0x88082219, 0x65053118}, | 153 | MAKE_AD(3, 0, 2, 1, 3, 8, 8, 8, 8), |
154 | MAKE_AD(4, 2, 0, 1, 2, 8, 8, 8, 0), | ||
155 | MAKE_AD(4, 0, 2, 1, 1, 5, 6, 5, 0) | ||
156 | }, | ||
157 | { | ||
158 | MAKE_AD(3, 2, 0, 1, 3, 8, 8, 8, 8), | ||
159 | MAKE_AD(4, 0, 2, 1, 2, 8, 8, 8, 0), | ||
160 | MAKE_AD(4, 2, 0, 1, 1, 5, 6, 5, 0) | ||
161 | }, | ||
131 | }; | 162 | }; |
132 | unsigned int pix_fmt, arch_monitor; | 163 | unsigned int arch_monitor; |
133 | 164 | ||
165 | /* The DVI port is mis-wired on revision 1 of this board. */ | ||
134 | arch_monitor = ((*pixis_arch == 0x01) && (monitor_port == 0))? 0 : 1; | 166 | arch_monitor = ((*pixis_arch == 0x01) && (monitor_port == 0))? 0 : 1; |
135 | /* DVI port for board version 0x01 */ | 167 | |
136 | 168 | switch (bits_per_pixel) { | |
137 | if (bits_per_pixel == 32) | 169 | case 32: |
138 | pix_fmt = pixelformat[arch_monitor][0]; | 170 | return pixelformat[arch_monitor][0]; |
139 | else if (bits_per_pixel == 24) | 171 | case 24: |
140 | pix_fmt = pixelformat[arch_monitor][1]; | 172 | return pixelformat[arch_monitor][1]; |
141 | else if (bits_per_pixel == 16) | 173 | case 16: |
142 | pix_fmt = pixelformat[arch_monitor][2]; | 174 | return pixelformat[arch_monitor][2]; |
143 | else | 175 | default: |
144 | pix_fmt = pixelformat[1][0]; | 176 | pr_err("fsl-diu: unsupported pixel depth %u\n", bits_per_pixel); |
145 | 177 | return 0; | |
146 | return pix_fmt; | 178 | } |
147 | } | 179 | } |
148 | 180 | ||
149 | void mpc8610hpcd_set_gamma_table(int monitor_port, char *gamma_table_base) | 181 | void mpc8610hpcd_set_gamma_table(int monitor_port, char *gamma_table_base) |
@@ -190,8 +222,7 @@ void mpc8610hpcd_set_pixel_clock(unsigned int pixclock) | |||
190 | } | 222 | } |
191 | 223 | ||
192 | /* Pixel Clock configuration */ | 224 | /* Pixel Clock configuration */ |
193 | pr_debug("DIU: Bus Frequency = %d\n", get_busfreq()); | 225 | speed_ccb = fsl_get_sys_freq(); |
194 | speed_ccb = get_busfreq(); | ||
195 | 226 | ||
196 | /* Calculate the pixel clock with the smallest error */ | 227 | /* Calculate the pixel clock with the smallest error */ |
197 | /* calculate the following in steps to avoid overflow */ | 228 | /* calculate the following in steps to avoid overflow */ |
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c index eacea0e3fcc8..af09baee22cb 100644 --- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c +++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c | |||
@@ -56,7 +56,7 @@ smp_86xx_release_core(int nr) | |||
56 | } | 56 | } |
57 | 57 | ||
58 | 58 | ||
59 | static void __init | 59 | static int __init |
60 | smp_86xx_kick_cpu(int nr) | 60 | smp_86xx_kick_cpu(int nr) |
61 | { | 61 | { |
62 | unsigned int save_vector; | 62 | unsigned int save_vector; |
@@ -65,7 +65,7 @@ smp_86xx_kick_cpu(int nr) | |||
65 | unsigned int *vector = (unsigned int *)(KERNELBASE + 0x100); | 65 | unsigned int *vector = (unsigned int *)(KERNELBASE + 0x100); |
66 | 66 | ||
67 | if (nr < 0 || nr >= NR_CPUS) | 67 | if (nr < 0 || nr >= NR_CPUS) |
68 | return; | 68 | return -ENOENT; |
69 | 69 | ||
70 | pr_debug("smp_86xx_kick_cpu: kick CPU #%d\n", nr); | 70 | pr_debug("smp_86xx_kick_cpu: kick CPU #%d\n", nr); |
71 | 71 | ||
@@ -92,6 +92,8 @@ smp_86xx_kick_cpu(int nr) | |||
92 | local_irq_restore(flags); | 92 | local_irq_restore(flags); |
93 | 93 | ||
94 | pr_debug("wait CPU #%d for %d msecs.\n", nr, n); | 94 | pr_debug("wait CPU #%d for %d msecs.\n", nr, n); |
95 | |||
96 | return 0; | ||
95 | } | 97 | } |
96 | 98 | ||
97 | 99 | ||
diff --git a/arch/powerpc/platforms/86xx/pic.c b/arch/powerpc/platforms/86xx/pic.c index 668275d9e668..8ef8960abda6 100644 --- a/arch/powerpc/platforms/86xx/pic.c +++ b/arch/powerpc/platforms/86xx/pic.c | |||
@@ -19,10 +19,13 @@ | |||
19 | #ifdef CONFIG_PPC_I8259 | 19 | #ifdef CONFIG_PPC_I8259 |
20 | static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc) | 20 | static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc) |
21 | { | 21 | { |
22 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
22 | unsigned int cascade_irq = i8259_irq(); | 23 | unsigned int cascade_irq = i8259_irq(); |
24 | |||
23 | if (cascade_irq != NO_IRQ) | 25 | if (cascade_irq != NO_IRQ) |
24 | generic_handle_irq(cascade_irq); | 26 | generic_handle_irq(cascade_irq); |
25 | desc->chip->eoi(irq); | 27 | |
28 | chip->irq_eoi(&desc->irq_data); | ||
26 | } | 29 | } |
27 | #endif /* CONFIG_PPC_I8259 */ | 30 | #endif /* CONFIG_PPC_I8259 */ |
28 | 31 | ||
@@ -74,6 +77,6 @@ void __init mpc86xx_init_irq(void) | |||
74 | i8259_init(cascade_node, 0); | 77 | i8259_init(cascade_node, 0); |
75 | of_node_put(cascade_node); | 78 | of_node_put(cascade_node); |
76 | 79 | ||
77 | set_irq_chained_handler(cascade_irq, mpc86xx_8259_cascade); | 80 | irq_set_chained_handler(cascade_irq, mpc86xx_8259_cascade); |
78 | #endif | 81 | #endif |
79 | } | 82 | } |
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig index dd35ce081cff..ee56a9ea6a79 100644 --- a/arch/powerpc/platforms/8xx/Kconfig +++ b/arch/powerpc/platforms/8xx/Kconfig | |||
@@ -49,12 +49,6 @@ config PPC_ADDER875 | |||
49 | This enables support for the Analogue & Micro Adder 875 | 49 | This enables support for the Analogue & Micro Adder 875 |
50 | board. | 50 | board. |
51 | 51 | ||
52 | config PPC_MGSUVD | ||
53 | bool "MGSUVD" | ||
54 | select CPM1 | ||
55 | help | ||
56 | This enables support for the Keymile MGSUVD board. | ||
57 | |||
58 | config TQM8XX | 52 | config TQM8XX |
59 | bool "TQM8XX" | 53 | bool "TQM8XX" |
60 | select CPM1 | 54 | select CPM1 |
diff --git a/arch/powerpc/platforms/8xx/Makefile b/arch/powerpc/platforms/8xx/Makefile index a491fe6b94fc..76a81c3350a8 100644 --- a/arch/powerpc/platforms/8xx/Makefile +++ b/arch/powerpc/platforms/8xx/Makefile | |||
@@ -6,5 +6,4 @@ obj-$(CONFIG_MPC885ADS) += mpc885ads_setup.o | |||
6 | obj-$(CONFIG_MPC86XADS) += mpc86xads_setup.o | 6 | obj-$(CONFIG_MPC86XADS) += mpc86xads_setup.o |
7 | obj-$(CONFIG_PPC_EP88XC) += ep88xc.o | 7 | obj-$(CONFIG_PPC_EP88XC) += ep88xc.o |
8 | obj-$(CONFIG_PPC_ADDER875) += adder875.o | 8 | obj-$(CONFIG_PPC_ADDER875) += adder875.o |
9 | obj-$(CONFIG_PPC_MGSUVD) += mgsuvd.o | ||
10 | obj-$(CONFIG_TQM8XX) += tqm8xx_setup.o | 9 | obj-$(CONFIG_TQM8XX) += tqm8xx_setup.o |
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c index 60168c1f98fe..1e121088826f 100644 --- a/arch/powerpc/platforms/8xx/m8xx_setup.c +++ b/arch/powerpc/platforms/8xx/m8xx_setup.c | |||
@@ -150,7 +150,7 @@ void __init mpc8xx_calibrate_decr(void) | |||
150 | */ | 150 | */ |
151 | cpu = of_find_node_by_type(NULL, "cpu"); | 151 | cpu = of_find_node_by_type(NULL, "cpu"); |
152 | virq= irq_of_parse_and_map(cpu, 0); | 152 | virq= irq_of_parse_and_map(cpu, 0); |
153 | irq = irq_map[virq].hwirq; | 153 | irq = virq_to_hw(virq); |
154 | 154 | ||
155 | sys_tmr2 = immr_map(im_sit); | 155 | sys_tmr2 = immr_map(im_sit); |
156 | out_be16(&sys_tmr2->sit_tbscr, ((1 << (7 - (irq/2))) << 8) | | 156 | out_be16(&sys_tmr2->sit_tbscr, ((1 << (7 - (irq/2))) << 8) | |
@@ -218,15 +218,20 @@ void mpc8xx_restart(char *cmd) | |||
218 | 218 | ||
219 | static void cpm_cascade(unsigned int irq, struct irq_desc *desc) | 219 | static void cpm_cascade(unsigned int irq, struct irq_desc *desc) |
220 | { | 220 | { |
221 | struct irq_chip *chip; | ||
221 | int cascade_irq; | 222 | int cascade_irq; |
222 | 223 | ||
223 | if ((cascade_irq = cpm_get_irq()) >= 0) { | 224 | if ((cascade_irq = cpm_get_irq()) >= 0) { |
224 | struct irq_desc *cdesc = irq_to_desc(cascade_irq); | 225 | struct irq_desc *cdesc = irq_to_desc(cascade_irq); |
225 | 226 | ||
226 | generic_handle_irq(cascade_irq); | 227 | generic_handle_irq(cascade_irq); |
227 | cdesc->chip->eoi(cascade_irq); | 228 | |
229 | chip = irq_desc_get_chip(cdesc); | ||
230 | chip->irq_eoi(&cdesc->irq_data); | ||
228 | } | 231 | } |
229 | desc->chip->eoi(irq); | 232 | |
233 | chip = irq_desc_get_chip(desc); | ||
234 | chip->irq_eoi(&desc->irq_data); | ||
230 | } | 235 | } |
231 | 236 | ||
232 | /* Initialize the internal interrupt controllers. The number of | 237 | /* Initialize the internal interrupt controllers. The number of |
@@ -246,5 +251,5 @@ void __init mpc8xx_pics_init(void) | |||
246 | 251 | ||
247 | irq = cpm_pic_init(); | 252 | irq = cpm_pic_init(); |
248 | if (irq != NO_IRQ) | 253 | if (irq != NO_IRQ) |
249 | set_irq_chained_handler(irq, cpm_cascade); | 254 | irq_set_chained_handler(irq, cpm_cascade); |
250 | } | 255 | } |
diff --git a/arch/powerpc/platforms/8xx/mgsuvd.c b/arch/powerpc/platforms/8xx/mgsuvd.c deleted file mode 100644 index ca3cb071772c..000000000000 --- a/arch/powerpc/platforms/8xx/mgsuvd.c +++ /dev/null | |||
@@ -1,92 +0,0 @@ | |||
1 | /* | ||
2 | * | ||
3 | * Platform setup for the Keymile mgsuvd board | ||
4 | * | ||
5 | * Heiko Schocher <hs@denx.de> | ||
6 | * | ||
7 | * Copyright 2008 DENX Software Engineering GmbH | ||
8 | * | ||
9 | * This file is licensed under the terms of the GNU General Public License | ||
10 | * version 2. This program is licensed "as is" without any warranty of any | ||
11 | * kind, whether express or implied. | ||
12 | */ | ||
13 | |||
14 | #include <linux/ioport.h> | ||
15 | #include <linux/of_platform.h> | ||
16 | |||
17 | #include <asm/io.h> | ||
18 | #include <asm/machdep.h> | ||
19 | #include <asm/processor.h> | ||
20 | #include <asm/cpm1.h> | ||
21 | #include <asm/prom.h> | ||
22 | #include <asm/fs_pd.h> | ||
23 | |||
24 | #include "mpc8xx.h" | ||
25 | |||
26 | struct cpm_pin { | ||
27 | int port, pin, flags; | ||
28 | }; | ||
29 | |||
30 | static __initdata struct cpm_pin mgsuvd_pins[] = { | ||
31 | /* SMC1 */ | ||
32 | {CPM_PORTB, 24, CPM_PIN_INPUT}, /* RX */ | ||
33 | {CPM_PORTB, 25, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */ | ||
34 | |||
35 | /* SCC3 */ | ||
36 | {CPM_PORTA, 10, CPM_PIN_INPUT}, | ||
37 | {CPM_PORTA, 11, CPM_PIN_INPUT}, | ||
38 | {CPM_PORTA, 3, CPM_PIN_INPUT}, | ||
39 | {CPM_PORTA, 2, CPM_PIN_INPUT}, | ||
40 | {CPM_PORTC, 13, CPM_PIN_INPUT}, | ||
41 | }; | ||
42 | |||
43 | static void __init init_ioports(void) | ||
44 | { | ||
45 | int i; | ||
46 | |||
47 | for (i = 0; i < ARRAY_SIZE(mgsuvd_pins); i++) { | ||
48 | struct cpm_pin *pin = &mgsuvd_pins[i]; | ||
49 | cpm1_set_pin(pin->port, pin->pin, pin->flags); | ||
50 | } | ||
51 | |||
52 | setbits16(&mpc8xx_immr->im_ioport.iop_pcso, 0x300); | ||
53 | cpm1_clk_setup(CPM_CLK_SCC3, CPM_CLK5, CPM_CLK_RX); | ||
54 | cpm1_clk_setup(CPM_CLK_SCC3, CPM_CLK6, CPM_CLK_TX); | ||
55 | cpm1_clk_setup(CPM_CLK_SMC1, CPM_BRG1, CPM_CLK_RTX); | ||
56 | } | ||
57 | |||
58 | static void __init mgsuvd_setup_arch(void) | ||
59 | { | ||
60 | cpm_reset(); | ||
61 | init_ioports(); | ||
62 | } | ||
63 | |||
64 | static __initdata struct of_device_id of_bus_ids[] = { | ||
65 | { .compatible = "simple-bus" }, | ||
66 | {}, | ||
67 | }; | ||
68 | |||
69 | static int __init declare_of_platform_devices(void) | ||
70 | { | ||
71 | of_platform_bus_probe(NULL, of_bus_ids, NULL); | ||
72 | return 0; | ||
73 | } | ||
74 | machine_device_initcall(mgsuvd, declare_of_platform_devices); | ||
75 | |||
76 | static int __init mgsuvd_probe(void) | ||
77 | { | ||
78 | unsigned long root = of_get_flat_dt_root(); | ||
79 | return of_flat_dt_is_compatible(root, "keymile,mgsuvd"); | ||
80 | } | ||
81 | |||
82 | define_machine(mgsuvd) { | ||
83 | .name = "MGSUVD", | ||
84 | .probe = mgsuvd_probe, | ||
85 | .setup_arch = mgsuvd_setup_arch, | ||
86 | .init_IRQ = mpc8xx_pics_init, | ||
87 | .get_irq = mpc8xx_get_irq, | ||
88 | .restart = mpc8xx_restart, | ||
89 | .calibrate_decr = mpc8xx_calibrate_decr, | ||
90 | .set_rtc_time = mpc8xx_set_rtc_time, | ||
91 | .get_rtc_time = mpc8xx_get_rtc_time, | ||
92 | }; | ||
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index 81c9208025fa..f970ca2b180c 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig | |||
@@ -20,6 +20,17 @@ source "arch/powerpc/platforms/embedded6xx/Kconfig" | |||
20 | source "arch/powerpc/platforms/44x/Kconfig" | 20 | source "arch/powerpc/platforms/44x/Kconfig" |
21 | source "arch/powerpc/platforms/40x/Kconfig" | 21 | source "arch/powerpc/platforms/40x/Kconfig" |
22 | source "arch/powerpc/platforms/amigaone/Kconfig" | 22 | source "arch/powerpc/platforms/amigaone/Kconfig" |
23 | source "arch/powerpc/platforms/wsp/Kconfig" | ||
24 | |||
25 | config KVM_GUEST | ||
26 | bool "KVM Guest support" | ||
27 | default y | ||
28 | ---help--- | ||
29 | This option enables various optimizations for running under the KVM | ||
30 | hypervisor. Overhead for the kernel when not running inside KVM should | ||
31 | be minimal. | ||
32 | |||
33 | In case of doubt, say Y | ||
23 | 34 | ||
24 | config PPC_NATIVE | 35 | config PPC_NATIVE |
25 | bool | 36 | bool |
@@ -36,7 +47,7 @@ config PPC_OF_BOOT_TRAMPOLINE | |||
36 | help | 47 | help |
37 | Support from booting from Open Firmware or yaboot using an | 48 | Support from booting from Open Firmware or yaboot using an |
38 | Open Firmware client interface. This enables the kernel to | 49 | Open Firmware client interface. This enables the kernel to |
39 | communicate with open firmware to retrieve system informations | 50 | communicate with open firmware to retrieve system information |
40 | such as the device tree. | 51 | such as the device tree. |
41 | 52 | ||
42 | In case of doubt, say Y | 53 | In case of doubt, say Y |
@@ -46,16 +57,19 @@ config UDBG_RTAS_CONSOLE | |||
46 | depends on PPC_RTAS | 57 | depends on PPC_RTAS |
47 | default n | 58 | default n |
48 | 59 | ||
60 | config PPC_SMP_MUXED_IPI | ||
61 | bool | ||
62 | help | ||
63 | Select this opton if your platform supports SMP and your | ||
64 | interrupt controller provides less than 4 interrupts to each | ||
65 | cpu. This will enable the generic code to multiplex the 4 | ||
66 | messages on to one ipi. | ||
67 | |||
49 | config PPC_UDBG_BEAT | 68 | config PPC_UDBG_BEAT |
50 | bool "BEAT based debug console" | 69 | bool "BEAT based debug console" |
51 | depends on PPC_CELLEB | 70 | depends on PPC_CELLEB |
52 | default n | 71 | default n |
53 | 72 | ||
54 | config XICS | ||
55 | depends on PPC_PSERIES | ||
56 | bool | ||
57 | default y | ||
58 | |||
59 | config IPIC | 73 | config IPIC |
60 | bool | 74 | bool |
61 | default n | 75 | default n |
@@ -137,14 +151,27 @@ config PPC_970_NAP | |||
137 | bool | 151 | bool |
138 | default n | 152 | default n |
139 | 153 | ||
154 | config PPC_P7_NAP | ||
155 | bool | ||
156 | default n | ||
157 | |||
140 | config PPC_INDIRECT_IO | 158 | config PPC_INDIRECT_IO |
141 | bool | 159 | bool |
142 | select GENERIC_IOMAP | 160 | select GENERIC_IOMAP |
143 | default n | 161 | |
162 | config PPC_INDIRECT_PIO | ||
163 | bool | ||
164 | select PPC_INDIRECT_IO | ||
165 | |||
166 | config PPC_INDIRECT_MMIO | ||
167 | bool | ||
168 | select PPC_INDIRECT_IO | ||
169 | |||
170 | config PPC_IO_WORKAROUNDS | ||
171 | bool | ||
144 | 172 | ||
145 | config GENERIC_IOMAP | 173 | config GENERIC_IOMAP |
146 | bool | 174 | bool |
147 | default n | ||
148 | 175 | ||
149 | source "drivers/cpufreq/Kconfig" | 176 | source "drivers/cpufreq/Kconfig" |
150 | 177 | ||
@@ -303,13 +330,14 @@ config OF_RTC | |||
303 | source "arch/powerpc/sysdev/bestcomm/Kconfig" | 330 | source "arch/powerpc/sysdev/bestcomm/Kconfig" |
304 | 331 | ||
305 | config MPC8xxx_GPIO | 332 | config MPC8xxx_GPIO |
306 | bool "MPC8xxx GPIO support" | 333 | bool "MPC512x/MPC8xxx GPIO support" |
307 | depends on PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || FSL_SOC_BOOKE || PPC_86xx | 334 | depends on PPC_MPC512x || PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || \ |
335 | FSL_SOC_BOOKE || PPC_86xx | ||
308 | select GENERIC_GPIO | 336 | select GENERIC_GPIO |
309 | select ARCH_REQUIRE_GPIOLIB | 337 | select ARCH_REQUIRE_GPIOLIB |
310 | help | 338 | help |
311 | Say Y here if you're going to use hardware that connects to the | 339 | Say Y here if you're going to use hardware that connects to the |
312 | MPC831x/834x/837x/8572/8610 GPIOs. | 340 | MPC512x/831x/834x/837x/8572/8610 GPIOs. |
313 | 341 | ||
314 | config SIMPLE_GPIO | 342 | config SIMPLE_GPIO |
315 | bool "Support for simple, memory-mapped GPIO controllers" | 343 | bool "Support for simple, memory-mapped GPIO controllers" |
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index d361f8119b1e..2165b65876f9 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype | |||
@@ -73,6 +73,7 @@ config PPC_BOOK3S_64 | |||
73 | config PPC_BOOK3E_64 | 73 | config PPC_BOOK3E_64 |
74 | bool "Embedded processors" | 74 | bool "Embedded processors" |
75 | select PPC_FPU # Make it a choice ? | 75 | select PPC_FPU # Make it a choice ? |
76 | select PPC_SMP_MUXED_IPI | ||
76 | 77 | ||
77 | endchoice | 78 | endchoice |
78 | 79 | ||
@@ -107,6 +108,10 @@ config POWER4 | |||
107 | depends on PPC64 && PPC_BOOK3S | 108 | depends on PPC64 && PPC_BOOK3S |
108 | def_bool y | 109 | def_bool y |
109 | 110 | ||
111 | config PPC_A2 | ||
112 | bool | ||
113 | depends on PPC_BOOK3E_64 | ||
114 | |||
110 | config TUNE_CELL | 115 | config TUNE_CELL |
111 | bool "Optimize for Cell Broadband Engine" | 116 | bool "Optimize for Cell Broadband Engine" |
112 | depends on PPC64 && PPC_BOOK3S | 117 | depends on PPC64 && PPC_BOOK3S |
@@ -125,6 +130,7 @@ config 8xx | |||
125 | 130 | ||
126 | config E500 | 131 | config E500 |
127 | select FSL_EMB_PERFMON | 132 | select FSL_EMB_PERFMON |
133 | select PPC_FSL_BOOK3E | ||
128 | bool | 134 | bool |
129 | 135 | ||
130 | config PPC_E500MC | 136 | config PPC_E500MC |
@@ -166,9 +172,15 @@ config BOOKE | |||
166 | 172 | ||
167 | config FSL_BOOKE | 173 | config FSL_BOOKE |
168 | bool | 174 | bool |
169 | depends on E200 || E500 | 175 | depends on (E200 || E500) && PPC32 |
170 | default y | 176 | default y |
171 | 177 | ||
178 | # this is for common code between PPC32 & PPC64 FSL BOOKE | ||
179 | config PPC_FSL_BOOK3E | ||
180 | bool | ||
181 | select FSL_EMB_PERFMON | ||
182 | select PPC_SMP_MUXED_IPI | ||
183 | default y if FSL_BOOKE | ||
172 | 184 | ||
173 | config PTE_64BIT | 185 | config PTE_64BIT |
174 | bool | 186 | bool |
@@ -220,6 +232,24 @@ config VSX | |||
220 | 232 | ||
221 | If in doubt, say Y here. | 233 | If in doubt, say Y here. |
222 | 234 | ||
235 | config PPC_ICSWX | ||
236 | bool "Support for PowerPC icswx coprocessor instruction" | ||
237 | depends on POWER4 | ||
238 | default n | ||
239 | ---help--- | ||
240 | |||
241 | This option enables kernel support for the PowerPC Initiate | ||
242 | Coprocessor Store Word (icswx) coprocessor instruction on POWER7 | ||
243 | or newer processors. | ||
244 | |||
245 | This option is only useful if you have a processor that supports | ||
246 | the icswx coprocessor instruction. It does not have any effect | ||
247 | on processors without the icswx coprocessor instruction. | ||
248 | |||
249 | This option slightly increases kernel memory usage. | ||
250 | |||
251 | If in doubt, say N here. | ||
252 | |||
223 | config SPE | 253 | config SPE |
224 | bool "SPE Support" | 254 | bool "SPE Support" |
225 | depends on E200 || (E500 && !PPC_E500MC) | 255 | depends on E200 || (E500 && !PPC_E500MC) |
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile index fdb9f0b0d7a8..73e2116cfeed 100644 --- a/arch/powerpc/platforms/Makefile +++ b/arch/powerpc/platforms/Makefile | |||
@@ -22,3 +22,4 @@ obj-$(CONFIG_PPC_CELL) += cell/ | |||
22 | obj-$(CONFIG_PPC_PS3) += ps3/ | 22 | obj-$(CONFIG_PPC_PS3) += ps3/ |
23 | obj-$(CONFIG_EMBEDDED6xx) += embedded6xx/ | 23 | obj-$(CONFIG_EMBEDDED6xx) += embedded6xx/ |
24 | obj-$(CONFIG_AMIGAONE) += amigaone/ | 24 | obj-$(CONFIG_AMIGAONE) += amigaone/ |
25 | obj-$(CONFIG_PPC_WSP) += wsp/ | ||
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index 48cd7d2e1b75..67d5009b4e86 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig | |||
@@ -6,14 +6,17 @@ config PPC_CELL_COMMON | |||
6 | bool | 6 | bool |
7 | select PPC_CELL | 7 | select PPC_CELL |
8 | select PPC_DCR_MMIO | 8 | select PPC_DCR_MMIO |
9 | select PPC_INDIRECT_IO | 9 | select PPC_INDIRECT_PIO |
10 | select PPC_INDIRECT_MMIO | ||
10 | select PPC_NATIVE | 11 | select PPC_NATIVE |
11 | select PPC_RTAS | 12 | select PPC_RTAS |
13 | select IRQ_EDGE_EOI_HANDLER | ||
12 | 14 | ||
13 | config PPC_CELL_NATIVE | 15 | config PPC_CELL_NATIVE |
14 | bool | 16 | bool |
15 | select PPC_CELL_COMMON | 17 | select PPC_CELL_COMMON |
16 | select MPIC | 18 | select MPIC |
19 | select PPC_IO_WORKAROUNDS | ||
17 | select IBM_NEW_EMAC_EMAC4 | 20 | select IBM_NEW_EMAC_EMAC4 |
18 | select IBM_NEW_EMAC_RGMII | 21 | select IBM_NEW_EMAC_RGMII |
19 | select IBM_NEW_EMAC_ZMII #test only | 22 | select IBM_NEW_EMAC_ZMII #test only |
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile index 83fafe922641..a4a89350bcfc 100644 --- a/arch/powerpc/platforms/cell/Makefile +++ b/arch/powerpc/platforms/cell/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | obj-$(CONFIG_PPC_CELL_COMMON) += cbe_regs.o interrupt.o pervasive.o | 1 | obj-$(CONFIG_PPC_CELL_COMMON) += cbe_regs.o interrupt.o pervasive.o |
2 | 2 | ||
3 | obj-$(CONFIG_PPC_CELL_NATIVE) += iommu.o setup.o spider-pic.o \ | 3 | obj-$(CONFIG_PPC_CELL_NATIVE) += iommu.o setup.o spider-pic.o \ |
4 | pmu.o io-workarounds.o spider-pci.o | 4 | pmu.o spider-pci.o |
5 | obj-$(CONFIG_CBE_RAS) += ras.o | 5 | obj-$(CONFIG_CBE_RAS) += ras.o |
6 | 6 | ||
7 | obj-$(CONFIG_CBE_THERM) += cbe_thermal.o | 7 | obj-$(CONFIG_CBE_THERM) += cbe_thermal.o |
@@ -39,11 +39,10 @@ obj-y += celleb_setup.o \ | |||
39 | celleb_pci.o celleb_scc_epci.o \ | 39 | celleb_pci.o celleb_scc_epci.o \ |
40 | celleb_scc_pciex.o \ | 40 | celleb_scc_pciex.o \ |
41 | celleb_scc_uhc.o \ | 41 | celleb_scc_uhc.o \ |
42 | io-workarounds.o spider-pci.o \ | 42 | spider-pci.o beat.o beat_htab.o \ |
43 | beat.o beat_htab.o beat_hvCall.o \ | 43 | beat_hvCall.o beat_interrupt.o \ |
44 | beat_interrupt.o beat_iommu.o | 44 | beat_iommu.o |
45 | 45 | ||
46 | obj-$(CONFIG_SMP) += beat_smp.o | ||
47 | obj-$(CONFIG_PPC_UDBG_BEAT) += beat_udbg.o | 46 | obj-$(CONFIG_PPC_UDBG_BEAT) += beat_udbg.o |
48 | obj-$(CONFIG_SERIAL_TXX9) += celleb_scc_sio.o | 47 | obj-$(CONFIG_SERIAL_TXX9) += celleb_scc_sio.o |
49 | obj-$(CONFIG_SPU_BASE) += beat_spu_priv1.o | 48 | obj-$(CONFIG_SPU_BASE) += beat_spu_priv1.o |
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index 97085530aa63..ac06903e136a 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c | |||
@@ -93,7 +93,8 @@ static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val) | |||
93 | 93 | ||
94 | static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) | 94 | static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) |
95 | { | 95 | { |
96 | struct axon_msic *msic = get_irq_data(irq); | 96 | struct irq_chip *chip = irq_desc_get_chip(desc); |
97 | struct axon_msic *msic = irq_get_handler_data(irq); | ||
97 | u32 write_offset, msi; | 98 | u32 write_offset, msi; |
98 | int idx; | 99 | int idx; |
99 | int retry = 0; | 100 | int retry = 0; |
@@ -112,7 +113,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) | |||
112 | pr_devel("axon_msi: woff %x roff %x msi %x\n", | 113 | pr_devel("axon_msi: woff %x roff %x msi %x\n", |
113 | write_offset, msic->read_offset, msi); | 114 | write_offset, msic->read_offset, msi); |
114 | 115 | ||
115 | if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host) { | 116 | if (msi < NR_IRQS && irq_get_chip_data(msi) == msic) { |
116 | generic_handle_irq(msi); | 117 | generic_handle_irq(msi); |
117 | msic->fifo_virt[idx] = cpu_to_le32(0xffffffff); | 118 | msic->fifo_virt[idx] = cpu_to_le32(0xffffffff); |
118 | } else { | 119 | } else { |
@@ -145,7 +146,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) | |||
145 | msic->read_offset &= MSIC_FIFO_SIZE_MASK; | 146 | msic->read_offset &= MSIC_FIFO_SIZE_MASK; |
146 | } | 147 | } |
147 | 148 | ||
148 | desc->chip->eoi(irq); | 149 | chip->irq_eoi(&desc->irq_data); |
149 | } | 150 | } |
150 | 151 | ||
151 | static struct axon_msic *find_msi_translator(struct pci_dev *dev) | 152 | static struct axon_msic *find_msi_translator(struct pci_dev *dev) |
@@ -286,7 +287,7 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
286 | } | 287 | } |
287 | dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq); | 288 | dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq); |
288 | 289 | ||
289 | set_irq_msi(virq, entry); | 290 | irq_set_msi_desc(virq, entry); |
290 | msg.data = virq; | 291 | msg.data = virq; |
291 | write_msi_msg(virq, &msg); | 292 | write_msi_msg(virq, &msg); |
292 | } | 293 | } |
@@ -304,22 +305,23 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev) | |||
304 | if (entry->irq == NO_IRQ) | 305 | if (entry->irq == NO_IRQ) |
305 | continue; | 306 | continue; |
306 | 307 | ||
307 | set_irq_msi(entry->irq, NULL); | 308 | irq_set_msi_desc(entry->irq, NULL); |
308 | irq_dispose_mapping(entry->irq); | 309 | irq_dispose_mapping(entry->irq); |
309 | } | 310 | } |
310 | } | 311 | } |
311 | 312 | ||
312 | static struct irq_chip msic_irq_chip = { | 313 | static struct irq_chip msic_irq_chip = { |
313 | .mask = mask_msi_irq, | 314 | .irq_mask = mask_msi_irq, |
314 | .unmask = unmask_msi_irq, | 315 | .irq_unmask = unmask_msi_irq, |
315 | .shutdown = unmask_msi_irq, | 316 | .irq_shutdown = mask_msi_irq, |
316 | .name = "AXON-MSI", | 317 | .name = "AXON-MSI", |
317 | }; | 318 | }; |
318 | 319 | ||
319 | static int msic_host_map(struct irq_host *h, unsigned int virq, | 320 | static int msic_host_map(struct irq_host *h, unsigned int virq, |
320 | irq_hw_number_t hw) | 321 | irq_hw_number_t hw) |
321 | { | 322 | { |
322 | set_irq_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq); | 323 | irq_set_chip_data(virq, h->host_data); |
324 | irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq); | ||
323 | 325 | ||
324 | return 0; | 326 | return 0; |
325 | } | 327 | } |
@@ -328,7 +330,7 @@ static struct irq_host_ops msic_host_ops = { | |||
328 | .map = msic_host_map, | 330 | .map = msic_host_map, |
329 | }; | 331 | }; |
330 | 332 | ||
331 | static int axon_msi_shutdown(struct platform_device *device) | 333 | static void axon_msi_shutdown(struct platform_device *device) |
332 | { | 334 | { |
333 | struct axon_msic *msic = dev_get_drvdata(&device->dev); | 335 | struct axon_msic *msic = dev_get_drvdata(&device->dev); |
334 | u32 tmp; | 336 | u32 tmp; |
@@ -338,12 +340,9 @@ static int axon_msi_shutdown(struct platform_device *device) | |||
338 | tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); | 340 | tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); |
339 | tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; | 341 | tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; |
340 | msic_dcr_write(msic, MSIC_CTRL_REG, tmp); | 342 | msic_dcr_write(msic, MSIC_CTRL_REG, tmp); |
341 | |||
342 | return 0; | ||
343 | } | 343 | } |
344 | 344 | ||
345 | static int axon_msi_probe(struct platform_device *device, | 345 | static int axon_msi_probe(struct platform_device *device) |
346 | const struct of_device_id *device_id) | ||
347 | { | 346 | { |
348 | struct device_node *dn = device->dev.of_node; | 347 | struct device_node *dn = device->dev.of_node; |
349 | struct axon_msic *msic; | 348 | struct axon_msic *msic; |
@@ -402,8 +401,8 @@ static int axon_msi_probe(struct platform_device *device, | |||
402 | 401 | ||
403 | msic->irq_host->host_data = msic; | 402 | msic->irq_host->host_data = msic; |
404 | 403 | ||
405 | set_irq_data(virq, msic); | 404 | irq_set_handler_data(virq, msic); |
406 | set_irq_chained_handler(virq, axon_msi_cascade); | 405 | irq_set_chained_handler(virq, axon_msi_cascade); |
407 | pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq); | 406 | pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq); |
408 | 407 | ||
409 | /* Enable the MSIC hardware */ | 408 | /* Enable the MSIC hardware */ |
@@ -446,7 +445,7 @@ static const struct of_device_id axon_msi_device_id[] = { | |||
446 | {} | 445 | {} |
447 | }; | 446 | }; |
448 | 447 | ||
449 | static struct of_platform_driver axon_msi_driver = { | 448 | static struct platform_driver axon_msi_driver = { |
450 | .probe = axon_msi_probe, | 449 | .probe = axon_msi_probe, |
451 | .shutdown = axon_msi_shutdown, | 450 | .shutdown = axon_msi_shutdown, |
452 | .driver = { | 451 | .driver = { |
@@ -458,7 +457,7 @@ static struct of_platform_driver axon_msi_driver = { | |||
458 | 457 | ||
459 | static int __init axon_msi_init(void) | 458 | static int __init axon_msi_init(void) |
460 | { | 459 | { |
461 | return of_register_platform_driver(&axon_msi_driver); | 460 | return platform_driver_register(&axon_msi_driver); |
462 | } | 461 | } |
463 | subsys_initcall(axon_msi_init); | 462 | subsys_initcall(axon_msi_init); |
464 | 463 | ||
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c index 682af97321a8..55015e1f6939 100644 --- a/arch/powerpc/platforms/cell/beat_interrupt.c +++ b/arch/powerpc/platforms/cell/beat_interrupt.c | |||
@@ -61,59 +61,59 @@ static inline void beatic_update_irq_mask(unsigned int irq_plug) | |||
61 | panic("Failed to set mask IRQ!"); | 61 | panic("Failed to set mask IRQ!"); |
62 | } | 62 | } |
63 | 63 | ||
64 | static void beatic_mask_irq(unsigned int irq_plug) | 64 | static void beatic_mask_irq(struct irq_data *d) |
65 | { | 65 | { |
66 | unsigned long flags; | 66 | unsigned long flags; |
67 | 67 | ||
68 | raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); | 68 | raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); |
69 | beatic_irq_mask_enable[irq_plug/64] &= ~(1UL << (63 - (irq_plug%64))); | 69 | beatic_irq_mask_enable[d->irq/64] &= ~(1UL << (63 - (d->irq%64))); |
70 | beatic_update_irq_mask(irq_plug); | 70 | beatic_update_irq_mask(d->irq); |
71 | raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); | 71 | raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); |
72 | } | 72 | } |
73 | 73 | ||
74 | static void beatic_unmask_irq(unsigned int irq_plug) | 74 | static void beatic_unmask_irq(struct irq_data *d) |
75 | { | 75 | { |
76 | unsigned long flags; | 76 | unsigned long flags; |
77 | 77 | ||
78 | raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); | 78 | raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); |
79 | beatic_irq_mask_enable[irq_plug/64] |= 1UL << (63 - (irq_plug%64)); | 79 | beatic_irq_mask_enable[d->irq/64] |= 1UL << (63 - (d->irq%64)); |
80 | beatic_update_irq_mask(irq_plug); | 80 | beatic_update_irq_mask(d->irq); |
81 | raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); | 81 | raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); |
82 | } | 82 | } |
83 | 83 | ||
84 | static void beatic_ack_irq(unsigned int irq_plug) | 84 | static void beatic_ack_irq(struct irq_data *d) |
85 | { | 85 | { |
86 | unsigned long flags; | 86 | unsigned long flags; |
87 | 87 | ||
88 | raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); | 88 | raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); |
89 | beatic_irq_mask_ack[irq_plug/64] &= ~(1UL << (63 - (irq_plug%64))); | 89 | beatic_irq_mask_ack[d->irq/64] &= ~(1UL << (63 - (d->irq%64))); |
90 | beatic_update_irq_mask(irq_plug); | 90 | beatic_update_irq_mask(d->irq); |
91 | raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); | 91 | raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); |
92 | } | 92 | } |
93 | 93 | ||
94 | static void beatic_end_irq(unsigned int irq_plug) | 94 | static void beatic_end_irq(struct irq_data *d) |
95 | { | 95 | { |
96 | s64 err; | 96 | s64 err; |
97 | unsigned long flags; | 97 | unsigned long flags; |
98 | 98 | ||
99 | err = beat_downcount_of_interrupt(irq_plug); | 99 | err = beat_downcount_of_interrupt(d->irq); |
100 | if (err != 0) { | 100 | if (err != 0) { |
101 | if ((err & 0xFFFFFFFF) != 0xFFFFFFF5) /* -11: wrong state */ | 101 | if ((err & 0xFFFFFFFF) != 0xFFFFFFF5) /* -11: wrong state */ |
102 | panic("Failed to downcount IRQ! Error = %16llx", err); | 102 | panic("Failed to downcount IRQ! Error = %16llx", err); |
103 | 103 | ||
104 | printk(KERN_ERR "IRQ over-downcounted, plug %d\n", irq_plug); | 104 | printk(KERN_ERR "IRQ over-downcounted, plug %d\n", d->irq); |
105 | } | 105 | } |
106 | raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); | 106 | raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); |
107 | beatic_irq_mask_ack[irq_plug/64] |= 1UL << (63 - (irq_plug%64)); | 107 | beatic_irq_mask_ack[d->irq/64] |= 1UL << (63 - (d->irq%64)); |
108 | beatic_update_irq_mask(irq_plug); | 108 | beatic_update_irq_mask(d->irq); |
109 | raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); | 109 | raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); |
110 | } | 110 | } |
111 | 111 | ||
112 | static struct irq_chip beatic_pic = { | 112 | static struct irq_chip beatic_pic = { |
113 | .name = "CELL-BEAT", | 113 | .name = "CELL-BEAT", |
114 | .unmask = beatic_unmask_irq, | 114 | .irq_unmask = beatic_unmask_irq, |
115 | .mask = beatic_mask_irq, | 115 | .irq_mask = beatic_mask_irq, |
116 | .eoi = beatic_end_irq, | 116 | .irq_eoi = beatic_end_irq, |
117 | }; | 117 | }; |
118 | 118 | ||
119 | /* | 119 | /* |
@@ -136,29 +136,18 @@ static void beatic_pic_host_unmap(struct irq_host *h, unsigned int virq) | |||
136 | static int beatic_pic_host_map(struct irq_host *h, unsigned int virq, | 136 | static int beatic_pic_host_map(struct irq_host *h, unsigned int virq, |
137 | irq_hw_number_t hw) | 137 | irq_hw_number_t hw) |
138 | { | 138 | { |
139 | struct irq_desc *desc = irq_to_desc(virq); | ||
140 | int64_t err; | 139 | int64_t err; |
141 | 140 | ||
142 | err = beat_construct_and_connect_irq_plug(virq, hw); | 141 | err = beat_construct_and_connect_irq_plug(virq, hw); |
143 | if (err < 0) | 142 | if (err < 0) |
144 | return -EIO; | 143 | return -EIO; |
145 | 144 | ||
146 | desc->status |= IRQ_LEVEL; | 145 | irq_set_status_flags(virq, IRQ_LEVEL); |
147 | set_irq_chip_and_handler(virq, &beatic_pic, handle_fasteoi_irq); | 146 | irq_set_chip_and_handler(virq, &beatic_pic, handle_fasteoi_irq); |
148 | return 0; | 147 | return 0; |
149 | } | 148 | } |
150 | 149 | ||
151 | /* | 150 | /* |
152 | * Update binding hardware IRQ number (hw) and Virtuql | ||
153 | * IRQ number (virq). This is called only once for a given mapping. | ||
154 | */ | ||
155 | static void beatic_pic_host_remap(struct irq_host *h, unsigned int virq, | ||
156 | irq_hw_number_t hw) | ||
157 | { | ||
158 | beat_construct_and_connect_irq_plug(virq, hw); | ||
159 | } | ||
160 | |||
161 | /* | ||
162 | * Translate device-tree interrupt spec to irq_hw_number_t style (ulong), | 151 | * Translate device-tree interrupt spec to irq_hw_number_t style (ulong), |
163 | * to pass away to irq_create_mapping(). | 152 | * to pass away to irq_create_mapping(). |
164 | * | 153 | * |
@@ -185,7 +174,6 @@ static int beatic_pic_host_match(struct irq_host *h, struct device_node *np) | |||
185 | 174 | ||
186 | static struct irq_host_ops beatic_pic_host_ops = { | 175 | static struct irq_host_ops beatic_pic_host_ops = { |
187 | .map = beatic_pic_host_map, | 176 | .map = beatic_pic_host_map, |
188 | .remap = beatic_pic_host_remap, | ||
189 | .unmap = beatic_pic_host_unmap, | 177 | .unmap = beatic_pic_host_unmap, |
190 | .xlate = beatic_pic_host_xlate, | 178 | .xlate = beatic_pic_host_xlate, |
191 | .match = beatic_pic_host_match, | 179 | .match = beatic_pic_host_match, |
@@ -232,7 +220,7 @@ unsigned int beatic_get_irq(void) | |||
232 | 220 | ||
233 | ret = beatic_get_irq_plug(); | 221 | ret = beatic_get_irq_plug(); |
234 | if (ret != NO_IRQ) | 222 | if (ret != NO_IRQ) |
235 | beatic_ack_irq(ret); | 223 | beatic_ack_irq(irq_get_irq_data(ret)); |
236 | return ret; | 224 | return ret; |
237 | } | 225 | } |
238 | 226 | ||
@@ -258,22 +246,6 @@ void __init beatic_init_IRQ(void) | |||
258 | irq_set_default_host(beatic_host); | 246 | irq_set_default_host(beatic_host); |
259 | } | 247 | } |
260 | 248 | ||
261 | #ifdef CONFIG_SMP | ||
262 | |||
263 | /* Nullified to compile with SMP mode */ | ||
264 | void beatic_setup_cpu(int cpu) | ||
265 | { | ||
266 | } | ||
267 | |||
268 | void beatic_cause_IPI(int cpu, int mesg) | ||
269 | { | ||
270 | } | ||
271 | |||
272 | void beatic_request_IPIs(void) | ||
273 | { | ||
274 | } | ||
275 | #endif /* CONFIG_SMP */ | ||
276 | |||
277 | void beatic_deinit_IRQ(void) | 249 | void beatic_deinit_IRQ(void) |
278 | { | 250 | { |
279 | int i; | 251 | int i; |
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.h b/arch/powerpc/platforms/cell/beat_interrupt.h index b470fd0051f1..a7e52f91a078 100644 --- a/arch/powerpc/platforms/cell/beat_interrupt.h +++ b/arch/powerpc/platforms/cell/beat_interrupt.h | |||
@@ -24,9 +24,6 @@ | |||
24 | 24 | ||
25 | extern void beatic_init_IRQ(void); | 25 | extern void beatic_init_IRQ(void); |
26 | extern unsigned int beatic_get_irq(void); | 26 | extern unsigned int beatic_get_irq(void); |
27 | extern void beatic_cause_IPI(int cpu, int mesg); | ||
28 | extern void beatic_request_IPIs(void); | ||
29 | extern void beatic_setup_cpu(int); | ||
30 | extern void beatic_deinit_IRQ(void); | 27 | extern void beatic_deinit_IRQ(void); |
31 | 28 | ||
32 | #endif | 29 | #endif |
diff --git a/arch/powerpc/platforms/cell/beat_iommu.c b/arch/powerpc/platforms/cell/beat_iommu.c index beec405eb6f8..3ce685568935 100644 --- a/arch/powerpc/platforms/cell/beat_iommu.c +++ b/arch/powerpc/platforms/cell/beat_iommu.c | |||
@@ -76,7 +76,7 @@ static void __init celleb_init_direct_mapping(void) | |||
76 | 76 | ||
77 | static void celleb_dma_dev_setup(struct device *dev) | 77 | static void celleb_dma_dev_setup(struct device *dev) |
78 | { | 78 | { |
79 | dev->archdata.dma_ops = get_pci_dma_ops(); | 79 | set_dma_ops(dev, &dma_direct_ops); |
80 | set_dma_offset(dev, celleb_dma_direct_offset); | 80 | set_dma_offset(dev, celleb_dma_direct_offset); |
81 | } | 81 | } |
82 | 82 | ||
@@ -106,7 +106,6 @@ static struct notifier_block celleb_of_bus_notifier = { | |||
106 | static int __init celleb_init_iommu(void) | 106 | static int __init celleb_init_iommu(void) |
107 | { | 107 | { |
108 | celleb_init_direct_mapping(); | 108 | celleb_init_direct_mapping(); |
109 | set_pci_dma_ops(&dma_direct_ops); | ||
110 | ppc_md.pci_dma_dev_setup = celleb_pci_dma_dev_setup; | 109 | ppc_md.pci_dma_dev_setup = celleb_pci_dma_dev_setup; |
111 | bus_register_notifier(&platform_bus_type, &celleb_of_bus_notifier); | 110 | bus_register_notifier(&platform_bus_type, &celleb_of_bus_notifier); |
112 | 111 | ||
diff --git a/arch/powerpc/platforms/cell/beat_smp.c b/arch/powerpc/platforms/cell/beat_smp.c deleted file mode 100644 index 26efc204c47f..000000000000 --- a/arch/powerpc/platforms/cell/beat_smp.c +++ /dev/null | |||
@@ -1,124 +0,0 @@ | |||
1 | /* | ||
2 | * SMP support for Celleb platform. (Incomplete) | ||
3 | * | ||
4 | * (C) Copyright 2006 TOSHIBA CORPORATION | ||
5 | * | ||
6 | * This code is based on arch/powerpc/platforms/cell/smp.c: | ||
7 | * Dave Engebretsen, Peter Bergner, and | ||
8 | * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com | ||
9 | * Plus various changes from other IBM teams... | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2 of the License, or | ||
14 | * (at your option) any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License along | ||
22 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
23 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
24 | */ | ||
25 | |||
26 | #undef DEBUG | ||
27 | |||
28 | #include <linux/kernel.h> | ||
29 | #include <linux/smp.h> | ||
30 | #include <linux/interrupt.h> | ||
31 | #include <linux/init.h> | ||
32 | #include <linux/threads.h> | ||
33 | #include <linux/cpu.h> | ||
34 | |||
35 | #include <asm/irq.h> | ||
36 | #include <asm/smp.h> | ||
37 | #include <asm/machdep.h> | ||
38 | #include <asm/udbg.h> | ||
39 | |||
40 | #include "beat_interrupt.h" | ||
41 | |||
42 | #ifdef DEBUG | ||
43 | #define DBG(fmt...) udbg_printf(fmt) | ||
44 | #else | ||
45 | #define DBG(fmt...) | ||
46 | #endif | ||
47 | |||
48 | /* | ||
49 | * The primary thread of each non-boot processor is recorded here before | ||
50 | * smp init. | ||
51 | */ | ||
52 | /* static cpumask_t of_spin_map; */ | ||
53 | |||
54 | /** | ||
55 | * smp_startup_cpu() - start the given cpu | ||
56 | * | ||
57 | * At boot time, there is nothing to do for primary threads which were | ||
58 | * started from Open Firmware. For anything else, call RTAS with the | ||
59 | * appropriate start location. | ||
60 | * | ||
61 | * Returns: | ||
62 | * 0 - failure | ||
63 | * 1 - success | ||
64 | */ | ||
65 | static inline int __devinit smp_startup_cpu(unsigned int lcpu) | ||
66 | { | ||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | static void smp_beatic_message_pass(int target, int msg) | ||
71 | { | ||
72 | unsigned int i; | ||
73 | |||
74 | if (target < NR_CPUS) { | ||
75 | beatic_cause_IPI(target, msg); | ||
76 | } else { | ||
77 | for_each_online_cpu(i) { | ||
78 | if (target == MSG_ALL_BUT_SELF | ||
79 | && i == smp_processor_id()) | ||
80 | continue; | ||
81 | beatic_cause_IPI(i, msg); | ||
82 | } | ||
83 | } | ||
84 | } | ||
85 | |||
86 | static int __init smp_beatic_probe(void) | ||
87 | { | ||
88 | return cpus_weight(cpu_possible_map); | ||
89 | } | ||
90 | |||
91 | static void __devinit smp_beatic_setup_cpu(int cpu) | ||
92 | { | ||
93 | beatic_setup_cpu(cpu); | ||
94 | } | ||
95 | |||
96 | static void __devinit smp_celleb_kick_cpu(int nr) | ||
97 | { | ||
98 | BUG_ON(nr < 0 || nr >= NR_CPUS); | ||
99 | |||
100 | if (!smp_startup_cpu(nr)) | ||
101 | return; | ||
102 | } | ||
103 | |||
104 | static int smp_celleb_cpu_bootable(unsigned int nr) | ||
105 | { | ||
106 | return 1; | ||
107 | } | ||
108 | static struct smp_ops_t bpa_beatic_smp_ops = { | ||
109 | .message_pass = smp_beatic_message_pass, | ||
110 | .probe = smp_beatic_probe, | ||
111 | .kick_cpu = smp_celleb_kick_cpu, | ||
112 | .setup_cpu = smp_beatic_setup_cpu, | ||
113 | .cpu_bootable = smp_celleb_cpu_bootable, | ||
114 | }; | ||
115 | |||
116 | /* This is called very early */ | ||
117 | void __init smp_init_celleb(void) | ||
118 | { | ||
119 | DBG(" -> smp_init_celleb()\n"); | ||
120 | |||
121 | smp_ops = &bpa_beatic_smp_ops; | ||
122 | |||
123 | DBG(" <- smp_init_celleb()\n"); | ||
124 | } | ||
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c index dbc338f187a2..f3917e7a5b44 100644 --- a/arch/powerpc/platforms/cell/cbe_regs.c +++ b/arch/powerpc/platforms/cell/cbe_regs.c | |||
@@ -45,8 +45,8 @@ static struct cbe_thread_map | |||
45 | unsigned int cbe_id; | 45 | unsigned int cbe_id; |
46 | } cbe_thread_map[NR_CPUS]; | 46 | } cbe_thread_map[NR_CPUS]; |
47 | 47 | ||
48 | static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = CPU_MASK_NONE }; | 48 | static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = {CPU_BITS_NONE} }; |
49 | static cpumask_t cbe_first_online_cpu = CPU_MASK_NONE; | 49 | static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE }; |
50 | 50 | ||
51 | static struct cbe_regs_map *cbe_find_map(struct device_node *np) | 51 | static struct cbe_regs_map *cbe_find_map(struct device_node *np) |
52 | { | 52 | { |
@@ -159,7 +159,8 @@ EXPORT_SYMBOL_GPL(cbe_cpu_to_node); | |||
159 | 159 | ||
160 | u32 cbe_node_to_cpu(int node) | 160 | u32 cbe_node_to_cpu(int node) |
161 | { | 161 | { |
162 | return find_first_bit( (unsigned long *) &cbe_local_mask[node], sizeof(cpumask_t)); | 162 | return cpumask_first(&cbe_local_mask[node]); |
163 | |||
163 | } | 164 | } |
164 | EXPORT_SYMBOL_GPL(cbe_node_to_cpu); | 165 | EXPORT_SYMBOL_GPL(cbe_node_to_cpu); |
165 | 166 | ||
@@ -268,9 +269,9 @@ void __init cbe_regs_init(void) | |||
268 | thread->regs = map; | 269 | thread->regs = map; |
269 | thread->cbe_id = cbe_id; | 270 | thread->cbe_id = cbe_id; |
270 | map->be_node = thread->be_node; | 271 | map->be_node = thread->be_node; |
271 | cpu_set(i, cbe_local_mask[cbe_id]); | 272 | cpumask_set_cpu(i, &cbe_local_mask[cbe_id]); |
272 | if(thread->thread_id == 0) | 273 | if(thread->thread_id == 0) |
273 | cpu_set(i, cbe_first_online_cpu); | 274 | cpumask_set_cpu(i, &cbe_first_online_cpu); |
274 | } | 275 | } |
275 | } | 276 | } |
276 | 277 | ||
diff --git a/arch/powerpc/platforms/cell/celleb_pci.c b/arch/powerpc/platforms/cell/celleb_pci.c index 404d1fc04d59..5822141aa63f 100644 --- a/arch/powerpc/platforms/cell/celleb_pci.c +++ b/arch/powerpc/platforms/cell/celleb_pci.c | |||
@@ -41,7 +41,6 @@ | |||
41 | #include <asm/pci-bridge.h> | 41 | #include <asm/pci-bridge.h> |
42 | #include <asm/ppc-pci.h> | 42 | #include <asm/ppc-pci.h> |
43 | 43 | ||
44 | #include "io-workarounds.h" | ||
45 | #include "celleb_pci.h" | 44 | #include "celleb_pci.h" |
46 | 45 | ||
47 | #define MAX_PCI_DEVICES 32 | 46 | #define MAX_PCI_DEVICES 32 |
@@ -320,7 +319,7 @@ static int __init celleb_setup_fake_pci_device(struct device_node *node, | |||
320 | 319 | ||
321 | size = 256; | 320 | size = 256; |
322 | config = &private->fake_config[devno][fn]; | 321 | config = &private->fake_config[devno][fn]; |
323 | *config = alloc_maybe_bootmem(size, GFP_KERNEL); | 322 | *config = zalloc_maybe_bootmem(size, GFP_KERNEL); |
324 | if (*config == NULL) { | 323 | if (*config == NULL) { |
325 | printk(KERN_ERR "PCI: " | 324 | printk(KERN_ERR "PCI: " |
326 | "not enough memory for fake configuration space\n"); | 325 | "not enough memory for fake configuration space\n"); |
@@ -331,7 +330,7 @@ static int __init celleb_setup_fake_pci_device(struct device_node *node, | |||
331 | 330 | ||
332 | size = sizeof(struct celleb_pci_resource); | 331 | size = sizeof(struct celleb_pci_resource); |
333 | res = &private->res[devno][fn]; | 332 | res = &private->res[devno][fn]; |
334 | *res = alloc_maybe_bootmem(size, GFP_KERNEL); | 333 | *res = zalloc_maybe_bootmem(size, GFP_KERNEL); |
335 | if (*res == NULL) { | 334 | if (*res == NULL) { |
336 | printk(KERN_ERR | 335 | printk(KERN_ERR |
337 | "PCI: not enough memory for resource data space\n"); | 336 | "PCI: not enough memory for resource data space\n"); |
@@ -432,7 +431,7 @@ static int __init phb_set_bus_ranges(struct device_node *dev, | |||
432 | static void __init celleb_alloc_private_mem(struct pci_controller *hose) | 431 | static void __init celleb_alloc_private_mem(struct pci_controller *hose) |
433 | { | 432 | { |
434 | hose->private_data = | 433 | hose->private_data = |
435 | alloc_maybe_bootmem(sizeof(struct celleb_pci_private), | 434 | zalloc_maybe_bootmem(sizeof(struct celleb_pci_private), |
436 | GFP_KERNEL); | 435 | GFP_KERNEL); |
437 | } | 436 | } |
438 | 437 | ||
@@ -469,18 +468,6 @@ static struct of_device_id celleb_phb_match[] __initdata = { | |||
469 | }, | 468 | }, |
470 | }; | 469 | }; |
471 | 470 | ||
472 | static int __init celleb_io_workaround_init(struct pci_controller *phb, | ||
473 | struct celleb_phb_spec *phb_spec) | ||
474 | { | ||
475 | if (phb_spec->ops) { | ||
476 | iowa_register_bus(phb, phb_spec->ops, phb_spec->iowa_init, | ||
477 | phb_spec->iowa_data); | ||
478 | io_workaround_init(); | ||
479 | } | ||
480 | |||
481 | return 0; | ||
482 | } | ||
483 | |||
484 | int __init celleb_setup_phb(struct pci_controller *phb) | 471 | int __init celleb_setup_phb(struct pci_controller *phb) |
485 | { | 472 | { |
486 | struct device_node *dev = phb->dn; | 473 | struct device_node *dev = phb->dn; |
@@ -500,7 +487,11 @@ int __init celleb_setup_phb(struct pci_controller *phb) | |||
500 | if (rc) | 487 | if (rc) |
501 | return 1; | 488 | return 1; |
502 | 489 | ||
503 | return celleb_io_workaround_init(phb, phb_spec); | 490 | if (phb_spec->ops) |
491 | iowa_register_bus(phb, phb_spec->ops, | ||
492 | phb_spec->iowa_init, | ||
493 | phb_spec->iowa_data); | ||
494 | return 0; | ||
504 | } | 495 | } |
505 | 496 | ||
506 | int celleb_pci_probe_mode(struct pci_bus *bus) | 497 | int celleb_pci_probe_mode(struct pci_bus *bus) |
diff --git a/arch/powerpc/platforms/cell/celleb_pci.h b/arch/powerpc/platforms/cell/celleb_pci.h index 4cba1523ec50..a801fcc5f389 100644 --- a/arch/powerpc/platforms/cell/celleb_pci.h +++ b/arch/powerpc/platforms/cell/celleb_pci.h | |||
@@ -26,8 +26,9 @@ | |||
26 | #include <asm/pci-bridge.h> | 26 | #include <asm/pci-bridge.h> |
27 | #include <asm/prom.h> | 27 | #include <asm/prom.h> |
28 | #include <asm/ppc-pci.h> | 28 | #include <asm/ppc-pci.h> |
29 | #include <asm/io-workarounds.h> | ||
29 | 30 | ||
30 | #include "io-workarounds.h" | 31 | struct iowa_bus; |
31 | 32 | ||
32 | struct celleb_phb_spec { | 33 | struct celleb_phb_spec { |
33 | int (*setup)(struct device_node *, struct pci_controller *); | 34 | int (*setup)(struct device_node *, struct pci_controller *); |
diff --git a/arch/powerpc/platforms/cell/celleb_setup.c b/arch/powerpc/platforms/cell/celleb_setup.c index e53845579770..d58d9bae4b9b 100644 --- a/arch/powerpc/platforms/cell/celleb_setup.c +++ b/arch/powerpc/platforms/cell/celleb_setup.c | |||
@@ -128,10 +128,6 @@ static void __init celleb_setup_arch_beat(void) | |||
128 | spu_management_ops = &spu_management_of_ops; | 128 | spu_management_ops = &spu_management_of_ops; |
129 | #endif | 129 | #endif |
130 | 130 | ||
131 | #ifdef CONFIG_SMP | ||
132 | smp_init_celleb(); | ||
133 | #endif | ||
134 | |||
135 | celleb_setup_arch_common(); | 131 | celleb_setup_arch_common(); |
136 | } | 132 | } |
137 | 133 | ||
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c index 968c1c0b4d5b..d809836bcf5f 100644 --- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c +++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c | |||
@@ -39,8 +39,6 @@ struct spu_gov_info_struct { | |||
39 | }; | 39 | }; |
40 | static DEFINE_PER_CPU(struct spu_gov_info_struct, spu_gov_info); | 40 | static DEFINE_PER_CPU(struct spu_gov_info_struct, spu_gov_info); |
41 | 41 | ||
42 | static struct workqueue_struct *kspugov_wq; | ||
43 | |||
44 | static int calc_freq(struct spu_gov_info_struct *info) | 42 | static int calc_freq(struct spu_gov_info_struct *info) |
45 | { | 43 | { |
46 | int cpu; | 44 | int cpu; |
@@ -71,14 +69,14 @@ static void spu_gov_work(struct work_struct *work) | |||
71 | __cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H); | 69 | __cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H); |
72 | 70 | ||
73 | delay = usecs_to_jiffies(info->poll_int); | 71 | delay = usecs_to_jiffies(info->poll_int); |
74 | queue_delayed_work_on(info->policy->cpu, kspugov_wq, &info->work, delay); | 72 | schedule_delayed_work_on(info->policy->cpu, &info->work, delay); |
75 | } | 73 | } |
76 | 74 | ||
77 | static void spu_gov_init_work(struct spu_gov_info_struct *info) | 75 | static void spu_gov_init_work(struct spu_gov_info_struct *info) |
78 | { | 76 | { |
79 | int delay = usecs_to_jiffies(info->poll_int); | 77 | int delay = usecs_to_jiffies(info->poll_int); |
80 | INIT_DELAYED_WORK_DEFERRABLE(&info->work, spu_gov_work); | 78 | INIT_DELAYED_WORK_DEFERRABLE(&info->work, spu_gov_work); |
81 | queue_delayed_work_on(info->policy->cpu, kspugov_wq, &info->work, delay); | 79 | schedule_delayed_work_on(info->policy->cpu, &info->work, delay); |
82 | } | 80 | } |
83 | 81 | ||
84 | static void spu_gov_cancel_work(struct spu_gov_info_struct *info) | 82 | static void spu_gov_cancel_work(struct spu_gov_info_struct *info) |
@@ -152,27 +150,15 @@ static int __init spu_gov_init(void) | |||
152 | { | 150 | { |
153 | int ret; | 151 | int ret; |
154 | 152 | ||
155 | kspugov_wq = create_workqueue("kspugov"); | ||
156 | if (!kspugov_wq) { | ||
157 | printk(KERN_ERR "creation of kspugov failed\n"); | ||
158 | ret = -EFAULT; | ||
159 | goto out; | ||
160 | } | ||
161 | |||
162 | ret = cpufreq_register_governor(&spu_governor); | 153 | ret = cpufreq_register_governor(&spu_governor); |
163 | if (ret) { | 154 | if (ret) |
164 | printk(KERN_ERR "registration of governor failed\n"); | 155 | printk(KERN_ERR "registration of governor failed\n"); |
165 | destroy_workqueue(kspugov_wq); | ||
166 | goto out; | ||
167 | } | ||
168 | out: | ||
169 | return ret; | 156 | return ret; |
170 | } | 157 | } |
171 | 158 | ||
172 | static void __exit spu_gov_exit(void) | 159 | static void __exit spu_gov_exit(void) |
173 | { | 160 | { |
174 | cpufreq_unregister_governor(&spu_governor); | 161 | cpufreq_unregister_governor(&spu_governor); |
175 | destroy_workqueue(kspugov_wq); | ||
176 | } | 162 | } |
177 | 163 | ||
178 | 164 | ||
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 10eb1a443626..3e4eba603e6b 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c | |||
@@ -72,15 +72,15 @@ static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits) | |||
72 | return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit; | 72 | return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit; |
73 | } | 73 | } |
74 | 74 | ||
75 | static void iic_mask(unsigned int irq) | 75 | static void iic_mask(struct irq_data *d) |
76 | { | 76 | { |
77 | } | 77 | } |
78 | 78 | ||
79 | static void iic_unmask(unsigned int irq) | 79 | static void iic_unmask(struct irq_data *d) |
80 | { | 80 | { |
81 | } | 81 | } |
82 | 82 | ||
83 | static void iic_eoi(unsigned int irq) | 83 | static void iic_eoi(struct irq_data *d) |
84 | { | 84 | { |
85 | struct iic *iic = &__get_cpu_var(cpu_iic); | 85 | struct iic *iic = &__get_cpu_var(cpu_iic); |
86 | out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]); | 86 | out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]); |
@@ -89,19 +89,21 @@ static void iic_eoi(unsigned int irq) | |||
89 | 89 | ||
90 | static struct irq_chip iic_chip = { | 90 | static struct irq_chip iic_chip = { |
91 | .name = "CELL-IIC", | 91 | .name = "CELL-IIC", |
92 | .mask = iic_mask, | 92 | .irq_mask = iic_mask, |
93 | .unmask = iic_unmask, | 93 | .irq_unmask = iic_unmask, |
94 | .eoi = iic_eoi, | 94 | .irq_eoi = iic_eoi, |
95 | }; | 95 | }; |
96 | 96 | ||
97 | 97 | ||
98 | static void iic_ioexc_eoi(unsigned int irq) | 98 | static void iic_ioexc_eoi(struct irq_data *d) |
99 | { | 99 | { |
100 | } | 100 | } |
101 | 101 | ||
102 | static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc) | 102 | static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc) |
103 | { | 103 | { |
104 | struct cbe_iic_regs __iomem *node_iic = (void __iomem *)desc->handler_data; | 104 | struct irq_chip *chip = irq_desc_get_chip(desc); |
105 | struct cbe_iic_regs __iomem *node_iic = | ||
106 | (void __iomem *)irq_desc_get_handler_data(desc); | ||
105 | unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC; | 107 | unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC; |
106 | unsigned long bits, ack; | 108 | unsigned long bits, ack; |
107 | int cascade; | 109 | int cascade; |
@@ -128,15 +130,15 @@ static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc) | |||
128 | if (ack) | 130 | if (ack) |
129 | out_be64(&node_iic->iic_is, ack); | 131 | out_be64(&node_iic->iic_is, ack); |
130 | } | 132 | } |
131 | desc->chip->eoi(irq); | 133 | chip->irq_eoi(&desc->irq_data); |
132 | } | 134 | } |
133 | 135 | ||
134 | 136 | ||
135 | static struct irq_chip iic_ioexc_chip = { | 137 | static struct irq_chip iic_ioexc_chip = { |
136 | .name = "CELL-IOEX", | 138 | .name = "CELL-IOEX", |
137 | .mask = iic_mask, | 139 | .irq_mask = iic_mask, |
138 | .unmask = iic_unmask, | 140 | .irq_unmask = iic_unmask, |
139 | .eoi = iic_ioexc_eoi, | 141 | .irq_eoi = iic_ioexc_eoi, |
140 | }; | 142 | }; |
141 | 143 | ||
142 | /* Get an IRQ number from the pending state register of the IIC */ | 144 | /* Get an IRQ number from the pending state register of the IIC */ |
@@ -174,14 +176,14 @@ EXPORT_SYMBOL_GPL(iic_get_target_id); | |||
174 | #ifdef CONFIG_SMP | 176 | #ifdef CONFIG_SMP |
175 | 177 | ||
176 | /* Use the highest interrupt priorities for IPI */ | 178 | /* Use the highest interrupt priorities for IPI */ |
177 | static inline int iic_ipi_to_irq(int ipi) | 179 | static inline int iic_msg_to_irq(int msg) |
178 | { | 180 | { |
179 | return IIC_IRQ_TYPE_IPI + 0xf - ipi; | 181 | return IIC_IRQ_TYPE_IPI + 0xf - msg; |
180 | } | 182 | } |
181 | 183 | ||
182 | void iic_cause_IPI(int cpu, int mesg) | 184 | void iic_message_pass(int cpu, int msg) |
183 | { | 185 | { |
184 | out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - mesg) << 4); | 186 | out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4); |
185 | } | 187 | } |
186 | 188 | ||
187 | struct irq_host *iic_get_irq_host(int node) | 189 | struct irq_host *iic_get_irq_host(int node) |
@@ -190,38 +192,31 @@ struct irq_host *iic_get_irq_host(int node) | |||
190 | } | 192 | } |
191 | EXPORT_SYMBOL_GPL(iic_get_irq_host); | 193 | EXPORT_SYMBOL_GPL(iic_get_irq_host); |
192 | 194 | ||
193 | static irqreturn_t iic_ipi_action(int irq, void *dev_id) | 195 | static void iic_request_ipi(int msg) |
194 | { | ||
195 | int ipi = (int)(long)dev_id; | ||
196 | |||
197 | smp_message_recv(ipi); | ||
198 | |||
199 | return IRQ_HANDLED; | ||
200 | } | ||
201 | static void iic_request_ipi(int ipi, const char *name) | ||
202 | { | 196 | { |
203 | int virq; | 197 | int virq; |
204 | 198 | ||
205 | virq = irq_create_mapping(iic_host, iic_ipi_to_irq(ipi)); | 199 | virq = irq_create_mapping(iic_host, iic_msg_to_irq(msg)); |
206 | if (virq == NO_IRQ) { | 200 | if (virq == NO_IRQ) { |
207 | printk(KERN_ERR | 201 | printk(KERN_ERR |
208 | "iic: failed to map IPI %s\n", name); | 202 | "iic: failed to map IPI %s\n", smp_ipi_name[msg]); |
209 | return; | 203 | return; |
210 | } | 204 | } |
211 | if (request_irq(virq, iic_ipi_action, IRQF_DISABLED, name, | 205 | |
212 | (void *)(long)ipi)) | 206 | /* |
213 | printk(KERN_ERR | 207 | * If smp_request_message_ipi encounters an error it will notify |
214 | "iic: failed to request IPI %s\n", name); | 208 | * the error. If a message is not needed it will return non-zero. |
209 | */ | ||
210 | if (smp_request_message_ipi(virq, msg)) | ||
211 | irq_dispose_mapping(virq); | ||
215 | } | 212 | } |
216 | 213 | ||
217 | void iic_request_IPIs(void) | 214 | void iic_request_IPIs(void) |
218 | { | 215 | { |
219 | iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call"); | 216 | iic_request_ipi(PPC_MSG_CALL_FUNCTION); |
220 | iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched"); | 217 | iic_request_ipi(PPC_MSG_RESCHEDULE); |
221 | iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE, "IPI-call-single"); | 218 | iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE); |
222 | #ifdef CONFIG_DEBUGGER | 219 | iic_request_ipi(PPC_MSG_DEBUGGER_BREAK); |
223 | iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug"); | ||
224 | #endif /* CONFIG_DEBUGGER */ | ||
225 | } | 220 | } |
226 | 221 | ||
227 | #endif /* CONFIG_SMP */ | 222 | #endif /* CONFIG_SMP */ |
@@ -233,65 +228,19 @@ static int iic_host_match(struct irq_host *h, struct device_node *node) | |||
233 | "IBM,CBEA-Internal-Interrupt-Controller"); | 228 | "IBM,CBEA-Internal-Interrupt-Controller"); |
234 | } | 229 | } |
235 | 230 | ||
236 | extern int noirqdebug; | ||
237 | |||
238 | static void handle_iic_irq(unsigned int irq, struct irq_desc *desc) | ||
239 | { | ||
240 | raw_spin_lock(&desc->lock); | ||
241 | |||
242 | desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); | ||
243 | |||
244 | /* | ||
245 | * If we're currently running this IRQ, or its disabled, | ||
246 | * we shouldn't process the IRQ. Mark it pending, handle | ||
247 | * the necessary masking and go out | ||
248 | */ | ||
249 | if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) || | ||
250 | !desc->action)) { | ||
251 | desc->status |= IRQ_PENDING; | ||
252 | goto out_eoi; | ||
253 | } | ||
254 | |||
255 | kstat_incr_irqs_this_cpu(irq, desc); | ||
256 | |||
257 | /* Mark the IRQ currently in progress.*/ | ||
258 | desc->status |= IRQ_INPROGRESS; | ||
259 | |||
260 | do { | ||
261 | struct irqaction *action = desc->action; | ||
262 | irqreturn_t action_ret; | ||
263 | |||
264 | if (unlikely(!action)) | ||
265 | goto out_eoi; | ||
266 | |||
267 | desc->status &= ~IRQ_PENDING; | ||
268 | raw_spin_unlock(&desc->lock); | ||
269 | action_ret = handle_IRQ_event(irq, action); | ||
270 | if (!noirqdebug) | ||
271 | note_interrupt(irq, desc, action_ret); | ||
272 | raw_spin_lock(&desc->lock); | ||
273 | |||
274 | } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); | ||
275 | |||
276 | desc->status &= ~IRQ_INPROGRESS; | ||
277 | out_eoi: | ||
278 | desc->chip->eoi(irq); | ||
279 | raw_spin_unlock(&desc->lock); | ||
280 | } | ||
281 | |||
282 | static int iic_host_map(struct irq_host *h, unsigned int virq, | 231 | static int iic_host_map(struct irq_host *h, unsigned int virq, |
283 | irq_hw_number_t hw) | 232 | irq_hw_number_t hw) |
284 | { | 233 | { |
285 | switch (hw & IIC_IRQ_TYPE_MASK) { | 234 | switch (hw & IIC_IRQ_TYPE_MASK) { |
286 | case IIC_IRQ_TYPE_IPI: | 235 | case IIC_IRQ_TYPE_IPI: |
287 | set_irq_chip_and_handler(virq, &iic_chip, handle_percpu_irq); | 236 | irq_set_chip_and_handler(virq, &iic_chip, handle_percpu_irq); |
288 | break; | 237 | break; |
289 | case IIC_IRQ_TYPE_IOEXC: | 238 | case IIC_IRQ_TYPE_IOEXC: |
290 | set_irq_chip_and_handler(virq, &iic_ioexc_chip, | 239 | irq_set_chip_and_handler(virq, &iic_ioexc_chip, |
291 | handle_iic_irq); | 240 | handle_edge_eoi_irq); |
292 | break; | 241 | break; |
293 | default: | 242 | default: |
294 | set_irq_chip_and_handler(virq, &iic_chip, handle_iic_irq); | 243 | irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq); |
295 | } | 244 | } |
296 | return 0; | 245 | return 0; |
297 | } | 246 | } |
@@ -408,8 +357,8 @@ static int __init setup_iic(void) | |||
408 | * irq_data is a generic pointer that gets passed back | 357 | * irq_data is a generic pointer that gets passed back |
409 | * to us later, so the forced cast is fine. | 358 | * to us later, so the forced cast is fine. |
410 | */ | 359 | */ |
411 | set_irq_data(cascade, (void __force *)node_iic); | 360 | irq_set_handler_data(cascade, (void __force *)node_iic); |
412 | set_irq_chained_handler(cascade , iic_ioexc_cascade); | 361 | irq_set_chained_handler(cascade, iic_ioexc_cascade); |
413 | out_be64(&node_iic->iic_ir, | 362 | out_be64(&node_iic->iic_ir, |
414 | (1 << 12) /* priority */ | | 363 | (1 << 12) /* priority */ | |
415 | (node << 4) /* dest node */ | | 364 | (node << 4) /* dest node */ | |
diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h index 942dc39d6045..4f60ae6ca358 100644 --- a/arch/powerpc/platforms/cell/interrupt.h +++ b/arch/powerpc/platforms/cell/interrupt.h | |||
@@ -75,7 +75,7 @@ enum { | |||
75 | }; | 75 | }; |
76 | 76 | ||
77 | extern void iic_init_IRQ(void); | 77 | extern void iic_init_IRQ(void); |
78 | extern void iic_cause_IPI(int cpu, int mesg); | 78 | extern void iic_message_pass(int cpu, int msg); |
79 | extern void iic_request_IPIs(void); | 79 | extern void iic_request_IPIs(void); |
80 | extern void iic_setup_cpu(void); | 80 | extern void iic_setup_cpu(void); |
81 | 81 | ||
diff --git a/arch/powerpc/platforms/cell/io-workarounds.c b/arch/powerpc/platforms/cell/io-workarounds.c deleted file mode 100644 index 5c1118e31940..000000000000 --- a/arch/powerpc/platforms/cell/io-workarounds.c +++ /dev/null | |||
@@ -1,185 +0,0 @@ | |||
1 | /* | ||
2 | * Support PCI IO workaround | ||
3 | * | ||
4 | * Copyright (C) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org> | ||
5 | * IBM, Corp. | ||
6 | * (C) Copyright 2007-2008 TOSHIBA CORPORATION | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | #undef DEBUG | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | |||
16 | #include <asm/io.h> | ||
17 | #include <asm/machdep.h> | ||
18 | #include <asm/pgtable.h> | ||
19 | #include <asm/ppc-pci.h> | ||
20 | |||
21 | #include "io-workarounds.h" | ||
22 | |||
23 | #define IOWA_MAX_BUS 8 | ||
24 | |||
25 | static struct iowa_bus iowa_busses[IOWA_MAX_BUS]; | ||
26 | static unsigned int iowa_bus_count; | ||
27 | |||
28 | static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr) | ||
29 | { | ||
30 | int i, j; | ||
31 | struct resource *res; | ||
32 | unsigned long vstart, vend; | ||
33 | |||
34 | for (i = 0; i < iowa_bus_count; i++) { | ||
35 | struct iowa_bus *bus = &iowa_busses[i]; | ||
36 | struct pci_controller *phb = bus->phb; | ||
37 | |||
38 | if (vaddr) { | ||
39 | vstart = (unsigned long)phb->io_base_virt; | ||
40 | vend = vstart + phb->pci_io_size - 1; | ||
41 | if ((vaddr >= vstart) && (vaddr <= vend)) | ||
42 | return bus; | ||
43 | } | ||
44 | |||
45 | if (paddr) | ||
46 | for (j = 0; j < 3; j++) { | ||
47 | res = &phb->mem_resources[j]; | ||
48 | if (paddr >= res->start && paddr <= res->end) | ||
49 | return bus; | ||
50 | } | ||
51 | } | ||
52 | |||
53 | return NULL; | ||
54 | } | ||
55 | |||
56 | struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr) | ||
57 | { | ||
58 | struct iowa_bus *bus; | ||
59 | int token; | ||
60 | |||
61 | token = PCI_GET_ADDR_TOKEN(addr); | ||
62 | |||
63 | if (token && token <= iowa_bus_count) | ||
64 | bus = &iowa_busses[token - 1]; | ||
65 | else { | ||
66 | unsigned long vaddr, paddr; | ||
67 | pte_t *ptep; | ||
68 | |||
69 | vaddr = (unsigned long)PCI_FIX_ADDR(addr); | ||
70 | if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END) | ||
71 | return NULL; | ||
72 | |||
73 | ptep = find_linux_pte(init_mm.pgd, vaddr); | ||
74 | if (ptep == NULL) | ||
75 | paddr = 0; | ||
76 | else | ||
77 | paddr = pte_pfn(*ptep) << PAGE_SHIFT; | ||
78 | bus = iowa_pci_find(vaddr, paddr); | ||
79 | |||
80 | if (bus == NULL) | ||
81 | return NULL; | ||
82 | } | ||
83 | |||
84 | return bus; | ||
85 | } | ||
86 | |||
87 | struct iowa_bus *iowa_pio_find_bus(unsigned long port) | ||
88 | { | ||
89 | unsigned long vaddr = (unsigned long)pci_io_base + port; | ||
90 | return iowa_pci_find(vaddr, 0); | ||
91 | } | ||
92 | |||
93 | |||
94 | #define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \ | ||
95 | static ret iowa_##name at \ | ||
96 | { \ | ||
97 | struct iowa_bus *bus; \ | ||
98 | bus = iowa_##space##_find_bus(aa); \ | ||
99 | if (bus && bus->ops && bus->ops->name) \ | ||
100 | return bus->ops->name al; \ | ||
101 | return __do_##name al; \ | ||
102 | } | ||
103 | |||
104 | #define DEF_PCI_AC_NORET(name, at, al, space, aa) \ | ||
105 | static void iowa_##name at \ | ||
106 | { \ | ||
107 | struct iowa_bus *bus; \ | ||
108 | bus = iowa_##space##_find_bus(aa); \ | ||
109 | if (bus && bus->ops && bus->ops->name) { \ | ||
110 | bus->ops->name al; \ | ||
111 | return; \ | ||
112 | } \ | ||
113 | __do_##name al; \ | ||
114 | } | ||
115 | |||
116 | #include <asm/io-defs.h> | ||
117 | |||
118 | #undef DEF_PCI_AC_RET | ||
119 | #undef DEF_PCI_AC_NORET | ||
120 | |||
121 | static const struct ppc_pci_io __devinitconst iowa_pci_io = { | ||
122 | |||
123 | #define DEF_PCI_AC_RET(name, ret, at, al, space, aa) .name = iowa_##name, | ||
124 | #define DEF_PCI_AC_NORET(name, at, al, space, aa) .name = iowa_##name, | ||
125 | |||
126 | #include <asm/io-defs.h> | ||
127 | |||
128 | #undef DEF_PCI_AC_RET | ||
129 | #undef DEF_PCI_AC_NORET | ||
130 | |||
131 | }; | ||
132 | |||
133 | static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size, | ||
134 | unsigned long flags, void *caller) | ||
135 | { | ||
136 | struct iowa_bus *bus; | ||
137 | void __iomem *res = __ioremap_caller(addr, size, flags, caller); | ||
138 | int busno; | ||
139 | |||
140 | bus = iowa_pci_find(0, (unsigned long)addr); | ||
141 | if (bus != NULL) { | ||
142 | busno = bus - iowa_busses; | ||
143 | PCI_SET_ADDR_TOKEN(res, busno + 1); | ||
144 | } | ||
145 | return res; | ||
146 | } | ||
147 | |||
148 | /* Regist new bus to support workaround */ | ||
149 | void __devinit iowa_register_bus(struct pci_controller *phb, | ||
150 | struct ppc_pci_io *ops, | ||
151 | int (*initfunc)(struct iowa_bus *, void *), void *data) | ||
152 | { | ||
153 | struct iowa_bus *bus; | ||
154 | struct device_node *np = phb->dn; | ||
155 | |||
156 | if (iowa_bus_count >= IOWA_MAX_BUS) { | ||
157 | pr_err("IOWA:Too many pci bridges, " | ||
158 | "workarounds disabled for %s\n", np->full_name); | ||
159 | return; | ||
160 | } | ||
161 | |||
162 | bus = &iowa_busses[iowa_bus_count]; | ||
163 | bus->phb = phb; | ||
164 | bus->ops = ops; | ||
165 | |||
166 | if (initfunc) | ||
167 | if ((*initfunc)(bus, data)) | ||
168 | return; | ||
169 | |||
170 | iowa_bus_count++; | ||
171 | |||
172 | pr_debug("IOWA:[%d]Add bus, %s.\n", iowa_bus_count-1, np->full_name); | ||
173 | } | ||
174 | |||
175 | /* enable IO workaround */ | ||
176 | void __devinit io_workaround_init(void) | ||
177 | { | ||
178 | static int io_workaround_inited; | ||
179 | |||
180 | if (io_workaround_inited) | ||
181 | return; | ||
182 | ppc_pci_io = iowa_pci_io; | ||
183 | ppc_md.ioremap = iowa_ioremap; | ||
184 | io_workaround_inited = 1; | ||
185 | } | ||
diff --git a/arch/powerpc/platforms/cell/io-workarounds.h b/arch/powerpc/platforms/cell/io-workarounds.h deleted file mode 100644 index 6efc7782ebf2..000000000000 --- a/arch/powerpc/platforms/cell/io-workarounds.h +++ /dev/null | |||
@@ -1,49 +0,0 @@ | |||
1 | /* | ||
2 | * Support PCI IO workaround | ||
3 | * | ||
4 | * (C) Copyright 2007-2008 TOSHIBA CORPORATION | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License along | ||
17 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | */ | ||
20 | |||
21 | #ifndef _IO_WORKAROUNDS_H | ||
22 | #define _IO_WORKAROUNDS_H | ||
23 | |||
24 | #include <linux/io.h> | ||
25 | #include <asm/pci-bridge.h> | ||
26 | |||
27 | /* Bus info */ | ||
28 | struct iowa_bus { | ||
29 | struct pci_controller *phb; | ||
30 | struct ppc_pci_io *ops; | ||
31 | void *private; | ||
32 | }; | ||
33 | |||
34 | void __devinit io_workaround_init(void); | ||
35 | void __devinit iowa_register_bus(struct pci_controller *, struct ppc_pci_io *, | ||
36 | int (*)(struct iowa_bus *, void *), void *); | ||
37 | struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR); | ||
38 | struct iowa_bus *iowa_pio_find_bus(unsigned long); | ||
39 | |||
40 | extern struct ppc_pci_io spiderpci_ops; | ||
41 | extern int spiderpci_iowa_init(struct iowa_bus *, void *); | ||
42 | |||
43 | #define SPIDER_PCI_REG_BASE 0xd000 | ||
44 | #define SPIDER_PCI_REG_SIZE 0x1000 | ||
45 | #define SPIDER_PCI_VCI_CNTL_STAT 0x0110 | ||
46 | #define SPIDER_PCI_DUMMY_READ 0x0810 | ||
47 | #define SPIDER_PCI_DUMMY_READ_BASE 0x0814 | ||
48 | |||
49 | #endif /* _IO_WORKAROUNDS_H */ | ||
diff --git a/arch/powerpc/platforms/cell/qpace_setup.c b/arch/powerpc/platforms/cell/qpace_setup.c index 1b5749042756..51e290126bc1 100644 --- a/arch/powerpc/platforms/cell/qpace_setup.c +++ b/arch/powerpc/platforms/cell/qpace_setup.c | |||
@@ -42,7 +42,6 @@ | |||
42 | #include "interrupt.h" | 42 | #include "interrupt.h" |
43 | #include "pervasive.h" | 43 | #include "pervasive.h" |
44 | #include "ras.h" | 44 | #include "ras.h" |
45 | #include "io-workarounds.h" | ||
46 | 45 | ||
47 | static void qpace_show_cpuinfo(struct seq_file *m) | 46 | static void qpace_show_cpuinfo(struct seq_file *m) |
48 | { | 47 | { |
@@ -145,9 +144,4 @@ define_machine(qpace) { | |||
145 | .calibrate_decr = generic_calibrate_decr, | 144 | .calibrate_decr = generic_calibrate_decr, |
146 | .progress = qpace_progress, | 145 | .progress = qpace_progress, |
147 | .init_IRQ = iic_init_IRQ, | 146 | .init_IRQ = iic_init_IRQ, |
148 | #ifdef CONFIG_KEXEC | ||
149 | .machine_kexec = default_machine_kexec, | ||
150 | .machine_kexec_prepare = default_machine_kexec_prepare, | ||
151 | .machine_crash_shutdown = default_machine_crash_shutdown, | ||
152 | #endif | ||
153 | }; | 147 | }; |
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c index 1d3c4effea10..5ec1e47a0d77 100644 --- a/arch/powerpc/platforms/cell/ras.c +++ b/arch/powerpc/platforms/cell/ras.c | |||
@@ -173,8 +173,10 @@ static int __init cbe_ptcal_enable(void) | |||
173 | return -ENODEV; | 173 | return -ENODEV; |
174 | 174 | ||
175 | size = of_get_property(np, "ibm,cbe-ptcal-size", NULL); | 175 | size = of_get_property(np, "ibm,cbe-ptcal-size", NULL); |
176 | if (!size) | 176 | if (!size) { |
177 | of_node_put(np); | ||
177 | return -ENODEV; | 178 | return -ENODEV; |
179 | } | ||
178 | 180 | ||
179 | pr_debug("%s: enabling PTCAL, size = 0x%x\n", __func__, *size); | 181 | pr_debug("%s: enabling PTCAL, size = 0x%x\n", __func__, *size); |
180 | order = get_order(*size); | 182 | order = get_order(*size); |
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c index 691995761b3d..c73cf4c43fc2 100644 --- a/arch/powerpc/platforms/cell/setup.c +++ b/arch/powerpc/platforms/cell/setup.c | |||
@@ -51,11 +51,11 @@ | |||
51 | #include <asm/udbg.h> | 51 | #include <asm/udbg.h> |
52 | #include <asm/mpic.h> | 52 | #include <asm/mpic.h> |
53 | #include <asm/cell-regs.h> | 53 | #include <asm/cell-regs.h> |
54 | #include <asm/io-workarounds.h> | ||
54 | 55 | ||
55 | #include "interrupt.h" | 56 | #include "interrupt.h" |
56 | #include "pervasive.h" | 57 | #include "pervasive.h" |
57 | #include "ras.h" | 58 | #include "ras.h" |
58 | #include "io-workarounds.h" | ||
59 | 59 | ||
60 | #ifdef DEBUG | 60 | #ifdef DEBUG |
61 | #define DBG(fmt...) udbg_printf(fmt) | 61 | #define DBG(fmt...) udbg_printf(fmt) |
@@ -136,8 +136,6 @@ static int __devinit cell_setup_phb(struct pci_controller *phb) | |||
136 | 136 | ||
137 | iowa_register_bus(phb, &spiderpci_ops, &spiderpci_iowa_init, | 137 | iowa_register_bus(phb, &spiderpci_ops, &spiderpci_iowa_init, |
138 | (void *)SPIDER_PCI_REG_BASE); | 138 | (void *)SPIDER_PCI_REG_BASE); |
139 | io_workaround_init(); | ||
140 | |||
141 | return 0; | 139 | return 0; |
142 | } | 140 | } |
143 | 141 | ||
@@ -187,13 +185,15 @@ machine_subsys_initcall(cell, cell_publish_devices); | |||
187 | 185 | ||
188 | static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc) | 186 | static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc) |
189 | { | 187 | { |
190 | struct mpic *mpic = desc->handler_data; | 188 | struct irq_chip *chip = irq_desc_get_chip(desc); |
189 | struct mpic *mpic = irq_desc_get_handler_data(desc); | ||
191 | unsigned int virq; | 190 | unsigned int virq; |
192 | 191 | ||
193 | virq = mpic_get_one_irq(mpic); | 192 | virq = mpic_get_one_irq(mpic); |
194 | if (virq != NO_IRQ) | 193 | if (virq != NO_IRQ) |
195 | generic_handle_irq(virq); | 194 | generic_handle_irq(virq); |
196 | desc->chip->eoi(irq); | 195 | |
196 | chip->irq_eoi(&desc->irq_data); | ||
197 | } | 197 | } |
198 | 198 | ||
199 | static void __init mpic_init_IRQ(void) | 199 | static void __init mpic_init_IRQ(void) |
@@ -221,8 +221,8 @@ static void __init mpic_init_IRQ(void) | |||
221 | 221 | ||
222 | printk(KERN_INFO "%s : hooking up to IRQ %d\n", | 222 | printk(KERN_INFO "%s : hooking up to IRQ %d\n", |
223 | dn->full_name, virq); | 223 | dn->full_name, virq); |
224 | set_irq_data(virq, mpic); | 224 | irq_set_handler_data(virq, mpic); |
225 | set_irq_chained_handler(virq, cell_mpic_cascade); | 225 | irq_set_chained_handler(virq, cell_mpic_cascade); |
226 | } | 226 | } |
227 | } | 227 | } |
228 | 228 | ||
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c index f774530075b7..dbb641ea90dd 100644 --- a/arch/powerpc/platforms/cell/smp.c +++ b/arch/powerpc/platforms/cell/smp.c | |||
@@ -77,7 +77,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) | |||
77 | unsigned int pcpu; | 77 | unsigned int pcpu; |
78 | int start_cpu; | 78 | int start_cpu; |
79 | 79 | ||
80 | if (cpu_isset(lcpu, of_spin_map)) | 80 | if (cpumask_test_cpu(lcpu, &of_spin_map)) |
81 | /* Already started by OF and sitting in spin loop */ | 81 | /* Already started by OF and sitting in spin loop */ |
82 | return 1; | 82 | return 1; |
83 | 83 | ||
@@ -103,27 +103,11 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) | |||
103 | return 1; | 103 | return 1; |
104 | } | 104 | } |
105 | 105 | ||
106 | static void smp_iic_message_pass(int target, int msg) | ||
107 | { | ||
108 | unsigned int i; | ||
109 | |||
110 | if (target < NR_CPUS) { | ||
111 | iic_cause_IPI(target, msg); | ||
112 | } else { | ||
113 | for_each_online_cpu(i) { | ||
114 | if (target == MSG_ALL_BUT_SELF | ||
115 | && i == smp_processor_id()) | ||
116 | continue; | ||
117 | iic_cause_IPI(i, msg); | ||
118 | } | ||
119 | } | ||
120 | } | ||
121 | |||
122 | static int __init smp_iic_probe(void) | 106 | static int __init smp_iic_probe(void) |
123 | { | 107 | { |
124 | iic_request_IPIs(); | 108 | iic_request_IPIs(); |
125 | 109 | ||
126 | return cpus_weight(cpu_possible_map); | 110 | return cpumask_weight(cpu_possible_mask); |
127 | } | 111 | } |
128 | 112 | ||
129 | static void __devinit smp_cell_setup_cpu(int cpu) | 113 | static void __devinit smp_cell_setup_cpu(int cpu) |
@@ -137,12 +121,12 @@ static void __devinit smp_cell_setup_cpu(int cpu) | |||
137 | mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER); | 121 | mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER); |
138 | } | 122 | } |
139 | 123 | ||
140 | static void __devinit smp_cell_kick_cpu(int nr) | 124 | static int __devinit smp_cell_kick_cpu(int nr) |
141 | { | 125 | { |
142 | BUG_ON(nr < 0 || nr >= NR_CPUS); | 126 | BUG_ON(nr < 0 || nr >= NR_CPUS); |
143 | 127 | ||
144 | if (!smp_startup_cpu(nr)) | 128 | if (!smp_startup_cpu(nr)) |
145 | return; | 129 | return -ENOENT; |
146 | 130 | ||
147 | /* | 131 | /* |
148 | * The processor is currently spinning, waiting for the | 132 | * The processor is currently spinning, waiting for the |
@@ -150,6 +134,8 @@ static void __devinit smp_cell_kick_cpu(int nr) | |||
150 | * the processor will continue on to secondary_start | 134 | * the processor will continue on to secondary_start |
151 | */ | 135 | */ |
152 | paca[nr].cpu_start = 1; | 136 | paca[nr].cpu_start = 1; |
137 | |||
138 | return 0; | ||
153 | } | 139 | } |
154 | 140 | ||
155 | static int smp_cell_cpu_bootable(unsigned int nr) | 141 | static int smp_cell_cpu_bootable(unsigned int nr) |
@@ -166,7 +152,7 @@ static int smp_cell_cpu_bootable(unsigned int nr) | |||
166 | return 1; | 152 | return 1; |
167 | } | 153 | } |
168 | static struct smp_ops_t bpa_iic_smp_ops = { | 154 | static struct smp_ops_t bpa_iic_smp_ops = { |
169 | .message_pass = smp_iic_message_pass, | 155 | .message_pass = iic_message_pass, |
170 | .probe = smp_iic_probe, | 156 | .probe = smp_iic_probe, |
171 | .kick_cpu = smp_cell_kick_cpu, | 157 | .kick_cpu = smp_cell_kick_cpu, |
172 | .setup_cpu = smp_cell_setup_cpu, | 158 | .setup_cpu = smp_cell_setup_cpu, |
@@ -186,13 +172,12 @@ void __init smp_init_cell(void) | |||
186 | if (cpu_has_feature(CPU_FTR_SMT)) { | 172 | if (cpu_has_feature(CPU_FTR_SMT)) { |
187 | for_each_present_cpu(i) { | 173 | for_each_present_cpu(i) { |
188 | if (cpu_thread_in_core(i) == 0) | 174 | if (cpu_thread_in_core(i) == 0) |
189 | cpu_set(i, of_spin_map); | 175 | cpumask_set_cpu(i, &of_spin_map); |
190 | } | 176 | } |
191 | } else { | 177 | } else |
192 | of_spin_map = cpu_present_map; | 178 | cpumask_copy(&of_spin_map, cpu_present_mask); |
193 | } | ||
194 | 179 | ||
195 | cpu_clear(boot_cpuid, of_spin_map); | 180 | cpumask_clear_cpu(boot_cpuid, &of_spin_map); |
196 | 181 | ||
197 | /* Non-lpar has additional take/give timebase */ | 182 | /* Non-lpar has additional take/give timebase */ |
198 | if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) { | 183 | if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) { |
diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c index ca7731c0b595..f1f7878893f3 100644 --- a/arch/powerpc/platforms/cell/spider-pci.c +++ b/arch/powerpc/platforms/cell/spider-pci.c | |||
@@ -27,8 +27,7 @@ | |||
27 | 27 | ||
28 | #include <asm/ppc-pci.h> | 28 | #include <asm/ppc-pci.h> |
29 | #include <asm/pci-bridge.h> | 29 | #include <asm/pci-bridge.h> |
30 | 30 | #include <asm/io-workarounds.h> | |
31 | #include "io-workarounds.h" | ||
32 | 31 | ||
33 | #define SPIDER_PCI_DISABLE_PREFETCH | 32 | #define SPIDER_PCI_DISABLE_PREFETCH |
34 | 33 | ||
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c index 5876e888e412..442c28c00f88 100644 --- a/arch/powerpc/platforms/cell/spider-pic.c +++ b/arch/powerpc/platforms/cell/spider-pic.c | |||
@@ -68,9 +68,9 @@ struct spider_pic { | |||
68 | }; | 68 | }; |
69 | static struct spider_pic spider_pics[SPIDER_CHIP_COUNT]; | 69 | static struct spider_pic spider_pics[SPIDER_CHIP_COUNT]; |
70 | 70 | ||
71 | static struct spider_pic *spider_virq_to_pic(unsigned int virq) | 71 | static struct spider_pic *spider_irq_data_to_pic(struct irq_data *d) |
72 | { | 72 | { |
73 | return irq_map[virq].host->host_data; | 73 | return irq_data_get_irq_chip_data(d); |
74 | } | 74 | } |
75 | 75 | ||
76 | static void __iomem *spider_get_irq_config(struct spider_pic *pic, | 76 | static void __iomem *spider_get_irq_config(struct spider_pic *pic, |
@@ -79,30 +79,30 @@ static void __iomem *spider_get_irq_config(struct spider_pic *pic, | |||
79 | return pic->regs + TIR_CFGA + 8 * src; | 79 | return pic->regs + TIR_CFGA + 8 * src; |
80 | } | 80 | } |
81 | 81 | ||
82 | static void spider_unmask_irq(unsigned int virq) | 82 | static void spider_unmask_irq(struct irq_data *d) |
83 | { | 83 | { |
84 | struct spider_pic *pic = spider_virq_to_pic(virq); | 84 | struct spider_pic *pic = spider_irq_data_to_pic(d); |
85 | void __iomem *cfg = spider_get_irq_config(pic, irq_map[virq].hwirq); | 85 | void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d)); |
86 | 86 | ||
87 | out_be32(cfg, in_be32(cfg) | 0x30000000u); | 87 | out_be32(cfg, in_be32(cfg) | 0x30000000u); |
88 | } | 88 | } |
89 | 89 | ||
90 | static void spider_mask_irq(unsigned int virq) | 90 | static void spider_mask_irq(struct irq_data *d) |
91 | { | 91 | { |
92 | struct spider_pic *pic = spider_virq_to_pic(virq); | 92 | struct spider_pic *pic = spider_irq_data_to_pic(d); |
93 | void __iomem *cfg = spider_get_irq_config(pic, irq_map[virq].hwirq); | 93 | void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d)); |
94 | 94 | ||
95 | out_be32(cfg, in_be32(cfg) & ~0x30000000u); | 95 | out_be32(cfg, in_be32(cfg) & ~0x30000000u); |
96 | } | 96 | } |
97 | 97 | ||
98 | static void spider_ack_irq(unsigned int virq) | 98 | static void spider_ack_irq(struct irq_data *d) |
99 | { | 99 | { |
100 | struct spider_pic *pic = spider_virq_to_pic(virq); | 100 | struct spider_pic *pic = spider_irq_data_to_pic(d); |
101 | unsigned int src = irq_map[virq].hwirq; | 101 | unsigned int src = irqd_to_hwirq(d); |
102 | 102 | ||
103 | /* Reset edge detection logic if necessary | 103 | /* Reset edge detection logic if necessary |
104 | */ | 104 | */ |
105 | if (irq_to_desc(virq)->status & IRQ_LEVEL) | 105 | if (irqd_is_level_type(d)) |
106 | return; | 106 | return; |
107 | 107 | ||
108 | /* Only interrupts 47 to 50 can be set to edge */ | 108 | /* Only interrupts 47 to 50 can be set to edge */ |
@@ -113,13 +113,12 @@ static void spider_ack_irq(unsigned int virq) | |||
113 | out_be32(pic->regs + TIR_EDC, 0x100 | (src & 0xf)); | 113 | out_be32(pic->regs + TIR_EDC, 0x100 | (src & 0xf)); |
114 | } | 114 | } |
115 | 115 | ||
116 | static int spider_set_irq_type(unsigned int virq, unsigned int type) | 116 | static int spider_set_irq_type(struct irq_data *d, unsigned int type) |
117 | { | 117 | { |
118 | unsigned int sense = type & IRQ_TYPE_SENSE_MASK; | 118 | unsigned int sense = type & IRQ_TYPE_SENSE_MASK; |
119 | struct spider_pic *pic = spider_virq_to_pic(virq); | 119 | struct spider_pic *pic = spider_irq_data_to_pic(d); |
120 | unsigned int hw = irq_map[virq].hwirq; | 120 | unsigned int hw = irqd_to_hwirq(d); |
121 | void __iomem *cfg = spider_get_irq_config(pic, hw); | 121 | void __iomem *cfg = spider_get_irq_config(pic, hw); |
122 | struct irq_desc *desc = irq_to_desc(virq); | ||
123 | u32 old_mask; | 122 | u32 old_mask; |
124 | u32 ic; | 123 | u32 ic; |
125 | 124 | ||
@@ -147,12 +146,6 @@ static int spider_set_irq_type(unsigned int virq, unsigned int type) | |||
147 | return -EINVAL; | 146 | return -EINVAL; |
148 | } | 147 | } |
149 | 148 | ||
150 | /* Update irq_desc */ | ||
151 | desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); | ||
152 | desc->status |= type & IRQ_TYPE_SENSE_MASK; | ||
153 | if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) | ||
154 | desc->status |= IRQ_LEVEL; | ||
155 | |||
156 | /* Configure the source. One gross hack that was there before and | 149 | /* Configure the source. One gross hack that was there before and |
157 | * that I've kept around is the priority to the BE which I set to | 150 | * that I've kept around is the priority to the BE which I set to |
158 | * be the same as the interrupt source number. I don't know wether | 151 | * be the same as the interrupt source number. I don't know wether |
@@ -169,19 +162,20 @@ static int spider_set_irq_type(unsigned int virq, unsigned int type) | |||
169 | 162 | ||
170 | static struct irq_chip spider_pic = { | 163 | static struct irq_chip spider_pic = { |
171 | .name = "SPIDER", | 164 | .name = "SPIDER", |
172 | .unmask = spider_unmask_irq, | 165 | .irq_unmask = spider_unmask_irq, |
173 | .mask = spider_mask_irq, | 166 | .irq_mask = spider_mask_irq, |
174 | .ack = spider_ack_irq, | 167 | .irq_ack = spider_ack_irq, |
175 | .set_type = spider_set_irq_type, | 168 | .irq_set_type = spider_set_irq_type, |
176 | }; | 169 | }; |
177 | 170 | ||
178 | static int spider_host_map(struct irq_host *h, unsigned int virq, | 171 | static int spider_host_map(struct irq_host *h, unsigned int virq, |
179 | irq_hw_number_t hw) | 172 | irq_hw_number_t hw) |
180 | { | 173 | { |
181 | set_irq_chip_and_handler(virq, &spider_pic, handle_level_irq); | 174 | irq_set_chip_data(virq, h->host_data); |
175 | irq_set_chip_and_handler(virq, &spider_pic, handle_level_irq); | ||
182 | 176 | ||
183 | /* Set default irq type */ | 177 | /* Set default irq type */ |
184 | set_irq_type(virq, IRQ_TYPE_NONE); | 178 | irq_set_irq_type(virq, IRQ_TYPE_NONE); |
185 | 179 | ||
186 | return 0; | 180 | return 0; |
187 | } | 181 | } |
@@ -207,7 +201,8 @@ static struct irq_host_ops spider_host_ops = { | |||
207 | 201 | ||
208 | static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc) | 202 | static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc) |
209 | { | 203 | { |
210 | struct spider_pic *pic = desc->handler_data; | 204 | struct irq_chip *chip = irq_desc_get_chip(desc); |
205 | struct spider_pic *pic = irq_desc_get_handler_data(desc); | ||
211 | unsigned int cs, virq; | 206 | unsigned int cs, virq; |
212 | 207 | ||
213 | cs = in_be32(pic->regs + TIR_CS) >> 24; | 208 | cs = in_be32(pic->regs + TIR_CS) >> 24; |
@@ -215,9 +210,11 @@ static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc) | |||
215 | virq = NO_IRQ; | 210 | virq = NO_IRQ; |
216 | else | 211 | else |
217 | virq = irq_linear_revmap(pic->host, cs); | 212 | virq = irq_linear_revmap(pic->host, cs); |
213 | |||
218 | if (virq != NO_IRQ) | 214 | if (virq != NO_IRQ) |
219 | generic_handle_irq(virq); | 215 | generic_handle_irq(virq); |
220 | desc->chip->eoi(irq); | 216 | |
217 | chip->irq_eoi(&desc->irq_data); | ||
221 | } | 218 | } |
222 | 219 | ||
223 | /* For hooking up the cascace we have a problem. Our device-tree is | 220 | /* For hooking up the cascace we have a problem. Our device-tree is |
@@ -258,8 +255,10 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic) | |||
258 | return NO_IRQ; | 255 | return NO_IRQ; |
259 | imap += intsize + 1; | 256 | imap += intsize + 1; |
260 | tmp = of_get_property(iic, "#interrupt-cells", NULL); | 257 | tmp = of_get_property(iic, "#interrupt-cells", NULL); |
261 | if (tmp == NULL) | 258 | if (tmp == NULL) { |
259 | of_node_put(iic); | ||
262 | return NO_IRQ; | 260 | return NO_IRQ; |
261 | } | ||
263 | intsize = *tmp; | 262 | intsize = *tmp; |
264 | /* Assume unit is last entry of interrupt specifier */ | 263 | /* Assume unit is last entry of interrupt specifier */ |
265 | unit = imap[intsize - 1]; | 264 | unit = imap[intsize - 1]; |
@@ -323,8 +322,8 @@ static void __init spider_init_one(struct device_node *of_node, int chip, | |||
323 | virq = spider_find_cascade_and_node(pic); | 322 | virq = spider_find_cascade_and_node(pic); |
324 | if (virq == NO_IRQ) | 323 | if (virq == NO_IRQ) |
325 | return; | 324 | return; |
326 | set_irq_data(virq, pic); | 325 | irq_set_handler_data(virq, pic); |
327 | set_irq_chained_handler(virq, spider_irq_cascade); | 326 | irq_set_chained_handler(virq, spider_irq_cascade); |
328 | 327 | ||
329 | printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %s\n", | 328 | printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %s\n", |
330 | pic->node_id, addr, of_node->full_name); | 329 | pic->node_id, addr, of_node->full_name); |
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index 8547e86bfb42..3675da73623f 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c | |||
@@ -32,11 +32,13 @@ | |||
32 | #include <linux/io.h> | 32 | #include <linux/io.h> |
33 | #include <linux/mutex.h> | 33 | #include <linux/mutex.h> |
34 | #include <linux/linux_logo.h> | 34 | #include <linux/linux_logo.h> |
35 | #include <linux/syscore_ops.h> | ||
35 | #include <asm/spu.h> | 36 | #include <asm/spu.h> |
36 | #include <asm/spu_priv1.h> | 37 | #include <asm/spu_priv1.h> |
37 | #include <asm/spu_csa.h> | 38 | #include <asm/spu_csa.h> |
38 | #include <asm/xmon.h> | 39 | #include <asm/xmon.h> |
39 | #include <asm/prom.h> | 40 | #include <asm/prom.h> |
41 | #include <asm/kexec.h> | ||
40 | 42 | ||
41 | const struct spu_management_ops *spu_management_ops; | 43 | const struct spu_management_ops *spu_management_ops; |
42 | EXPORT_SYMBOL_GPL(spu_management_ops); | 44 | EXPORT_SYMBOL_GPL(spu_management_ops); |
@@ -520,18 +522,8 @@ void spu_init_channels(struct spu *spu) | |||
520 | } | 522 | } |
521 | EXPORT_SYMBOL_GPL(spu_init_channels); | 523 | EXPORT_SYMBOL_GPL(spu_init_channels); |
522 | 524 | ||
523 | static int spu_shutdown(struct sys_device *sysdev) | ||
524 | { | ||
525 | struct spu *spu = container_of(sysdev, struct spu, sysdev); | ||
526 | |||
527 | spu_free_irqs(spu); | ||
528 | spu_destroy_spu(spu); | ||
529 | return 0; | ||
530 | } | ||
531 | |||
532 | static struct sysdev_class spu_sysdev_class = { | 525 | static struct sysdev_class spu_sysdev_class = { |
533 | .name = "spu", | 526 | .name = "spu", |
534 | .shutdown = spu_shutdown, | ||
535 | }; | 527 | }; |
536 | 528 | ||
537 | int spu_add_sysdev_attr(struct sysdev_attribute *attr) | 529 | int spu_add_sysdev_attr(struct sysdev_attribute *attr) |
@@ -727,6 +719,91 @@ static ssize_t spu_stat_show(struct sys_device *sysdev, | |||
727 | 719 | ||
728 | static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL); | 720 | static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL); |
729 | 721 | ||
722 | #ifdef CONFIG_KEXEC | ||
723 | |||
724 | struct crash_spu_info { | ||
725 | struct spu *spu; | ||
726 | u32 saved_spu_runcntl_RW; | ||
727 | u32 saved_spu_status_R; | ||
728 | u32 saved_spu_npc_RW; | ||
729 | u64 saved_mfc_sr1_RW; | ||
730 | u64 saved_mfc_dar; | ||
731 | u64 saved_mfc_dsisr; | ||
732 | }; | ||
733 | |||
734 | #define CRASH_NUM_SPUS 16 /* Enough for current hardware */ | ||
735 | static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS]; | ||
736 | |||
737 | static void crash_kexec_stop_spus(void) | ||
738 | { | ||
739 | struct spu *spu; | ||
740 | int i; | ||
741 | u64 tmp; | ||
742 | |||
743 | for (i = 0; i < CRASH_NUM_SPUS; i++) { | ||
744 | if (!crash_spu_info[i].spu) | ||
745 | continue; | ||
746 | |||
747 | spu = crash_spu_info[i].spu; | ||
748 | |||
749 | crash_spu_info[i].saved_spu_runcntl_RW = | ||
750 | in_be32(&spu->problem->spu_runcntl_RW); | ||
751 | crash_spu_info[i].saved_spu_status_R = | ||
752 | in_be32(&spu->problem->spu_status_R); | ||
753 | crash_spu_info[i].saved_spu_npc_RW = | ||
754 | in_be32(&spu->problem->spu_npc_RW); | ||
755 | |||
756 | crash_spu_info[i].saved_mfc_dar = spu_mfc_dar_get(spu); | ||
757 | crash_spu_info[i].saved_mfc_dsisr = spu_mfc_dsisr_get(spu); | ||
758 | tmp = spu_mfc_sr1_get(spu); | ||
759 | crash_spu_info[i].saved_mfc_sr1_RW = tmp; | ||
760 | |||
761 | tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK; | ||
762 | spu_mfc_sr1_set(spu, tmp); | ||
763 | |||
764 | __delay(200); | ||
765 | } | ||
766 | } | ||
767 | |||
768 | static void crash_register_spus(struct list_head *list) | ||
769 | { | ||
770 | struct spu *spu; | ||
771 | int ret; | ||
772 | |||
773 | list_for_each_entry(spu, list, full_list) { | ||
774 | if (WARN_ON(spu->number >= CRASH_NUM_SPUS)) | ||
775 | continue; | ||
776 | |||
777 | crash_spu_info[spu->number].spu = spu; | ||
778 | } | ||
779 | |||
780 | ret = crash_shutdown_register(&crash_kexec_stop_spus); | ||
781 | if (ret) | ||
782 | printk(KERN_ERR "Could not register SPU crash handler"); | ||
783 | } | ||
784 | |||
785 | #else | ||
786 | static inline void crash_register_spus(struct list_head *list) | ||
787 | { | ||
788 | } | ||
789 | #endif | ||
790 | |||
791 | static void spu_shutdown(void) | ||
792 | { | ||
793 | struct spu *spu; | ||
794 | |||
795 | mutex_lock(&spu_full_list_mutex); | ||
796 | list_for_each_entry(spu, &spu_full_list, full_list) { | ||
797 | spu_free_irqs(spu); | ||
798 | spu_destroy_spu(spu); | ||
799 | } | ||
800 | mutex_unlock(&spu_full_list_mutex); | ||
801 | } | ||
802 | |||
803 | static struct syscore_ops spu_syscore_ops = { | ||
804 | .shutdown = spu_shutdown, | ||
805 | }; | ||
806 | |||
730 | static int __init init_spu_base(void) | 807 | static int __init init_spu_base(void) |
731 | { | 808 | { |
732 | int i, ret = 0; | 809 | int i, ret = 0; |
@@ -760,6 +837,7 @@ static int __init init_spu_base(void) | |||
760 | crash_register_spus(&spu_full_list); | 837 | crash_register_spus(&spu_full_list); |
761 | mutex_unlock(&spu_full_list_mutex); | 838 | mutex_unlock(&spu_full_list_mutex); |
762 | spu_add_sysdev_attr(&attr_stat); | 839 | spu_add_sysdev_attr(&attr_stat); |
840 | register_syscore_ops(&spu_syscore_ops); | ||
763 | 841 | ||
764 | spu_init_affinity(); | 842 | spu_init_affinity(); |
765 | 843 | ||
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index 1a40da92154c..3c7c3f82d842 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c | |||
@@ -154,6 +154,7 @@ static const struct file_operations __fops = { \ | |||
154 | .release = spufs_attr_release, \ | 154 | .release = spufs_attr_release, \ |
155 | .read = spufs_attr_read, \ | 155 | .read = spufs_attr_read, \ |
156 | .write = spufs_attr_write, \ | 156 | .write = spufs_attr_write, \ |
157 | .llseek = generic_file_llseek, \ | ||
157 | }; | 158 | }; |
158 | 159 | ||
159 | 160 | ||
@@ -218,24 +219,17 @@ spufs_mem_write(struct file *file, const char __user *buffer, | |||
218 | loff_t pos = *ppos; | 219 | loff_t pos = *ppos; |
219 | int ret; | 220 | int ret; |
220 | 221 | ||
221 | if (pos < 0) | ||
222 | return -EINVAL; | ||
223 | if (pos > LS_SIZE) | 222 | if (pos > LS_SIZE) |
224 | return -EFBIG; | 223 | return -EFBIG; |
225 | if (size > LS_SIZE - pos) | ||
226 | size = LS_SIZE - pos; | ||
227 | 224 | ||
228 | ret = spu_acquire(ctx); | 225 | ret = spu_acquire(ctx); |
229 | if (ret) | 226 | if (ret) |
230 | return ret; | 227 | return ret; |
231 | 228 | ||
232 | local_store = ctx->ops->get_ls(ctx); | 229 | local_store = ctx->ops->get_ls(ctx); |
233 | ret = copy_from_user(local_store + pos, buffer, size); | 230 | size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size); |
234 | spu_release(ctx); | 231 | spu_release(ctx); |
235 | 232 | ||
236 | if (ret) | ||
237 | return -EFAULT; | ||
238 | *ppos = pos + size; | ||
239 | return size; | 233 | return size; |
240 | } | 234 | } |
241 | 235 | ||
@@ -521,6 +515,7 @@ static const struct file_operations spufs_cntl_fops = { | |||
521 | .release = spufs_cntl_release, | 515 | .release = spufs_cntl_release, |
522 | .read = simple_attr_read, | 516 | .read = simple_attr_read, |
523 | .write = simple_attr_write, | 517 | .write = simple_attr_write, |
518 | .llseek = generic_file_llseek, | ||
524 | .mmap = spufs_cntl_mmap, | 519 | .mmap = spufs_cntl_mmap, |
525 | }; | 520 | }; |
526 | 521 | ||
@@ -572,18 +567,15 @@ spufs_regs_write(struct file *file, const char __user *buffer, | |||
572 | if (*pos >= sizeof(lscsa->gprs)) | 567 | if (*pos >= sizeof(lscsa->gprs)) |
573 | return -EFBIG; | 568 | return -EFBIG; |
574 | 569 | ||
575 | size = min_t(ssize_t, sizeof(lscsa->gprs) - *pos, size); | ||
576 | *pos += size; | ||
577 | |||
578 | ret = spu_acquire_saved(ctx); | 570 | ret = spu_acquire_saved(ctx); |
579 | if (ret) | 571 | if (ret) |
580 | return ret; | 572 | return ret; |
581 | 573 | ||
582 | ret = copy_from_user((char *)lscsa->gprs + *pos - size, | 574 | size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos, |
583 | buffer, size) ? -EFAULT : size; | 575 | buffer, size); |
584 | 576 | ||
585 | spu_release_saved(ctx); | 577 | spu_release_saved(ctx); |
586 | return ret; | 578 | return size; |
587 | } | 579 | } |
588 | 580 | ||
589 | static const struct file_operations spufs_regs_fops = { | 581 | static const struct file_operations spufs_regs_fops = { |
@@ -628,18 +620,15 @@ spufs_fpcr_write(struct file *file, const char __user * buffer, | |||
628 | if (*pos >= sizeof(lscsa->fpcr)) | 620 | if (*pos >= sizeof(lscsa->fpcr)) |
629 | return -EFBIG; | 621 | return -EFBIG; |
630 | 622 | ||
631 | size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size); | ||
632 | |||
633 | ret = spu_acquire_saved(ctx); | 623 | ret = spu_acquire_saved(ctx); |
634 | if (ret) | 624 | if (ret) |
635 | return ret; | 625 | return ret; |
636 | 626 | ||
637 | *pos += size; | 627 | size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos, |
638 | ret = copy_from_user((char *)&lscsa->fpcr + *pos - size, | 628 | buffer, size); |
639 | buffer, size) ? -EFAULT : size; | ||
640 | 629 | ||
641 | spu_release_saved(ctx); | 630 | spu_release_saved(ctx); |
642 | return ret; | 631 | return size; |
643 | } | 632 | } |
644 | 633 | ||
645 | static const struct file_operations spufs_fpcr_fops = { | 634 | static const struct file_operations spufs_fpcr_fops = { |
@@ -714,6 +703,7 @@ static ssize_t spufs_mbox_read(struct file *file, char __user *buf, | |||
714 | static const struct file_operations spufs_mbox_fops = { | 703 | static const struct file_operations spufs_mbox_fops = { |
715 | .open = spufs_pipe_open, | 704 | .open = spufs_pipe_open, |
716 | .read = spufs_mbox_read, | 705 | .read = spufs_mbox_read, |
706 | .llseek = no_llseek, | ||
717 | }; | 707 | }; |
718 | 708 | ||
719 | static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf, | 709 | static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf, |
@@ -743,6 +733,7 @@ static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf, | |||
743 | static const struct file_operations spufs_mbox_stat_fops = { | 733 | static const struct file_operations spufs_mbox_stat_fops = { |
744 | .open = spufs_pipe_open, | 734 | .open = spufs_pipe_open, |
745 | .read = spufs_mbox_stat_read, | 735 | .read = spufs_mbox_stat_read, |
736 | .llseek = no_llseek, | ||
746 | }; | 737 | }; |
747 | 738 | ||
748 | /* low-level ibox access function */ | 739 | /* low-level ibox access function */ |
@@ -863,6 +854,7 @@ static const struct file_operations spufs_ibox_fops = { | |||
863 | .read = spufs_ibox_read, | 854 | .read = spufs_ibox_read, |
864 | .poll = spufs_ibox_poll, | 855 | .poll = spufs_ibox_poll, |
865 | .fasync = spufs_ibox_fasync, | 856 | .fasync = spufs_ibox_fasync, |
857 | .llseek = no_llseek, | ||
866 | }; | 858 | }; |
867 | 859 | ||
868 | static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf, | 860 | static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf, |
@@ -890,6 +882,7 @@ static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf, | |||
890 | static const struct file_operations spufs_ibox_stat_fops = { | 882 | static const struct file_operations spufs_ibox_stat_fops = { |
891 | .open = spufs_pipe_open, | 883 | .open = spufs_pipe_open, |
892 | .read = spufs_ibox_stat_read, | 884 | .read = spufs_ibox_stat_read, |
885 | .llseek = no_llseek, | ||
893 | }; | 886 | }; |
894 | 887 | ||
895 | /* low-level mailbox write */ | 888 | /* low-level mailbox write */ |
@@ -1011,6 +1004,7 @@ static const struct file_operations spufs_wbox_fops = { | |||
1011 | .write = spufs_wbox_write, | 1004 | .write = spufs_wbox_write, |
1012 | .poll = spufs_wbox_poll, | 1005 | .poll = spufs_wbox_poll, |
1013 | .fasync = spufs_wbox_fasync, | 1006 | .fasync = spufs_wbox_fasync, |
1007 | .llseek = no_llseek, | ||
1014 | }; | 1008 | }; |
1015 | 1009 | ||
1016 | static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf, | 1010 | static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf, |
@@ -1038,6 +1032,7 @@ static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf, | |||
1038 | static const struct file_operations spufs_wbox_stat_fops = { | 1032 | static const struct file_operations spufs_wbox_stat_fops = { |
1039 | .open = spufs_pipe_open, | 1033 | .open = spufs_pipe_open, |
1040 | .read = spufs_wbox_stat_read, | 1034 | .read = spufs_wbox_stat_read, |
1035 | .llseek = no_llseek, | ||
1041 | }; | 1036 | }; |
1042 | 1037 | ||
1043 | static int spufs_signal1_open(struct inode *inode, struct file *file) | 1038 | static int spufs_signal1_open(struct inode *inode, struct file *file) |
@@ -1166,6 +1161,7 @@ static const struct file_operations spufs_signal1_fops = { | |||
1166 | .read = spufs_signal1_read, | 1161 | .read = spufs_signal1_read, |
1167 | .write = spufs_signal1_write, | 1162 | .write = spufs_signal1_write, |
1168 | .mmap = spufs_signal1_mmap, | 1163 | .mmap = spufs_signal1_mmap, |
1164 | .llseek = no_llseek, | ||
1169 | }; | 1165 | }; |
1170 | 1166 | ||
1171 | static const struct file_operations spufs_signal1_nosched_fops = { | 1167 | static const struct file_operations spufs_signal1_nosched_fops = { |
@@ -1173,6 +1169,7 @@ static const struct file_operations spufs_signal1_nosched_fops = { | |||
1173 | .release = spufs_signal1_release, | 1169 | .release = spufs_signal1_release, |
1174 | .write = spufs_signal1_write, | 1170 | .write = spufs_signal1_write, |
1175 | .mmap = spufs_signal1_mmap, | 1171 | .mmap = spufs_signal1_mmap, |
1172 | .llseek = no_llseek, | ||
1176 | }; | 1173 | }; |
1177 | 1174 | ||
1178 | static int spufs_signal2_open(struct inode *inode, struct file *file) | 1175 | static int spufs_signal2_open(struct inode *inode, struct file *file) |
@@ -1305,6 +1302,7 @@ static const struct file_operations spufs_signal2_fops = { | |||
1305 | .read = spufs_signal2_read, | 1302 | .read = spufs_signal2_read, |
1306 | .write = spufs_signal2_write, | 1303 | .write = spufs_signal2_write, |
1307 | .mmap = spufs_signal2_mmap, | 1304 | .mmap = spufs_signal2_mmap, |
1305 | .llseek = no_llseek, | ||
1308 | }; | 1306 | }; |
1309 | 1307 | ||
1310 | static const struct file_operations spufs_signal2_nosched_fops = { | 1308 | static const struct file_operations spufs_signal2_nosched_fops = { |
@@ -1312,6 +1310,7 @@ static const struct file_operations spufs_signal2_nosched_fops = { | |||
1312 | .release = spufs_signal2_release, | 1310 | .release = spufs_signal2_release, |
1313 | .write = spufs_signal2_write, | 1311 | .write = spufs_signal2_write, |
1314 | .mmap = spufs_signal2_mmap, | 1312 | .mmap = spufs_signal2_mmap, |
1313 | .llseek = no_llseek, | ||
1315 | }; | 1314 | }; |
1316 | 1315 | ||
1317 | /* | 1316 | /* |
@@ -1451,6 +1450,7 @@ static const struct file_operations spufs_mss_fops = { | |||
1451 | .open = spufs_mss_open, | 1450 | .open = spufs_mss_open, |
1452 | .release = spufs_mss_release, | 1451 | .release = spufs_mss_release, |
1453 | .mmap = spufs_mss_mmap, | 1452 | .mmap = spufs_mss_mmap, |
1453 | .llseek = no_llseek, | ||
1454 | }; | 1454 | }; |
1455 | 1455 | ||
1456 | static int | 1456 | static int |
@@ -1508,6 +1508,7 @@ static const struct file_operations spufs_psmap_fops = { | |||
1508 | .open = spufs_psmap_open, | 1508 | .open = spufs_psmap_open, |
1509 | .release = spufs_psmap_release, | 1509 | .release = spufs_psmap_release, |
1510 | .mmap = spufs_psmap_mmap, | 1510 | .mmap = spufs_psmap_mmap, |
1511 | .llseek = no_llseek, | ||
1511 | }; | 1512 | }; |
1512 | 1513 | ||
1513 | 1514 | ||
@@ -1871,6 +1872,7 @@ static const struct file_operations spufs_mfc_fops = { | |||
1871 | .fsync = spufs_mfc_fsync, | 1872 | .fsync = spufs_mfc_fsync, |
1872 | .fasync = spufs_mfc_fasync, | 1873 | .fasync = spufs_mfc_fasync, |
1873 | .mmap = spufs_mfc_mmap, | 1874 | .mmap = spufs_mfc_mmap, |
1875 | .llseek = no_llseek, | ||
1874 | }; | 1876 | }; |
1875 | 1877 | ||
1876 | static int spufs_npc_set(void *data, u64 val) | 1878 | static int spufs_npc_set(void *data, u64 val) |
@@ -2246,6 +2248,7 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, | |||
2246 | static const struct file_operations spufs_dma_info_fops = { | 2248 | static const struct file_operations spufs_dma_info_fops = { |
2247 | .open = spufs_info_open, | 2249 | .open = spufs_info_open, |
2248 | .read = spufs_dma_info_read, | 2250 | .read = spufs_dma_info_read, |
2251 | .llseek = no_llseek, | ||
2249 | }; | 2252 | }; |
2250 | 2253 | ||
2251 | static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx, | 2254 | static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx, |
@@ -2299,6 +2302,7 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf, | |||
2299 | static const struct file_operations spufs_proxydma_info_fops = { | 2302 | static const struct file_operations spufs_proxydma_info_fops = { |
2300 | .open = spufs_info_open, | 2303 | .open = spufs_info_open, |
2301 | .read = spufs_proxydma_info_read, | 2304 | .read = spufs_proxydma_info_read, |
2305 | .llseek = no_llseek, | ||
2302 | }; | 2306 | }; |
2303 | 2307 | ||
2304 | static int spufs_show_tid(struct seq_file *s, void *private) | 2308 | static int spufs_show_tid(struct seq_file *s, void *private) |
@@ -2585,6 +2589,7 @@ static const struct file_operations spufs_switch_log_fops = { | |||
2585 | .read = spufs_switch_log_read, | 2589 | .read = spufs_switch_log_read, |
2586 | .poll = spufs_switch_log_poll, | 2590 | .poll = spufs_switch_log_poll, |
2587 | .release = spufs_switch_log_release, | 2591 | .release = spufs_switch_log_release, |
2592 | .llseek = no_llseek, | ||
2588 | }; | 2593 | }; |
2589 | 2594 | ||
2590 | /** | 2595 | /** |
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 5dec408d6703..856e9c398068 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c | |||
@@ -71,12 +71,18 @@ spufs_alloc_inode(struct super_block *sb) | |||
71 | return &ei->vfs_inode; | 71 | return &ei->vfs_inode; |
72 | } | 72 | } |
73 | 73 | ||
74 | static void | 74 | static void spufs_i_callback(struct rcu_head *head) |
75 | spufs_destroy_inode(struct inode *inode) | ||
76 | { | 75 | { |
76 | struct inode *inode = container_of(head, struct inode, i_rcu); | ||
77 | INIT_LIST_HEAD(&inode->i_dentry); | ||
77 | kmem_cache_free(spufs_inode_cache, SPUFS_I(inode)); | 78 | kmem_cache_free(spufs_inode_cache, SPUFS_I(inode)); |
78 | } | 79 | } |
79 | 80 | ||
81 | static void spufs_destroy_inode(struct inode *inode) | ||
82 | { | ||
83 | call_rcu(&inode->i_rcu, spufs_i_callback); | ||
84 | } | ||
85 | |||
80 | static void | 86 | static void |
81 | spufs_init_once(void *p) | 87 | spufs_init_once(void *p) |
82 | { | 88 | { |
@@ -159,18 +165,18 @@ static void spufs_prune_dir(struct dentry *dir) | |||
159 | 165 | ||
160 | mutex_lock(&dir->d_inode->i_mutex); | 166 | mutex_lock(&dir->d_inode->i_mutex); |
161 | list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) { | 167 | list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) { |
162 | spin_lock(&dcache_lock); | ||
163 | spin_lock(&dentry->d_lock); | 168 | spin_lock(&dentry->d_lock); |
164 | if (!(d_unhashed(dentry)) && dentry->d_inode) { | 169 | if (!(d_unhashed(dentry)) && dentry->d_inode) { |
165 | dget_locked(dentry); | 170 | dget_dlock(dentry); |
166 | __d_drop(dentry); | 171 | __d_drop(dentry); |
167 | spin_unlock(&dentry->d_lock); | 172 | spin_unlock(&dentry->d_lock); |
168 | simple_unlink(dir->d_inode, dentry); | 173 | simple_unlink(dir->d_inode, dentry); |
169 | spin_unlock(&dcache_lock); | 174 | /* XXX: what was dcache_lock protecting here? Other |
175 | * filesystems (IB, configfs) release dcache_lock | ||
176 | * before unlink */ | ||
170 | dput(dentry); | 177 | dput(dentry); |
171 | } else { | 178 | } else { |
172 | spin_unlock(&dentry->d_lock); | 179 | spin_unlock(&dentry->d_lock); |
173 | spin_unlock(&dcache_lock); | ||
174 | } | 180 | } |
175 | } | 181 | } |
176 | shrink_dcache_parent(dir); | 182 | shrink_dcache_parent(dir); |
@@ -798,17 +804,17 @@ spufs_fill_super(struct super_block *sb, void *data, int silent) | |||
798 | return spufs_create_root(sb, data); | 804 | return spufs_create_root(sb, data); |
799 | } | 805 | } |
800 | 806 | ||
801 | static int | 807 | static struct dentry * |
802 | spufs_get_sb(struct file_system_type *fstype, int flags, | 808 | spufs_mount(struct file_system_type *fstype, int flags, |
803 | const char *name, void *data, struct vfsmount *mnt) | 809 | const char *name, void *data) |
804 | { | 810 | { |
805 | return get_sb_single(fstype, flags, data, spufs_fill_super, mnt); | 811 | return mount_single(fstype, flags, data, spufs_fill_super); |
806 | } | 812 | } |
807 | 813 | ||
808 | static struct file_system_type spufs_type = { | 814 | static struct file_system_type spufs_type = { |
809 | .owner = THIS_MODULE, | 815 | .owner = THIS_MODULE, |
810 | .name = "spufs", | 816 | .name = "spufs", |
811 | .get_sb = spufs_get_sb, | 817 | .mount = spufs_mount, |
812 | .kill_sb = kill_litter_super, | 818 | .kill_sb = kill_litter_super, |
813 | }; | 819 | }; |
814 | 820 | ||
diff --git a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c index a101abf17504..147069938cfe 100644 --- a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c +++ b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c | |||
@@ -36,10 +36,9 @@ static int spu_alloc_lscsa_std(struct spu_state *csa) | |||
36 | struct spu_lscsa *lscsa; | 36 | struct spu_lscsa *lscsa; |
37 | unsigned char *p; | 37 | unsigned char *p; |
38 | 38 | ||
39 | lscsa = vmalloc(sizeof(struct spu_lscsa)); | 39 | lscsa = vzalloc(sizeof(struct spu_lscsa)); |
40 | if (!lscsa) | 40 | if (!lscsa) |
41 | return -ENOMEM; | 41 | return -ENOMEM; |
42 | memset(lscsa, 0, sizeof(struct spu_lscsa)); | ||
43 | csa->lscsa = lscsa; | 42 | csa->lscsa = lscsa; |
44 | 43 | ||
45 | /* Set LS pages reserved to allow for user-space mapping. */ | 44 | /* Set LS pages reserved to allow for user-space mapping. */ |
@@ -91,7 +90,7 @@ int spu_alloc_lscsa(struct spu_state *csa) | |||
91 | */ | 90 | */ |
92 | for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++) { | 91 | for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++) { |
93 | /* XXX This is likely to fail, we should use a special pool | 92 | /* XXX This is likely to fail, we should use a special pool |
94 | * similiar to what hugetlbfs does. | 93 | * similar to what hugetlbfs does. |
95 | */ | 94 | */ |
96 | csa->lscsa_pages[i] = alloc_pages(GFP_KERNEL, | 95 | csa->lscsa_pages[i] = alloc_pages(GFP_KERNEL, |
97 | SPU_64K_PAGE_ORDER); | 96 | SPU_64K_PAGE_ORDER); |
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 0b0466284932..32cb4e66d2cd 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c | |||
@@ -141,7 +141,7 @@ void __spu_update_sched_info(struct spu_context *ctx) | |||
141 | * runqueue. The context will be rescheduled on the proper node | 141 | * runqueue. The context will be rescheduled on the proper node |
142 | * if it is timesliced or preempted. | 142 | * if it is timesliced or preempted. |
143 | */ | 143 | */ |
144 | ctx->cpus_allowed = current->cpus_allowed; | 144 | cpumask_copy(&ctx->cpus_allowed, tsk_cpus_allowed(current)); |
145 | 145 | ||
146 | /* Save the current cpu id for spu interrupt routing. */ | 146 | /* Save the current cpu id for spu interrupt routing. */ |
147 | ctx->last_ran = raw_smp_processor_id(); | 147 | ctx->last_ran = raw_smp_processor_id(); |
@@ -846,7 +846,7 @@ static struct spu_context *grab_runnable_context(int prio, int node) | |||
846 | struct list_head *rq = &spu_prio->runq[best]; | 846 | struct list_head *rq = &spu_prio->runq[best]; |
847 | 847 | ||
848 | list_for_each_entry(ctx, rq, rq) { | 848 | list_for_each_entry(ctx, rq, rq) { |
849 | /* XXX(hch): check for affinity here aswell */ | 849 | /* XXX(hch): check for affinity here as well */ |
850 | if (__node_allowed(ctx, node)) { | 850 | if (__node_allowed(ctx, node)) { |
851 | __spu_del_from_rq(ctx); | 851 | __spu_del_from_rq(ctx); |
852 | goto found; | 852 | goto found; |
diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore.c b/arch/powerpc/platforms/cell/spufs/spu_restore.c index 21a9c952d88b..72c905f1ee7a 100644 --- a/arch/powerpc/platforms/cell/spufs/spu_restore.c +++ b/arch/powerpc/platforms/cell/spufs/spu_restore.c | |||
@@ -284,7 +284,7 @@ static inline void restore_complete(void) | |||
284 | exit_instrs[3] = BR_INSTR; | 284 | exit_instrs[3] = BR_INSTR; |
285 | break; | 285 | break; |
286 | default: | 286 | default: |
287 | /* SPU_Status[R]=1. No additonal instructions. */ | 287 | /* SPU_Status[R]=1. No additional instructions. */ |
288 | break; | 288 | break; |
289 | } | 289 | } |
290 | spu_sync(); | 290 | spu_sync(); |
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c index 187a7d32f86a..a3d2ce54ea2e 100644 --- a/arch/powerpc/platforms/cell/spufs/syscalls.c +++ b/arch/powerpc/platforms/cell/spufs/syscalls.c | |||
@@ -70,7 +70,7 @@ static long do_spu_create(const char __user *pathname, unsigned int flags, | |||
70 | if (!IS_ERR(tmp)) { | 70 | if (!IS_ERR(tmp)) { |
71 | struct nameidata nd; | 71 | struct nameidata nd; |
72 | 72 | ||
73 | ret = path_lookup(tmp, LOOKUP_PARENT, &nd); | 73 | ret = kern_path_parent(tmp, &nd); |
74 | if (!ret) { | 74 | if (!ret) { |
75 | nd.flags |= LOOKUP_OPEN | LOOKUP_CREATE; | 75 | nd.flags |= LOOKUP_OPEN | LOOKUP_CREATE; |
76 | ret = spufs_create(&nd, flags, mode, neighbor); | 76 | ret = spufs_create(&nd, flags, mode, neighbor); |
diff --git a/arch/powerpc/platforms/chrp/nvram.c b/arch/powerpc/platforms/chrp/nvram.c index ba3588f2d8e0..d3ceff04ffc7 100644 --- a/arch/powerpc/platforms/chrp/nvram.c +++ b/arch/powerpc/platforms/chrp/nvram.c | |||
@@ -74,8 +74,10 @@ void __init chrp_nvram_init(void) | |||
74 | return; | 74 | return; |
75 | 75 | ||
76 | nbytes_p = of_get_property(nvram, "#bytes", &proplen); | 76 | nbytes_p = of_get_property(nvram, "#bytes", &proplen); |
77 | if (nbytes_p == NULL || proplen != sizeof(unsigned int)) | 77 | if (nbytes_p == NULL || proplen != sizeof(unsigned int)) { |
78 | of_node_put(nvram); | ||
78 | return; | 79 | return; |
80 | } | ||
79 | 81 | ||
80 | nvram_size = *nbytes_p; | 82 | nvram_size = *nbytes_p; |
81 | 83 | ||
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index 8553cc49e0d6..122786498419 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c | |||
@@ -365,10 +365,13 @@ void __init chrp_setup_arch(void) | |||
365 | 365 | ||
366 | static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc) | 366 | static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc) |
367 | { | 367 | { |
368 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
368 | unsigned int cascade_irq = i8259_irq(); | 369 | unsigned int cascade_irq = i8259_irq(); |
370 | |||
369 | if (cascade_irq != NO_IRQ) | 371 | if (cascade_irq != NO_IRQ) |
370 | generic_handle_irq(cascade_irq); | 372 | generic_handle_irq(cascade_irq); |
371 | desc->chip->eoi(irq); | 373 | |
374 | chip->irq_eoi(&desc->irq_data); | ||
372 | } | 375 | } |
373 | 376 | ||
374 | /* | 377 | /* |
@@ -514,7 +517,7 @@ static void __init chrp_find_8259(void) | |||
514 | if (cascade_irq == NO_IRQ) | 517 | if (cascade_irq == NO_IRQ) |
515 | printk(KERN_ERR "i8259: failed to map cascade irq\n"); | 518 | printk(KERN_ERR "i8259: failed to map cascade irq\n"); |
516 | else | 519 | else |
517 | set_irq_chained_handler(cascade_irq, | 520 | irq_set_chained_handler(cascade_irq, |
518 | chrp_8259_cascade); | 521 | chrp_8259_cascade); |
519 | } | 522 | } |
520 | } | 523 | } |
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c index 02cafecc90e3..a800122e4dda 100644 --- a/arch/powerpc/platforms/chrp/smp.c +++ b/arch/powerpc/platforms/chrp/smp.c | |||
@@ -30,10 +30,12 @@ | |||
30 | #include <asm/mpic.h> | 30 | #include <asm/mpic.h> |
31 | #include <asm/rtas.h> | 31 | #include <asm/rtas.h> |
32 | 32 | ||
33 | static void __devinit smp_chrp_kick_cpu(int nr) | 33 | static int __devinit smp_chrp_kick_cpu(int nr) |
34 | { | 34 | { |
35 | *(unsigned long *)KERNELBASE = nr; | 35 | *(unsigned long *)KERNELBASE = nr; |
36 | asm volatile("dcbf 0,%0"::"r"(KERNELBASE):"memory"); | 36 | asm volatile("dcbf 0,%0"::"r"(KERNELBASE):"memory"); |
37 | |||
38 | return 0; | ||
37 | } | 39 | } |
38 | 40 | ||
39 | static void __devinit smp_chrp_setup_cpu(int cpu_nr) | 41 | static void __devinit smp_chrp_setup_cpu(int cpu_nr) |
diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c index 054dfe5b8e77..f803f4b8ab6f 100644 --- a/arch/powerpc/platforms/chrp/time.c +++ b/arch/powerpc/platforms/chrp/time.c | |||
@@ -29,6 +29,10 @@ | |||
29 | 29 | ||
30 | extern spinlock_t rtc_lock; | 30 | extern spinlock_t rtc_lock; |
31 | 31 | ||
32 | #define NVRAM_AS0 0x74 | ||
33 | #define NVRAM_AS1 0x75 | ||
34 | #define NVRAM_DATA 0x77 | ||
35 | |||
32 | static int nvram_as1 = NVRAM_AS1; | 36 | static int nvram_as1 = NVRAM_AS1; |
33 | static int nvram_as0 = NVRAM_AS0; | 37 | static int nvram_as0 = NVRAM_AS0; |
34 | static int nvram_data = NVRAM_DATA; | 38 | static int nvram_data = NVRAM_DATA; |
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c index c278bd3a8fec..f61a2dd96b99 100644 --- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c +++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c | |||
@@ -46,10 +46,10 @@ | |||
46 | * | 46 | * |
47 | */ | 47 | */ |
48 | 48 | ||
49 | static void flipper_pic_mask_and_ack(unsigned int virq) | 49 | static void flipper_pic_mask_and_ack(struct irq_data *d) |
50 | { | 50 | { |
51 | int irq = virq_to_hw(virq); | 51 | int irq = irqd_to_hwirq(d); |
52 | void __iomem *io_base = get_irq_chip_data(virq); | 52 | void __iomem *io_base = irq_data_get_irq_chip_data(d); |
53 | u32 mask = 1 << irq; | 53 | u32 mask = 1 << irq; |
54 | 54 | ||
55 | clrbits32(io_base + FLIPPER_IMR, mask); | 55 | clrbits32(io_base + FLIPPER_IMR, mask); |
@@ -57,27 +57,27 @@ static void flipper_pic_mask_and_ack(unsigned int virq) | |||
57 | out_be32(io_base + FLIPPER_ICR, mask); | 57 | out_be32(io_base + FLIPPER_ICR, mask); |
58 | } | 58 | } |
59 | 59 | ||
60 | static void flipper_pic_ack(unsigned int virq) | 60 | static void flipper_pic_ack(struct irq_data *d) |
61 | { | 61 | { |
62 | int irq = virq_to_hw(virq); | 62 | int irq = irqd_to_hwirq(d); |
63 | void __iomem *io_base = get_irq_chip_data(virq); | 63 | void __iomem *io_base = irq_data_get_irq_chip_data(d); |
64 | 64 | ||
65 | /* this is at least needed for RSW */ | 65 | /* this is at least needed for RSW */ |
66 | out_be32(io_base + FLIPPER_ICR, 1 << irq); | 66 | out_be32(io_base + FLIPPER_ICR, 1 << irq); |
67 | } | 67 | } |
68 | 68 | ||
69 | static void flipper_pic_mask(unsigned int virq) | 69 | static void flipper_pic_mask(struct irq_data *d) |
70 | { | 70 | { |
71 | int irq = virq_to_hw(virq); | 71 | int irq = irqd_to_hwirq(d); |
72 | void __iomem *io_base = get_irq_chip_data(virq); | 72 | void __iomem *io_base = irq_data_get_irq_chip_data(d); |
73 | 73 | ||
74 | clrbits32(io_base + FLIPPER_IMR, 1 << irq); | 74 | clrbits32(io_base + FLIPPER_IMR, 1 << irq); |
75 | } | 75 | } |
76 | 76 | ||
77 | static void flipper_pic_unmask(unsigned int virq) | 77 | static void flipper_pic_unmask(struct irq_data *d) |
78 | { | 78 | { |
79 | int irq = virq_to_hw(virq); | 79 | int irq = irqd_to_hwirq(d); |
80 | void __iomem *io_base = get_irq_chip_data(virq); | 80 | void __iomem *io_base = irq_data_get_irq_chip_data(d); |
81 | 81 | ||
82 | setbits32(io_base + FLIPPER_IMR, 1 << irq); | 82 | setbits32(io_base + FLIPPER_IMR, 1 << irq); |
83 | } | 83 | } |
@@ -85,10 +85,10 @@ static void flipper_pic_unmask(unsigned int virq) | |||
85 | 85 | ||
86 | static struct irq_chip flipper_pic = { | 86 | static struct irq_chip flipper_pic = { |
87 | .name = "flipper-pic", | 87 | .name = "flipper-pic", |
88 | .ack = flipper_pic_ack, | 88 | .irq_ack = flipper_pic_ack, |
89 | .mask_ack = flipper_pic_mask_and_ack, | 89 | .irq_mask_ack = flipper_pic_mask_and_ack, |
90 | .mask = flipper_pic_mask, | 90 | .irq_mask = flipper_pic_mask, |
91 | .unmask = flipper_pic_unmask, | 91 | .irq_unmask = flipper_pic_unmask, |
92 | }; | 92 | }; |
93 | 93 | ||
94 | /* | 94 | /* |
@@ -101,18 +101,12 @@ static struct irq_host *flipper_irq_host; | |||
101 | static int flipper_pic_map(struct irq_host *h, unsigned int virq, | 101 | static int flipper_pic_map(struct irq_host *h, unsigned int virq, |
102 | irq_hw_number_t hwirq) | 102 | irq_hw_number_t hwirq) |
103 | { | 103 | { |
104 | set_irq_chip_data(virq, h->host_data); | 104 | irq_set_chip_data(virq, h->host_data); |
105 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 105 | irq_set_status_flags(virq, IRQ_LEVEL); |
106 | set_irq_chip_and_handler(virq, &flipper_pic, handle_level_irq); | 106 | irq_set_chip_and_handler(virq, &flipper_pic, handle_level_irq); |
107 | return 0; | 107 | return 0; |
108 | } | 108 | } |
109 | 109 | ||
110 | static void flipper_pic_unmap(struct irq_host *h, unsigned int irq) | ||
111 | { | ||
112 | set_irq_chip_data(irq, NULL); | ||
113 | set_irq_chip(irq, NULL); | ||
114 | } | ||
115 | |||
116 | static int flipper_pic_match(struct irq_host *h, struct device_node *np) | 110 | static int flipper_pic_match(struct irq_host *h, struct device_node *np) |
117 | { | 111 | { |
118 | return 1; | 112 | return 1; |
@@ -121,7 +115,6 @@ static int flipper_pic_match(struct irq_host *h, struct device_node *np) | |||
121 | 115 | ||
122 | static struct irq_host_ops flipper_irq_host_ops = { | 116 | static struct irq_host_ops flipper_irq_host_ops = { |
123 | .map = flipper_pic_map, | 117 | .map = flipper_pic_map, |
124 | .unmap = flipper_pic_unmap, | ||
125 | .match = flipper_pic_match, | 118 | .match = flipper_pic_match, |
126 | }; | 119 | }; |
127 | 120 | ||
diff --git a/arch/powerpc/platforms/embedded6xx/gamecube.c b/arch/powerpc/platforms/embedded6xx/gamecube.c index 1106fd99627f..a138e14bad2e 100644 --- a/arch/powerpc/platforms/embedded6xx/gamecube.c +++ b/arch/powerpc/platforms/embedded6xx/gamecube.c | |||
@@ -75,14 +75,6 @@ static void gamecube_shutdown(void) | |||
75 | flipper_quiesce(); | 75 | flipper_quiesce(); |
76 | } | 76 | } |
77 | 77 | ||
78 | #ifdef CONFIG_KEXEC | ||
79 | static int gamecube_kexec_prepare(struct kimage *image) | ||
80 | { | ||
81 | return 0; | ||
82 | } | ||
83 | #endif /* CONFIG_KEXEC */ | ||
84 | |||
85 | |||
86 | define_machine(gamecube) { | 78 | define_machine(gamecube) { |
87 | .name = "gamecube", | 79 | .name = "gamecube", |
88 | .probe = gamecube_probe, | 80 | .probe = gamecube_probe, |
@@ -95,9 +87,6 @@ define_machine(gamecube) { | |||
95 | .calibrate_decr = generic_calibrate_decr, | 87 | .calibrate_decr = generic_calibrate_decr, |
96 | .progress = udbg_progress, | 88 | .progress = udbg_progress, |
97 | .machine_shutdown = gamecube_shutdown, | 89 | .machine_shutdown = gamecube_shutdown, |
98 | #ifdef CONFIG_KEXEC | ||
99 | .machine_kexec_prepare = gamecube_kexec_prepare, | ||
100 | #endif | ||
101 | }; | 90 | }; |
102 | 91 | ||
103 | 92 | ||
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c index a771f91e215b..e4919170c6bc 100644 --- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c +++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c | |||
@@ -41,36 +41,36 @@ | |||
41 | * | 41 | * |
42 | */ | 42 | */ |
43 | 43 | ||
44 | static void hlwd_pic_mask_and_ack(unsigned int virq) | 44 | static void hlwd_pic_mask_and_ack(struct irq_data *d) |
45 | { | 45 | { |
46 | int irq = virq_to_hw(virq); | 46 | int irq = irqd_to_hwirq(d); |
47 | void __iomem *io_base = get_irq_chip_data(virq); | 47 | void __iomem *io_base = irq_data_get_irq_chip_data(d); |
48 | u32 mask = 1 << irq; | 48 | u32 mask = 1 << irq; |
49 | 49 | ||
50 | clrbits32(io_base + HW_BROADWAY_IMR, mask); | 50 | clrbits32(io_base + HW_BROADWAY_IMR, mask); |
51 | out_be32(io_base + HW_BROADWAY_ICR, mask); | 51 | out_be32(io_base + HW_BROADWAY_ICR, mask); |
52 | } | 52 | } |
53 | 53 | ||
54 | static void hlwd_pic_ack(unsigned int virq) | 54 | static void hlwd_pic_ack(struct irq_data *d) |
55 | { | 55 | { |
56 | int irq = virq_to_hw(virq); | 56 | int irq = irqd_to_hwirq(d); |
57 | void __iomem *io_base = get_irq_chip_data(virq); | 57 | void __iomem *io_base = irq_data_get_irq_chip_data(d); |
58 | 58 | ||
59 | out_be32(io_base + HW_BROADWAY_ICR, 1 << irq); | 59 | out_be32(io_base + HW_BROADWAY_ICR, 1 << irq); |
60 | } | 60 | } |
61 | 61 | ||
62 | static void hlwd_pic_mask(unsigned int virq) | 62 | static void hlwd_pic_mask(struct irq_data *d) |
63 | { | 63 | { |
64 | int irq = virq_to_hw(virq); | 64 | int irq = irqd_to_hwirq(d); |
65 | void __iomem *io_base = get_irq_chip_data(virq); | 65 | void __iomem *io_base = irq_data_get_irq_chip_data(d); |
66 | 66 | ||
67 | clrbits32(io_base + HW_BROADWAY_IMR, 1 << irq); | 67 | clrbits32(io_base + HW_BROADWAY_IMR, 1 << irq); |
68 | } | 68 | } |
69 | 69 | ||
70 | static void hlwd_pic_unmask(unsigned int virq) | 70 | static void hlwd_pic_unmask(struct irq_data *d) |
71 | { | 71 | { |
72 | int irq = virq_to_hw(virq); | 72 | int irq = irqd_to_hwirq(d); |
73 | void __iomem *io_base = get_irq_chip_data(virq); | 73 | void __iomem *io_base = irq_data_get_irq_chip_data(d); |
74 | 74 | ||
75 | setbits32(io_base + HW_BROADWAY_IMR, 1 << irq); | 75 | setbits32(io_base + HW_BROADWAY_IMR, 1 << irq); |
76 | } | 76 | } |
@@ -78,10 +78,10 @@ static void hlwd_pic_unmask(unsigned int virq) | |||
78 | 78 | ||
79 | static struct irq_chip hlwd_pic = { | 79 | static struct irq_chip hlwd_pic = { |
80 | .name = "hlwd-pic", | 80 | .name = "hlwd-pic", |
81 | .ack = hlwd_pic_ack, | 81 | .irq_ack = hlwd_pic_ack, |
82 | .mask_ack = hlwd_pic_mask_and_ack, | 82 | .irq_mask_ack = hlwd_pic_mask_and_ack, |
83 | .mask = hlwd_pic_mask, | 83 | .irq_mask = hlwd_pic_mask, |
84 | .unmask = hlwd_pic_unmask, | 84 | .irq_unmask = hlwd_pic_unmask, |
85 | }; | 85 | }; |
86 | 86 | ||
87 | /* | 87 | /* |
@@ -94,21 +94,14 @@ static struct irq_host *hlwd_irq_host; | |||
94 | static int hlwd_pic_map(struct irq_host *h, unsigned int virq, | 94 | static int hlwd_pic_map(struct irq_host *h, unsigned int virq, |
95 | irq_hw_number_t hwirq) | 95 | irq_hw_number_t hwirq) |
96 | { | 96 | { |
97 | set_irq_chip_data(virq, h->host_data); | 97 | irq_set_chip_data(virq, h->host_data); |
98 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 98 | irq_set_status_flags(virq, IRQ_LEVEL); |
99 | set_irq_chip_and_handler(virq, &hlwd_pic, handle_level_irq); | 99 | irq_set_chip_and_handler(virq, &hlwd_pic, handle_level_irq); |
100 | return 0; | 100 | return 0; |
101 | } | 101 | } |
102 | 102 | ||
103 | static void hlwd_pic_unmap(struct irq_host *h, unsigned int irq) | ||
104 | { | ||
105 | set_irq_chip_data(irq, NULL); | ||
106 | set_irq_chip(irq, NULL); | ||
107 | } | ||
108 | |||
109 | static struct irq_host_ops hlwd_irq_host_ops = { | 103 | static struct irq_host_ops hlwd_irq_host_ops = { |
110 | .map = hlwd_pic_map, | 104 | .map = hlwd_pic_map, |
111 | .unmap = hlwd_pic_unmap, | ||
112 | }; | 105 | }; |
113 | 106 | ||
114 | static unsigned int __hlwd_pic_get_irq(struct irq_host *h) | 107 | static unsigned int __hlwd_pic_get_irq(struct irq_host *h) |
@@ -129,11 +122,12 @@ static unsigned int __hlwd_pic_get_irq(struct irq_host *h) | |||
129 | static void hlwd_pic_irq_cascade(unsigned int cascade_virq, | 122 | static void hlwd_pic_irq_cascade(unsigned int cascade_virq, |
130 | struct irq_desc *desc) | 123 | struct irq_desc *desc) |
131 | { | 124 | { |
132 | struct irq_host *irq_host = get_irq_data(cascade_virq); | 125 | struct irq_chip *chip = irq_desc_get_chip(desc); |
126 | struct irq_host *irq_host = irq_get_handler_data(cascade_virq); | ||
133 | unsigned int virq; | 127 | unsigned int virq; |
134 | 128 | ||
135 | raw_spin_lock(&desc->lock); | 129 | raw_spin_lock(&desc->lock); |
136 | desc->chip->mask(cascade_virq); /* IRQ_LEVEL */ | 130 | chip->irq_mask(&desc->irq_data); /* IRQ_LEVEL */ |
137 | raw_spin_unlock(&desc->lock); | 131 | raw_spin_unlock(&desc->lock); |
138 | 132 | ||
139 | virq = __hlwd_pic_get_irq(irq_host); | 133 | virq = __hlwd_pic_get_irq(irq_host); |
@@ -143,9 +137,9 @@ static void hlwd_pic_irq_cascade(unsigned int cascade_virq, | |||
143 | pr_err("spurious interrupt!\n"); | 137 | pr_err("spurious interrupt!\n"); |
144 | 138 | ||
145 | raw_spin_lock(&desc->lock); | 139 | raw_spin_lock(&desc->lock); |
146 | desc->chip->ack(cascade_virq); /* IRQ_LEVEL */ | 140 | chip->irq_ack(&desc->irq_data); /* IRQ_LEVEL */ |
147 | if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) | 141 | if (!irqd_irq_disabled(&desc->irq_data) && chip->irq_unmask) |
148 | desc->chip->unmask(cascade_virq); | 142 | chip->irq_unmask(&desc->irq_data); |
149 | raw_spin_unlock(&desc->lock); | 143 | raw_spin_unlock(&desc->lock); |
150 | } | 144 | } |
151 | 145 | ||
@@ -217,8 +211,8 @@ void hlwd_pic_probe(void) | |||
217 | host = hlwd_pic_init(np); | 211 | host = hlwd_pic_init(np); |
218 | BUG_ON(!host); | 212 | BUG_ON(!host); |
219 | cascade_virq = irq_of_parse_and_map(np, 0); | 213 | cascade_virq = irq_of_parse_and_map(np, 0); |
220 | set_irq_data(cascade_virq, host); | 214 | irq_set_handler_data(cascade_virq, host); |
221 | set_irq_chained_handler(cascade_virq, | 215 | irq_set_chained_handler(cascade_virq, |
222 | hlwd_pic_irq_cascade); | 216 | hlwd_pic_irq_cascade); |
223 | hlwd_irq_host = host; | 217 | hlwd_irq_host = host; |
224 | break; | 218 | break; |
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c index b21fde589ca7..487bda0d18d8 100644 --- a/arch/powerpc/platforms/embedded6xx/holly.c +++ b/arch/powerpc/platforms/embedded6xx/holly.c | |||
@@ -198,8 +198,8 @@ static void __init holly_init_IRQ(void) | |||
198 | cascade_pci_irq = irq_of_parse_and_map(tsi_pci, 0); | 198 | cascade_pci_irq = irq_of_parse_and_map(tsi_pci, 0); |
199 | pr_debug("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__, (u32) cascade_pci_irq); | 199 | pr_debug("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__, (u32) cascade_pci_irq); |
200 | tsi108_pci_int_init(cascade_node); | 200 | tsi108_pci_int_init(cascade_node); |
201 | set_irq_data(cascade_pci_irq, mpic); | 201 | irq_set_handler_data(cascade_pci_irq, mpic); |
202 | set_irq_chained_handler(cascade_pci_irq, tsi108_irq_cascade); | 202 | irq_set_chained_handler(cascade_pci_irq, tsi108_irq_cascade); |
203 | #endif | 203 | #endif |
204 | /* Configure MPIC outputs to CPU0 */ | 204 | /* Configure MPIC outputs to CPU0 */ |
205 | tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0); | 205 | tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0); |
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c index 7a2ba39d7811..1cb907c94359 100644 --- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c +++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c | |||
@@ -153,8 +153,8 @@ static void __init mpc7448_hpc2_init_IRQ(void) | |||
153 | DBG("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__, | 153 | DBG("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__, |
154 | (u32) cascade_pci_irq); | 154 | (u32) cascade_pci_irq); |
155 | tsi108_pci_int_init(cascade_node); | 155 | tsi108_pci_int_init(cascade_node); |
156 | set_irq_data(cascade_pci_irq, mpic); | 156 | irq_set_handler_data(cascade_pci_irq, mpic); |
157 | set_irq_chained_handler(cascade_pci_irq, tsi108_irq_cascade); | 157 | irq_set_chained_handler(cascade_pci_irq, tsi108_irq_cascade); |
158 | #endif | 158 | #endif |
159 | /* Configure MPIC outputs to CPU0 */ | 159 | /* Configure MPIC outputs to CPU0 */ |
160 | tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0); | 160 | tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0); |
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c index 5cdcc7c8d973..1b5dc1a2e145 100644 --- a/arch/powerpc/platforms/embedded6xx/wii.c +++ b/arch/powerpc/platforms/embedded6xx/wii.c | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/irq.h> | 19 | #include <linux/irq.h> |
20 | #include <linux/seq_file.h> | 20 | #include <linux/seq_file.h> |
21 | #include <linux/kexec.h> | ||
22 | #include <linux/of_platform.h> | 21 | #include <linux/of_platform.h> |
23 | #include <linux/memblock.h> | 22 | #include <linux/memblock.h> |
24 | #include <mm/mmu_decl.h> | 23 | #include <mm/mmu_decl.h> |
@@ -65,7 +64,7 @@ static int __init page_aligned(unsigned long x) | |||
65 | 64 | ||
66 | void __init wii_memory_fixups(void) | 65 | void __init wii_memory_fixups(void) |
67 | { | 66 | { |
68 | struct memblock_property *p = memblock.memory.region; | 67 | struct memblock_region *p = memblock.memory.regions; |
69 | 68 | ||
70 | /* | 69 | /* |
71 | * This is part of a workaround to allow the use of two | 70 | * This is part of a workaround to allow the use of two |
@@ -226,13 +225,6 @@ static void wii_shutdown(void) | |||
226 | flipper_quiesce(); | 225 | flipper_quiesce(); |
227 | } | 226 | } |
228 | 227 | ||
229 | #ifdef CONFIG_KEXEC | ||
230 | static int wii_machine_kexec_prepare(struct kimage *image) | ||
231 | { | ||
232 | return 0; | ||
233 | } | ||
234 | #endif /* CONFIG_KEXEC */ | ||
235 | |||
236 | define_machine(wii) { | 228 | define_machine(wii) { |
237 | .name = "wii", | 229 | .name = "wii", |
238 | .probe = wii_probe, | 230 | .probe = wii_probe, |
@@ -246,9 +238,6 @@ define_machine(wii) { | |||
246 | .calibrate_decr = generic_calibrate_decr, | 238 | .calibrate_decr = generic_calibrate_decr, |
247 | .progress = udbg_progress, | 239 | .progress = udbg_progress, |
248 | .machine_shutdown = wii_shutdown, | 240 | .machine_shutdown = wii_shutdown, |
249 | #ifdef CONFIG_KEXEC | ||
250 | .machine_kexec_prepare = wii_machine_kexec_prepare, | ||
251 | #endif | ||
252 | }; | 241 | }; |
253 | 242 | ||
254 | static struct of_device_id wii_of_bus[] = { | 243 | static struct of_device_id wii_of_bus[] = { |
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig index 47a20cfb4486..b57cda3a0817 100644 --- a/arch/powerpc/platforms/iseries/Kconfig +++ b/arch/powerpc/platforms/iseries/Kconfig | |||
@@ -1,8 +1,10 @@ | |||
1 | config PPC_ISERIES | 1 | config PPC_ISERIES |
2 | bool "IBM Legacy iSeries" | 2 | bool "IBM Legacy iSeries" |
3 | depends on PPC64 && PPC_BOOK3S | 3 | depends on PPC64 && PPC_BOOK3S |
4 | select PPC_INDIRECT_IO | 4 | select PPC_SMP_MUXED_IPI |
5 | select PPC_PCI_CHOICE if EMBEDDED | 5 | select PPC_INDIRECT_PIO |
6 | select PPC_INDIRECT_MMIO | ||
7 | select PPC_PCI_CHOICE if EXPERT | ||
6 | 8 | ||
7 | menu "iSeries device drivers" | 9 | menu "iSeries device drivers" |
8 | depends on PPC_ISERIES | 10 | depends on PPC_ISERIES |
diff --git a/arch/powerpc/platforms/iseries/Makefile b/arch/powerpc/platforms/iseries/Makefile index ce014928d460..a7602b11ed9d 100644 --- a/arch/powerpc/platforms/iseries/Makefile +++ b/arch/powerpc/platforms/iseries/Makefile | |||
@@ -1,4 +1,4 @@ | |||
1 | EXTRA_CFLAGS += -mno-minimal-toc | 1 | ccflags-y := -mno-minimal-toc |
2 | 2 | ||
3 | obj-y += exception.o | 3 | obj-y += exception.o |
4 | obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o dt.o mf.o lpevents.o \ | 4 | obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o dt.o mf.o lpevents.o \ |
diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c index 7f45a51fe793..f0491cc28900 100644 --- a/arch/powerpc/platforms/iseries/dt.c +++ b/arch/powerpc/platforms/iseries/dt.c | |||
@@ -242,7 +242,7 @@ static void __init dt_cpus(struct iseries_flat_dt *dt) | |||
242 | pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */ | 242 | pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */ |
243 | pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE); | 243 | pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE); |
244 | 244 | ||
245 | for (i = 0; i < NR_CPUS; i++) { | 245 | for (i = 0; i < NR_LPPACAS; i++) { |
246 | if (lppaca[i].dyn_proc_status >= 2) | 246 | if (lppaca[i].dyn_proc_status >= 2) |
247 | continue; | 247 | continue; |
248 | 248 | ||
diff --git a/arch/powerpc/platforms/iseries/exception.S b/arch/powerpc/platforms/iseries/exception.S index 32a56c6dfa72..29c02f36b32f 100644 --- a/arch/powerpc/platforms/iseries/exception.S +++ b/arch/powerpc/platforms/iseries/exception.S | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <asm/thread_info.h> | 31 | #include <asm/thread_info.h> |
32 | #include <asm/ptrace.h> | 32 | #include <asm/ptrace.h> |
33 | #include <asm/cputable.h> | 33 | #include <asm/cputable.h> |
34 | #include <asm/mmu.h> | ||
34 | 35 | ||
35 | #include "exception.h" | 36 | #include "exception.h" |
36 | 37 | ||
@@ -60,29 +61,31 @@ system_reset_iSeries: | |||
60 | /* Spin on __secondary_hold_spinloop until it is updated by the boot cpu. */ | 61 | /* Spin on __secondary_hold_spinloop until it is updated by the boot cpu. */ |
61 | /* In the UP case we'll yield() later, and we will not access the paca anyway */ | 62 | /* In the UP case we'll yield() later, and we will not access the paca anyway */ |
62 | #ifdef CONFIG_SMP | 63 | #ifdef CONFIG_SMP |
63 | 1: | 64 | iSeries_secondary_wait_paca: |
64 | HMT_LOW | 65 | HMT_LOW |
65 | LOAD_REG_ADDR(r23, __secondary_hold_spinloop) | 66 | LOAD_REG_ADDR(r23, __secondary_hold_spinloop) |
66 | ld r23,0(r23) | 67 | ld r23,0(r23) |
67 | sync | ||
68 | LOAD_REG_ADDR(r3,current_set) | ||
69 | sldi r28,r24,3 /* get current_set[cpu#] */ | ||
70 | ldx r3,r3,r28 | ||
71 | addi r1,r3,THREAD_SIZE | ||
72 | subi r1,r1,STACK_FRAME_OVERHEAD | ||
73 | 68 | ||
74 | cmpwi 0,r23,0 /* Keep poking the Hypervisor until */ | 69 | cmpdi 0,r23,0 |
75 | bne 2f /* we're released */ | 70 | bne 2f /* go on when the master is ready */ |
76 | /* Let the Hypervisor know we are alive */ | 71 | |
72 | /* Keep poking the Hypervisor until we're released */ | ||
77 | /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ | 73 | /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ |
78 | lis r3,0x8002 | 74 | lis r3,0x8002 |
79 | rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */ | 75 | rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */ |
80 | li r0,-1 /* r0=-1 indicates a Hypervisor call */ | 76 | li r0,-1 /* r0=-1 indicates a Hypervisor call */ |
81 | sc /* Invoke the hypervisor via a system call */ | 77 | sc /* Invoke the hypervisor via a system call */ |
82 | b 1b | 78 | b iSeries_secondary_wait_paca |
83 | #endif | ||
84 | 79 | ||
85 | 2: | 80 | 2: |
81 | HMT_MEDIUM | ||
82 | sync | ||
83 | |||
84 | LOAD_REG_ADDR(r3, nr_cpu_ids) /* get number of pacas allocated */ | ||
85 | lwz r3,0(r3) /* nr_cpus= or NR_CPUS can limit */ | ||
86 | cmpld 0,r24,r3 /* is our cpu number allocated? */ | ||
87 | bge iSeries_secondary_yield /* no, yield forever */ | ||
88 | |||
86 | /* Load our paca now that it's been allocated */ | 89 | /* Load our paca now that it's been allocated */ |
87 | LOAD_REG_ADDR(r13, paca) | 90 | LOAD_REG_ADDR(r13, paca) |
88 | ld r13,0(r13) | 91 | ld r13,0(r13) |
@@ -93,10 +96,24 @@ system_reset_iSeries: | |||
93 | ori r23,r23,MSR_RI | 96 | ori r23,r23,MSR_RI |
94 | mtmsrd r23 /* RI on */ | 97 | mtmsrd r23 /* RI on */ |
95 | 98 | ||
96 | HMT_LOW | 99 | iSeries_secondary_smp_loop: |
97 | #ifdef CONFIG_SMP | ||
98 | lbz r23,PACAPROCSTART(r13) /* Test if this processor | 100 | lbz r23,PACAPROCSTART(r13) /* Test if this processor |
99 | * should start */ | 101 | * should start */ |
102 | cmpwi 0,r23,0 | ||
103 | bne 3f /* go on when we are told */ | ||
104 | |||
105 | HMT_LOW | ||
106 | /* Let the Hypervisor know we are alive */ | ||
107 | /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ | ||
108 | lis r3,0x8002 | ||
109 | rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */ | ||
110 | li r0,-1 /* r0=-1 indicates a Hypervisor call */ | ||
111 | sc /* Invoke the hypervisor via a system call */ | ||
112 | mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */ | ||
113 | b iSeries_secondary_smp_loop /* wait for signal to start */ | ||
114 | |||
115 | 3: | ||
116 | HMT_MEDIUM | ||
100 | sync | 117 | sync |
101 | LOAD_REG_ADDR(r3,current_set) | 118 | LOAD_REG_ADDR(r3,current_set) |
102 | sldi r28,r24,3 /* get current_set[cpu#] */ | 119 | sldi r28,r24,3 /* get current_set[cpu#] */ |
@@ -104,27 +121,22 @@ system_reset_iSeries: | |||
104 | addi r1,r3,THREAD_SIZE | 121 | addi r1,r3,THREAD_SIZE |
105 | subi r1,r1,STACK_FRAME_OVERHEAD | 122 | subi r1,r1,STACK_FRAME_OVERHEAD |
106 | 123 | ||
107 | cmpwi 0,r23,0 | ||
108 | beq iSeries_secondary_smp_loop /* Loop until told to go */ | ||
109 | b __secondary_start /* Loop until told to go */ | 124 | b __secondary_start /* Loop until told to go */ |
110 | iSeries_secondary_smp_loop: | 125 | #endif /* CONFIG_SMP */ |
111 | /* Let the Hypervisor know we are alive */ | 126 | |
112 | /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ | 127 | iSeries_secondary_yield: |
113 | lis r3,0x8002 | ||
114 | rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */ | ||
115 | #else /* CONFIG_SMP */ | ||
116 | /* Yield the processor. This is required for non-SMP kernels | 128 | /* Yield the processor. This is required for non-SMP kernels |
117 | which are running on multi-threaded machines. */ | 129 | which are running on multi-threaded machines. */ |
130 | HMT_LOW | ||
118 | lis r3,0x8000 | 131 | lis r3,0x8000 |
119 | rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */ | 132 | rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */ |
120 | addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */ | 133 | addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */ |
121 | li r4,0 /* "yield timed" */ | 134 | li r4,0 /* "yield timed" */ |
122 | li r5,-1 /* "yield forever" */ | 135 | li r5,-1 /* "yield forever" */ |
123 | #endif /* CONFIG_SMP */ | ||
124 | li r0,-1 /* r0=-1 indicates a Hypervisor call */ | 136 | li r0,-1 /* r0=-1 indicates a Hypervisor call */ |
125 | sc /* Invoke the hypervisor via a system call */ | 137 | sc /* Invoke the hypervisor via a system call */ |
126 | mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */ | 138 | mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */ |
127 | b 2b /* If SMP not configured, secondaries | 139 | b iSeries_secondary_yield /* If SMP not configured, secondaries |
128 | * loop forever */ | 140 | * loop forever */ |
129 | 141 | ||
130 | /*** ISeries-LPAR interrupt handlers ***/ | 142 | /*** ISeries-LPAR interrupt handlers ***/ |
@@ -157,7 +169,7 @@ BEGIN_FTR_SECTION | |||
157 | FTR_SECTION_ELSE | 169 | FTR_SECTION_ELSE |
158 | EXCEPTION_PROLOG_1(PACA_EXGEN) | 170 | EXCEPTION_PROLOG_1(PACA_EXGEN) |
159 | EXCEPTION_PROLOG_ISERIES_1 | 171 | EXCEPTION_PROLOG_ISERIES_1 |
160 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB) | 172 | ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB) |
161 | b data_access_common | 173 | b data_access_common |
162 | 174 | ||
163 | .do_stab_bolted_iSeries: | 175 | .do_stab_bolted_iSeries: |
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c index ba446bf355a9..b2103453eb01 100644 --- a/arch/powerpc/platforms/iseries/irq.c +++ b/arch/powerpc/platforms/iseries/irq.c | |||
@@ -42,7 +42,6 @@ | |||
42 | #include "irq.h" | 42 | #include "irq.h" |
43 | #include "pci.h" | 43 | #include "pci.h" |
44 | #include "call_pci.h" | 44 | #include "call_pci.h" |
45 | #include "smp.h" | ||
46 | 45 | ||
47 | #ifdef CONFIG_PCI | 46 | #ifdef CONFIG_PCI |
48 | 47 | ||
@@ -167,11 +166,11 @@ static void pci_event_handler(struct HvLpEvent *event) | |||
167 | * This will be called by device drivers (via enable_IRQ) | 166 | * This will be called by device drivers (via enable_IRQ) |
168 | * to enable INTA in the bridge interrupt status register. | 167 | * to enable INTA in the bridge interrupt status register. |
169 | */ | 168 | */ |
170 | static void iseries_enable_IRQ(unsigned int irq) | 169 | static void iseries_enable_IRQ(struct irq_data *d) |
171 | { | 170 | { |
172 | u32 bus, dev_id, function, mask; | 171 | u32 bus, dev_id, function, mask; |
173 | const u32 sub_bus = 0; | 172 | const u32 sub_bus = 0; |
174 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; | 173 | unsigned int rirq = (unsigned int)irqd_to_hwirq(d); |
175 | 174 | ||
176 | /* The IRQ has already been locked by the caller */ | 175 | /* The IRQ has already been locked by the caller */ |
177 | bus = REAL_IRQ_TO_BUS(rirq); | 176 | bus = REAL_IRQ_TO_BUS(rirq); |
@@ -184,23 +183,23 @@ static void iseries_enable_IRQ(unsigned int irq) | |||
184 | } | 183 | } |
185 | 184 | ||
186 | /* This is called by iseries_activate_IRQs */ | 185 | /* This is called by iseries_activate_IRQs */ |
187 | static unsigned int iseries_startup_IRQ(unsigned int irq) | 186 | static unsigned int iseries_startup_IRQ(struct irq_data *d) |
188 | { | 187 | { |
189 | u32 bus, dev_id, function, mask; | 188 | u32 bus, dev_id, function, mask; |
190 | const u32 sub_bus = 0; | 189 | const u32 sub_bus = 0; |
191 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; | 190 | unsigned int rirq = (unsigned int)irqd_to_hwirq(d); |
192 | 191 | ||
193 | bus = REAL_IRQ_TO_BUS(rirq); | 192 | bus = REAL_IRQ_TO_BUS(rirq); |
194 | function = REAL_IRQ_TO_FUNC(rirq); | 193 | function = REAL_IRQ_TO_FUNC(rirq); |
195 | dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; | 194 | dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; |
196 | 195 | ||
197 | /* Link the IRQ number to the bridge */ | 196 | /* Link the IRQ number to the bridge */ |
198 | HvCallXm_connectBusUnit(bus, sub_bus, dev_id, irq); | 197 | HvCallXm_connectBusUnit(bus, sub_bus, dev_id, d->irq); |
199 | 198 | ||
200 | /* Unmask bridge interrupts in the FISR */ | 199 | /* Unmask bridge interrupts in the FISR */ |
201 | mask = 0x01010000 << function; | 200 | mask = 0x01010000 << function; |
202 | HvCallPci_unmaskFisr(bus, sub_bus, dev_id, mask); | 201 | HvCallPci_unmaskFisr(bus, sub_bus, dev_id, mask); |
203 | iseries_enable_IRQ(irq); | 202 | iseries_enable_IRQ(d); |
204 | return 0; | 203 | return 0; |
205 | } | 204 | } |
206 | 205 | ||
@@ -215,21 +214,26 @@ void __init iSeries_activate_IRQs() | |||
215 | 214 | ||
216 | for_each_irq (irq) { | 215 | for_each_irq (irq) { |
217 | struct irq_desc *desc = irq_to_desc(irq); | 216 | struct irq_desc *desc = irq_to_desc(irq); |
217 | struct irq_chip *chip; | ||
218 | 218 | ||
219 | if (desc && desc->chip && desc->chip->startup) { | 219 | if (!desc) |
220 | continue; | ||
221 | |||
222 | chip = irq_desc_get_chip(desc); | ||
223 | if (chip && chip->irq_startup) { | ||
220 | raw_spin_lock_irqsave(&desc->lock, flags); | 224 | raw_spin_lock_irqsave(&desc->lock, flags); |
221 | desc->chip->startup(irq); | 225 | chip->irq_startup(&desc->irq_data); |
222 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 226 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
223 | } | 227 | } |
224 | } | 228 | } |
225 | } | 229 | } |
226 | 230 | ||
227 | /* this is not called anywhere currently */ | 231 | /* this is not called anywhere currently */ |
228 | static void iseries_shutdown_IRQ(unsigned int irq) | 232 | static void iseries_shutdown_IRQ(struct irq_data *d) |
229 | { | 233 | { |
230 | u32 bus, dev_id, function, mask; | 234 | u32 bus, dev_id, function, mask; |
231 | const u32 sub_bus = 0; | 235 | const u32 sub_bus = 0; |
232 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; | 236 | unsigned int rirq = (unsigned int)irqd_to_hwirq(d); |
233 | 237 | ||
234 | /* irq should be locked by the caller */ | 238 | /* irq should be locked by the caller */ |
235 | bus = REAL_IRQ_TO_BUS(rirq); | 239 | bus = REAL_IRQ_TO_BUS(rirq); |
@@ -248,11 +252,11 @@ static void iseries_shutdown_IRQ(unsigned int irq) | |||
248 | * This will be called by device drivers (via disable_IRQ) | 252 | * This will be called by device drivers (via disable_IRQ) |
249 | * to disable INTA in the bridge interrupt status register. | 253 | * to disable INTA in the bridge interrupt status register. |
250 | */ | 254 | */ |
251 | static void iseries_disable_IRQ(unsigned int irq) | 255 | static void iseries_disable_IRQ(struct irq_data *d) |
252 | { | 256 | { |
253 | u32 bus, dev_id, function, mask; | 257 | u32 bus, dev_id, function, mask; |
254 | const u32 sub_bus = 0; | 258 | const u32 sub_bus = 0; |
255 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; | 259 | unsigned int rirq = (unsigned int)irqd_to_hwirq(d); |
256 | 260 | ||
257 | /* The IRQ has already been locked by the caller */ | 261 | /* The IRQ has already been locked by the caller */ |
258 | bus = REAL_IRQ_TO_BUS(rirq); | 262 | bus = REAL_IRQ_TO_BUS(rirq); |
@@ -264,9 +268,9 @@ static void iseries_disable_IRQ(unsigned int irq) | |||
264 | HvCallPci_maskInterrupts(bus, sub_bus, dev_id, mask); | 268 | HvCallPci_maskInterrupts(bus, sub_bus, dev_id, mask); |
265 | } | 269 | } |
266 | 270 | ||
267 | static void iseries_end_IRQ(unsigned int irq) | 271 | static void iseries_end_IRQ(struct irq_data *d) |
268 | { | 272 | { |
269 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; | 273 | unsigned int rirq = (unsigned int)irqd_to_hwirq(d); |
270 | 274 | ||
271 | HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq), | 275 | HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq), |
272 | (REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq)); | 276 | (REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq)); |
@@ -274,11 +278,11 @@ static void iseries_end_IRQ(unsigned int irq) | |||
274 | 278 | ||
275 | static struct irq_chip iseries_pic = { | 279 | static struct irq_chip iseries_pic = { |
276 | .name = "iSeries", | 280 | .name = "iSeries", |
277 | .startup = iseries_startup_IRQ, | 281 | .irq_startup = iseries_startup_IRQ, |
278 | .shutdown = iseries_shutdown_IRQ, | 282 | .irq_shutdown = iseries_shutdown_IRQ, |
279 | .unmask = iseries_enable_IRQ, | 283 | .irq_unmask = iseries_enable_IRQ, |
280 | .mask = iseries_disable_IRQ, | 284 | .irq_mask = iseries_disable_IRQ, |
281 | .eoi = iseries_end_IRQ | 285 | .irq_eoi = iseries_end_IRQ |
282 | }; | 286 | }; |
283 | 287 | ||
284 | /* | 288 | /* |
@@ -311,7 +315,7 @@ unsigned int iSeries_get_irq(void) | |||
311 | #ifdef CONFIG_SMP | 315 | #ifdef CONFIG_SMP |
312 | if (get_lppaca()->int_dword.fields.ipi_cnt) { | 316 | if (get_lppaca()->int_dword.fields.ipi_cnt) { |
313 | get_lppaca()->int_dword.fields.ipi_cnt = 0; | 317 | get_lppaca()->int_dword.fields.ipi_cnt = 0; |
314 | iSeries_smp_message_recv(); | 318 | smp_ipi_demux(); |
315 | } | 319 | } |
316 | #endif /* CONFIG_SMP */ | 320 | #endif /* CONFIG_SMP */ |
317 | if (hvlpevent_is_pending()) | 321 | if (hvlpevent_is_pending()) |
@@ -341,7 +345,7 @@ unsigned int iSeries_get_irq(void) | |||
341 | static int iseries_irq_host_map(struct irq_host *h, unsigned int virq, | 345 | static int iseries_irq_host_map(struct irq_host *h, unsigned int virq, |
342 | irq_hw_number_t hw) | 346 | irq_hw_number_t hw) |
343 | { | 347 | { |
344 | set_irq_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq); | 348 | irq_set_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq); |
345 | 349 | ||
346 | return 0; | 350 | return 0; |
347 | } | 351 | } |
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c index 33e5fc7334fc..62dabe3c2bfa 100644 --- a/arch/powerpc/platforms/iseries/mf.c +++ b/arch/powerpc/platforms/iseries/mf.c | |||
@@ -51,7 +51,7 @@ | |||
51 | static int mf_initialized; | 51 | static int mf_initialized; |
52 | 52 | ||
53 | /* | 53 | /* |
54 | * This is the structure layout for the Machine Facilites LPAR event | 54 | * This is the structure layout for the Machine Facilities LPAR event |
55 | * flows. | 55 | * flows. |
56 | */ | 56 | */ |
57 | struct vsp_cmd_data { | 57 | struct vsp_cmd_data { |
@@ -1045,71 +1045,9 @@ static const struct file_operations mf_side_proc_fops = { | |||
1045 | .write = mf_side_proc_write, | 1045 | .write = mf_side_proc_write, |
1046 | }; | 1046 | }; |
1047 | 1047 | ||
1048 | #if 0 | ||
1049 | static void mf_getSrcHistory(char *buffer, int size) | ||
1050 | { | ||
1051 | struct IplTypeReturnStuff return_stuff; | ||
1052 | struct pending_event *ev = new_pending_event(); | ||
1053 | int rc = 0; | ||
1054 | char *pages[4]; | ||
1055 | |||
1056 | pages[0] = kmalloc(4096, GFP_ATOMIC); | ||
1057 | pages[1] = kmalloc(4096, GFP_ATOMIC); | ||
1058 | pages[2] = kmalloc(4096, GFP_ATOMIC); | ||
1059 | pages[3] = kmalloc(4096, GFP_ATOMIC); | ||
1060 | if ((ev == NULL) || (pages[0] == NULL) || (pages[1] == NULL) | ||
1061 | || (pages[2] == NULL) || (pages[3] == NULL)) | ||
1062 | return -ENOMEM; | ||
1063 | |||
1064 | return_stuff.xType = 0; | ||
1065 | return_stuff.xRc = 0; | ||
1066 | return_stuff.xDone = 0; | ||
1067 | ev->event.hp_lp_event.xSubtype = 6; | ||
1068 | ev->event.hp_lp_event.x.xSubtypeData = | ||
1069 | subtype_data('M', 'F', 'V', 'I'); | ||
1070 | ev->event.data.vsp_cmd.xEvent = &return_stuff; | ||
1071 | ev->event.data.vsp_cmd.cmd = 4; | ||
1072 | ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex(); | ||
1073 | ev->event.data.vsp_cmd.result_code = 0xFF; | ||
1074 | ev->event.data.vsp_cmd.reserved = 0; | ||
1075 | ev->event.data.vsp_cmd.sub_data.page[0] = iseries_hv_addr(pages[0]); | ||
1076 | ev->event.data.vsp_cmd.sub_data.page[1] = iseries_hv_addr(pages[1]); | ||
1077 | ev->event.data.vsp_cmd.sub_data.page[2] = iseries_hv_addr(pages[2]); | ||
1078 | ev->event.data.vsp_cmd.sub_data.page[3] = iseries_hv_addr(pages[3]); | ||
1079 | mb(); | ||
1080 | if (signal_event(ev) != 0) | ||
1081 | return; | ||
1082 | |||
1083 | while (return_stuff.xDone != 1) | ||
1084 | udelay(10); | ||
1085 | if (return_stuff.xRc == 0) | ||
1086 | memcpy(buffer, pages[0], size); | ||
1087 | kfree(pages[0]); | ||
1088 | kfree(pages[1]); | ||
1089 | kfree(pages[2]); | ||
1090 | kfree(pages[3]); | ||
1091 | } | ||
1092 | #endif | ||
1093 | |||
1094 | static int mf_src_proc_show(struct seq_file *m, void *v) | 1048 | static int mf_src_proc_show(struct seq_file *m, void *v) |
1095 | { | 1049 | { |
1096 | #if 0 | ||
1097 | int len; | ||
1098 | |||
1099 | mf_getSrcHistory(page, count); | ||
1100 | len = count; | ||
1101 | len -= off; | ||
1102 | if (len < count) { | ||
1103 | *eof = 1; | ||
1104 | if (len <= 0) | ||
1105 | return 0; | ||
1106 | } else | ||
1107 | len = count; | ||
1108 | *start = page + off; | ||
1109 | return len; | ||
1110 | #else | ||
1111 | return 0; | 1050 | return 0; |
1112 | #endif | ||
1113 | } | 1051 | } |
1114 | 1052 | ||
1115 | static int mf_src_proc_open(struct inode *inode, struct file *file) | 1053 | static int mf_src_proc_open(struct inode *inode, struct file *file) |
@@ -1249,6 +1187,7 @@ out: | |||
1249 | 1187 | ||
1250 | static const struct file_operations proc_vmlinux_operations = { | 1188 | static const struct file_operations proc_vmlinux_operations = { |
1251 | .write = proc_mf_change_vmlinux, | 1189 | .write = proc_mf_change_vmlinux, |
1190 | .llseek = default_llseek, | ||
1252 | }; | 1191 | }; |
1253 | 1192 | ||
1254 | static int __init mf_proc_init(void) | 1193 | static int __init mf_proc_init(void) |
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c index b0863410517f..c25a0815c26b 100644 --- a/arch/powerpc/platforms/iseries/setup.c +++ b/arch/powerpc/platforms/iseries/setup.c | |||
@@ -249,7 +249,7 @@ static unsigned long iSeries_process_mainstore_vpd(struct MemoryBlock *mb_array, | |||
249 | unsigned long i; | 249 | unsigned long i; |
250 | unsigned long mem_blocks = 0; | 250 | unsigned long mem_blocks = 0; |
251 | 251 | ||
252 | if (cpu_has_feature(CPU_FTR_SLB)) | 252 | if (mmu_has_feature(MMU_FTR_SLB)) |
253 | mem_blocks = iSeries_process_Regatta_mainstore_vpd(mb_array, | 253 | mem_blocks = iSeries_process_Regatta_mainstore_vpd(mb_array, |
254 | max_entries); | 254 | max_entries); |
255 | else | 255 | else |
@@ -634,7 +634,7 @@ static int __init iseries_probe(void) | |||
634 | 634 | ||
635 | hpte_init_iSeries(); | 635 | hpte_init_iSeries(); |
636 | /* iSeries does not support 16M pages */ | 636 | /* iSeries does not support 16M pages */ |
637 | cur_cpu_spec->cpu_features &= ~CPU_FTR_16M_PAGE; | 637 | cur_cpu_spec->mmu_features &= ~MMU_FTR_16M_PAGE; |
638 | 638 | ||
639 | return 1; | 639 | return 1; |
640 | } | 640 | } |
@@ -680,10 +680,16 @@ void * __init iSeries_early_setup(void) | |||
680 | * on but calling this function multiple times is fine. | 680 | * on but calling this function multiple times is fine. |
681 | */ | 681 | */ |
682 | identify_cpu(0, mfspr(SPRN_PVR)); | 682 | identify_cpu(0, mfspr(SPRN_PVR)); |
683 | initialise_paca(&boot_paca, 0); | ||
683 | 684 | ||
684 | powerpc_firmware_features |= FW_FEATURE_ISERIES; | 685 | powerpc_firmware_features |= FW_FEATURE_ISERIES; |
685 | powerpc_firmware_features |= FW_FEATURE_LPAR; | 686 | powerpc_firmware_features |= FW_FEATURE_LPAR; |
686 | 687 | ||
688 | #ifdef CONFIG_SMP | ||
689 | /* On iSeries we know we can never have more than 64 cpus */ | ||
690 | nr_cpu_ids = max(nr_cpu_ids, 64); | ||
691 | #endif | ||
692 | |||
687 | iSeries_fixup_klimit(); | 693 | iSeries_fixup_klimit(); |
688 | 694 | ||
689 | /* | 695 | /* |
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c index 6590850045af..e3265adde5d3 100644 --- a/arch/powerpc/platforms/iseries/smp.c +++ b/arch/powerpc/platforms/iseries/smp.c | |||
@@ -42,57 +42,23 @@ | |||
42 | #include <asm/cputable.h> | 42 | #include <asm/cputable.h> |
43 | #include <asm/system.h> | 43 | #include <asm/system.h> |
44 | 44 | ||
45 | #include "smp.h" | 45 | static void smp_iSeries_cause_ipi(int cpu, unsigned long data) |
46 | |||
47 | static unsigned long iSeries_smp_message[NR_CPUS]; | ||
48 | |||
49 | void iSeries_smp_message_recv(void) | ||
50 | { | ||
51 | int cpu = smp_processor_id(); | ||
52 | int msg; | ||
53 | |||
54 | if (num_online_cpus() < 2) | ||
55 | return; | ||
56 | |||
57 | for (msg = 0; msg < 4; msg++) | ||
58 | if (test_and_clear_bit(msg, &iSeries_smp_message[cpu])) | ||
59 | smp_message_recv(msg); | ||
60 | } | ||
61 | |||
62 | static inline void smp_iSeries_do_message(int cpu, int msg) | ||
63 | { | 46 | { |
64 | set_bit(msg, &iSeries_smp_message[cpu]); | ||
65 | HvCall_sendIPI(&(paca[cpu])); | 47 | HvCall_sendIPI(&(paca[cpu])); |
66 | } | 48 | } |
67 | 49 | ||
68 | static void smp_iSeries_message_pass(int target, int msg) | ||
69 | { | ||
70 | int i; | ||
71 | |||
72 | if (target < NR_CPUS) | ||
73 | smp_iSeries_do_message(target, msg); | ||
74 | else { | ||
75 | for_each_online_cpu(i) { | ||
76 | if ((target == MSG_ALL_BUT_SELF) && | ||
77 | (i == smp_processor_id())) | ||
78 | continue; | ||
79 | smp_iSeries_do_message(i, msg); | ||
80 | } | ||
81 | } | ||
82 | } | ||
83 | |||
84 | static int smp_iSeries_probe(void) | 50 | static int smp_iSeries_probe(void) |
85 | { | 51 | { |
86 | return cpumask_weight(cpu_possible_mask); | 52 | return cpumask_weight(cpu_possible_mask); |
87 | } | 53 | } |
88 | 54 | ||
89 | static void smp_iSeries_kick_cpu(int nr) | 55 | static int smp_iSeries_kick_cpu(int nr) |
90 | { | 56 | { |
91 | BUG_ON((nr < 0) || (nr >= NR_CPUS)); | 57 | BUG_ON((nr < 0) || (nr >= NR_CPUS)); |
92 | 58 | ||
93 | /* Verify that our partition has a processor nr */ | 59 | /* Verify that our partition has a processor nr */ |
94 | if (lppaca[nr].dyn_proc_status >= 2) | 60 | if (lppaca_of(nr).dyn_proc_status >= 2) |
95 | return; | 61 | return -ENOENT; |
96 | 62 | ||
97 | /* The processor is currently spinning, waiting | 63 | /* The processor is currently spinning, waiting |
98 | * for the cpu_start field to become non-zero | 64 | * for the cpu_start field to become non-zero |
@@ -100,6 +66,8 @@ static void smp_iSeries_kick_cpu(int nr) | |||
100 | * continue on to secondary_start in iSeries_head.S | 66 | * continue on to secondary_start in iSeries_head.S |
101 | */ | 67 | */ |
102 | paca[nr].cpu_start = 1; | 68 | paca[nr].cpu_start = 1; |
69 | |||
70 | return 0; | ||
103 | } | 71 | } |
104 | 72 | ||
105 | static void __devinit smp_iSeries_setup_cpu(int nr) | 73 | static void __devinit smp_iSeries_setup_cpu(int nr) |
@@ -107,7 +75,8 @@ static void __devinit smp_iSeries_setup_cpu(int nr) | |||
107 | } | 75 | } |
108 | 76 | ||
109 | static struct smp_ops_t iSeries_smp_ops = { | 77 | static struct smp_ops_t iSeries_smp_ops = { |
110 | .message_pass = smp_iSeries_message_pass, | 78 | .message_pass = smp_muxed_ipi_message_pass, |
79 | .cause_ipi = smp_iSeries_cause_ipi, | ||
111 | .probe = smp_iSeries_probe, | 80 | .probe = smp_iSeries_probe, |
112 | .kick_cpu = smp_iSeries_kick_cpu, | 81 | .kick_cpu = smp_iSeries_kick_cpu, |
113 | .setup_cpu = smp_iSeries_setup_cpu, | 82 | .setup_cpu = smp_iSeries_setup_cpu, |
diff --git a/arch/powerpc/platforms/iseries/smp.h b/arch/powerpc/platforms/iseries/smp.h deleted file mode 100644 index d501f7de01e7..000000000000 --- a/arch/powerpc/platforms/iseries/smp.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef _PLATFORMS_ISERIES_SMP_H | ||
2 | #define _PLATFORMS_ISERIES_SMP_H | ||
3 | |||
4 | extern void iSeries_smp_message_recv(void); | ||
5 | |||
6 | #endif /* _PLATFORMS_ISERIES_SMP_H */ | ||
diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c index b5f05d943a90..2376069cdc14 100644 --- a/arch/powerpc/platforms/iseries/viopath.c +++ b/arch/powerpc/platforms/iseries/viopath.c | |||
@@ -396,7 +396,7 @@ static void vio_handleEvent(struct HvLpEvent *event) | |||
396 | viopathStatus[remoteLp].mTargetInst)) { | 396 | viopathStatus[remoteLp].mTargetInst)) { |
397 | printk(VIOPATH_KERN_WARN | 397 | printk(VIOPATH_KERN_WARN |
398 | "message from invalid partition. " | 398 | "message from invalid partition. " |
399 | "int msg rcvd, source inst (%d) doesnt match (%d)\n", | 399 | "int msg rcvd, source inst (%d) doesn't match (%d)\n", |
400 | viopathStatus[remoteLp].mTargetInst, | 400 | viopathStatus[remoteLp].mTargetInst, |
401 | event->xSourceInstanceId); | 401 | event->xSourceInstanceId); |
402 | return; | 402 | return; |
@@ -407,7 +407,7 @@ static void vio_handleEvent(struct HvLpEvent *event) | |||
407 | viopathStatus[remoteLp].mSourceInst)) { | 407 | viopathStatus[remoteLp].mSourceInst)) { |
408 | printk(VIOPATH_KERN_WARN | 408 | printk(VIOPATH_KERN_WARN |
409 | "message from invalid partition. " | 409 | "message from invalid partition. " |
410 | "int msg rcvd, target inst (%d) doesnt match (%d)\n", | 410 | "int msg rcvd, target inst (%d) doesn't match (%d)\n", |
411 | viopathStatus[remoteLp].mSourceInst, | 411 | viopathStatus[remoteLp].mSourceInst, |
412 | event->xTargetInstanceId); | 412 | event->xTargetInstanceId); |
413 | return; | 413 | return; |
@@ -418,7 +418,7 @@ static void vio_handleEvent(struct HvLpEvent *event) | |||
418 | viopathStatus[remoteLp].mSourceInst) { | 418 | viopathStatus[remoteLp].mSourceInst) { |
419 | printk(VIOPATH_KERN_WARN | 419 | printk(VIOPATH_KERN_WARN |
420 | "message from invalid partition. " | 420 | "message from invalid partition. " |
421 | "ack msg rcvd, source inst (%d) doesnt match (%d)\n", | 421 | "ack msg rcvd, source inst (%d) doesn't match (%d)\n", |
422 | viopathStatus[remoteLp].mSourceInst, | 422 | viopathStatus[remoteLp].mSourceInst, |
423 | event->xSourceInstanceId); | 423 | event->xSourceInstanceId); |
424 | return; | 424 | return; |
@@ -428,7 +428,7 @@ static void vio_handleEvent(struct HvLpEvent *event) | |||
428 | viopathStatus[remoteLp].mTargetInst) { | 428 | viopathStatus[remoteLp].mTargetInst) { |
429 | printk(VIOPATH_KERN_WARN | 429 | printk(VIOPATH_KERN_WARN |
430 | "message from invalid partition. " | 430 | "message from invalid partition. " |
431 | "viopath: ack msg rcvd, target inst (%d) doesnt match (%d)\n", | 431 | "viopath: ack msg rcvd, target inst (%d) doesn't match (%d)\n", |
432 | viopathStatus[remoteLp].mTargetInst, | 432 | viopathStatus[remoteLp].mTargetInst, |
433 | event->xTargetInstanceId); | 433 | event->xTargetInstanceId); |
434 | return; | 434 | return; |
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c index 04296ffff8bf..dd2e48b28508 100644 --- a/arch/powerpc/platforms/maple/pci.c +++ b/arch/powerpc/platforms/maple/pci.c | |||
@@ -498,7 +498,7 @@ void __devinit maple_pci_irq_fixup(struct pci_dev *dev) | |||
498 | printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n"); | 498 | printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n"); |
499 | dev->irq = irq_create_mapping(NULL, 1); | 499 | dev->irq = irq_create_mapping(NULL, 1); |
500 | if (dev->irq != NO_IRQ) | 500 | if (dev->irq != NO_IRQ) |
501 | set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); | 501 | irq_set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); |
502 | } | 502 | } |
503 | 503 | ||
504 | /* Hide AMD8111 IDE interrupt when in legacy mode so | 504 | /* Hide AMD8111 IDE interrupt when in legacy mode so |
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c index 3fff8d979b41..fe34c3d9bb74 100644 --- a/arch/powerpc/platforms/maple/setup.c +++ b/arch/powerpc/platforms/maple/setup.c | |||
@@ -358,6 +358,7 @@ static int __init maple_cpc925_edac_setup(void) | |||
358 | model = (const unsigned char *)of_get_property(np, "model", NULL); | 358 | model = (const unsigned char *)of_get_property(np, "model", NULL); |
359 | if (!model) { | 359 | if (!model) { |
360 | printk(KERN_ERR "%s: Unabel to get model info\n", __func__); | 360 | printk(KERN_ERR "%s: Unabel to get model info\n", __func__); |
361 | of_node_put(np); | ||
361 | return -ENODEV; | 362 | return -ENODEV; |
362 | } | 363 | } |
363 | 364 | ||
diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c index 09695ae50f91..321a9b3a2d00 100644 --- a/arch/powerpc/platforms/pasemi/dma_lib.c +++ b/arch/powerpc/platforms/pasemi/dma_lib.c | |||
@@ -379,9 +379,9 @@ void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size, | |||
379 | } | 379 | } |
380 | EXPORT_SYMBOL(pasemi_dma_free_buf); | 380 | EXPORT_SYMBOL(pasemi_dma_free_buf); |
381 | 381 | ||
382 | /* pasemi_dma_alloc_flag - Allocate a flag (event) for channel syncronization | 382 | /* pasemi_dma_alloc_flag - Allocate a flag (event) for channel synchronization |
383 | * | 383 | * |
384 | * Allocates a flag for use with channel syncronization (event descriptors). | 384 | * Allocates a flag for use with channel synchronization (event descriptors). |
385 | * Returns allocated flag (0-63), < 0 on error. | 385 | * Returns allocated flag (0-63), < 0 on error. |
386 | */ | 386 | */ |
387 | int pasemi_dma_alloc_flag(void) | 387 | int pasemi_dma_alloc_flag(void) |
diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c index a5d907b5a4c2..9886296e08da 100644 --- a/arch/powerpc/platforms/pasemi/gpio_mdio.c +++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c | |||
@@ -216,8 +216,7 @@ static int gpio_mdio_reset(struct mii_bus *bus) | |||
216 | } | 216 | } |
217 | 217 | ||
218 | 218 | ||
219 | static int __devinit gpio_mdio_probe(struct platform_device *ofdev, | 219 | static int __devinit gpio_mdio_probe(struct platform_device *ofdev) |
220 | const struct of_device_id *match) | ||
221 | { | 220 | { |
222 | struct device *dev = &ofdev->dev; | 221 | struct device *dev = &ofdev->dev; |
223 | struct device_node *np = ofdev->dev.of_node; | 222 | struct device_node *np = ofdev->dev.of_node; |
@@ -299,7 +298,7 @@ static struct of_device_id gpio_mdio_match[] = | |||
299 | }; | 298 | }; |
300 | MODULE_DEVICE_TABLE(of, gpio_mdio_match); | 299 | MODULE_DEVICE_TABLE(of, gpio_mdio_match); |
301 | 300 | ||
302 | static struct of_platform_driver gpio_mdio_driver = | 301 | static struct platform_driver gpio_mdio_driver = |
303 | { | 302 | { |
304 | .probe = gpio_mdio_probe, | 303 | .probe = gpio_mdio_probe, |
305 | .remove = gpio_mdio_remove, | 304 | .remove = gpio_mdio_remove, |
@@ -326,13 +325,13 @@ int gpio_mdio_init(void) | |||
326 | if (!gpio_regs) | 325 | if (!gpio_regs) |
327 | return -ENODEV; | 326 | return -ENODEV; |
328 | 327 | ||
329 | return of_register_platform_driver(&gpio_mdio_driver); | 328 | return platform_driver_register(&gpio_mdio_driver); |
330 | } | 329 | } |
331 | module_init(gpio_mdio_init); | 330 | module_init(gpio_mdio_init); |
332 | 331 | ||
333 | void gpio_mdio_exit(void) | 332 | void gpio_mdio_exit(void) |
334 | { | 333 | { |
335 | of_unregister_platform_driver(&gpio_mdio_driver); | 334 | platform_driver_unregister(&gpio_mdio_driver); |
336 | if (gpio_regs) | 335 | if (gpio_regs) |
337 | iounmap(gpio_regs); | 336 | iounmap(gpio_regs); |
338 | } | 337 | } |
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c index 1f9fb2c57761..14943ef01918 100644 --- a/arch/powerpc/platforms/pasemi/iommu.c +++ b/arch/powerpc/platforms/pasemi/iommu.c | |||
@@ -156,20 +156,12 @@ static void iommu_table_iobmap_setup(void) | |||
156 | 156 | ||
157 | static void pci_dma_bus_setup_pasemi(struct pci_bus *bus) | 157 | static void pci_dma_bus_setup_pasemi(struct pci_bus *bus) |
158 | { | 158 | { |
159 | struct device_node *dn; | ||
160 | |||
161 | pr_debug("pci_dma_bus_setup, bus %p, bus->self %p\n", bus, bus->self); | 159 | pr_debug("pci_dma_bus_setup, bus %p, bus->self %p\n", bus, bus->self); |
162 | 160 | ||
163 | if (!iommu_table_iobmap_inited) { | 161 | if (!iommu_table_iobmap_inited) { |
164 | iommu_table_iobmap_inited = 1; | 162 | iommu_table_iobmap_inited = 1; |
165 | iommu_table_iobmap_setup(); | 163 | iommu_table_iobmap_setup(); |
166 | } | 164 | } |
167 | |||
168 | dn = pci_bus_to_OF_node(bus); | ||
169 | |||
170 | if (dn) | ||
171 | PCI_DN(dn)->iommu_table = &iommu_table_iobmap; | ||
172 | |||
173 | } | 165 | } |
174 | 166 | ||
175 | 167 | ||
@@ -192,9 +184,6 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev) | |||
192 | set_iommu_table_base(&dev->dev, &iommu_table_iobmap); | 184 | set_iommu_table_base(&dev->dev, &iommu_table_iobmap); |
193 | } | 185 | } |
194 | 186 | ||
195 | static void pci_dma_bus_setup_null(struct pci_bus *b) { } | ||
196 | static void pci_dma_dev_setup_null(struct pci_dev *d) { } | ||
197 | |||
198 | int __init iob_init(struct device_node *dn) | 187 | int __init iob_init(struct device_node *dn) |
199 | { | 188 | { |
200 | unsigned long tmp; | 189 | unsigned long tmp; |
@@ -251,14 +240,8 @@ void __init iommu_init_early_pasemi(void) | |||
251 | iommu_off = of_chosen && | 240 | iommu_off = of_chosen && |
252 | of_get_property(of_chosen, "linux,iommu-off", NULL); | 241 | of_get_property(of_chosen, "linux,iommu-off", NULL); |
253 | #endif | 242 | #endif |
254 | if (iommu_off) { | 243 | if (iommu_off) |
255 | /* Direct I/O, IOMMU off */ | ||
256 | ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_null; | ||
257 | ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_null; | ||
258 | set_pci_dma_ops(&dma_direct_ops); | ||
259 | |||
260 | return; | 244 | return; |
261 | } | ||
262 | 245 | ||
263 | iob_init(NULL); | 246 | iob_init(NULL); |
264 | 247 | ||
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index f372ec1691a3..7c858e6f843c 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c | |||
@@ -239,8 +239,8 @@ static __init void pas_init_IRQ(void) | |||
239 | if (nmiprop) { | 239 | if (nmiprop) { |
240 | nmi_virq = irq_create_mapping(NULL, *nmiprop); | 240 | nmi_virq = irq_create_mapping(NULL, *nmiprop); |
241 | mpic_irq_set_priority(nmi_virq, 15); | 241 | mpic_irq_set_priority(nmi_virq, 15); |
242 | set_irq_type(nmi_virq, IRQ_TYPE_EDGE_RISING); | 242 | irq_set_irq_type(nmi_virq, IRQ_TYPE_EDGE_RISING); |
243 | mpic_unmask_irq(nmi_virq); | 243 | mpic_unmask_irq(irq_get_irq_data(nmi_virq)); |
244 | } | 244 | } |
245 | 245 | ||
246 | of_node_put(mpic_node); | 246 | of_node_put(mpic_node); |
@@ -266,7 +266,7 @@ static int pas_machine_check_handler(struct pt_regs *regs) | |||
266 | if (nmi_virq != NO_IRQ && mpic_get_mcirq() == nmi_virq) { | 266 | if (nmi_virq != NO_IRQ && mpic_get_mcirq() == nmi_virq) { |
267 | printk(KERN_ERR "NMI delivered\n"); | 267 | printk(KERN_ERR "NMI delivered\n"); |
268 | debugger(regs); | 268 | debugger(regs); |
269 | mpic_end_irq(nmi_virq); | 269 | mpic_end_irq(irq_get_irq_data(nmi_virq)); |
270 | goto out; | 270 | goto out; |
271 | } | 271 | } |
272 | 272 | ||
diff --git a/arch/powerpc/platforms/powermac/Kconfig b/arch/powerpc/platforms/powermac/Kconfig index 1e1a0873e1dd..1afd10f67858 100644 --- a/arch/powerpc/platforms/powermac/Kconfig +++ b/arch/powerpc/platforms/powermac/Kconfig | |||
@@ -18,4 +18,13 @@ config PPC_PMAC64 | |||
18 | select PPC_970_NAP | 18 | select PPC_970_NAP |
19 | default y | 19 | default y |
20 | 20 | ||
21 | 21 | config PPC_PMAC32_PSURGE | |
22 | bool "Support for powersurge upgrade cards" if EXPERT | ||
23 | depends on SMP && PPC32 && PPC_PMAC | ||
24 | select PPC_SMP_MUXED_IPI | ||
25 | default y | ||
26 | help | ||
27 | The powersurge cpu boards can be used in the generation | ||
28 | of powermacs that have a socket for an upgradeable cpu card, | ||
29 | including the 7500, 8500, 9500, 9600. Support exists for | ||
30 | both dual and quad socket upgrade cards. | ||
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile index 50f169392551..ea47df66fee5 100644 --- a/arch/powerpc/platforms/powermac/Makefile +++ b/arch/powerpc/platforms/powermac/Makefile | |||
@@ -11,7 +11,7 @@ obj-y += pic.o setup.o time.o feature.o pci.o \ | |||
11 | obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o | 11 | obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o |
12 | obj-$(CONFIG_CPU_FREQ_PMAC) += cpufreq_32.o | 12 | obj-$(CONFIG_CPU_FREQ_PMAC) += cpufreq_32.o |
13 | obj-$(CONFIG_CPU_FREQ_PMAC64) += cpufreq_64.o | 13 | obj-$(CONFIG_CPU_FREQ_PMAC64) += cpufreq_64.o |
14 | # CONFIG_NVRAM is an arch. independant tristate symbol, for pmac32 we really | 14 | # CONFIG_NVRAM is an arch. independent tristate symbol, for pmac32 we really |
15 | # need this to be a bool. Cheat here and pretend CONFIG_NVRAM=m is really | 15 | # need this to be a bool. Cheat here and pretend CONFIG_NVRAM=m is really |
16 | # CONFIG_NVRAM=y | 16 | # CONFIG_NVRAM=y |
17 | obj-$(CONFIG_NVRAM:m=y) += nvram.o | 17 | obj-$(CONFIG_NVRAM:m=y) += nvram.o |
diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c index 415ca6d6b273..04af5f48b4eb 100644 --- a/arch/powerpc/platforms/powermac/cpufreq_32.c +++ b/arch/powerpc/platforms/powermac/cpufreq_32.c | |||
@@ -429,7 +429,7 @@ static u32 read_gpio(struct device_node *np) | |||
429 | return offset; | 429 | return offset; |
430 | } | 430 | } |
431 | 431 | ||
432 | static int pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg) | 432 | static int pmac_cpufreq_suspend(struct cpufreq_policy *policy) |
433 | { | 433 | { |
434 | /* Ok, this could be made a bit smarter, but let's be robust for now. We | 434 | /* Ok, this could be made a bit smarter, but let's be robust for now. We |
435 | * always force a speed change to high speed before sleep, to make sure | 435 | * always force a speed change to high speed before sleep, to make sure |
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index 480567e5fa9a..e9c8a607268e 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c | |||
@@ -904,7 +904,7 @@ static void __init smu_i2c_probe(void) | |||
904 | printk(KERN_INFO "SMU i2c %s\n", controller->full_name); | 904 | printk(KERN_INFO "SMU i2c %s\n", controller->full_name); |
905 | 905 | ||
906 | /* Look for childs, note that they might not be of the right | 906 | /* Look for childs, note that they might not be of the right |
907 | * type as older device trees mix i2c busses and other thigns | 907 | * type as older device trees mix i2c busses and other things |
908 | * at the same level | 908 | * at the same level |
909 | */ | 909 | */ |
910 | for (busnode = NULL; | 910 | for (busnode = NULL; |
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index 3bc075c788ef..f33e08d573ce 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c | |||
@@ -299,7 +299,7 @@ static void __init setup_chaos(struct pci_controller *hose, | |||
299 | * This function deals with some "special cases" devices. | 299 | * This function deals with some "special cases" devices. |
300 | * | 300 | * |
301 | * 0 -> No special case | 301 | * 0 -> No special case |
302 | * 1 -> Skip the device but act as if the access was successfull | 302 | * 1 -> Skip the device but act as if the access was successful |
303 | * (return 0xff's on reads, eventually, cache config space | 303 | * (return 0xff's on reads, eventually, cache config space |
304 | * accesses in a later version) | 304 | * accesses in a later version) |
305 | * -1 -> Hide the device (unsuccessful access) | 305 | * -1 -> Hide the device (unsuccessful access) |
@@ -988,7 +988,7 @@ void __devinit pmac_pci_irq_fixup(struct pci_dev *dev) | |||
988 | dev->vendor == PCI_VENDOR_ID_DEC && | 988 | dev->vendor == PCI_VENDOR_ID_DEC && |
989 | dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) { | 989 | dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) { |
990 | dev->irq = irq_create_mapping(NULL, 60); | 990 | dev->irq = irq_create_mapping(NULL, 60); |
991 | set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); | 991 | irq_set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); |
992 | } | 992 | } |
993 | #endif /* CONFIG_PPC32 */ | 993 | #endif /* CONFIG_PPC32 */ |
994 | } | 994 | } |
diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c index cec635942657..b0c3777528a1 100644 --- a/arch/powerpc/platforms/powermac/pfunc_core.c +++ b/arch/powerpc/platforms/powermac/pfunc_core.c | |||
@@ -837,8 +837,10 @@ struct pmf_function *__pmf_find_function(struct device_node *target, | |||
837 | return NULL; | 837 | return NULL; |
838 | find_it: | 838 | find_it: |
839 | dev = pmf_find_device(actor); | 839 | dev = pmf_find_device(actor); |
840 | if (dev == NULL) | 840 | if (dev == NULL) { |
841 | return NULL; | 841 | result = NULL; |
842 | goto out; | ||
843 | } | ||
842 | 844 | ||
843 | list_for_each_entry(func, &dev->functions, link) { | 845 | list_for_each_entry(func, &dev->functions, link) { |
844 | if (name && strcmp(name, func->name)) | 846 | if (name && strcmp(name, func->name)) |
@@ -850,8 +852,9 @@ struct pmf_function *__pmf_find_function(struct device_node *target, | |||
850 | result = func; | 852 | result = func; |
851 | break; | 853 | break; |
852 | } | 854 | } |
853 | of_node_put(actor); | ||
854 | pmf_put_device(dev); | 855 | pmf_put_device(dev); |
856 | out: | ||
857 | of_node_put(actor); | ||
855 | return result; | 858 | return result; |
856 | } | 859 | } |
857 | 860 | ||
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index 890d5f72b198..7667db448aa7 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <linux/signal.h> | 21 | #include <linux/signal.h> |
22 | #include <linux/pci.h> | 22 | #include <linux/pci.h> |
23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
24 | #include <linux/sysdev.h> | 24 | #include <linux/syscore_ops.h> |
25 | #include <linux/adb.h> | 25 | #include <linux/adb.h> |
26 | #include <linux/pmu.h> | 26 | #include <linux/pmu.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
@@ -82,9 +82,9 @@ static void __pmac_retrigger(unsigned int irq_nr) | |||
82 | } | 82 | } |
83 | } | 83 | } |
84 | 84 | ||
85 | static void pmac_mask_and_ack_irq(unsigned int virq) | 85 | static void pmac_mask_and_ack_irq(struct irq_data *d) |
86 | { | 86 | { |
87 | unsigned int src = irq_map[virq].hwirq; | 87 | unsigned int src = irqd_to_hwirq(d); |
88 | unsigned long bit = 1UL << (src & 0x1f); | 88 | unsigned long bit = 1UL << (src & 0x1f); |
89 | int i = src >> 5; | 89 | int i = src >> 5; |
90 | unsigned long flags; | 90 | unsigned long flags; |
@@ -104,9 +104,9 @@ static void pmac_mask_and_ack_irq(unsigned int virq) | |||
104 | raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); | 104 | raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); |
105 | } | 105 | } |
106 | 106 | ||
107 | static void pmac_ack_irq(unsigned int virq) | 107 | static void pmac_ack_irq(struct irq_data *d) |
108 | { | 108 | { |
109 | unsigned int src = irq_map[virq].hwirq; | 109 | unsigned int src = irqd_to_hwirq(d); |
110 | unsigned long bit = 1UL << (src & 0x1f); | 110 | unsigned long bit = 1UL << (src & 0x1f); |
111 | int i = src >> 5; | 111 | int i = src >> 5; |
112 | unsigned long flags; | 112 | unsigned long flags; |
@@ -149,15 +149,15 @@ static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost) | |||
149 | /* When an irq gets requested for the first client, if it's an | 149 | /* When an irq gets requested for the first client, if it's an |
150 | * edge interrupt, we clear any previous one on the controller | 150 | * edge interrupt, we clear any previous one on the controller |
151 | */ | 151 | */ |
152 | static unsigned int pmac_startup_irq(unsigned int virq) | 152 | static unsigned int pmac_startup_irq(struct irq_data *d) |
153 | { | 153 | { |
154 | unsigned long flags; | 154 | unsigned long flags; |
155 | unsigned int src = irq_map[virq].hwirq; | 155 | unsigned int src = irqd_to_hwirq(d); |
156 | unsigned long bit = 1UL << (src & 0x1f); | 156 | unsigned long bit = 1UL << (src & 0x1f); |
157 | int i = src >> 5; | 157 | int i = src >> 5; |
158 | 158 | ||
159 | raw_spin_lock_irqsave(&pmac_pic_lock, flags); | 159 | raw_spin_lock_irqsave(&pmac_pic_lock, flags); |
160 | if ((irq_to_desc(virq)->status & IRQ_LEVEL) == 0) | 160 | if (!irqd_is_level_type(d)) |
161 | out_le32(&pmac_irq_hw[i]->ack, bit); | 161 | out_le32(&pmac_irq_hw[i]->ack, bit); |
162 | __set_bit(src, ppc_cached_irq_mask); | 162 | __set_bit(src, ppc_cached_irq_mask); |
163 | __pmac_set_irq_mask(src, 0); | 163 | __pmac_set_irq_mask(src, 0); |
@@ -166,10 +166,10 @@ static unsigned int pmac_startup_irq(unsigned int virq) | |||
166 | return 0; | 166 | return 0; |
167 | } | 167 | } |
168 | 168 | ||
169 | static void pmac_mask_irq(unsigned int virq) | 169 | static void pmac_mask_irq(struct irq_data *d) |
170 | { | 170 | { |
171 | unsigned long flags; | 171 | unsigned long flags; |
172 | unsigned int src = irq_map[virq].hwirq; | 172 | unsigned int src = irqd_to_hwirq(d); |
173 | 173 | ||
174 | raw_spin_lock_irqsave(&pmac_pic_lock, flags); | 174 | raw_spin_lock_irqsave(&pmac_pic_lock, flags); |
175 | __clear_bit(src, ppc_cached_irq_mask); | 175 | __clear_bit(src, ppc_cached_irq_mask); |
@@ -177,10 +177,10 @@ static void pmac_mask_irq(unsigned int virq) | |||
177 | raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); | 177 | raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); |
178 | } | 178 | } |
179 | 179 | ||
180 | static void pmac_unmask_irq(unsigned int virq) | 180 | static void pmac_unmask_irq(struct irq_data *d) |
181 | { | 181 | { |
182 | unsigned long flags; | 182 | unsigned long flags; |
183 | unsigned int src = irq_map[virq].hwirq; | 183 | unsigned int src = irqd_to_hwirq(d); |
184 | 184 | ||
185 | raw_spin_lock_irqsave(&pmac_pic_lock, flags); | 185 | raw_spin_lock_irqsave(&pmac_pic_lock, flags); |
186 | __set_bit(src, ppc_cached_irq_mask); | 186 | __set_bit(src, ppc_cached_irq_mask); |
@@ -188,24 +188,24 @@ static void pmac_unmask_irq(unsigned int virq) | |||
188 | raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); | 188 | raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); |
189 | } | 189 | } |
190 | 190 | ||
191 | static int pmac_retrigger(unsigned int virq) | 191 | static int pmac_retrigger(struct irq_data *d) |
192 | { | 192 | { |
193 | unsigned long flags; | 193 | unsigned long flags; |
194 | 194 | ||
195 | raw_spin_lock_irqsave(&pmac_pic_lock, flags); | 195 | raw_spin_lock_irqsave(&pmac_pic_lock, flags); |
196 | __pmac_retrigger(irq_map[virq].hwirq); | 196 | __pmac_retrigger(irqd_to_hwirq(d)); |
197 | raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); | 197 | raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); |
198 | return 1; | 198 | return 1; |
199 | } | 199 | } |
200 | 200 | ||
201 | static struct irq_chip pmac_pic = { | 201 | static struct irq_chip pmac_pic = { |
202 | .name = "PMAC-PIC", | 202 | .name = "PMAC-PIC", |
203 | .startup = pmac_startup_irq, | 203 | .irq_startup = pmac_startup_irq, |
204 | .mask = pmac_mask_irq, | 204 | .irq_mask = pmac_mask_irq, |
205 | .ack = pmac_ack_irq, | 205 | .irq_ack = pmac_ack_irq, |
206 | .mask_ack = pmac_mask_and_ack_irq, | 206 | .irq_mask_ack = pmac_mask_and_ack_irq, |
207 | .unmask = pmac_unmask_irq, | 207 | .irq_unmask = pmac_unmask_irq, |
208 | .retrigger = pmac_retrigger, | 208 | .irq_retrigger = pmac_retrigger, |
209 | }; | 209 | }; |
210 | 210 | ||
211 | static irqreturn_t gatwick_action(int cpl, void *dev_id) | 211 | static irqreturn_t gatwick_action(int cpl, void *dev_id) |
@@ -239,15 +239,12 @@ static unsigned int pmac_pic_get_irq(void) | |||
239 | unsigned long bits = 0; | 239 | unsigned long bits = 0; |
240 | unsigned long flags; | 240 | unsigned long flags; |
241 | 241 | ||
242 | #ifdef CONFIG_SMP | 242 | #ifdef CONFIG_PPC_PMAC32_PSURGE |
243 | void psurge_smp_message_recv(void); | 243 | /* IPI's are a hack on the powersurge -- Cort */ |
244 | 244 | if (smp_processor_id() != 0) { | |
245 | /* IPI's are a hack on the powersurge -- Cort */ | 245 | return psurge_secondary_virq; |
246 | if ( smp_processor_id() != 0 ) { | ||
247 | psurge_smp_message_recv(); | ||
248 | return NO_IRQ_IGNORE; /* ignore, already handled */ | ||
249 | } | 246 | } |
250 | #endif /* CONFIG_SMP */ | 247 | #endif /* CONFIG_PPC_PMAC32_PSURGE */ |
251 | raw_spin_lock_irqsave(&pmac_pic_lock, flags); | 248 | raw_spin_lock_irqsave(&pmac_pic_lock, flags); |
252 | for (irq = max_real_irqs; (irq -= 32) >= 0; ) { | 249 | for (irq = max_real_irqs; (irq -= 32) >= 0; ) { |
253 | int i = irq >> 5; | 250 | int i = irq >> 5; |
@@ -289,7 +286,6 @@ static int pmac_pic_host_match(struct irq_host *h, struct device_node *node) | |||
289 | static int pmac_pic_host_map(struct irq_host *h, unsigned int virq, | 286 | static int pmac_pic_host_map(struct irq_host *h, unsigned int virq, |
290 | irq_hw_number_t hw) | 287 | irq_hw_number_t hw) |
291 | { | 288 | { |
292 | struct irq_desc *desc = irq_to_desc(virq); | ||
293 | int level; | 289 | int level; |
294 | 290 | ||
295 | if (hw >= max_irqs) | 291 | if (hw >= max_irqs) |
@@ -300,9 +296,9 @@ static int pmac_pic_host_map(struct irq_host *h, unsigned int virq, | |||
300 | */ | 296 | */ |
301 | level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f))); | 297 | level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f))); |
302 | if (level) | 298 | if (level) |
303 | desc->status |= IRQ_LEVEL; | 299 | irq_set_status_flags(virq, IRQ_LEVEL); |
304 | set_irq_chip_and_handler(virq, &pmac_pic, level ? | 300 | irq_set_chip_and_handler(virq, &pmac_pic, |
305 | handle_level_irq : handle_edge_irq); | 301 | level ? handle_level_irq : handle_edge_irq); |
306 | return 0; | 302 | return 0; |
307 | } | 303 | } |
308 | 304 | ||
@@ -472,12 +468,14 @@ int of_irq_map_oldworld(struct device_node *device, int index, | |||
472 | 468 | ||
473 | static void pmac_u3_cascade(unsigned int irq, struct irq_desc *desc) | 469 | static void pmac_u3_cascade(unsigned int irq, struct irq_desc *desc) |
474 | { | 470 | { |
475 | struct mpic *mpic = desc->handler_data; | 471 | struct irq_chip *chip = irq_desc_get_chip(desc); |
476 | 472 | struct mpic *mpic = irq_desc_get_handler_data(desc); | |
477 | unsigned int cascade_irq = mpic_get_one_irq(mpic); | 473 | unsigned int cascade_irq = mpic_get_one_irq(mpic); |
474 | |||
478 | if (cascade_irq != NO_IRQ) | 475 | if (cascade_irq != NO_IRQ) |
479 | generic_handle_irq(cascade_irq); | 476 | generic_handle_irq(cascade_irq); |
480 | desc->chip->eoi(irq); | 477 | |
478 | chip->irq_eoi(&desc->irq_data); | ||
481 | } | 479 | } |
482 | 480 | ||
483 | static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic) | 481 | static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic) |
@@ -589,8 +587,8 @@ static int __init pmac_pic_probe_mpic(void) | |||
589 | of_node_put(slave); | 587 | of_node_put(slave); |
590 | return 0; | 588 | return 0; |
591 | } | 589 | } |
592 | set_irq_data(cascade, mpic2); | 590 | irq_set_handler_data(cascade, mpic2); |
593 | set_irq_chained_handler(cascade, pmac_u3_cascade); | 591 | irq_set_chained_handler(cascade, pmac_u3_cascade); |
594 | 592 | ||
595 | of_node_put(slave); | 593 | of_node_put(slave); |
596 | return 0; | 594 | return 0; |
@@ -676,7 +674,7 @@ not_found: | |||
676 | return viaint; | 674 | return viaint; |
677 | } | 675 | } |
678 | 676 | ||
679 | static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state) | 677 | static int pmacpic_suspend(void) |
680 | { | 678 | { |
681 | int viaint = pmacpic_find_viaint(); | 679 | int viaint = pmacpic_find_viaint(); |
682 | 680 | ||
@@ -697,7 +695,7 @@ static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state) | |||
697 | return 0; | 695 | return 0; |
698 | } | 696 | } |
699 | 697 | ||
700 | static int pmacpic_resume(struct sys_device *sysdev) | 698 | static void pmacpic_resume(void) |
701 | { | 699 | { |
702 | int i; | 700 | int i; |
703 | 701 | ||
@@ -707,40 +705,21 @@ static int pmacpic_resume(struct sys_device *sysdev) | |||
707 | mb(); | 705 | mb(); |
708 | for (i = 0; i < max_real_irqs; ++i) | 706 | for (i = 0; i < max_real_irqs; ++i) |
709 | if (test_bit(i, sleep_save_mask)) | 707 | if (test_bit(i, sleep_save_mask)) |
710 | pmac_unmask_irq(i); | 708 | pmac_unmask_irq(irq_get_irq_data(i)); |
711 | |||
712 | return 0; | ||
713 | } | 709 | } |
714 | 710 | ||
715 | #endif /* CONFIG_PM && CONFIG_PPC32 */ | 711 | static struct syscore_ops pmacpic_syscore_ops = { |
716 | 712 | .suspend = pmacpic_suspend, | |
717 | static struct sysdev_class pmacpic_sysclass = { | 713 | .resume = pmacpic_resume, |
718 | .name = "pmac_pic", | ||
719 | }; | ||
720 | |||
721 | static struct sys_device device_pmacpic = { | ||
722 | .id = 0, | ||
723 | .cls = &pmacpic_sysclass, | ||
724 | }; | 714 | }; |
725 | 715 | ||
726 | static struct sysdev_driver driver_pmacpic = { | 716 | static int __init init_pmacpic_syscore(void) |
727 | #if defined(CONFIG_PM) && defined(CONFIG_PPC32) | ||
728 | .suspend = &pmacpic_suspend, | ||
729 | .resume = &pmacpic_resume, | ||
730 | #endif /* CONFIG_PM && CONFIG_PPC32 */ | ||
731 | }; | ||
732 | |||
733 | static int __init init_pmacpic_sysfs(void) | ||
734 | { | 717 | { |
735 | #ifdef CONFIG_PPC32 | 718 | if (pmac_irq_hw[0]) |
736 | if (max_irqs == 0) | 719 | register_syscore_ops(&pmacpic_syscore_ops); |
737 | return -ENODEV; | ||
738 | #endif | ||
739 | printk(KERN_DEBUG "Registering pmac pic with sysfs...\n"); | ||
740 | sysdev_class_register(&pmacpic_sysclass); | ||
741 | sysdev_register(&device_pmacpic); | ||
742 | sysdev_driver_register(&pmacpic_sysclass, &driver_pmacpic); | ||
743 | return 0; | 720 | return 0; |
744 | } | 721 | } |
745 | machine_subsys_initcall(powermac, init_pmacpic_sysfs); | ||
746 | 722 | ||
723 | machine_subsys_initcall(powermac, init_pmacpic_syscore); | ||
724 | |||
725 | #endif /* CONFIG_PM && CONFIG_PPC32 */ | ||
diff --git a/arch/powerpc/platforms/powermac/pic.h b/arch/powerpc/platforms/powermac/pic.h deleted file mode 100644 index d622a8345aaa..000000000000 --- a/arch/powerpc/platforms/powermac/pic.h +++ /dev/null | |||
@@ -1,11 +0,0 @@ | |||
1 | #ifndef __PPC_PLATFORMS_PMAC_PIC_H | ||
2 | #define __PPC_PLATFORMS_PMAC_PIC_H | ||
3 | |||
4 | #include <linux/irq.h> | ||
5 | |||
6 | extern struct irq_chip pmac_pic; | ||
7 | |||
8 | extern void pmac_pic_init(void); | ||
9 | extern int pmac_get_irq(void); | ||
10 | |||
11 | #endif /* __PPC_PLATFORMS_PMAC_PIC_H */ | ||
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h index f0bc08f6c1f0..8327cce2bdb0 100644 --- a/arch/powerpc/platforms/powermac/pmac.h +++ b/arch/powerpc/platforms/powermac/pmac.h | |||
@@ -33,7 +33,7 @@ extern void pmac_setup_pci_dma(void); | |||
33 | extern void pmac_check_ht_link(void); | 33 | extern void pmac_check_ht_link(void); |
34 | 34 | ||
35 | extern void pmac_setup_smp(void); | 35 | extern void pmac_setup_smp(void); |
36 | extern void pmac32_cpu_die(void); | 36 | extern int psurge_secondary_virq; |
37 | extern void low_cpu_die(void) __attribute__((noreturn)); | 37 | extern void low_cpu_die(void) __attribute__((noreturn)); |
38 | 38 | ||
39 | extern int pmac_nvram_init(void); | 39 | extern int pmac_nvram_init(void); |
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 9deb274841f1..aa45281bd296 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c | |||
@@ -506,6 +506,15 @@ static int __init pmac_declare_of_platform_devices(void) | |||
506 | of_platform_device_create(np, "smu", NULL); | 506 | of_platform_device_create(np, "smu", NULL); |
507 | of_node_put(np); | 507 | of_node_put(np); |
508 | } | 508 | } |
509 | np = of_find_node_by_type(NULL, "fcu"); | ||
510 | if (np == NULL) { | ||
511 | /* Some machines have strangely broken device-tree */ | ||
512 | np = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/fan@15e"); | ||
513 | } | ||
514 | if (np) { | ||
515 | of_platform_device_create(np, "temperature", NULL); | ||
516 | of_node_put(np); | ||
517 | } | ||
509 | 518 | ||
510 | return 0; | 519 | return 0; |
511 | } | 520 | } |
@@ -641,51 +650,6 @@ static int pmac_pci_probe_mode(struct pci_bus *bus) | |||
641 | return PCI_PROBE_NORMAL; | 650 | return PCI_PROBE_NORMAL; |
642 | return PCI_PROBE_DEVTREE; | 651 | return PCI_PROBE_DEVTREE; |
643 | } | 652 | } |
644 | |||
645 | #ifdef CONFIG_HOTPLUG_CPU | ||
646 | /* access per cpu vars from generic smp.c */ | ||
647 | DECLARE_PER_CPU(int, cpu_state); | ||
648 | |||
649 | static void pmac64_cpu_die(void) | ||
650 | { | ||
651 | /* | ||
652 | * turn off as much as possible, we'll be | ||
653 | * kicked out as this will only be invoked | ||
654 | * on core99 platforms for now ... | ||
655 | */ | ||
656 | |||
657 | printk(KERN_INFO "CPU#%d offline\n", smp_processor_id()); | ||
658 | __get_cpu_var(cpu_state) = CPU_DEAD; | ||
659 | smp_wmb(); | ||
660 | |||
661 | /* | ||
662 | * during the path that leads here preemption is disabled, | ||
663 | * reenable it now so that when coming up preempt count is | ||
664 | * zero correctly | ||
665 | */ | ||
666 | preempt_enable(); | ||
667 | |||
668 | /* | ||
669 | * hard-disable interrupts for the non-NAP case, the NAP code | ||
670 | * needs to re-enable interrupts (but soft-disables them) | ||
671 | */ | ||
672 | hard_irq_disable(); | ||
673 | |||
674 | while (1) { | ||
675 | /* let's not take timer interrupts too often ... */ | ||
676 | set_dec(0x7fffffff); | ||
677 | |||
678 | /* should always be true at this point */ | ||
679 | if (cpu_has_feature(CPU_FTR_CAN_NAP)) | ||
680 | power4_cpu_offline_powersave(); | ||
681 | else { | ||
682 | HMT_low(); | ||
683 | HMT_very_low(); | ||
684 | } | ||
685 | } | ||
686 | } | ||
687 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
688 | |||
689 | #endif /* CONFIG_PPC64 */ | 653 | #endif /* CONFIG_PPC64 */ |
690 | 654 | ||
691 | define_machine(powermac) { | 655 | define_machine(powermac) { |
@@ -717,15 +681,4 @@ define_machine(powermac) { | |||
717 | .pcibios_after_init = pmac_pcibios_after_init, | 681 | .pcibios_after_init = pmac_pcibios_after_init, |
718 | .phys_mem_access_prot = pci_phys_mem_access_prot, | 682 | .phys_mem_access_prot = pci_phys_mem_access_prot, |
719 | #endif | 683 | #endif |
720 | #ifdef CONFIG_HOTPLUG_CPU | ||
721 | #ifdef CONFIG_PPC64 | ||
722 | .cpu_die = pmac64_cpu_die, | ||
723 | #endif | ||
724 | #ifdef CONFIG_PPC32 | ||
725 | .cpu_die = pmac32_cpu_die, | ||
726 | #endif | ||
727 | #endif | ||
728 | #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32) | ||
729 | .cpu_die = generic_mach_cpu_die, | ||
730 | #endif | ||
731 | }; | 684 | }; |
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index c95215f4f8b6..db092d7c4c5b 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c | |||
@@ -70,7 +70,7 @@ static void (*pmac_tb_freeze)(int freeze); | |||
70 | static u64 timebase; | 70 | static u64 timebase; |
71 | static int tb_req; | 71 | static int tb_req; |
72 | 72 | ||
73 | #ifdef CONFIG_PPC32 | 73 | #ifdef CONFIG_PPC_PMAC32_PSURGE |
74 | 74 | ||
75 | /* | 75 | /* |
76 | * Powersurge (old powermac SMP) support. | 76 | * Powersurge (old powermac SMP) support. |
@@ -124,6 +124,10 @@ static volatile u32 __iomem *psurge_start; | |||
124 | /* what sort of powersurge board we have */ | 124 | /* what sort of powersurge board we have */ |
125 | static int psurge_type = PSURGE_NONE; | 125 | static int psurge_type = PSURGE_NONE; |
126 | 126 | ||
127 | /* irq for secondary cpus to report */ | ||
128 | static struct irq_host *psurge_host; | ||
129 | int psurge_secondary_virq; | ||
130 | |||
127 | /* | 131 | /* |
128 | * Set and clear IPIs for powersurge. | 132 | * Set and clear IPIs for powersurge. |
129 | */ | 133 | */ |
@@ -156,51 +160,52 @@ static inline void psurge_clr_ipi(int cpu) | |||
156 | /* | 160 | /* |
157 | * On powersurge (old SMP powermac architecture) we don't have | 161 | * On powersurge (old SMP powermac architecture) we don't have |
158 | * separate IPIs for separate messages like openpic does. Instead | 162 | * separate IPIs for separate messages like openpic does. Instead |
159 | * we have a bitmap for each processor, where a 1 bit means that | 163 | * use the generic demux helpers |
160 | * the corresponding message is pending for that processor. | ||
161 | * Ideally each cpu's entry would be in a different cache line. | ||
162 | * -- paulus. | 164 | * -- paulus. |
163 | */ | 165 | */ |
164 | static unsigned long psurge_smp_message[NR_CPUS]; | 166 | static irqreturn_t psurge_ipi_intr(int irq, void *d) |
165 | |||
166 | void psurge_smp_message_recv(void) | ||
167 | { | 167 | { |
168 | int cpu = smp_processor_id(); | 168 | psurge_clr_ipi(smp_processor_id()); |
169 | int msg; | 169 | smp_ipi_demux(); |
170 | |||
171 | /* clear interrupt */ | ||
172 | psurge_clr_ipi(cpu); | ||
173 | 170 | ||
174 | if (num_online_cpus() < 2) | 171 | return IRQ_HANDLED; |
175 | return; | 172 | } |
176 | 173 | ||
177 | /* make sure there is a message there */ | 174 | static void smp_psurge_cause_ipi(int cpu, unsigned long data) |
178 | for (msg = 0; msg < 4; msg++) | 175 | { |
179 | if (test_and_clear_bit(msg, &psurge_smp_message[cpu])) | 176 | psurge_set_ipi(cpu); |
180 | smp_message_recv(msg); | ||
181 | } | 177 | } |
182 | 178 | ||
183 | irqreturn_t psurge_primary_intr(int irq, void *d) | 179 | static int psurge_host_map(struct irq_host *h, unsigned int virq, |
180 | irq_hw_number_t hw) | ||
184 | { | 181 | { |
185 | psurge_smp_message_recv(); | 182 | irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_percpu_irq); |
186 | return IRQ_HANDLED; | 183 | |
184 | return 0; | ||
187 | } | 185 | } |
188 | 186 | ||
189 | static void smp_psurge_message_pass(int target, int msg) | 187 | struct irq_host_ops psurge_host_ops = { |
188 | .map = psurge_host_map, | ||
189 | }; | ||
190 | |||
191 | static int psurge_secondary_ipi_init(void) | ||
190 | { | 192 | { |
191 | int i; | 193 | int rc = -ENOMEM; |
192 | 194 | ||
193 | if (num_online_cpus() < 2) | 195 | psurge_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, |
194 | return; | 196 | &psurge_host_ops, 0); |
195 | 197 | ||
196 | for_each_online_cpu(i) { | 198 | if (psurge_host) |
197 | if (target == MSG_ALL | 199 | psurge_secondary_virq = irq_create_direct_mapping(psurge_host); |
198 | || (target == MSG_ALL_BUT_SELF && i != smp_processor_id()) | 200 | |
199 | || target == i) { | 201 | if (psurge_secondary_virq) |
200 | set_bit(msg, &psurge_smp_message[i]); | 202 | rc = request_irq(psurge_secondary_virq, psurge_ipi_intr, |
201 | psurge_set_ipi(i); | 203 | IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL); |
202 | } | 204 | |
203 | } | 205 | if (rc) |
206 | pr_err("Failed to setup secondary cpu IPI\n"); | ||
207 | |||
208 | return rc; | ||
204 | } | 209 | } |
205 | 210 | ||
206 | /* | 211 | /* |
@@ -311,6 +316,9 @@ static int __init smp_psurge_probe(void) | |||
311 | ncpus = 2; | 316 | ncpus = 2; |
312 | } | 317 | } |
313 | 318 | ||
319 | if (psurge_secondary_ipi_init()) | ||
320 | return 1; | ||
321 | |||
314 | psurge_start = ioremap(PSURGE_START, 4); | 322 | psurge_start = ioremap(PSURGE_START, 4); |
315 | psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4); | 323 | psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4); |
316 | 324 | ||
@@ -329,7 +337,7 @@ static int __init smp_psurge_probe(void) | |||
329 | return ncpus; | 337 | return ncpus; |
330 | } | 338 | } |
331 | 339 | ||
332 | static void __init smp_psurge_kick_cpu(int nr) | 340 | static int __init smp_psurge_kick_cpu(int nr) |
333 | { | 341 | { |
334 | unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8; | 342 | unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8; |
335 | unsigned long a, flags; | 343 | unsigned long a, flags; |
@@ -394,11 +402,13 @@ static void __init smp_psurge_kick_cpu(int nr) | |||
394 | psurge_set_ipi(1); | 402 | psurge_set_ipi(1); |
395 | 403 | ||
396 | if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354); | 404 | if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354); |
405 | |||
406 | return 0; | ||
397 | } | 407 | } |
398 | 408 | ||
399 | static struct irqaction psurge_irqaction = { | 409 | static struct irqaction psurge_irqaction = { |
400 | .handler = psurge_primary_intr, | 410 | .handler = psurge_ipi_intr, |
401 | .flags = IRQF_DISABLED, | 411 | .flags = IRQF_DISABLED|IRQF_PERCPU, |
402 | .name = "primary IPI", | 412 | .name = "primary IPI", |
403 | }; | 413 | }; |
404 | 414 | ||
@@ -437,14 +447,15 @@ void __init smp_psurge_give_timebase(void) | |||
437 | 447 | ||
438 | /* PowerSurge-style Macs */ | 448 | /* PowerSurge-style Macs */ |
439 | struct smp_ops_t psurge_smp_ops = { | 449 | struct smp_ops_t psurge_smp_ops = { |
440 | .message_pass = smp_psurge_message_pass, | 450 | .message_pass = smp_muxed_ipi_message_pass, |
451 | .cause_ipi = smp_psurge_cause_ipi, | ||
441 | .probe = smp_psurge_probe, | 452 | .probe = smp_psurge_probe, |
442 | .kick_cpu = smp_psurge_kick_cpu, | 453 | .kick_cpu = smp_psurge_kick_cpu, |
443 | .setup_cpu = smp_psurge_setup_cpu, | 454 | .setup_cpu = smp_psurge_setup_cpu, |
444 | .give_timebase = smp_psurge_give_timebase, | 455 | .give_timebase = smp_psurge_give_timebase, |
445 | .take_timebase = smp_psurge_take_timebase, | 456 | .take_timebase = smp_psurge_take_timebase, |
446 | }; | 457 | }; |
447 | #endif /* CONFIG_PPC32 - actually powersurge support */ | 458 | #endif /* CONFIG_PPC_PMAC32_PSURGE */ |
448 | 459 | ||
449 | /* | 460 | /* |
450 | * Core 99 and later support | 461 | * Core 99 and later support |
@@ -791,14 +802,14 @@ static int __init smp_core99_probe(void) | |||
791 | return ncpus; | 802 | return ncpus; |
792 | } | 803 | } |
793 | 804 | ||
794 | static void __devinit smp_core99_kick_cpu(int nr) | 805 | static int __devinit smp_core99_kick_cpu(int nr) |
795 | { | 806 | { |
796 | unsigned int save_vector; | 807 | unsigned int save_vector; |
797 | unsigned long target, flags; | 808 | unsigned long target, flags; |
798 | unsigned int *vector = (unsigned int *)(PAGE_OFFSET+0x100); | 809 | unsigned int *vector = (unsigned int *)(PAGE_OFFSET+0x100); |
799 | 810 | ||
800 | if (nr < 0 || nr > 3) | 811 | if (nr < 0 || nr > 3) |
801 | return; | 812 | return -ENOENT; |
802 | 813 | ||
803 | if (ppc_md.progress) | 814 | if (ppc_md.progress) |
804 | ppc_md.progress("smp_core99_kick_cpu", 0x346); | 815 | ppc_md.progress("smp_core99_kick_cpu", 0x346); |
@@ -830,6 +841,8 @@ static void __devinit smp_core99_kick_cpu(int nr) | |||
830 | 841 | ||
831 | local_irq_restore(flags); | 842 | local_irq_restore(flags); |
832 | if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347); | 843 | if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347); |
844 | |||
845 | return 0; | ||
833 | } | 846 | } |
834 | 847 | ||
835 | static void __devinit smp_core99_setup_cpu(int cpu_nr) | 848 | static void __devinit smp_core99_setup_cpu(int cpu_nr) |
@@ -840,92 +853,151 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr) | |||
840 | 853 | ||
841 | /* Setup openpic */ | 854 | /* Setup openpic */ |
842 | mpic_setup_this_cpu(); | 855 | mpic_setup_this_cpu(); |
856 | } | ||
843 | 857 | ||
844 | if (cpu_nr == 0) { | ||
845 | #ifdef CONFIG_PPC64 | 858 | #ifdef CONFIG_PPC64 |
846 | extern void g5_phy_disable_cpu1(void); | 859 | #ifdef CONFIG_HOTPLUG_CPU |
860 | static int smp_core99_cpu_notify(struct notifier_block *self, | ||
861 | unsigned long action, void *hcpu) | ||
862 | { | ||
863 | int rc; | ||
847 | 864 | ||
848 | /* Close i2c bus if it was used for tb sync */ | 865 | switch(action) { |
866 | case CPU_UP_PREPARE: | ||
867 | case CPU_UP_PREPARE_FROZEN: | ||
868 | /* Open i2c bus if it was used for tb sync */ | ||
849 | if (pmac_tb_clock_chip_host) { | 869 | if (pmac_tb_clock_chip_host) { |
850 | pmac_i2c_close(pmac_tb_clock_chip_host); | 870 | rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1); |
851 | pmac_tb_clock_chip_host = NULL; | 871 | if (rc) { |
872 | pr_err("Failed to open i2c bus for time sync\n"); | ||
873 | return notifier_from_errno(rc); | ||
874 | } | ||
852 | } | 875 | } |
876 | break; | ||
877 | case CPU_ONLINE: | ||
878 | case CPU_UP_CANCELED: | ||
879 | /* Close i2c bus if it was used for tb sync */ | ||
880 | if (pmac_tb_clock_chip_host) | ||
881 | pmac_i2c_close(pmac_tb_clock_chip_host); | ||
882 | break; | ||
883 | default: | ||
884 | break; | ||
885 | } | ||
886 | return NOTIFY_OK; | ||
887 | } | ||
853 | 888 | ||
854 | /* If we didn't start the second CPU, we must take | 889 | static struct notifier_block __cpuinitdata smp_core99_cpu_nb = { |
855 | * it off the bus | 890 | .notifier_call = smp_core99_cpu_notify, |
856 | */ | 891 | }; |
857 | if (of_machine_is_compatible("MacRISC4") && | 892 | #endif /* CONFIG_HOTPLUG_CPU */ |
858 | num_online_cpus() < 2) | 893 | |
859 | g5_phy_disable_cpu1(); | 894 | static void __init smp_core99_bringup_done(void) |
860 | #endif /* CONFIG_PPC64 */ | 895 | { |
896 | extern void g5_phy_disable_cpu1(void); | ||
861 | 897 | ||
862 | if (ppc_md.progress) | 898 | /* Close i2c bus if it was used for tb sync */ |
863 | ppc_md.progress("core99_setup_cpu 0 done", 0x349); | 899 | if (pmac_tb_clock_chip_host) |
900 | pmac_i2c_close(pmac_tb_clock_chip_host); | ||
901 | |||
902 | /* If we didn't start the second CPU, we must take | ||
903 | * it off the bus. | ||
904 | */ | ||
905 | if (of_machine_is_compatible("MacRISC4") && | ||
906 | num_online_cpus() < 2) { | ||
907 | set_cpu_present(1, false); | ||
908 | g5_phy_disable_cpu1(); | ||
864 | } | 909 | } |
865 | } | 910 | #ifdef CONFIG_HOTPLUG_CPU |
911 | register_cpu_notifier(&smp_core99_cpu_nb); | ||
912 | #endif | ||
866 | 913 | ||
914 | if (ppc_md.progress) | ||
915 | ppc_md.progress("smp_core99_bringup_done", 0x349); | ||
916 | } | ||
917 | #endif /* CONFIG_PPC64 */ | ||
867 | 918 | ||
868 | #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32) | 919 | #ifdef CONFIG_HOTPLUG_CPU |
869 | 920 | ||
870 | int smp_core99_cpu_disable(void) | 921 | static int smp_core99_cpu_disable(void) |
871 | { | 922 | { |
872 | set_cpu_online(smp_processor_id(), false); | 923 | int rc = generic_cpu_disable(); |
924 | if (rc) | ||
925 | return rc; | ||
873 | 926 | ||
874 | /* XXX reset cpu affinity here */ | ||
875 | mpic_cpu_set_priority(0xf); | 927 | mpic_cpu_set_priority(0xf); |
876 | asm volatile("mtdec %0" : : "r" (0x7fffffff)); | 928 | |
877 | mb(); | ||
878 | udelay(20); | ||
879 | asm volatile("mtdec %0" : : "r" (0x7fffffff)); | ||
880 | return 0; | 929 | return 0; |
881 | } | 930 | } |
882 | 931 | ||
883 | static int cpu_dead[NR_CPUS]; | 932 | #ifdef CONFIG_PPC32 |
884 | 933 | ||
885 | void pmac32_cpu_die(void) | 934 | static void pmac_cpu_die(void) |
886 | { | 935 | { |
936 | int cpu = smp_processor_id(); | ||
937 | |||
887 | local_irq_disable(); | 938 | local_irq_disable(); |
888 | cpu_dead[smp_processor_id()] = 1; | 939 | idle_task_exit(); |
940 | pr_debug("CPU%d offline\n", cpu); | ||
941 | generic_set_cpu_dead(cpu); | ||
942 | smp_wmb(); | ||
889 | mb(); | 943 | mb(); |
890 | low_cpu_die(); | 944 | low_cpu_die(); |
891 | } | 945 | } |
892 | 946 | ||
893 | void smp_core99_cpu_die(unsigned int cpu) | 947 | #else /* CONFIG_PPC32 */ |
948 | |||
949 | static void pmac_cpu_die(void) | ||
894 | { | 950 | { |
895 | int timeout; | 951 | int cpu = smp_processor_id(); |
896 | 952 | ||
897 | timeout = 1000; | 953 | local_irq_disable(); |
898 | while (!cpu_dead[cpu]) { | 954 | idle_task_exit(); |
899 | if (--timeout == 0) { | 955 | |
900 | printk("CPU %u refused to die!\n", cpu); | 956 | /* |
901 | break; | 957 | * turn off as much as possible, we'll be |
902 | } | 958 | * kicked out as this will only be invoked |
903 | msleep(1); | 959 | * on core99 platforms for now ... |
960 | */ | ||
961 | |||
962 | printk(KERN_INFO "CPU#%d offline\n", cpu); | ||
963 | generic_set_cpu_dead(cpu); | ||
964 | smp_wmb(); | ||
965 | |||
966 | /* | ||
967 | * Re-enable interrupts. The NAP code needs to enable them | ||
968 | * anyways, do it now so we deal with the case where one already | ||
969 | * happened while soft-disabled. | ||
970 | * We shouldn't get any external interrupts, only decrementer, and the | ||
971 | * decrementer handler is safe for use on offline CPUs | ||
972 | */ | ||
973 | local_irq_enable(); | ||
974 | |||
975 | while (1) { | ||
976 | /* let's not take timer interrupts too often ... */ | ||
977 | set_dec(0x7fffffff); | ||
978 | |||
979 | /* Enter NAP mode */ | ||
980 | power4_idle(); | ||
904 | } | 981 | } |
905 | cpu_dead[cpu] = 0; | ||
906 | } | 982 | } |
907 | 983 | ||
908 | #endif /* CONFIG_HOTPLUG_CPU && CONFIG_PP32 */ | 984 | #endif /* else CONFIG_PPC32 */ |
985 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
909 | 986 | ||
910 | /* Core99 Macs (dual G4s and G5s) */ | 987 | /* Core99 Macs (dual G4s and G5s) */ |
911 | struct smp_ops_t core99_smp_ops = { | 988 | struct smp_ops_t core99_smp_ops = { |
912 | .message_pass = smp_mpic_message_pass, | 989 | .message_pass = smp_mpic_message_pass, |
913 | .probe = smp_core99_probe, | 990 | .probe = smp_core99_probe, |
991 | #ifdef CONFIG_PPC64 | ||
992 | .bringup_done = smp_core99_bringup_done, | ||
993 | #endif | ||
914 | .kick_cpu = smp_core99_kick_cpu, | 994 | .kick_cpu = smp_core99_kick_cpu, |
915 | .setup_cpu = smp_core99_setup_cpu, | 995 | .setup_cpu = smp_core99_setup_cpu, |
916 | .give_timebase = smp_core99_give_timebase, | 996 | .give_timebase = smp_core99_give_timebase, |
917 | .take_timebase = smp_core99_take_timebase, | 997 | .take_timebase = smp_core99_take_timebase, |
918 | #if defined(CONFIG_HOTPLUG_CPU) | 998 | #if defined(CONFIG_HOTPLUG_CPU) |
919 | # if defined(CONFIG_PPC32) | ||
920 | .cpu_disable = smp_core99_cpu_disable, | 999 | .cpu_disable = smp_core99_cpu_disable, |
921 | .cpu_die = smp_core99_cpu_die, | ||
922 | # endif | ||
923 | # if defined(CONFIG_PPC64) | ||
924 | .cpu_disable = generic_cpu_disable, | ||
925 | .cpu_die = generic_cpu_die, | 1000 | .cpu_die = generic_cpu_die, |
926 | /* intentionally do *NOT* assign cpu_enable, | ||
927 | * the generic code will use kick_cpu then! */ | ||
928 | # endif | ||
929 | #endif | 1001 | #endif |
930 | }; | 1002 | }; |
931 | 1003 | ||
@@ -943,7 +1015,7 @@ void __init pmac_setup_smp(void) | |||
943 | of_node_put(np); | 1015 | of_node_put(np); |
944 | smp_ops = &core99_smp_ops; | 1016 | smp_ops = &core99_smp_ops; |
945 | } | 1017 | } |
946 | #ifdef CONFIG_PPC32 | 1018 | #ifdef CONFIG_PPC_PMAC32_PSURGE |
947 | else { | 1019 | else { |
948 | /* We have to set bits in cpu_possible_mask here since the | 1020 | /* We have to set bits in cpu_possible_mask here since the |
949 | * secondary CPU(s) aren't in the device tree. Various | 1021 | * secondary CPU(s) aren't in the device tree. Various |
@@ -956,6 +1028,11 @@ void __init pmac_setup_smp(void) | |||
956 | set_cpu_possible(cpu, true); | 1028 | set_cpu_possible(cpu, true); |
957 | smp_ops = &psurge_smp_ops; | 1029 | smp_ops = &psurge_smp_ops; |
958 | } | 1030 | } |
959 | #endif /* CONFIG_PPC32 */ | 1031 | #endif /* CONFIG_PPC_PMAC32_PSURGE */ |
1032 | |||
1033 | #ifdef CONFIG_HOTPLUG_CPU | ||
1034 | ppc_md.cpu_die = pmac_cpu_die; | ||
1035 | #endif | ||
960 | } | 1036 | } |
961 | 1037 | ||
1038 | |||
diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c index b341018326df..6c4b5837fc8a 100644 --- a/arch/powerpc/platforms/ps3/device-init.c +++ b/arch/powerpc/platforms/ps3/device-init.c | |||
@@ -566,10 +566,10 @@ static int ps3_setup_dynamic_device(const struct ps3_repository_device *repo) | |||
566 | case PS3_DEV_TYPE_STOR_DISK: | 566 | case PS3_DEV_TYPE_STOR_DISK: |
567 | result = ps3_setup_storage_dev(repo, PS3_MATCH_ID_STOR_DISK); | 567 | result = ps3_setup_storage_dev(repo, PS3_MATCH_ID_STOR_DISK); |
568 | 568 | ||
569 | /* Some devices are not accessable from the Other OS lpar. */ | 569 | /* Some devices are not accessible from the Other OS lpar. */ |
570 | if (result == -ENODEV) { | 570 | if (result == -ENODEV) { |
571 | result = 0; | 571 | result = 0; |
572 | pr_debug("%s:%u: not accessable\n", __func__, | 572 | pr_debug("%s:%u: not accessible\n", __func__, |
573 | __LINE__); | 573 | __LINE__); |
574 | } | 574 | } |
575 | 575 | ||
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c index 59d9712d7364..600ed2c0ed59 100644 --- a/arch/powerpc/platforms/ps3/interrupt.c +++ b/arch/powerpc/platforms/ps3/interrupt.c | |||
@@ -44,7 +44,7 @@ | |||
44 | * @lock: | 44 | * @lock: |
45 | * @ipi_debug_brk_mask: | 45 | * @ipi_debug_brk_mask: |
46 | * | 46 | * |
47 | * The HV mantains per SMT thread mappings of HV outlet to HV plug on | 47 | * The HV maintains per SMT thread mappings of HV outlet to HV plug on |
48 | * behalf of the guest. These mappings are implemented as 256 bit guest | 48 | * behalf of the guest. These mappings are implemented as 256 bit guest |
49 | * supplied bitmaps indexed by plug number. The addresses of the bitmaps | 49 | * supplied bitmaps indexed by plug number. The addresses of the bitmaps |
50 | * are registered with the HV through lv1_configure_irq_state_bitmap(). | 50 | * are registered with the HV through lv1_configure_irq_state_bitmap(). |
@@ -99,16 +99,16 @@ static DEFINE_PER_CPU(struct ps3_private, ps3_private); | |||
99 | * Sets ps3_bmp.mask and calls lv1_did_update_interrupt_mask(). | 99 | * Sets ps3_bmp.mask and calls lv1_did_update_interrupt_mask(). |
100 | */ | 100 | */ |
101 | 101 | ||
102 | static void ps3_chip_mask(unsigned int virq) | 102 | static void ps3_chip_mask(struct irq_data *d) |
103 | { | 103 | { |
104 | struct ps3_private *pd = get_irq_chip_data(virq); | 104 | struct ps3_private *pd = irq_data_get_irq_chip_data(d); |
105 | unsigned long flags; | 105 | unsigned long flags; |
106 | 106 | ||
107 | pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__, | 107 | pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__, |
108 | pd->thread_id, virq); | 108 | pd->thread_id, d->irq); |
109 | 109 | ||
110 | local_irq_save(flags); | 110 | local_irq_save(flags); |
111 | clear_bit(63 - virq, &pd->bmp.mask); | 111 | clear_bit(63 - d->irq, &pd->bmp.mask); |
112 | lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id); | 112 | lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id); |
113 | local_irq_restore(flags); | 113 | local_irq_restore(flags); |
114 | } | 114 | } |
@@ -120,16 +120,16 @@ static void ps3_chip_mask(unsigned int virq) | |||
120 | * Clears ps3_bmp.mask and calls lv1_did_update_interrupt_mask(). | 120 | * Clears ps3_bmp.mask and calls lv1_did_update_interrupt_mask(). |
121 | */ | 121 | */ |
122 | 122 | ||
123 | static void ps3_chip_unmask(unsigned int virq) | 123 | static void ps3_chip_unmask(struct irq_data *d) |
124 | { | 124 | { |
125 | struct ps3_private *pd = get_irq_chip_data(virq); | 125 | struct ps3_private *pd = irq_data_get_irq_chip_data(d); |
126 | unsigned long flags; | 126 | unsigned long flags; |
127 | 127 | ||
128 | pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__, | 128 | pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__, |
129 | pd->thread_id, virq); | 129 | pd->thread_id, d->irq); |
130 | 130 | ||
131 | local_irq_save(flags); | 131 | local_irq_save(flags); |
132 | set_bit(63 - virq, &pd->bmp.mask); | 132 | set_bit(63 - d->irq, &pd->bmp.mask); |
133 | lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id); | 133 | lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id); |
134 | local_irq_restore(flags); | 134 | local_irq_restore(flags); |
135 | } | 135 | } |
@@ -141,10 +141,10 @@ static void ps3_chip_unmask(unsigned int virq) | |||
141 | * Calls lv1_end_of_interrupt_ext(). | 141 | * Calls lv1_end_of_interrupt_ext(). |
142 | */ | 142 | */ |
143 | 143 | ||
144 | static void ps3_chip_eoi(unsigned int virq) | 144 | static void ps3_chip_eoi(struct irq_data *d) |
145 | { | 145 | { |
146 | const struct ps3_private *pd = get_irq_chip_data(virq); | 146 | const struct ps3_private *pd = irq_data_get_irq_chip_data(d); |
147 | lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, virq); | 147 | lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq); |
148 | } | 148 | } |
149 | 149 | ||
150 | /** | 150 | /** |
@@ -153,9 +153,9 @@ static void ps3_chip_eoi(unsigned int virq) | |||
153 | 153 | ||
154 | static struct irq_chip ps3_irq_chip = { | 154 | static struct irq_chip ps3_irq_chip = { |
155 | .name = "ps3", | 155 | .name = "ps3", |
156 | .mask = ps3_chip_mask, | 156 | .irq_mask = ps3_chip_mask, |
157 | .unmask = ps3_chip_unmask, | 157 | .irq_unmask = ps3_chip_unmask, |
158 | .eoi = ps3_chip_eoi, | 158 | .irq_eoi = ps3_chip_eoi, |
159 | }; | 159 | }; |
160 | 160 | ||
161 | /** | 161 | /** |
@@ -194,15 +194,15 @@ static int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet, | |||
194 | pr_debug("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__, | 194 | pr_debug("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__, |
195 | outlet, cpu, *virq); | 195 | outlet, cpu, *virq); |
196 | 196 | ||
197 | result = set_irq_chip_data(*virq, pd); | 197 | result = irq_set_chip_data(*virq, pd); |
198 | 198 | ||
199 | if (result) { | 199 | if (result) { |
200 | pr_debug("%s:%d: set_irq_chip_data failed\n", | 200 | pr_debug("%s:%d: irq_set_chip_data failed\n", |
201 | __func__, __LINE__); | 201 | __func__, __LINE__); |
202 | goto fail_set; | 202 | goto fail_set; |
203 | } | 203 | } |
204 | 204 | ||
205 | ps3_chip_mask(*virq); | 205 | ps3_chip_mask(irq_get_irq_data(*virq)); |
206 | 206 | ||
207 | return result; | 207 | return result; |
208 | 208 | ||
@@ -221,12 +221,12 @@ fail_create: | |||
221 | 221 | ||
222 | static int ps3_virq_destroy(unsigned int virq) | 222 | static int ps3_virq_destroy(unsigned int virq) |
223 | { | 223 | { |
224 | const struct ps3_private *pd = get_irq_chip_data(virq); | 224 | const struct ps3_private *pd = irq_get_chip_data(virq); |
225 | 225 | ||
226 | pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__, | 226 | pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__, |
227 | __LINE__, pd->ppe_id, pd->thread_id, virq); | 227 | __LINE__, pd->ppe_id, pd->thread_id, virq); |
228 | 228 | ||
229 | set_irq_chip_data(virq, NULL); | 229 | irq_set_chip_data(virq, NULL); |
230 | irq_dispose_mapping(virq); | 230 | irq_dispose_mapping(virq); |
231 | 231 | ||
232 | pr_debug("%s:%d <-\n", __func__, __LINE__); | 232 | pr_debug("%s:%d <-\n", __func__, __LINE__); |
@@ -256,7 +256,7 @@ int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet, | |||
256 | goto fail_setup; | 256 | goto fail_setup; |
257 | } | 257 | } |
258 | 258 | ||
259 | pd = get_irq_chip_data(*virq); | 259 | pd = irq_get_chip_data(*virq); |
260 | 260 | ||
261 | /* Binds outlet to cpu + virq. */ | 261 | /* Binds outlet to cpu + virq. */ |
262 | 262 | ||
@@ -291,12 +291,12 @@ EXPORT_SYMBOL_GPL(ps3_irq_plug_setup); | |||
291 | int ps3_irq_plug_destroy(unsigned int virq) | 291 | int ps3_irq_plug_destroy(unsigned int virq) |
292 | { | 292 | { |
293 | int result; | 293 | int result; |
294 | const struct ps3_private *pd = get_irq_chip_data(virq); | 294 | const struct ps3_private *pd = irq_get_chip_data(virq); |
295 | 295 | ||
296 | pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__, | 296 | pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__, |
297 | __LINE__, pd->ppe_id, pd->thread_id, virq); | 297 | __LINE__, pd->ppe_id, pd->thread_id, virq); |
298 | 298 | ||
299 | ps3_chip_mask(virq); | 299 | ps3_chip_mask(irq_get_irq_data(virq)); |
300 | 300 | ||
301 | result = lv1_disconnect_irq_plug_ext(pd->ppe_id, pd->thread_id, virq); | 301 | result = lv1_disconnect_irq_plug_ext(pd->ppe_id, pd->thread_id, virq); |
302 | 302 | ||
@@ -357,7 +357,7 @@ int ps3_event_receive_port_destroy(unsigned int virq) | |||
357 | 357 | ||
358 | pr_debug(" -> %s:%d virq %u\n", __func__, __LINE__, virq); | 358 | pr_debug(" -> %s:%d virq %u\n", __func__, __LINE__, virq); |
359 | 359 | ||
360 | ps3_chip_mask(virq); | 360 | ps3_chip_mask(irq_get_irq_data(virq)); |
361 | 361 | ||
362 | result = lv1_destruct_event_receive_port(virq_to_hw(virq)); | 362 | result = lv1_destruct_event_receive_port(virq_to_hw(virq)); |
363 | 363 | ||
@@ -492,7 +492,7 @@ int ps3_io_irq_destroy(unsigned int virq) | |||
492 | int result; | 492 | int result; |
493 | unsigned long outlet = virq_to_hw(virq); | 493 | unsigned long outlet = virq_to_hw(virq); |
494 | 494 | ||
495 | ps3_chip_mask(virq); | 495 | ps3_chip_mask(irq_get_irq_data(virq)); |
496 | 496 | ||
497 | /* | 497 | /* |
498 | * lv1_destruct_io_irq_outlet() will destroy the IRQ plug, | 498 | * lv1_destruct_io_irq_outlet() will destroy the IRQ plug, |
@@ -553,7 +553,7 @@ int ps3_vuart_irq_destroy(unsigned int virq) | |||
553 | { | 553 | { |
554 | int result; | 554 | int result; |
555 | 555 | ||
556 | ps3_chip_mask(virq); | 556 | ps3_chip_mask(irq_get_irq_data(virq)); |
557 | result = lv1_deconfigure_virtual_uart_irq(); | 557 | result = lv1_deconfigure_virtual_uart_irq(); |
558 | 558 | ||
559 | if (result) { | 559 | if (result) { |
@@ -605,7 +605,7 @@ int ps3_spe_irq_destroy(unsigned int virq) | |||
605 | { | 605 | { |
606 | int result; | 606 | int result; |
607 | 607 | ||
608 | ps3_chip_mask(virq); | 608 | ps3_chip_mask(irq_get_irq_data(virq)); |
609 | 609 | ||
610 | result = ps3_irq_plug_destroy(virq); | 610 | result = ps3_irq_plug_destroy(virq); |
611 | BUG_ON(result); | 611 | BUG_ON(result); |
@@ -659,18 +659,13 @@ static void __maybe_unused _dump_mask(struct ps3_private *pd, | |||
659 | static void dump_bmp(struct ps3_private* pd) {}; | 659 | static void dump_bmp(struct ps3_private* pd) {}; |
660 | #endif /* defined(DEBUG) */ | 660 | #endif /* defined(DEBUG) */ |
661 | 661 | ||
662 | static void ps3_host_unmap(struct irq_host *h, unsigned int virq) | ||
663 | { | ||
664 | set_irq_chip_data(virq, NULL); | ||
665 | } | ||
666 | |||
667 | static int ps3_host_map(struct irq_host *h, unsigned int virq, | 662 | static int ps3_host_map(struct irq_host *h, unsigned int virq, |
668 | irq_hw_number_t hwirq) | 663 | irq_hw_number_t hwirq) |
669 | { | 664 | { |
670 | pr_debug("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq, | 665 | pr_debug("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq, |
671 | virq); | 666 | virq); |
672 | 667 | ||
673 | set_irq_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq); | 668 | irq_set_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq); |
674 | 669 | ||
675 | return 0; | 670 | return 0; |
676 | } | 671 | } |
@@ -683,7 +678,6 @@ static int ps3_host_match(struct irq_host *h, struct device_node *np) | |||
683 | 678 | ||
684 | static struct irq_host_ops ps3_host_ops = { | 679 | static struct irq_host_ops ps3_host_ops = { |
685 | .map = ps3_host_map, | 680 | .map = ps3_host_map, |
686 | .unmap = ps3_host_unmap, | ||
687 | .match = ps3_host_match, | 681 | .match = ps3_host_match, |
688 | }; | 682 | }; |
689 | 683 | ||
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c index 51ffde40af2b..4c44794faac0 100644 --- a/arch/powerpc/platforms/ps3/smp.c +++ b/arch/powerpc/platforms/ps3/smp.c | |||
@@ -39,7 +39,7 @@ | |||
39 | #define MSG_COUNT 4 | 39 | #define MSG_COUNT 4 |
40 | static DEFINE_PER_CPU(unsigned int [MSG_COUNT], ps3_ipi_virqs); | 40 | static DEFINE_PER_CPU(unsigned int [MSG_COUNT], ps3_ipi_virqs); |
41 | 41 | ||
42 | static void do_message_pass(int target, int msg) | 42 | static void ps3_smp_message_pass(int cpu, int msg) |
43 | { | 43 | { |
44 | int result; | 44 | int result; |
45 | unsigned int virq; | 45 | unsigned int virq; |
@@ -49,28 +49,12 @@ static void do_message_pass(int target, int msg) | |||
49 | return; | 49 | return; |
50 | } | 50 | } |
51 | 51 | ||
52 | virq = per_cpu(ps3_ipi_virqs, target)[msg]; | 52 | virq = per_cpu(ps3_ipi_virqs, cpu)[msg]; |
53 | result = ps3_send_event_locally(virq); | 53 | result = ps3_send_event_locally(virq); |
54 | 54 | ||
55 | if (result) | 55 | if (result) |
56 | DBG("%s:%d: ps3_send_event_locally(%d, %d) failed" | 56 | DBG("%s:%d: ps3_send_event_locally(%d, %d) failed" |
57 | " (%d)\n", __func__, __LINE__, target, msg, result); | 57 | " (%d)\n", __func__, __LINE__, cpu, msg, result); |
58 | } | ||
59 | |||
60 | static void ps3_smp_message_pass(int target, int msg) | ||
61 | { | ||
62 | int cpu; | ||
63 | |||
64 | if (target < NR_CPUS) | ||
65 | do_message_pass(target, msg); | ||
66 | else if (target == MSG_ALL_BUT_SELF) { | ||
67 | for_each_online_cpu(cpu) | ||
68 | if (cpu != smp_processor_id()) | ||
69 | do_message_pass(cpu, msg); | ||
70 | } else { | ||
71 | for_each_online_cpu(cpu) | ||
72 | do_message_pass(cpu, msg); | ||
73 | } | ||
74 | } | 58 | } |
75 | 59 | ||
76 | static int ps3_smp_probe(void) | 60 | static int ps3_smp_probe(void) |
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c index 39a472e9e80f..375a9f92158d 100644 --- a/arch/powerpc/platforms/ps3/spu.c +++ b/arch/powerpc/platforms/ps3/spu.c | |||
@@ -197,7 +197,7 @@ static void spu_unmap(struct spu *spu) | |||
197 | * The current HV requires the spu shadow regs to be mapped with the | 197 | * The current HV requires the spu shadow regs to be mapped with the |
198 | * PTE page protection bits set as read-only (PP=3). This implementation | 198 | * PTE page protection bits set as read-only (PP=3). This implementation |
199 | * uses the low level __ioremap() to bypass the page protection settings | 199 | * uses the low level __ioremap() to bypass the page protection settings |
200 | * inforced by ioremap_flags() to get the needed PTE bits set for the | 200 | * inforced by ioremap_prot() to get the needed PTE bits set for the |
201 | * shadow regs. | 201 | * shadow regs. |
202 | */ | 202 | */ |
203 | 203 | ||
@@ -214,7 +214,7 @@ static int __init setup_areas(struct spu *spu) | |||
214 | goto fail_ioremap; | 214 | goto fail_ioremap; |
215 | } | 215 | } |
216 | 216 | ||
217 | spu->local_store = (__force void *)ioremap_flags(spu->local_store_phys, | 217 | spu->local_store = (__force void *)ioremap_prot(spu->local_store_phys, |
218 | LS_SIZE, _PAGE_NO_CACHE); | 218 | LS_SIZE, _PAGE_NO_CACHE); |
219 | 219 | ||
220 | if (!spu->local_store) { | 220 | if (!spu->local_store) { |
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index c667f0f02c34..71af4c5d6c05 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig | |||
@@ -3,14 +3,17 @@ config PPC_PSERIES | |||
3 | bool "IBM pSeries & new (POWER5-based) iSeries" | 3 | bool "IBM pSeries & new (POWER5-based) iSeries" |
4 | select MPIC | 4 | select MPIC |
5 | select PCI_MSI | 5 | select PCI_MSI |
6 | select XICS | 6 | select PPC_XICS |
7 | select PPC_ICP_NATIVE | ||
8 | select PPC_ICP_HV | ||
9 | select PPC_ICS_RTAS | ||
7 | select PPC_I8259 | 10 | select PPC_I8259 |
8 | select PPC_RTAS | 11 | select PPC_RTAS |
9 | select PPC_RTAS_DAEMON | 12 | select PPC_RTAS_DAEMON |
10 | select RTAS_ERROR_LOGGING | 13 | select RTAS_ERROR_LOGGING |
11 | select PPC_UDBG_16550 | 14 | select PPC_UDBG_16550 |
12 | select PPC_NATIVE | 15 | select PPC_NATIVE |
13 | select PPC_PCI_CHOICE if EMBEDDED | 16 | select PPC_PCI_CHOICE if EXPERT |
14 | default y | 17 | default y |
15 | 18 | ||
16 | config PPC_SPLPAR | 19 | config PPC_SPLPAR |
@@ -24,19 +27,47 @@ config PPC_SPLPAR | |||
24 | two or more partitions. | 27 | two or more partitions. |
25 | 28 | ||
26 | config EEH | 29 | config EEH |
27 | bool "PCI Extended Error Handling (EEH)" if EMBEDDED | 30 | bool "PCI Extended Error Handling (EEH)" if EXPERT |
28 | depends on PPC_PSERIES && PCI | 31 | depends on PPC_PSERIES && PCI |
29 | default y if !EMBEDDED | 32 | default y if !EXPERT |
30 | 33 | ||
31 | config PSERIES_MSI | 34 | config PSERIES_MSI |
32 | bool | 35 | bool |
33 | depends on PCI_MSI && EEH | 36 | depends on PCI_MSI && EEH |
34 | default y | 37 | default y |
35 | 38 | ||
39 | config PSERIES_ENERGY | ||
40 | tristate "pSeries energy management capabilities driver" | ||
41 | depends on PPC_PSERIES | ||
42 | default y | ||
43 | help | ||
44 | Provides interface to platform energy management capabilities | ||
45 | on supported PSERIES platforms. | ||
46 | Provides: /sys/devices/system/cpu/pseries_(de)activation_hint_list | ||
47 | and /sys/devices/system/cpu/cpuN/pseries_(de)activation_hint | ||
48 | |||
36 | config SCANLOG | 49 | config SCANLOG |
37 | tristate "Scanlog dump interface" | 50 | tristate "Scanlog dump interface" |
38 | depends on RTAS_PROC && PPC_PSERIES | 51 | depends on RTAS_PROC && PPC_PSERIES |
39 | 52 | ||
53 | config IO_EVENT_IRQ | ||
54 | bool "IO Event Interrupt support" | ||
55 | depends on PPC_PSERIES | ||
56 | default y | ||
57 | help | ||
58 | Select this option, if you want to enable support for IO Event | ||
59 | interrupts. IO event interrupt is a mechanism provided by RTAS | ||
60 | to return information about hardware error and non-error events | ||
61 | which may need OS attention. RTAS returns events for multiple | ||
62 | event types and scopes. Device drivers can register their handlers | ||
63 | to receive events. | ||
64 | |||
65 | This option will only enable the IO event platform code. You | ||
66 | will still need to enable or compile the actual drivers | ||
67 | that use this infrastruture to handle IO event interrupts. | ||
68 | |||
69 | Say Y if you are unsure. | ||
70 | |||
40 | config LPARCFG | 71 | config LPARCFG |
41 | bool "LPAR Configuration Data" | 72 | bool "LPAR Configuration Data" |
42 | depends on PPC_PSERIES || PPC_ISERIES | 73 | depends on PPC_PSERIES || PPC_ISERIES |
@@ -47,6 +78,12 @@ config LPARCFG | |||
47 | config PPC_PSERIES_DEBUG | 78 | config PPC_PSERIES_DEBUG |
48 | depends on PPC_PSERIES && PPC_EARLY_DEBUG | 79 | depends on PPC_PSERIES && PPC_EARLY_DEBUG |
49 | bool "Enable extra debug logging in platforms/pseries" | 80 | bool "Enable extra debug logging in platforms/pseries" |
81 | help | ||
82 | Say Y here if you want the pseries core to produce a bunch of | ||
83 | debug messages to the system log. Select this if you are having a | ||
84 | problem with the pseries core and want to see more of what is | ||
85 | going on. This does not enable debugging in lpar.c, which must | ||
86 | be manually done due to its verbosity. | ||
50 | default y | 87 | default y |
51 | 88 | ||
52 | config PPC_SMLPAR | 89 | config PPC_SMLPAR |
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile index 046ace9c4381..3556e402cbf5 100644 --- a/arch/powerpc/platforms/pseries/Makefile +++ b/arch/powerpc/platforms/pseries/Makefile | |||
@@ -1,21 +1,16 @@ | |||
1 | ifeq ($(CONFIG_PPC64),y) | 1 | ccflags-$(CONFIG_PPC64) := -mno-minimal-toc |
2 | EXTRA_CFLAGS += -mno-minimal-toc | 2 | ccflags-$(CONFIG_PPC_PSERIES_DEBUG) += -DDEBUG |
3 | endif | ||
4 | |||
5 | ifeq ($(CONFIG_PPC_PSERIES_DEBUG),y) | ||
6 | EXTRA_CFLAGS += -DDEBUG | ||
7 | endif | ||
8 | 3 | ||
9 | obj-y := lpar.o hvCall.o nvram.o reconfig.o \ | 4 | obj-y := lpar.o hvCall.o nvram.o reconfig.o \ |
10 | setup.o iommu.o event_sources.o ras.o \ | 5 | setup.o iommu.o event_sources.o ras.o \ |
11 | firmware.o power.o dlpar.o | 6 | firmware.o power.o dlpar.o mobility.o |
12 | obj-$(CONFIG_SMP) += smp.o | 7 | obj-$(CONFIG_SMP) += smp.o |
13 | obj-$(CONFIG_XICS) += xics.o | ||
14 | obj-$(CONFIG_SCANLOG) += scanlog.o | 8 | obj-$(CONFIG_SCANLOG) += scanlog.o |
15 | obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o eeh_sysfs.o | 9 | obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o eeh_sysfs.o |
16 | obj-$(CONFIG_KEXEC) += kexec.o | 10 | obj-$(CONFIG_KEXEC) += kexec.o |
17 | obj-$(CONFIG_PCI) += pci.o pci_dlpar.o | 11 | obj-$(CONFIG_PCI) += pci.o pci_dlpar.o |
18 | obj-$(CONFIG_PSERIES_MSI) += msi.o | 12 | obj-$(CONFIG_PSERIES_MSI) += msi.o |
13 | obj-$(CONFIG_PSERIES_ENERGY) += pseries_energy.o | ||
19 | 14 | ||
20 | obj-$(CONFIG_HOTPLUG_CPU) += hotplug-cpu.o | 15 | obj-$(CONFIG_HOTPLUG_CPU) += hotplug-cpu.o |
21 | obj-$(CONFIG_MEMORY_HOTPLUG) += hotplug-memory.o | 16 | obj-$(CONFIG_MEMORY_HOTPLUG) += hotplug-memory.o |
@@ -23,9 +18,10 @@ obj-$(CONFIG_MEMORY_HOTPLUG) += hotplug-memory.o | |||
23 | obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o | 18 | obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o |
24 | obj-$(CONFIG_HVCS) += hvcserver.o | 19 | obj-$(CONFIG_HVCS) += hvcserver.o |
25 | obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o | 20 | obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o |
26 | obj-$(CONFIG_PHYP_DUMP) += phyp_dump.o | 21 | obj-$(CONFIG_PHYP_DUMP) += phyp_dump.o |
27 | obj-$(CONFIG_CMM) += cmm.o | 22 | obj-$(CONFIG_CMM) += cmm.o |
28 | obj-$(CONFIG_DTL) += dtl.o | 23 | obj-$(CONFIG_DTL) += dtl.o |
24 | obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o | ||
29 | 25 | ||
30 | ifeq ($(CONFIG_PPC_PSERIES),y) | 26 | ifeq ($(CONFIG_PPC_PSERIES),y) |
31 | obj-$(CONFIG_SUSPEND) += suspend.o | 27 | obj-$(CONFIG_SUSPEND) += suspend.o |
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c index f4803868642c..3cafc306b971 100644 --- a/arch/powerpc/platforms/pseries/cmm.c +++ b/arch/powerpc/platforms/pseries/cmm.c | |||
@@ -508,12 +508,7 @@ static int cmm_memory_isolate_cb(struct notifier_block *self, | |||
508 | if (action == MEM_ISOLATE_COUNT) | 508 | if (action == MEM_ISOLATE_COUNT) |
509 | ret = cmm_count_pages(arg); | 509 | ret = cmm_count_pages(arg); |
510 | 510 | ||
511 | if (ret) | 511 | return notifier_from_errno(ret); |
512 | ret = notifier_from_errno(ret); | ||
513 | else | ||
514 | ret = NOTIFY_OK; | ||
515 | |||
516 | return ret; | ||
517 | } | 512 | } |
518 | 513 | ||
519 | static struct notifier_block cmm_mem_isolate_nb = { | 514 | static struct notifier_block cmm_mem_isolate_nb = { |
@@ -635,12 +630,7 @@ static int cmm_memory_cb(struct notifier_block *self, | |||
635 | break; | 630 | break; |
636 | } | 631 | } |
637 | 632 | ||
638 | if (ret) | 633 | return notifier_from_errno(ret); |
639 | ret = notifier_from_errno(ret); | ||
640 | else | ||
641 | ret = NOTIFY_OK; | ||
642 | |||
643 | return ret; | ||
644 | } | 634 | } |
645 | 635 | ||
646 | static struct notifier_block cmm_mem_nb = { | 636 | static struct notifier_block cmm_mem_nb = { |
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 72d8054fa739..57ceb92b2288 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c | |||
@@ -33,7 +33,7 @@ struct cc_workarea { | |||
33 | u32 prop_offset; | 33 | u32 prop_offset; |
34 | }; | 34 | }; |
35 | 35 | ||
36 | static void dlpar_free_cc_property(struct property *prop) | 36 | void dlpar_free_cc_property(struct property *prop) |
37 | { | 37 | { |
38 | kfree(prop->name); | 38 | kfree(prop->name); |
39 | kfree(prop->value); | 39 | kfree(prop->value); |
@@ -55,13 +55,12 @@ static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa) | |||
55 | 55 | ||
56 | prop->length = ccwa->prop_length; | 56 | prop->length = ccwa->prop_length; |
57 | value = (char *)ccwa + ccwa->prop_offset; | 57 | value = (char *)ccwa + ccwa->prop_offset; |
58 | prop->value = kzalloc(prop->length, GFP_KERNEL); | 58 | prop->value = kmemdup(value, prop->length, GFP_KERNEL); |
59 | if (!prop->value) { | 59 | if (!prop->value) { |
60 | dlpar_free_cc_property(prop); | 60 | dlpar_free_cc_property(prop); |
61 | return NULL; | 61 | return NULL; |
62 | } | 62 | } |
63 | 63 | ||
64 | memcpy(prop->value, value, prop->length); | ||
65 | return prop; | 64 | return prop; |
66 | } | 65 | } |
67 | 66 | ||
@@ -75,7 +74,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa) | |||
75 | return NULL; | 74 | return NULL; |
76 | 75 | ||
77 | /* The configure connector reported name does not contain a | 76 | /* The configure connector reported name does not contain a |
78 | * preceeding '/', so we allocate a buffer large enough to | 77 | * preceding '/', so we allocate a buffer large enough to |
79 | * prepend this to the full_name. | 78 | * prepend this to the full_name. |
80 | */ | 79 | */ |
81 | name = (char *)ccwa + ccwa->name_offset; | 80 | name = (char *)ccwa + ccwa->name_offset; |
@@ -102,7 +101,7 @@ static void dlpar_free_one_cc_node(struct device_node *dn) | |||
102 | kfree(dn); | 101 | kfree(dn); |
103 | } | 102 | } |
104 | 103 | ||
105 | static void dlpar_free_cc_nodes(struct device_node *dn) | 104 | void dlpar_free_cc_nodes(struct device_node *dn) |
106 | { | 105 | { |
107 | if (dn->child) | 106 | if (dn->child) |
108 | dlpar_free_cc_nodes(dn->child); | 107 | dlpar_free_cc_nodes(dn->child); |
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c index a00addb55945..e9190073bb97 100644 --- a/arch/powerpc/platforms/pseries/dtl.c +++ b/arch/powerpc/platforms/pseries/dtl.c | |||
@@ -23,37 +23,22 @@ | |||
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/debugfs.h> | 25 | #include <linux/debugfs.h> |
26 | #include <linux/spinlock.h> | ||
26 | #include <asm/smp.h> | 27 | #include <asm/smp.h> |
27 | #include <asm/system.h> | 28 | #include <asm/system.h> |
28 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
29 | #include <asm/firmware.h> | 30 | #include <asm/firmware.h> |
31 | #include <asm/lppaca.h> | ||
30 | 32 | ||
31 | #include "plpar_wrappers.h" | 33 | #include "plpar_wrappers.h" |
32 | 34 | ||
33 | /* | ||
34 | * Layout of entries in the hypervisor's DTL buffer. Although we don't | ||
35 | * actually access the internals of an entry (we only need to know the size), | ||
36 | * we might as well define it here for reference. | ||
37 | */ | ||
38 | struct dtl_entry { | ||
39 | u8 dispatch_reason; | ||
40 | u8 preempt_reason; | ||
41 | u16 processor_id; | ||
42 | u32 enqueue_to_dispatch_time; | ||
43 | u32 ready_to_enqueue_time; | ||
44 | u32 waiting_to_ready_time; | ||
45 | u64 timebase; | ||
46 | u64 fault_addr; | ||
47 | u64 srr0; | ||
48 | u64 srr1; | ||
49 | }; | ||
50 | |||
51 | struct dtl { | 35 | struct dtl { |
52 | struct dtl_entry *buf; | 36 | struct dtl_entry *buf; |
53 | struct dentry *file; | 37 | struct dentry *file; |
54 | int cpu; | 38 | int cpu; |
55 | int buf_entries; | 39 | int buf_entries; |
56 | u64 last_idx; | 40 | u64 last_idx; |
41 | spinlock_t lock; | ||
57 | }; | 42 | }; |
58 | static DEFINE_PER_CPU(struct dtl, cpu_dtl); | 43 | static DEFINE_PER_CPU(struct dtl, cpu_dtl); |
59 | 44 | ||
@@ -67,34 +52,106 @@ static u8 dtl_event_mask = 0x7; | |||
67 | 52 | ||
68 | 53 | ||
69 | /* | 54 | /* |
70 | * Size of per-cpu log buffers. Default is just under 16 pages worth. | 55 | * Size of per-cpu log buffers. Firmware requires that the buffer does |
56 | * not cross a 4k boundary. | ||
71 | */ | 57 | */ |
72 | static int dtl_buf_entries = (16 * 85); | 58 | static int dtl_buf_entries = N_DISPATCH_LOG; |
59 | |||
60 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
61 | struct dtl_ring { | ||
62 | u64 write_index; | ||
63 | struct dtl_entry *write_ptr; | ||
64 | struct dtl_entry *buf; | ||
65 | struct dtl_entry *buf_end; | ||
66 | u8 saved_dtl_mask; | ||
67 | }; | ||
73 | 68 | ||
69 | static DEFINE_PER_CPU(struct dtl_ring, dtl_rings); | ||
74 | 70 | ||
75 | static int dtl_enable(struct dtl *dtl) | 71 | static atomic_t dtl_count; |
72 | |||
73 | /* | ||
74 | * The cpu accounting code controls the DTL ring buffer, and we get | ||
75 | * given entries as they are processed. | ||
76 | */ | ||
77 | static void consume_dtle(struct dtl_entry *dtle, u64 index) | ||
76 | { | 78 | { |
77 | unsigned long addr; | 79 | struct dtl_ring *dtlr = &__get_cpu_var(dtl_rings); |
78 | int ret, hwcpu; | 80 | struct dtl_entry *wp = dtlr->write_ptr; |
81 | struct lppaca *vpa = local_paca->lppaca_ptr; | ||
79 | 82 | ||
80 | /* only allow one reader */ | 83 | if (!wp) |
81 | if (dtl->buf) | 84 | return; |
82 | return -EBUSY; | ||
83 | 85 | ||
84 | /* we need to store the original allocation size for use during read */ | 86 | *wp = *dtle; |
85 | dtl->buf_entries = dtl_buf_entries; | 87 | barrier(); |
86 | 88 | ||
87 | dtl->buf = kmalloc_node(dtl->buf_entries * sizeof(struct dtl_entry), | 89 | /* check for hypervisor ring buffer overflow, ignore this entry if so */ |
88 | GFP_KERNEL, cpu_to_node(dtl->cpu)); | 90 | if (index + N_DISPATCH_LOG < vpa->dtl_idx) |
89 | if (!dtl->buf) { | 91 | return; |
90 | printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n", | 92 | |
91 | __func__, dtl->cpu); | 93 | ++wp; |
92 | return -ENOMEM; | 94 | if (wp == dtlr->buf_end) |
93 | } | 95 | wp = dtlr->buf; |
96 | dtlr->write_ptr = wp; | ||
97 | |||
98 | /* incrementing write_index makes the new entry visible */ | ||
99 | smp_wmb(); | ||
100 | ++dtlr->write_index; | ||
101 | } | ||
102 | |||
103 | static int dtl_start(struct dtl *dtl) | ||
104 | { | ||
105 | struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu); | ||
106 | |||
107 | dtlr->buf = dtl->buf; | ||
108 | dtlr->buf_end = dtl->buf + dtl->buf_entries; | ||
109 | dtlr->write_index = 0; | ||
110 | |||
111 | /* setting write_ptr enables logging into our buffer */ | ||
112 | smp_wmb(); | ||
113 | dtlr->write_ptr = dtl->buf; | ||
114 | |||
115 | /* enable event logging */ | ||
116 | dtlr->saved_dtl_mask = lppaca_of(dtl->cpu).dtl_enable_mask; | ||
117 | lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask; | ||
118 | |||
119 | dtl_consumer = consume_dtle; | ||
120 | atomic_inc(&dtl_count); | ||
121 | return 0; | ||
122 | } | ||
123 | |||
124 | static void dtl_stop(struct dtl *dtl) | ||
125 | { | ||
126 | struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu); | ||
127 | |||
128 | dtlr->write_ptr = NULL; | ||
129 | smp_wmb(); | ||
130 | |||
131 | dtlr->buf = NULL; | ||
132 | |||
133 | /* restore dtl_enable_mask */ | ||
134 | lppaca_of(dtl->cpu).dtl_enable_mask = dtlr->saved_dtl_mask; | ||
135 | |||
136 | if (atomic_dec_and_test(&dtl_count)) | ||
137 | dtl_consumer = NULL; | ||
138 | } | ||
139 | |||
140 | static u64 dtl_current_index(struct dtl *dtl) | ||
141 | { | ||
142 | return per_cpu(dtl_rings, dtl->cpu).write_index; | ||
143 | } | ||
144 | |||
145 | #else /* CONFIG_VIRT_CPU_ACCOUNTING */ | ||
146 | |||
147 | static int dtl_start(struct dtl *dtl) | ||
148 | { | ||
149 | unsigned long addr; | ||
150 | int ret, hwcpu; | ||
94 | 151 | ||
95 | /* Register our dtl buffer with the hypervisor. The HV expects the | 152 | /* Register our dtl buffer with the hypervisor. The HV expects the |
96 | * buffer size to be passed in the second word of the buffer */ | 153 | * buffer size to be passed in the second word of the buffer */ |
97 | ((u32 *)dtl->buf)[1] = dtl->buf_entries * sizeof(struct dtl_entry); | 154 | ((u32 *)dtl->buf)[1] = DISPATCH_LOG_BYTES; |
98 | 155 | ||
99 | hwcpu = get_hard_smp_processor_id(dtl->cpu); | 156 | hwcpu = get_hard_smp_processor_id(dtl->cpu); |
100 | addr = __pa(dtl->buf); | 157 | addr = __pa(dtl->buf); |
@@ -102,34 +159,84 @@ static int dtl_enable(struct dtl *dtl) | |||
102 | if (ret) { | 159 | if (ret) { |
103 | printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) " | 160 | printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) " |
104 | "failed with %d\n", __func__, dtl->cpu, hwcpu, ret); | 161 | "failed with %d\n", __func__, dtl->cpu, hwcpu, ret); |
105 | kfree(dtl->buf); | ||
106 | return -EIO; | 162 | return -EIO; |
107 | } | 163 | } |
108 | 164 | ||
109 | /* set our initial buffer indices */ | 165 | /* set our initial buffer indices */ |
110 | dtl->last_idx = lppaca[dtl->cpu].dtl_idx = 0; | 166 | lppaca_of(dtl->cpu).dtl_idx = 0; |
111 | 167 | ||
112 | /* ensure that our updates to the lppaca fields have occurred before | 168 | /* ensure that our updates to the lppaca fields have occurred before |
113 | * we actually enable the logging */ | 169 | * we actually enable the logging */ |
114 | smp_wmb(); | 170 | smp_wmb(); |
115 | 171 | ||
116 | /* enable event logging */ | 172 | /* enable event logging */ |
117 | lppaca[dtl->cpu].dtl_enable_mask = dtl_event_mask; | 173 | lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask; |
118 | 174 | ||
119 | return 0; | 175 | return 0; |
120 | } | 176 | } |
121 | 177 | ||
122 | static void dtl_disable(struct dtl *dtl) | 178 | static void dtl_stop(struct dtl *dtl) |
123 | { | 179 | { |
124 | int hwcpu = get_hard_smp_processor_id(dtl->cpu); | 180 | int hwcpu = get_hard_smp_processor_id(dtl->cpu); |
125 | 181 | ||
126 | lppaca[dtl->cpu].dtl_enable_mask = 0x0; | 182 | lppaca_of(dtl->cpu).dtl_enable_mask = 0x0; |
127 | 183 | ||
128 | unregister_dtl(hwcpu, __pa(dtl->buf)); | 184 | unregister_dtl(hwcpu, __pa(dtl->buf)); |
185 | } | ||
186 | |||
187 | static u64 dtl_current_index(struct dtl *dtl) | ||
188 | { | ||
189 | return lppaca_of(dtl->cpu).dtl_idx; | ||
190 | } | ||
191 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ | ||
192 | |||
193 | static int dtl_enable(struct dtl *dtl) | ||
194 | { | ||
195 | long int n_entries; | ||
196 | long int rc; | ||
197 | struct dtl_entry *buf = NULL; | ||
198 | |||
199 | if (!dtl_cache) | ||
200 | return -ENOMEM; | ||
201 | |||
202 | /* only allow one reader */ | ||
203 | if (dtl->buf) | ||
204 | return -EBUSY; | ||
205 | |||
206 | n_entries = dtl_buf_entries; | ||
207 | buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu)); | ||
208 | if (!buf) { | ||
209 | printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n", | ||
210 | __func__, dtl->cpu); | ||
211 | return -ENOMEM; | ||
212 | } | ||
129 | 213 | ||
130 | kfree(dtl->buf); | 214 | spin_lock(&dtl->lock); |
215 | rc = -EBUSY; | ||
216 | if (!dtl->buf) { | ||
217 | /* store the original allocation size for use during read */ | ||
218 | dtl->buf_entries = n_entries; | ||
219 | dtl->buf = buf; | ||
220 | dtl->last_idx = 0; | ||
221 | rc = dtl_start(dtl); | ||
222 | if (rc) | ||
223 | dtl->buf = NULL; | ||
224 | } | ||
225 | spin_unlock(&dtl->lock); | ||
226 | |||
227 | if (rc) | ||
228 | kmem_cache_free(dtl_cache, buf); | ||
229 | return rc; | ||
230 | } | ||
231 | |||
232 | static void dtl_disable(struct dtl *dtl) | ||
233 | { | ||
234 | spin_lock(&dtl->lock); | ||
235 | dtl_stop(dtl); | ||
236 | kmem_cache_free(dtl_cache, dtl->buf); | ||
131 | dtl->buf = NULL; | 237 | dtl->buf = NULL; |
132 | dtl->buf_entries = 0; | 238 | dtl->buf_entries = 0; |
239 | spin_unlock(&dtl->lock); | ||
133 | } | 240 | } |
134 | 241 | ||
135 | /* file interface */ | 242 | /* file interface */ |
@@ -157,8 +264,9 @@ static int dtl_file_release(struct inode *inode, struct file *filp) | |||
157 | static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len, | 264 | static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len, |
158 | loff_t *pos) | 265 | loff_t *pos) |
159 | { | 266 | { |
160 | int rc, cur_idx, last_idx, n_read, n_req, read_size; | 267 | long int rc, n_read, n_req, read_size; |
161 | struct dtl *dtl; | 268 | struct dtl *dtl; |
269 | u64 cur_idx, last_idx, i; | ||
162 | 270 | ||
163 | if ((len % sizeof(struct dtl_entry)) != 0) | 271 | if ((len % sizeof(struct dtl_entry)) != 0) |
164 | return -EINVAL; | 272 | return -EINVAL; |
@@ -171,41 +279,48 @@ static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len, | |||
171 | /* actual number of entries read */ | 279 | /* actual number of entries read */ |
172 | n_read = 0; | 280 | n_read = 0; |
173 | 281 | ||
174 | cur_idx = lppaca[dtl->cpu].dtl_idx; | 282 | spin_lock(&dtl->lock); |
283 | |||
284 | cur_idx = dtl_current_index(dtl); | ||
175 | last_idx = dtl->last_idx; | 285 | last_idx = dtl->last_idx; |
176 | 286 | ||
177 | if (cur_idx - last_idx > dtl->buf_entries) { | 287 | if (last_idx + dtl->buf_entries <= cur_idx) |
178 | pr_debug("%s: hv buffer overflow for cpu %d, samples lost\n", | 288 | last_idx = cur_idx - dtl->buf_entries + 1; |
179 | __func__, dtl->cpu); | 289 | |
180 | } | 290 | if (last_idx + n_req > cur_idx) |
291 | n_req = cur_idx - last_idx; | ||
292 | |||
293 | if (n_req > 0) | ||
294 | dtl->last_idx = last_idx + n_req; | ||
295 | |||
296 | spin_unlock(&dtl->lock); | ||
297 | |||
298 | if (n_req <= 0) | ||
299 | return 0; | ||
181 | 300 | ||
182 | cur_idx %= dtl->buf_entries; | 301 | i = last_idx % dtl->buf_entries; |
183 | last_idx %= dtl->buf_entries; | ||
184 | 302 | ||
185 | /* read the tail of the buffer if we've wrapped */ | 303 | /* read the tail of the buffer if we've wrapped */ |
186 | if (last_idx > cur_idx) { | 304 | if (i + n_req > dtl->buf_entries) { |
187 | read_size = min(n_req, dtl->buf_entries - last_idx); | 305 | read_size = dtl->buf_entries - i; |
188 | 306 | ||
189 | rc = copy_to_user(buf, &dtl->buf[last_idx], | 307 | rc = copy_to_user(buf, &dtl->buf[i], |
190 | read_size * sizeof(struct dtl_entry)); | 308 | read_size * sizeof(struct dtl_entry)); |
191 | if (rc) | 309 | if (rc) |
192 | return -EFAULT; | 310 | return -EFAULT; |
193 | 311 | ||
194 | last_idx = 0; | 312 | i = 0; |
195 | n_req -= read_size; | 313 | n_req -= read_size; |
196 | n_read += read_size; | 314 | n_read += read_size; |
197 | buf += read_size * sizeof(struct dtl_entry); | 315 | buf += read_size * sizeof(struct dtl_entry); |
198 | } | 316 | } |
199 | 317 | ||
200 | /* .. and now the head */ | 318 | /* .. and now the head */ |
201 | read_size = min(n_req, cur_idx - last_idx); | 319 | rc = copy_to_user(buf, &dtl->buf[i], n_req * sizeof(struct dtl_entry)); |
202 | rc = copy_to_user(buf, &dtl->buf[last_idx], | ||
203 | read_size * sizeof(struct dtl_entry)); | ||
204 | if (rc) | 320 | if (rc) |
205 | return -EFAULT; | 321 | return -EFAULT; |
206 | 322 | ||
207 | n_read += read_size; | 323 | n_read += n_req; |
208 | dtl->last_idx += n_read; | ||
209 | 324 | ||
210 | return n_read * sizeof(struct dtl_entry); | 325 | return n_read * sizeof(struct dtl_entry); |
211 | } | 326 | } |
@@ -252,7 +367,7 @@ static int dtl_init(void) | |||
252 | 367 | ||
253 | event_mask_file = debugfs_create_x8("dtl_event_mask", 0600, | 368 | event_mask_file = debugfs_create_x8("dtl_event_mask", 0600, |
254 | dtl_dir, &dtl_event_mask); | 369 | dtl_dir, &dtl_event_mask); |
255 | buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0600, | 370 | buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0400, |
256 | dtl_dir, &dtl_buf_entries); | 371 | dtl_dir, &dtl_buf_entries); |
257 | 372 | ||
258 | if (!event_mask_file || !buf_entries_file) { | 373 | if (!event_mask_file || !buf_entries_file) { |
@@ -263,6 +378,7 @@ static int dtl_init(void) | |||
263 | /* set up the per-cpu log structures */ | 378 | /* set up the per-cpu log structures */ |
264 | for_each_possible_cpu(i) { | 379 | for_each_possible_cpu(i) { |
265 | struct dtl *dtl = &per_cpu(cpu_dtl, i); | 380 | struct dtl *dtl = &per_cpu(cpu_dtl, i); |
381 | spin_lock_init(&dtl->lock); | ||
266 | dtl->cpu = i; | 382 | dtl->cpu = i; |
267 | 383 | ||
268 | rc = dtl_setup_file(dtl); | 384 | rc = dtl_setup_file(dtl); |
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c index 34b7dc12e731..46b55cf563e3 100644 --- a/arch/powerpc/platforms/pseries/eeh.c +++ b/arch/powerpc/platforms/pseries/eeh.c | |||
@@ -21,8 +21,6 @@ | |||
21 | * Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com> | 21 | * Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com> |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #undef DEBUG | ||
25 | |||
26 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
27 | #include <linux/init.h> | 25 | #include <linux/init.h> |
28 | #include <linux/list.h> | 26 | #include <linux/list.h> |
@@ -67,7 +65,7 @@ | |||
67 | * with EEH. | 65 | * with EEH. |
68 | * | 66 | * |
69 | * Ideally, a PCI device driver, when suspecting that an isolation | 67 | * Ideally, a PCI device driver, when suspecting that an isolation |
70 | * event has occured (e.g. by reading 0xff's), will then ask EEH | 68 | * event has occurred (e.g. by reading 0xff's), will then ask EEH |
71 | * whether this is the case, and then take appropriate steps to | 69 | * whether this is the case, and then take appropriate steps to |
72 | * reset the PCI slot, the PCI device, and then resume operations. | 70 | * reset the PCI slot, the PCI device, and then resume operations. |
73 | * However, until that day, the checking is done here, with the | 71 | * However, until that day, the checking is done here, with the |
@@ -95,6 +93,7 @@ static int ibm_slot_error_detail; | |||
95 | static int ibm_get_config_addr_info; | 93 | static int ibm_get_config_addr_info; |
96 | static int ibm_get_config_addr_info2; | 94 | static int ibm_get_config_addr_info2; |
97 | static int ibm_configure_bridge; | 95 | static int ibm_configure_bridge; |
96 | static int ibm_configure_pe; | ||
98 | 97 | ||
99 | int eeh_subsystem_enabled; | 98 | int eeh_subsystem_enabled; |
100 | EXPORT_SYMBOL(eeh_subsystem_enabled); | 99 | EXPORT_SYMBOL(eeh_subsystem_enabled); |
@@ -263,6 +262,8 @@ void eeh_slot_error_detail(struct pci_dn *pdn, int severity) | |||
263 | pci_regs_buf[0] = 0; | 262 | pci_regs_buf[0] = 0; |
264 | 263 | ||
265 | rtas_pci_enable(pdn, EEH_THAW_MMIO); | 264 | rtas_pci_enable(pdn, EEH_THAW_MMIO); |
265 | rtas_configure_bridge(pdn); | ||
266 | eeh_restore_bars(pdn); | ||
266 | loglen = gather_pci_data(pdn, pci_regs_buf, EEH_PCI_REGS_LOG_LEN); | 267 | loglen = gather_pci_data(pdn, pci_regs_buf, EEH_PCI_REGS_LOG_LEN); |
267 | 268 | ||
268 | rtas_slot_error_detail(pdn, severity, pci_regs_buf, loglen); | 269 | rtas_slot_error_detail(pdn, severity, pci_regs_buf, loglen); |
@@ -450,6 +451,39 @@ void eeh_clear_slot (struct device_node *dn, int mode_flag) | |||
450 | raw_spin_unlock_irqrestore(&confirm_error_lock, flags); | 451 | raw_spin_unlock_irqrestore(&confirm_error_lock, flags); |
451 | } | 452 | } |
452 | 453 | ||
454 | void __eeh_set_pe_freset(struct device_node *parent, unsigned int *freset) | ||
455 | { | ||
456 | struct device_node *dn; | ||
457 | |||
458 | for_each_child_of_node(parent, dn) { | ||
459 | if (PCI_DN(dn)) { | ||
460 | |||
461 | struct pci_dev *dev = PCI_DN(dn)->pcidev; | ||
462 | |||
463 | if (dev && dev->driver) | ||
464 | *freset |= dev->needs_freset; | ||
465 | |||
466 | __eeh_set_pe_freset(dn, freset); | ||
467 | } | ||
468 | } | ||
469 | } | ||
470 | |||
471 | void eeh_set_pe_freset(struct device_node *dn, unsigned int *freset) | ||
472 | { | ||
473 | struct pci_dev *dev; | ||
474 | dn = find_device_pe(dn); | ||
475 | |||
476 | /* Back up one, since config addrs might be shared */ | ||
477 | if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent)) | ||
478 | dn = dn->parent; | ||
479 | |||
480 | dev = PCI_DN(dn)->pcidev; | ||
481 | if (dev) | ||
482 | *freset |= dev->needs_freset; | ||
483 | |||
484 | __eeh_set_pe_freset(dn, freset); | ||
485 | } | ||
486 | |||
453 | /** | 487 | /** |
454 | * eeh_dn_check_failure - check if all 1's data is due to EEH slot freeze | 488 | * eeh_dn_check_failure - check if all 1's data is due to EEH slot freeze |
455 | * @dn device node | 489 | * @dn device node |
@@ -694,15 +728,24 @@ rtas_pci_slot_reset(struct pci_dn *pdn, int state) | |||
694 | if (pdn->eeh_pe_config_addr) | 728 | if (pdn->eeh_pe_config_addr) |
695 | config_addr = pdn->eeh_pe_config_addr; | 729 | config_addr = pdn->eeh_pe_config_addr; |
696 | 730 | ||
697 | rc = rtas_call(ibm_set_slot_reset,4,1, NULL, | 731 | rc = rtas_call(ibm_set_slot_reset, 4, 1, NULL, |
698 | config_addr, | 732 | config_addr, |
699 | BUID_HI(pdn->phb->buid), | 733 | BUID_HI(pdn->phb->buid), |
700 | BUID_LO(pdn->phb->buid), | 734 | BUID_LO(pdn->phb->buid), |
701 | state); | 735 | state); |
702 | if (rc) | 736 | |
703 | printk (KERN_WARNING "EEH: Unable to reset the failed slot," | 737 | /* Fundamental-reset not supported on this PE, try hot-reset */ |
704 | " (%d) #RST=%d dn=%s\n", | 738 | if (rc == -8 && state == 3) { |
705 | rc, state, pdn->node->full_name); | 739 | rc = rtas_call(ibm_set_slot_reset, 4, 1, NULL, |
740 | config_addr, | ||
741 | BUID_HI(pdn->phb->buid), | ||
742 | BUID_LO(pdn->phb->buid), 1); | ||
743 | if (rc) | ||
744 | printk(KERN_WARNING | ||
745 | "EEH: Unable to reset the failed slot," | ||
746 | " #RST=%d dn=%s\n", | ||
747 | rc, pdn->node->full_name); | ||
748 | } | ||
706 | } | 749 | } |
707 | 750 | ||
708 | /** | 751 | /** |
@@ -738,18 +781,21 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat | |||
738 | /** | 781 | /** |
739 | * rtas_set_slot_reset -- assert the pci #RST line for 1/4 second | 782 | * rtas_set_slot_reset -- assert the pci #RST line for 1/4 second |
740 | * @pdn: pci device node to be reset. | 783 | * @pdn: pci device node to be reset. |
741 | * | ||
742 | * Return 0 if success, else a non-zero value. | ||
743 | */ | 784 | */ |
744 | 785 | ||
745 | static void __rtas_set_slot_reset(struct pci_dn *pdn) | 786 | static void __rtas_set_slot_reset(struct pci_dn *pdn) |
746 | { | 787 | { |
747 | struct pci_dev *dev = pdn->pcidev; | 788 | unsigned int freset = 0; |
748 | 789 | ||
749 | /* Determine type of EEH reset required by device, | 790 | /* Determine type of EEH reset required for |
750 | * default hot reset or fundamental reset | 791 | * Partitionable Endpoint, a hot-reset (1) |
751 | */ | 792 | * or a fundamental reset (3). |
752 | if (dev && dev->needs_freset) | 793 | * A fundamental reset required by any device under |
794 | * Partitionable Endpoint trumps hot-reset. | ||
795 | */ | ||
796 | eeh_set_pe_freset(pdn->node, &freset); | ||
797 | |||
798 | if (freset) | ||
753 | rtas_pci_slot_reset(pdn, 3); | 799 | rtas_pci_slot_reset(pdn, 3); |
754 | else | 800 | else |
755 | rtas_pci_slot_reset(pdn, 1); | 801 | rtas_pci_slot_reset(pdn, 1); |
@@ -878,7 +924,7 @@ void eeh_restore_bars(struct pci_dn *pdn) | |||
878 | * | 924 | * |
879 | * Save the values of the device bars. Unlike the restore | 925 | * Save the values of the device bars. Unlike the restore |
880 | * routine, this routine is *not* recursive. This is because | 926 | * routine, this routine is *not* recursive. This is because |
881 | * PCI devices are added individuallly; but, for the restore, | 927 | * PCI devices are added individually; but, for the restore, |
882 | * an entire slot is reset at a time. | 928 | * an entire slot is reset at a time. |
883 | */ | 929 | */ |
884 | static void eeh_save_bars(struct pci_dn *pdn) | 930 | static void eeh_save_bars(struct pci_dn *pdn) |
@@ -897,13 +943,20 @@ rtas_configure_bridge(struct pci_dn *pdn) | |||
897 | { | 943 | { |
898 | int config_addr; | 944 | int config_addr; |
899 | int rc; | 945 | int rc; |
946 | int token; | ||
900 | 947 | ||
901 | /* Use PE configuration address, if present */ | 948 | /* Use PE configuration address, if present */ |
902 | config_addr = pdn->eeh_config_addr; | 949 | config_addr = pdn->eeh_config_addr; |
903 | if (pdn->eeh_pe_config_addr) | 950 | if (pdn->eeh_pe_config_addr) |
904 | config_addr = pdn->eeh_pe_config_addr; | 951 | config_addr = pdn->eeh_pe_config_addr; |
905 | 952 | ||
906 | rc = rtas_call(ibm_configure_bridge,3,1, NULL, | 953 | /* Use new configure-pe function, if supported */ |
954 | if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) | ||
955 | token = ibm_configure_pe; | ||
956 | else | ||
957 | token = ibm_configure_bridge; | ||
958 | |||
959 | rc = rtas_call(token, 3, 1, NULL, | ||
907 | config_addr, | 960 | config_addr, |
908 | BUID_HI(pdn->phb->buid), | 961 | BUID_HI(pdn->phb->buid), |
909 | BUID_LO(pdn->phb->buid)); | 962 | BUID_LO(pdn->phb->buid)); |
@@ -1079,6 +1132,7 @@ void __init eeh_init(void) | |||
1079 | ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info"); | 1132 | ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info"); |
1080 | ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2"); | 1133 | ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2"); |
1081 | ibm_configure_bridge = rtas_token ("ibm,configure-bridge"); | 1134 | ibm_configure_bridge = rtas_token ("ibm,configure-bridge"); |
1135 | ibm_configure_pe = rtas_token("ibm,configure-pe"); | ||
1082 | 1136 | ||
1083 | if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) | 1137 | if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) |
1084 | return; | 1138 | return; |
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c index b8d70f5d9aa9..1b6cb10589e0 100644 --- a/arch/powerpc/platforms/pseries/eeh_driver.c +++ b/arch/powerpc/platforms/pseries/eeh_driver.c | |||
@@ -328,7 +328,7 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event) | |||
328 | struct pci_bus *frozen_bus; | 328 | struct pci_bus *frozen_bus; |
329 | int rc = 0; | 329 | int rc = 0; |
330 | enum pci_ers_result result = PCI_ERS_RESULT_NONE; | 330 | enum pci_ers_result result = PCI_ERS_RESULT_NONE; |
331 | const char *location, *pci_str, *drv_str; | 331 | const char *location, *pci_str, *drv_str, *bus_pci_str, *bus_drv_str; |
332 | 332 | ||
333 | frozen_dn = find_device_pe(event->dn); | 333 | frozen_dn = find_device_pe(event->dn); |
334 | if (!frozen_dn) { | 334 | if (!frozen_dn) { |
@@ -364,13 +364,8 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event) | |||
364 | frozen_pdn = PCI_DN(frozen_dn); | 364 | frozen_pdn = PCI_DN(frozen_dn); |
365 | frozen_pdn->eeh_freeze_count++; | 365 | frozen_pdn->eeh_freeze_count++; |
366 | 366 | ||
367 | if (frozen_pdn->pcidev) { | 367 | pci_str = eeh_pci_name(event->dev); |
368 | pci_str = pci_name (frozen_pdn->pcidev); | 368 | drv_str = pcid_name(event->dev); |
369 | drv_str = pcid_name (frozen_pdn->pcidev); | ||
370 | } else { | ||
371 | pci_str = eeh_pci_name(event->dev); | ||
372 | drv_str = pcid_name (event->dev); | ||
373 | } | ||
374 | 369 | ||
375 | if (frozen_pdn->eeh_freeze_count > EEH_MAX_ALLOWED_FREEZES) | 370 | if (frozen_pdn->eeh_freeze_count > EEH_MAX_ALLOWED_FREEZES) |
376 | goto excess_failures; | 371 | goto excess_failures; |
@@ -378,8 +373,17 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event) | |||
378 | printk(KERN_WARNING | 373 | printk(KERN_WARNING |
379 | "EEH: This PCI device has failed %d times in the last hour:\n", | 374 | "EEH: This PCI device has failed %d times in the last hour:\n", |
380 | frozen_pdn->eeh_freeze_count); | 375 | frozen_pdn->eeh_freeze_count); |
376 | |||
377 | if (frozen_pdn->pcidev) { | ||
378 | bus_pci_str = pci_name(frozen_pdn->pcidev); | ||
379 | bus_drv_str = pcid_name(frozen_pdn->pcidev); | ||
380 | printk(KERN_WARNING | ||
381 | "EEH: Bus location=%s driver=%s pci addr=%s\n", | ||
382 | location, bus_drv_str, bus_pci_str); | ||
383 | } | ||
384 | |||
381 | printk(KERN_WARNING | 385 | printk(KERN_WARNING |
382 | "EEH: location=%s driver=%s pci addr=%s\n", | 386 | "EEH: Device location=%s driver=%s pci addr=%s\n", |
383 | location, drv_str, pci_str); | 387 | location, drv_str, pci_str); |
384 | 388 | ||
385 | /* Walk the various device drivers attached to this slot through | 389 | /* Walk the various device drivers attached to this slot through |
diff --git a/arch/powerpc/platforms/pseries/eeh_sysfs.c b/arch/powerpc/platforms/pseries/eeh_sysfs.c index 15e13b568904..23982c7892d2 100644 --- a/arch/powerpc/platforms/pseries/eeh_sysfs.c +++ b/arch/powerpc/platforms/pseries/eeh_sysfs.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/pci.h> | 25 | #include <linux/pci.h> |
26 | #include <asm/ppc-pci.h> | 26 | #include <asm/ppc-pci.h> |
27 | #include <asm/pci-bridge.h> | 27 | #include <asm/pci-bridge.h> |
28 | #include <linux/kobject.h> | ||
29 | 28 | ||
30 | /** | 29 | /** |
31 | * EEH_SHOW_ATTR -- create sysfs entry for eeh statistic | 30 | * EEH_SHOW_ATTR -- create sysfs entry for eeh statistic |
diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c index 0a14d8cd314f..0b0eff0cce35 100644 --- a/arch/powerpc/platforms/pseries/firmware.c +++ b/arch/powerpc/platforms/pseries/firmware.c | |||
@@ -55,6 +55,7 @@ firmware_features_table[FIRMWARE_MAX_FEATURES] = { | |||
55 | {FW_FEATURE_XDABR, "hcall-xdabr"}, | 55 | {FW_FEATURE_XDABR, "hcall-xdabr"}, |
56 | {FW_FEATURE_MULTITCE, "hcall-multi-tce"}, | 56 | {FW_FEATURE_MULTITCE, "hcall-multi-tce"}, |
57 | {FW_FEATURE_SPLPAR, "hcall-splpar"}, | 57 | {FW_FEATURE_SPLPAR, "hcall-splpar"}, |
58 | {FW_FEATURE_VPHN, "hcall-vphn"}, | ||
58 | }; | 59 | }; |
59 | 60 | ||
60 | /* Build up the firmware features bitmask using the contents of | 61 | /* Build up the firmware features bitmask using the contents of |
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index fd50ccd4bac1..46f13a3c5d09 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c | |||
@@ -19,6 +19,7 @@ | |||
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/interrupt.h> | ||
22 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
23 | #include <linux/cpu.h> | 24 | #include <linux/cpu.h> |
24 | #include <asm/system.h> | 25 | #include <asm/system.h> |
@@ -28,7 +29,7 @@ | |||
28 | #include <asm/machdep.h> | 29 | #include <asm/machdep.h> |
29 | #include <asm/vdso_datapage.h> | 30 | #include <asm/vdso_datapage.h> |
30 | #include <asm/pSeries_reconfig.h> | 31 | #include <asm/pSeries_reconfig.h> |
31 | #include "xics.h" | 32 | #include <asm/xics.h> |
32 | #include "plpar_wrappers.h" | 33 | #include "plpar_wrappers.h" |
33 | #include "offline_states.h" | 34 | #include "offline_states.h" |
34 | 35 | ||
@@ -216,7 +217,7 @@ static void pseries_cpu_die(unsigned int cpu) | |||
216 | cpu, pcpu, cpu_status); | 217 | cpu, pcpu, cpu_status); |
217 | } | 218 | } |
218 | 219 | ||
219 | /* Isolation and deallocation are definatly done by | 220 | /* Isolation and deallocation are definitely done by |
220 | * drslot_chrp_cpu. If they were not they would be | 221 | * drslot_chrp_cpu. If they were not they would be |
221 | * done here. Change isolate state to Isolate and | 222 | * done here. Change isolate state to Isolate and |
222 | * change allocation-state to Unusable. | 223 | * change allocation-state to Unusable. |
@@ -280,7 +281,7 @@ static int pseries_add_processor(struct device_node *np) | |||
280 | } | 281 | } |
281 | 282 | ||
282 | for_each_cpu(cpu, tmp) { | 283 | for_each_cpu(cpu, tmp) { |
283 | BUG_ON(cpumask_test_cpu(cpu, cpu_present_mask)); | 284 | BUG_ON(cpu_present(cpu)); |
284 | set_cpu_present(cpu, true); | 285 | set_cpu_present(cpu, true); |
285 | set_hard_smp_processor_id(cpu, *intserv++); | 286 | set_hard_smp_processor_id(cpu, *intserv++); |
286 | } | 287 | } |
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index bc8803664140..9d6a8effeda2 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c | |||
@@ -12,11 +12,67 @@ | |||
12 | #include <linux/of.h> | 12 | #include <linux/of.h> |
13 | #include <linux/memblock.h> | 13 | #include <linux/memblock.h> |
14 | #include <linux/vmalloc.h> | 14 | #include <linux/vmalloc.h> |
15 | #include <linux/memory.h> | ||
16 | |||
15 | #include <asm/firmware.h> | 17 | #include <asm/firmware.h> |
16 | #include <asm/machdep.h> | 18 | #include <asm/machdep.h> |
17 | #include <asm/pSeries_reconfig.h> | 19 | #include <asm/pSeries_reconfig.h> |
18 | #include <asm/sparsemem.h> | 20 | #include <asm/sparsemem.h> |
19 | 21 | ||
22 | static unsigned long get_memblock_size(void) | ||
23 | { | ||
24 | struct device_node *np; | ||
25 | unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE; | ||
26 | struct resource r; | ||
27 | |||
28 | np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); | ||
29 | if (np) { | ||
30 | const __be64 *size; | ||
31 | |||
32 | size = of_get_property(np, "ibm,lmb-size", NULL); | ||
33 | if (size) | ||
34 | memblock_size = be64_to_cpup(size); | ||
35 | of_node_put(np); | ||
36 | } else if (machine_is(pseries)) { | ||
37 | /* This fallback really only applies to pseries */ | ||
38 | unsigned int memzero_size = 0; | ||
39 | |||
40 | np = of_find_node_by_path("/memory@0"); | ||
41 | if (np) { | ||
42 | if (!of_address_to_resource(np, 0, &r)) | ||
43 | memzero_size = resource_size(&r); | ||
44 | of_node_put(np); | ||
45 | } | ||
46 | |||
47 | if (memzero_size) { | ||
48 | /* We now know the size of memory@0, use this to find | ||
49 | * the first memoryblock and get its size. | ||
50 | */ | ||
51 | char buf[64]; | ||
52 | |||
53 | sprintf(buf, "/memory@%x", memzero_size); | ||
54 | np = of_find_node_by_path(buf); | ||
55 | if (np) { | ||
56 | if (!of_address_to_resource(np, 0, &r)) | ||
57 | memblock_size = resource_size(&r); | ||
58 | of_node_put(np); | ||
59 | } | ||
60 | } | ||
61 | } | ||
62 | return memblock_size; | ||
63 | } | ||
64 | |||
65 | /* WARNING: This is going to override the generic definition whenever | ||
66 | * pseries is built-in regardless of what platform is active at boot | ||
67 | * time. This is fine for now as this is the only "option" and it | ||
68 | * should work everywhere. If not, we'll have to turn this into a | ||
69 | * ppc_md. callback | ||
70 | */ | ||
71 | unsigned long memory_block_size_bytes(void) | ||
72 | { | ||
73 | return get_memblock_size(); | ||
74 | } | ||
75 | |||
20 | static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) | 76 | static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) |
21 | { | 77 | { |
22 | unsigned long start, start_pfn; | 78 | unsigned long start, start_pfn; |
@@ -127,30 +183,22 @@ static int pseries_add_memory(struct device_node *np) | |||
127 | 183 | ||
128 | static int pseries_drconf_memory(unsigned long *base, unsigned int action) | 184 | static int pseries_drconf_memory(unsigned long *base, unsigned int action) |
129 | { | 185 | { |
130 | struct device_node *np; | 186 | unsigned long memblock_size; |
131 | const unsigned long *lmb_size; | ||
132 | int rc; | 187 | int rc; |
133 | 188 | ||
134 | np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); | 189 | memblock_size = get_memblock_size(); |
135 | if (!np) | 190 | if (!memblock_size) |
136 | return -EINVAL; | 191 | return -EINVAL; |
137 | 192 | ||
138 | lmb_size = of_get_property(np, "ibm,lmb-size", NULL); | ||
139 | if (!lmb_size) { | ||
140 | of_node_put(np); | ||
141 | return -EINVAL; | ||
142 | } | ||
143 | |||
144 | if (action == PSERIES_DRCONF_MEM_ADD) { | 193 | if (action == PSERIES_DRCONF_MEM_ADD) { |
145 | rc = memblock_add(*base, *lmb_size); | 194 | rc = memblock_add(*base, memblock_size); |
146 | rc = (rc < 0) ? -EINVAL : 0; | 195 | rc = (rc < 0) ? -EINVAL : 0; |
147 | } else if (action == PSERIES_DRCONF_MEM_REMOVE) { | 196 | } else if (action == PSERIES_DRCONF_MEM_REMOVE) { |
148 | rc = pseries_remove_memblock(*base, *lmb_size); | 197 | rc = pseries_remove_memblock(*base, memblock_size); |
149 | } else { | 198 | } else { |
150 | rc = -EINVAL; | 199 | rc = -EINVAL; |
151 | } | 200 | } |
152 | 201 | ||
153 | of_node_put(np); | ||
154 | return rc; | 202 | return rc; |
155 | } | 203 | } |
156 | 204 | ||
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S index 48d20573e4de..fd05fdee576a 100644 --- a/arch/powerpc/platforms/pseries/hvCall.S +++ b/arch/powerpc/platforms/pseries/hvCall.S | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <asm/processor.h> | 11 | #include <asm/processor.h> |
12 | #include <asm/ppc_asm.h> | 12 | #include <asm/ppc_asm.h> |
13 | #include <asm/asm-offsets.h> | 13 | #include <asm/asm-offsets.h> |
14 | #include <asm/ptrace.h> | ||
14 | 15 | ||
15 | #define STK_PARM(i) (48 + ((i)-3)*8) | 16 | #define STK_PARM(i) (48 + ((i)-3)*8) |
16 | 17 | ||
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c index e19ff021e711..f106662f4381 100644 --- a/arch/powerpc/platforms/pseries/hvCall_inst.c +++ b/arch/powerpc/platforms/pseries/hvCall_inst.c | |||
@@ -55,7 +55,7 @@ static void hc_stop(struct seq_file *m, void *p) | |||
55 | static int hc_show(struct seq_file *m, void *p) | 55 | static int hc_show(struct seq_file *m, void *p) |
56 | { | 56 | { |
57 | unsigned long h_num = (unsigned long)p; | 57 | unsigned long h_num = (unsigned long)p; |
58 | struct hcall_stats *hs = (struct hcall_stats *)m->private; | 58 | struct hcall_stats *hs = m->private; |
59 | 59 | ||
60 | if (hs[h_num].num_calls) { | 60 | if (hs[h_num].num_calls) { |
61 | if (cpu_has_feature(CPU_FTR_PURR)) | 61 | if (cpu_has_feature(CPU_FTR_PURR)) |
diff --git a/arch/powerpc/platforms/pseries/io_event_irq.c b/arch/powerpc/platforms/pseries/io_event_irq.c new file mode 100644 index 000000000000..c829e6067d54 --- /dev/null +++ b/arch/powerpc/platforms/pseries/io_event_irq.c | |||
@@ -0,0 +1,231 @@ | |||
1 | /* | ||
2 | * Copyright 2010 2011 Mark Nelson and Tseng-Hui (Frank) Lin, IBM Corporation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <linux/errno.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/irq.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/of.h> | ||
16 | #include <linux/list.h> | ||
17 | #include <linux/notifier.h> | ||
18 | |||
19 | #include <asm/machdep.h> | ||
20 | #include <asm/rtas.h> | ||
21 | #include <asm/irq.h> | ||
22 | #include <asm/io_event_irq.h> | ||
23 | |||
24 | #include "pseries.h" | ||
25 | |||
26 | /* | ||
27 | * IO event interrupt is a mechanism provided by RTAS to return | ||
28 | * information about hardware error and non-error events. Device | ||
29 | * drivers can register their event handlers to receive events. | ||
30 | * Device drivers are expected to use atomic_notifier_chain_register() | ||
31 | * and atomic_notifier_chain_unregister() to register and unregister | ||
32 | * their event handlers. Since multiple IO event types and scopes | ||
33 | * share an IO event interrupt, the event handlers are called one | ||
34 | * by one until the IO event is claimed by one of the handlers. | ||
35 | * The event handlers are expected to return NOTIFY_OK if the | ||
36 | * event is handled by the event handler or NOTIFY_DONE if the | ||
37 | * event does not belong to the handler. | ||
38 | * | ||
39 | * Usage: | ||
40 | * | ||
41 | * Notifier function: | ||
42 | * #include <asm/io_event_irq.h> | ||
43 | * int event_handler(struct notifier_block *nb, unsigned long val, void *data) { | ||
44 | * p = (struct pseries_io_event_sect_data *) data; | ||
45 | * if (! is_my_event(p->scope, p->event_type)) return NOTIFY_DONE; | ||
46 | * : | ||
47 | * : | ||
48 | * return NOTIFY_OK; | ||
49 | * } | ||
50 | * struct notifier_block event_nb = { | ||
51 | * .notifier_call = event_handler, | ||
52 | * } | ||
53 | * | ||
54 | * Registration: | ||
55 | * atomic_notifier_chain_register(&pseries_ioei_notifier_list, &event_nb); | ||
56 | * | ||
57 | * Unregistration: | ||
58 | * atomic_notifier_chain_unregister(&pseries_ioei_notifier_list, &event_nb); | ||
59 | */ | ||
60 | |||
61 | ATOMIC_NOTIFIER_HEAD(pseries_ioei_notifier_list); | ||
62 | EXPORT_SYMBOL_GPL(pseries_ioei_notifier_list); | ||
63 | |||
64 | static int ioei_check_exception_token; | ||
65 | |||
66 | /* pSeries event log format */ | ||
67 | |||
68 | /* Two bytes ASCII section IDs */ | ||
69 | #define PSERIES_ELOG_SECT_ID_PRIV_HDR (('P' << 8) | 'H') | ||
70 | #define PSERIES_ELOG_SECT_ID_USER_HDR (('U' << 8) | 'H') | ||
71 | #define PSERIES_ELOG_SECT_ID_PRIMARY_SRC (('P' << 8) | 'S') | ||
72 | #define PSERIES_ELOG_SECT_ID_EXTENDED_UH (('E' << 8) | 'H') | ||
73 | #define PSERIES_ELOG_SECT_ID_FAILING_MTMS (('M' << 8) | 'T') | ||
74 | #define PSERIES_ELOG_SECT_ID_SECONDARY_SRC (('S' << 8) | 'S') | ||
75 | #define PSERIES_ELOG_SECT_ID_DUMP_LOCATOR (('D' << 8) | 'H') | ||
76 | #define PSERIES_ELOG_SECT_ID_FW_ERROR (('S' << 8) | 'W') | ||
77 | #define PSERIES_ELOG_SECT_ID_IMPACT_PART_ID (('L' << 8) | 'P') | ||
78 | #define PSERIES_ELOG_SECT_ID_LOGIC_RESOURCE_ID (('L' << 8) | 'R') | ||
79 | #define PSERIES_ELOG_SECT_ID_HMC_ID (('H' << 8) | 'M') | ||
80 | #define PSERIES_ELOG_SECT_ID_EPOW (('E' << 8) | 'P') | ||
81 | #define PSERIES_ELOG_SECT_ID_IO_EVENT (('I' << 8) | 'E') | ||
82 | #define PSERIES_ELOG_SECT_ID_MANUFACT_INFO (('M' << 8) | 'I') | ||
83 | #define PSERIES_ELOG_SECT_ID_CALL_HOME (('C' << 8) | 'H') | ||
84 | #define PSERIES_ELOG_SECT_ID_USER_DEF (('U' << 8) | 'D') | ||
85 | |||
86 | /* Vendor specific Platform Event Log Format, Version 6, section header */ | ||
87 | struct pseries_elog_section { | ||
88 | uint16_t id; /* 0x00 2-byte ASCII section ID */ | ||
89 | uint16_t length; /* 0x02 Section length in bytes */ | ||
90 | uint8_t version; /* 0x04 Section version */ | ||
91 | uint8_t subtype; /* 0x05 Section subtype */ | ||
92 | uint16_t creator_component; /* 0x06 Creator component ID */ | ||
93 | uint8_t data[]; /* 0x08 Start of section data */ | ||
94 | }; | ||
95 | |||
96 | static char ioei_rtas_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned; | ||
97 | |||
98 | /** | ||
99 | * Find data portion of a specific section in RTAS extended event log. | ||
100 | * @elog: RTAS error/event log. | ||
101 | * @sect_id: secsion ID. | ||
102 | * | ||
103 | * Return: | ||
104 | * pointer to the section data of the specified section | ||
105 | * NULL if not found | ||
106 | */ | ||
107 | static struct pseries_elog_section *find_xelog_section(struct rtas_error_log *elog, | ||
108 | uint16_t sect_id) | ||
109 | { | ||
110 | struct rtas_ext_event_log_v6 *xelog = | ||
111 | (struct rtas_ext_event_log_v6 *) elog->buffer; | ||
112 | struct pseries_elog_section *sect; | ||
113 | unsigned char *p, *log_end; | ||
114 | |||
115 | /* Check that we understand the format */ | ||
116 | if (elog->extended_log_length < sizeof(struct rtas_ext_event_log_v6) || | ||
117 | xelog->log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG || | ||
118 | xelog->company_id != RTAS_V6EXT_COMPANY_ID_IBM) | ||
119 | return NULL; | ||
120 | |||
121 | log_end = elog->buffer + elog->extended_log_length; | ||
122 | p = xelog->vendor_log; | ||
123 | while (p < log_end) { | ||
124 | sect = (struct pseries_elog_section *)p; | ||
125 | if (sect->id == sect_id) | ||
126 | return sect; | ||
127 | p += sect->length; | ||
128 | } | ||
129 | return NULL; | ||
130 | } | ||
131 | |||
132 | /** | ||
133 | * Find the data portion of an IO Event section from event log. | ||
134 | * @elog: RTAS error/event log. | ||
135 | * | ||
136 | * Return: | ||
137 | * pointer to a valid IO event section data. NULL if not found. | ||
138 | */ | ||
139 | static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog) | ||
140 | { | ||
141 | struct pseries_elog_section *sect; | ||
142 | |||
143 | /* We should only ever get called for io-event interrupts, but if | ||
144 | * we do get called for another type then something went wrong so | ||
145 | * make some noise about it. | ||
146 | * RTAS_TYPE_IO only exists in extended event log version 6 or later. | ||
147 | * No need to check event log version. | ||
148 | */ | ||
149 | if (unlikely(elog->type != RTAS_TYPE_IO)) { | ||
150 | printk_once(KERN_WARNING "io_event_irq: Unexpected event type %d", | ||
151 | elog->type); | ||
152 | return NULL; | ||
153 | } | ||
154 | |||
155 | sect = find_xelog_section(elog, PSERIES_ELOG_SECT_ID_IO_EVENT); | ||
156 | if (unlikely(!sect)) { | ||
157 | printk_once(KERN_WARNING "io_event_irq: RTAS extended event " | ||
158 | "log does not contain an IO Event section. " | ||
159 | "Could be a bug in system firmware!\n"); | ||
160 | return NULL; | ||
161 | } | ||
162 | return (struct pseries_io_event *) §->data; | ||
163 | } | ||
164 | |||
165 | /* | ||
166 | * PAPR: | ||
167 | * - check-exception returns the first found error or event and clear that | ||
168 | * error or event so it is reported once. | ||
169 | * - Each interrupt returns one event. If a plateform chooses to report | ||
170 | * multiple events through a single interrupt, it must ensure that the | ||
171 | * interrupt remains asserted until check-exception has been used to | ||
172 | * process all out-standing events for that interrupt. | ||
173 | * | ||
174 | * Implementation notes: | ||
175 | * - Events must be processed in the order they are returned. Hence, | ||
176 | * sequential in nature. | ||
177 | * - The owner of an event is determined by combinations of scope, | ||
178 | * event type, and sub-type. There is no easy way to pre-sort clients | ||
179 | * by scope or event type alone. For example, Torrent ISR route change | ||
180 | * event is reported with scope 0x00 (Not Applicatable) rather than | ||
181 | * 0x3B (Torrent-hub). It is better to let the clients to identify | ||
182 | * who owns the the event. | ||
183 | */ | ||
184 | |||
185 | static irqreturn_t ioei_interrupt(int irq, void *dev_id) | ||
186 | { | ||
187 | struct pseries_io_event *event; | ||
188 | int rtas_rc; | ||
189 | |||
190 | for (;;) { | ||
191 | rtas_rc = rtas_call(ioei_check_exception_token, 6, 1, NULL, | ||
192 | RTAS_VECTOR_EXTERNAL_INTERRUPT, | ||
193 | virq_to_hw(irq), | ||
194 | RTAS_IO_EVENTS, 1 /* Time Critical */, | ||
195 | __pa(ioei_rtas_buf), | ||
196 | RTAS_DATA_BUF_SIZE); | ||
197 | if (rtas_rc != 0) | ||
198 | break; | ||
199 | |||
200 | event = ioei_find_event((struct rtas_error_log *)ioei_rtas_buf); | ||
201 | if (!event) | ||
202 | continue; | ||
203 | |||
204 | atomic_notifier_call_chain(&pseries_ioei_notifier_list, | ||
205 | 0, event); | ||
206 | } | ||
207 | return IRQ_HANDLED; | ||
208 | } | ||
209 | |||
210 | static int __init ioei_init(void) | ||
211 | { | ||
212 | struct device_node *np; | ||
213 | |||
214 | ioei_check_exception_token = rtas_token("check-exception"); | ||
215 | if (ioei_check_exception_token == RTAS_UNKNOWN_SERVICE) { | ||
216 | pr_warning("IO Event IRQ not supported on this system !\n"); | ||
217 | return -ENODEV; | ||
218 | } | ||
219 | np = of_find_node_by_path("/event-sources/ibm,io-events"); | ||
220 | if (np) { | ||
221 | request_event_sources_irqs(np, ioei_interrupt, "IO_EVENT"); | ||
222 | of_node_put(np); | ||
223 | } else { | ||
224 | pr_err("io_event_irq: No ibm,io-events on system! " | ||
225 | "IO Event interrupt disabled.\n"); | ||
226 | return -ENODEV; | ||
227 | } | ||
228 | return 0; | ||
229 | } | ||
230 | machine_subsys_initcall(pseries, ioei_init); | ||
231 | |||
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index a77bcaed80af..01faab9456ca 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/pci.h> | 33 | #include <linux/pci.h> |
34 | #include <linux/dma-mapping.h> | 34 | #include <linux/dma-mapping.h> |
35 | #include <linux/crash_dump.h> | 35 | #include <linux/crash_dump.h> |
36 | #include <linux/memory.h> | ||
36 | #include <asm/io.h> | 37 | #include <asm/io.h> |
37 | #include <asm/prom.h> | 38 | #include <asm/prom.h> |
38 | #include <asm/rtas.h> | 39 | #include <asm/rtas.h> |
@@ -45,6 +46,7 @@ | |||
45 | #include <asm/tce.h> | 46 | #include <asm/tce.h> |
46 | #include <asm/ppc-pci.h> | 47 | #include <asm/ppc-pci.h> |
47 | #include <asm/udbg.h> | 48 | #include <asm/udbg.h> |
49 | #include <asm/mmzone.h> | ||
48 | 50 | ||
49 | #include "plpar_wrappers.h" | 51 | #include "plpar_wrappers.h" |
50 | 52 | ||
@@ -140,7 +142,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, | |||
140 | return ret; | 142 | return ret; |
141 | } | 143 | } |
142 | 144 | ||
143 | static DEFINE_PER_CPU(u64 *, tce_page) = NULL; | 145 | static DEFINE_PER_CPU(u64 *, tce_page); |
144 | 146 | ||
145 | static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, | 147 | static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, |
146 | long npages, unsigned long uaddr, | 148 | long npages, unsigned long uaddr, |
@@ -270,6 +272,152 @@ static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum) | |||
270 | return tce_ret; | 272 | return tce_ret; |
271 | } | 273 | } |
272 | 274 | ||
275 | /* this is compatible with cells for the device tree property */ | ||
276 | struct dynamic_dma_window_prop { | ||
277 | __be32 liobn; /* tce table number */ | ||
278 | __be64 dma_base; /* address hi,lo */ | ||
279 | __be32 tce_shift; /* ilog2(tce_page_size) */ | ||
280 | __be32 window_shift; /* ilog2(tce_window_size) */ | ||
281 | }; | ||
282 | |||
283 | struct direct_window { | ||
284 | struct device_node *device; | ||
285 | const struct dynamic_dma_window_prop *prop; | ||
286 | struct list_head list; | ||
287 | }; | ||
288 | |||
289 | /* Dynamic DMA Window support */ | ||
290 | struct ddw_query_response { | ||
291 | u32 windows_available; | ||
292 | u32 largest_available_block; | ||
293 | u32 page_size; | ||
294 | u32 migration_capable; | ||
295 | }; | ||
296 | |||
297 | struct ddw_create_response { | ||
298 | u32 liobn; | ||
299 | u32 addr_hi; | ||
300 | u32 addr_lo; | ||
301 | }; | ||
302 | |||
303 | static LIST_HEAD(direct_window_list); | ||
304 | /* prevents races between memory on/offline and window creation */ | ||
305 | static DEFINE_SPINLOCK(direct_window_list_lock); | ||
306 | /* protects initializing window twice for same device */ | ||
307 | static DEFINE_MUTEX(direct_window_init_mutex); | ||
308 | #define DIRECT64_PROPNAME "linux,direct64-ddr-window-info" | ||
309 | |||
310 | static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn, | ||
311 | unsigned long num_pfn, const void *arg) | ||
312 | { | ||
313 | const struct dynamic_dma_window_prop *maprange = arg; | ||
314 | int rc; | ||
315 | u64 tce_size, num_tce, dma_offset, next; | ||
316 | u32 tce_shift; | ||
317 | long limit; | ||
318 | |||
319 | tce_shift = be32_to_cpu(maprange->tce_shift); | ||
320 | tce_size = 1ULL << tce_shift; | ||
321 | next = start_pfn << PAGE_SHIFT; | ||
322 | num_tce = num_pfn << PAGE_SHIFT; | ||
323 | |||
324 | /* round back to the beginning of the tce page size */ | ||
325 | num_tce += next & (tce_size - 1); | ||
326 | next &= ~(tce_size - 1); | ||
327 | |||
328 | /* covert to number of tces */ | ||
329 | num_tce |= tce_size - 1; | ||
330 | num_tce >>= tce_shift; | ||
331 | |||
332 | do { | ||
333 | /* | ||
334 | * Set up the page with TCE data, looping through and setting | ||
335 | * the values. | ||
336 | */ | ||
337 | limit = min_t(long, num_tce, 512); | ||
338 | dma_offset = next + be64_to_cpu(maprange->dma_base); | ||
339 | |||
340 | rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn), | ||
341 | dma_offset, | ||
342 | 0, limit); | ||
343 | num_tce -= limit; | ||
344 | } while (num_tce > 0 && !rc); | ||
345 | |||
346 | return rc; | ||
347 | } | ||
348 | |||
349 | static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, | ||
350 | unsigned long num_pfn, const void *arg) | ||
351 | { | ||
352 | const struct dynamic_dma_window_prop *maprange = arg; | ||
353 | u64 *tcep, tce_size, num_tce, dma_offset, next, proto_tce, liobn; | ||
354 | u32 tce_shift; | ||
355 | u64 rc = 0; | ||
356 | long l, limit; | ||
357 | |||
358 | local_irq_disable(); /* to protect tcep and the page behind it */ | ||
359 | tcep = __get_cpu_var(tce_page); | ||
360 | |||
361 | if (!tcep) { | ||
362 | tcep = (u64 *)__get_free_page(GFP_ATOMIC); | ||
363 | if (!tcep) { | ||
364 | local_irq_enable(); | ||
365 | return -ENOMEM; | ||
366 | } | ||
367 | __get_cpu_var(tce_page) = tcep; | ||
368 | } | ||
369 | |||
370 | proto_tce = TCE_PCI_READ | TCE_PCI_WRITE; | ||
371 | |||
372 | liobn = (u64)be32_to_cpu(maprange->liobn); | ||
373 | tce_shift = be32_to_cpu(maprange->tce_shift); | ||
374 | tce_size = 1ULL << tce_shift; | ||
375 | next = start_pfn << PAGE_SHIFT; | ||
376 | num_tce = num_pfn << PAGE_SHIFT; | ||
377 | |||
378 | /* round back to the beginning of the tce page size */ | ||
379 | num_tce += next & (tce_size - 1); | ||
380 | next &= ~(tce_size - 1); | ||
381 | |||
382 | /* covert to number of tces */ | ||
383 | num_tce |= tce_size - 1; | ||
384 | num_tce >>= tce_shift; | ||
385 | |||
386 | /* We can map max one pageful of TCEs at a time */ | ||
387 | do { | ||
388 | /* | ||
389 | * Set up the page with TCE data, looping through and setting | ||
390 | * the values. | ||
391 | */ | ||
392 | limit = min_t(long, num_tce, 4096/TCE_ENTRY_SIZE); | ||
393 | dma_offset = next + be64_to_cpu(maprange->dma_base); | ||
394 | |||
395 | for (l = 0; l < limit; l++) { | ||
396 | tcep[l] = proto_tce | next; | ||
397 | next += tce_size; | ||
398 | } | ||
399 | |||
400 | rc = plpar_tce_put_indirect(liobn, | ||
401 | dma_offset, | ||
402 | (u64)virt_to_abs(tcep), | ||
403 | limit); | ||
404 | |||
405 | num_tce -= limit; | ||
406 | } while (num_tce > 0 && !rc); | ||
407 | |||
408 | /* error cleanup: caller will clear whole range */ | ||
409 | |||
410 | local_irq_enable(); | ||
411 | return rc; | ||
412 | } | ||
413 | |||
414 | static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn, | ||
415 | unsigned long num_pfn, void *arg) | ||
416 | { | ||
417 | return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg); | ||
418 | } | ||
419 | |||
420 | |||
273 | #ifdef CONFIG_PCI | 421 | #ifdef CONFIG_PCI |
274 | static void iommu_table_setparms(struct pci_controller *phb, | 422 | static void iommu_table_setparms(struct pci_controller *phb, |
275 | struct device_node *dn, | 423 | struct device_node *dn, |
@@ -323,14 +471,13 @@ static void iommu_table_setparms(struct pci_controller *phb, | |||
323 | static void iommu_table_setparms_lpar(struct pci_controller *phb, | 471 | static void iommu_table_setparms_lpar(struct pci_controller *phb, |
324 | struct device_node *dn, | 472 | struct device_node *dn, |
325 | struct iommu_table *tbl, | 473 | struct iommu_table *tbl, |
326 | const void *dma_window, | 474 | const void *dma_window) |
327 | int bussubno) | ||
328 | { | 475 | { |
329 | unsigned long offset, size; | 476 | unsigned long offset, size; |
330 | 477 | ||
331 | tbl->it_busno = bussubno; | ||
332 | of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size); | 478 | of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size); |
333 | 479 | ||
480 | tbl->it_busno = phb->bus->number; | ||
334 | tbl->it_base = 0; | 481 | tbl->it_base = 0; |
335 | tbl->it_blocksize = 16; | 482 | tbl->it_blocksize = 16; |
336 | tbl->it_type = TCE_PCI; | 483 | tbl->it_type = TCE_PCI; |
@@ -450,14 +597,10 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus) | |||
450 | if (!ppci->iommu_table) { | 597 | if (!ppci->iommu_table) { |
451 | tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, | 598 | tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, |
452 | ppci->phb->node); | 599 | ppci->phb->node); |
453 | iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window, | 600 | iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window); |
454 | bus->number); | ||
455 | ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node); | 601 | ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node); |
456 | pr_debug(" created table: %p\n", ppci->iommu_table); | 602 | pr_debug(" created table: %p\n", ppci->iommu_table); |
457 | } | 603 | } |
458 | |||
459 | if (pdn != dn) | ||
460 | PCI_DN(dn)->iommu_table = ppci->iommu_table; | ||
461 | } | 604 | } |
462 | 605 | ||
463 | 606 | ||
@@ -500,6 +643,334 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev) | |||
500 | pci_name(dev)); | 643 | pci_name(dev)); |
501 | } | 644 | } |
502 | 645 | ||
646 | static int __read_mostly disable_ddw; | ||
647 | |||
648 | static int __init disable_ddw_setup(char *str) | ||
649 | { | ||
650 | disable_ddw = 1; | ||
651 | printk(KERN_INFO "ppc iommu: disabling ddw.\n"); | ||
652 | |||
653 | return 0; | ||
654 | } | ||
655 | |||
656 | early_param("disable_ddw", disable_ddw_setup); | ||
657 | |||
658 | static void remove_ddw(struct device_node *np) | ||
659 | { | ||
660 | struct dynamic_dma_window_prop *dwp; | ||
661 | struct property *win64; | ||
662 | const u32 *ddw_avail; | ||
663 | u64 liobn; | ||
664 | int len, ret; | ||
665 | |||
666 | ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len); | ||
667 | win64 = of_find_property(np, DIRECT64_PROPNAME, NULL); | ||
668 | if (!win64) | ||
669 | return; | ||
670 | |||
671 | if (!ddw_avail || len < 3 * sizeof(u32) || win64->length < sizeof(*dwp)) | ||
672 | goto delprop; | ||
673 | |||
674 | dwp = win64->value; | ||
675 | liobn = (u64)be32_to_cpu(dwp->liobn); | ||
676 | |||
677 | /* clear the whole window, note the arg is in kernel pages */ | ||
678 | ret = tce_clearrange_multi_pSeriesLP(0, | ||
679 | 1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp); | ||
680 | if (ret) | ||
681 | pr_warning("%s failed to clear tces in window.\n", | ||
682 | np->full_name); | ||
683 | else | ||
684 | pr_debug("%s successfully cleared tces in window.\n", | ||
685 | np->full_name); | ||
686 | |||
687 | ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn); | ||
688 | if (ret) | ||
689 | pr_warning("%s: failed to remove direct window: rtas returned " | ||
690 | "%d to ibm,remove-pe-dma-window(%x) %llx\n", | ||
691 | np->full_name, ret, ddw_avail[2], liobn); | ||
692 | else | ||
693 | pr_debug("%s: successfully removed direct window: rtas returned " | ||
694 | "%d to ibm,remove-pe-dma-window(%x) %llx\n", | ||
695 | np->full_name, ret, ddw_avail[2], liobn); | ||
696 | |||
697 | delprop: | ||
698 | ret = prom_remove_property(np, win64); | ||
699 | if (ret) | ||
700 | pr_warning("%s: failed to remove direct window property: %d\n", | ||
701 | np->full_name, ret); | ||
702 | } | ||
703 | |||
704 | static u64 find_existing_ddw(struct device_node *pdn) | ||
705 | { | ||
706 | struct direct_window *window; | ||
707 | const struct dynamic_dma_window_prop *direct64; | ||
708 | u64 dma_addr = 0; | ||
709 | |||
710 | spin_lock(&direct_window_list_lock); | ||
711 | /* check if we already created a window and dupe that config if so */ | ||
712 | list_for_each_entry(window, &direct_window_list, list) { | ||
713 | if (window->device == pdn) { | ||
714 | direct64 = window->prop; | ||
715 | dma_addr = direct64->dma_base; | ||
716 | break; | ||
717 | } | ||
718 | } | ||
719 | spin_unlock(&direct_window_list_lock); | ||
720 | |||
721 | return dma_addr; | ||
722 | } | ||
723 | |||
724 | static int find_existing_ddw_windows(void) | ||
725 | { | ||
726 | int len; | ||
727 | struct device_node *pdn; | ||
728 | struct direct_window *window; | ||
729 | const struct dynamic_dma_window_prop *direct64; | ||
730 | |||
731 | if (!firmware_has_feature(FW_FEATURE_LPAR)) | ||
732 | return 0; | ||
733 | |||
734 | for_each_node_with_property(pdn, DIRECT64_PROPNAME) { | ||
735 | direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len); | ||
736 | if (!direct64) | ||
737 | continue; | ||
738 | |||
739 | window = kzalloc(sizeof(*window), GFP_KERNEL); | ||
740 | if (!window || len < sizeof(struct dynamic_dma_window_prop)) { | ||
741 | kfree(window); | ||
742 | remove_ddw(pdn); | ||
743 | continue; | ||
744 | } | ||
745 | |||
746 | window->device = pdn; | ||
747 | window->prop = direct64; | ||
748 | spin_lock(&direct_window_list_lock); | ||
749 | list_add(&window->list, &direct_window_list); | ||
750 | spin_unlock(&direct_window_list_lock); | ||
751 | } | ||
752 | |||
753 | return 0; | ||
754 | } | ||
755 | machine_arch_initcall(pseries, find_existing_ddw_windows); | ||
756 | |||
757 | static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail, | ||
758 | struct ddw_query_response *query) | ||
759 | { | ||
760 | struct device_node *dn; | ||
761 | struct pci_dn *pcidn; | ||
762 | u32 cfg_addr; | ||
763 | u64 buid; | ||
764 | int ret; | ||
765 | |||
766 | /* | ||
767 | * Get the config address and phb buid of the PE window. | ||
768 | * Rely on eeh to retrieve this for us. | ||
769 | * Retrieve them from the pci device, not the node with the | ||
770 | * dma-window property | ||
771 | */ | ||
772 | dn = pci_device_to_OF_node(dev); | ||
773 | pcidn = PCI_DN(dn); | ||
774 | cfg_addr = pcidn->eeh_config_addr; | ||
775 | if (pcidn->eeh_pe_config_addr) | ||
776 | cfg_addr = pcidn->eeh_pe_config_addr; | ||
777 | buid = pcidn->phb->buid; | ||
778 | ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query, | ||
779 | cfg_addr, BUID_HI(buid), BUID_LO(buid)); | ||
780 | dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x" | ||
781 | " returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid), | ||
782 | BUID_LO(buid), ret); | ||
783 | return ret; | ||
784 | } | ||
785 | |||
786 | static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail, | ||
787 | struct ddw_create_response *create, int page_shift, | ||
788 | int window_shift) | ||
789 | { | ||
790 | struct device_node *dn; | ||
791 | struct pci_dn *pcidn; | ||
792 | u32 cfg_addr; | ||
793 | u64 buid; | ||
794 | int ret; | ||
795 | |||
796 | /* | ||
797 | * Get the config address and phb buid of the PE window. | ||
798 | * Rely on eeh to retrieve this for us. | ||
799 | * Retrieve them from the pci device, not the node with the | ||
800 | * dma-window property | ||
801 | */ | ||
802 | dn = pci_device_to_OF_node(dev); | ||
803 | pcidn = PCI_DN(dn); | ||
804 | cfg_addr = pcidn->eeh_config_addr; | ||
805 | if (pcidn->eeh_pe_config_addr) | ||
806 | cfg_addr = pcidn->eeh_pe_config_addr; | ||
807 | buid = pcidn->phb->buid; | ||
808 | |||
809 | do { | ||
810 | /* extra outputs are LIOBN and dma-addr (hi, lo) */ | ||
811 | ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create, cfg_addr, | ||
812 | BUID_HI(buid), BUID_LO(buid), page_shift, window_shift); | ||
813 | } while (rtas_busy_delay(ret)); | ||
814 | dev_info(&dev->dev, | ||
815 | "ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d " | ||
816 | "(liobn = 0x%x starting addr = %x %x)\n", ddw_avail[1], | ||
817 | cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift, | ||
818 | window_shift, ret, create->liobn, create->addr_hi, create->addr_lo); | ||
819 | |||
820 | return ret; | ||
821 | } | ||
822 | |||
823 | /* | ||
824 | * If the PE supports dynamic dma windows, and there is space for a table | ||
825 | * that can map all pages in a linear offset, then setup such a table, | ||
826 | * and record the dma-offset in the struct device. | ||
827 | * | ||
828 | * dev: the pci device we are checking | ||
829 | * pdn: the parent pe node with the ibm,dma_window property | ||
830 | * Future: also check if we can remap the base window for our base page size | ||
831 | * | ||
832 | * returns the dma offset for use by dma_set_mask | ||
833 | */ | ||
834 | static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) | ||
835 | { | ||
836 | int len, ret; | ||
837 | struct ddw_query_response query; | ||
838 | struct ddw_create_response create; | ||
839 | int page_shift; | ||
840 | u64 dma_addr, max_addr; | ||
841 | struct device_node *dn; | ||
842 | const u32 *uninitialized_var(ddw_avail); | ||
843 | struct direct_window *window; | ||
844 | struct property *win64; | ||
845 | struct dynamic_dma_window_prop *ddwprop; | ||
846 | |||
847 | mutex_lock(&direct_window_init_mutex); | ||
848 | |||
849 | dma_addr = find_existing_ddw(pdn); | ||
850 | if (dma_addr != 0) | ||
851 | goto out_unlock; | ||
852 | |||
853 | /* | ||
854 | * the ibm,ddw-applicable property holds the tokens for: | ||
855 | * ibm,query-pe-dma-window | ||
856 | * ibm,create-pe-dma-window | ||
857 | * ibm,remove-pe-dma-window | ||
858 | * for the given node in that order. | ||
859 | * the property is actually in the parent, not the PE | ||
860 | */ | ||
861 | ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len); | ||
862 | if (!ddw_avail || len < 3 * sizeof(u32)) | ||
863 | goto out_unlock; | ||
864 | |||
865 | /* | ||
866 | * Query if there is a second window of size to map the | ||
867 | * whole partition. Query returns number of windows, largest | ||
868 | * block assigned to PE (partition endpoint), and two bitmasks | ||
869 | * of page sizes: supported and supported for migrate-dma. | ||
870 | */ | ||
871 | dn = pci_device_to_OF_node(dev); | ||
872 | ret = query_ddw(dev, ddw_avail, &query); | ||
873 | if (ret != 0) | ||
874 | goto out_unlock; | ||
875 | |||
876 | if (query.windows_available == 0) { | ||
877 | /* | ||
878 | * no additional windows are available for this device. | ||
879 | * We might be able to reallocate the existing window, | ||
880 | * trading in for a larger page size. | ||
881 | */ | ||
882 | dev_dbg(&dev->dev, "no free dynamic windows"); | ||
883 | goto out_unlock; | ||
884 | } | ||
885 | if (query.page_size & 4) { | ||
886 | page_shift = 24; /* 16MB */ | ||
887 | } else if (query.page_size & 2) { | ||
888 | page_shift = 16; /* 64kB */ | ||
889 | } else if (query.page_size & 1) { | ||
890 | page_shift = 12; /* 4kB */ | ||
891 | } else { | ||
892 | dev_dbg(&dev->dev, "no supported direct page size in mask %x", | ||
893 | query.page_size); | ||
894 | goto out_unlock; | ||
895 | } | ||
896 | /* verify the window * number of ptes will map the partition */ | ||
897 | /* check largest block * page size > max memory hotplug addr */ | ||
898 | max_addr = memory_hotplug_max(); | ||
899 | if (query.largest_available_block < (max_addr >> page_shift)) { | ||
900 | dev_dbg(&dev->dev, "can't map partiton max 0x%llx with %u " | ||
901 | "%llu-sized pages\n", max_addr, query.largest_available_block, | ||
902 | 1ULL << page_shift); | ||
903 | goto out_unlock; | ||
904 | } | ||
905 | len = order_base_2(max_addr); | ||
906 | win64 = kzalloc(sizeof(struct property), GFP_KERNEL); | ||
907 | if (!win64) { | ||
908 | dev_info(&dev->dev, | ||
909 | "couldn't allocate property for 64bit dma window\n"); | ||
910 | goto out_unlock; | ||
911 | } | ||
912 | win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL); | ||
913 | win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL); | ||
914 | win64->length = sizeof(*ddwprop); | ||
915 | if (!win64->name || !win64->value) { | ||
916 | dev_info(&dev->dev, | ||
917 | "couldn't allocate property name and value\n"); | ||
918 | goto out_free_prop; | ||
919 | } | ||
920 | |||
921 | ret = create_ddw(dev, ddw_avail, &create, page_shift, len); | ||
922 | if (ret != 0) | ||
923 | goto out_free_prop; | ||
924 | |||
925 | ddwprop->liobn = cpu_to_be32(create.liobn); | ||
926 | ddwprop->dma_base = cpu_to_be64(of_read_number(&create.addr_hi, 2)); | ||
927 | ddwprop->tce_shift = cpu_to_be32(page_shift); | ||
928 | ddwprop->window_shift = cpu_to_be32(len); | ||
929 | |||
930 | dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %s\n", | ||
931 | create.liobn, dn->full_name); | ||
932 | |||
933 | window = kzalloc(sizeof(*window), GFP_KERNEL); | ||
934 | if (!window) | ||
935 | goto out_clear_window; | ||
936 | |||
937 | ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT, | ||
938 | win64->value, tce_setrange_multi_pSeriesLP_walk); | ||
939 | if (ret) { | ||
940 | dev_info(&dev->dev, "failed to map direct window for %s: %d\n", | ||
941 | dn->full_name, ret); | ||
942 | goto out_clear_window; | ||
943 | } | ||
944 | |||
945 | ret = prom_add_property(pdn, win64); | ||
946 | if (ret) { | ||
947 | dev_err(&dev->dev, "unable to add dma window property for %s: %d", | ||
948 | pdn->full_name, ret); | ||
949 | goto out_clear_window; | ||
950 | } | ||
951 | |||
952 | window->device = pdn; | ||
953 | window->prop = ddwprop; | ||
954 | spin_lock(&direct_window_list_lock); | ||
955 | list_add(&window->list, &direct_window_list); | ||
956 | spin_unlock(&direct_window_list_lock); | ||
957 | |||
958 | dma_addr = of_read_number(&create.addr_hi, 2); | ||
959 | goto out_unlock; | ||
960 | |||
961 | out_clear_window: | ||
962 | remove_ddw(pdn); | ||
963 | |||
964 | out_free_prop: | ||
965 | kfree(win64->name); | ||
966 | kfree(win64->value); | ||
967 | kfree(win64); | ||
968 | |||
969 | out_unlock: | ||
970 | mutex_unlock(&direct_window_init_mutex); | ||
971 | return dma_addr; | ||
972 | } | ||
973 | |||
503 | static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) | 974 | static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) |
504 | { | 975 | { |
505 | struct device_node *pdn, *dn; | 976 | struct device_node *pdn, *dn; |
@@ -510,7 +981,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) | |||
510 | pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev)); | 981 | pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev)); |
511 | 982 | ||
512 | /* dev setup for LPAR is a little tricky, since the device tree might | 983 | /* dev setup for LPAR is a little tricky, since the device tree might |
513 | * contain the dma-window properties per-device and not neccesarily | 984 | * contain the dma-window properties per-device and not necessarily |
514 | * for the bus. So we need to search upwards in the tree until we | 985 | * for the bus. So we need to search upwards in the tree until we |
515 | * either hit a dma-window property, OR find a parent with a table | 986 | * either hit a dma-window property, OR find a parent with a table |
516 | * already allocated. | 987 | * already allocated. |
@@ -533,21 +1004,11 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) | |||
533 | } | 1004 | } |
534 | pr_debug(" parent is %s\n", pdn->full_name); | 1005 | pr_debug(" parent is %s\n", pdn->full_name); |
535 | 1006 | ||
536 | /* Check for parent == NULL so we don't try to setup the empty EADS | ||
537 | * slots on POWER4 machines. | ||
538 | */ | ||
539 | if (dma_window == NULL || pdn->parent == NULL) { | ||
540 | pr_debug(" no dma window for device, linking to parent\n"); | ||
541 | set_iommu_table_base(&dev->dev, PCI_DN(pdn)->iommu_table); | ||
542 | return; | ||
543 | } | ||
544 | |||
545 | pci = PCI_DN(pdn); | 1007 | pci = PCI_DN(pdn); |
546 | if (!pci->iommu_table) { | 1008 | if (!pci->iommu_table) { |
547 | tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, | 1009 | tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, |
548 | pci->phb->node); | 1010 | pci->phb->node); |
549 | iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window, | 1011 | iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window); |
550 | pci->phb->bus->number); | ||
551 | pci->iommu_table = iommu_init_table(tbl, pci->phb->node); | 1012 | pci->iommu_table = iommu_init_table(tbl, pci->phb->node); |
552 | pr_debug(" created table: %p\n", pci->iommu_table); | 1013 | pr_debug(" created table: %p\n", pci->iommu_table); |
553 | } else { | 1014 | } else { |
@@ -556,24 +1017,145 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) | |||
556 | 1017 | ||
557 | set_iommu_table_base(&dev->dev, pci->iommu_table); | 1018 | set_iommu_table_base(&dev->dev, pci->iommu_table); |
558 | } | 1019 | } |
1020 | |||
1021 | static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask) | ||
1022 | { | ||
1023 | bool ddw_enabled = false; | ||
1024 | struct device_node *pdn, *dn; | ||
1025 | struct pci_dev *pdev; | ||
1026 | const void *dma_window = NULL; | ||
1027 | u64 dma_offset; | ||
1028 | |||
1029 | if (!dev->dma_mask) | ||
1030 | return -EIO; | ||
1031 | |||
1032 | if (!dev_is_pci(dev)) | ||
1033 | goto check_mask; | ||
1034 | |||
1035 | pdev = to_pci_dev(dev); | ||
1036 | |||
1037 | /* only attempt to use a new window if 64-bit DMA is requested */ | ||
1038 | if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) { | ||
1039 | dn = pci_device_to_OF_node(pdev); | ||
1040 | dev_dbg(dev, "node is %s\n", dn->full_name); | ||
1041 | |||
1042 | /* | ||
1043 | * the device tree might contain the dma-window properties | ||
1044 | * per-device and not necessarily for the bus. So we need to | ||
1045 | * search upwards in the tree until we either hit a dma-window | ||
1046 | * property, OR find a parent with a table already allocated. | ||
1047 | */ | ||
1048 | for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table; | ||
1049 | pdn = pdn->parent) { | ||
1050 | dma_window = of_get_property(pdn, "ibm,dma-window", NULL); | ||
1051 | if (dma_window) | ||
1052 | break; | ||
1053 | } | ||
1054 | if (pdn && PCI_DN(pdn)) { | ||
1055 | dma_offset = enable_ddw(pdev, pdn); | ||
1056 | if (dma_offset != 0) { | ||
1057 | dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset); | ||
1058 | set_dma_offset(dev, dma_offset); | ||
1059 | set_dma_ops(dev, &dma_direct_ops); | ||
1060 | ddw_enabled = true; | ||
1061 | } | ||
1062 | } | ||
1063 | } | ||
1064 | |||
1065 | /* fall back on iommu ops, restore table pointer with ops */ | ||
1066 | if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) { | ||
1067 | dev_info(dev, "Restoring 32-bit DMA via iommu\n"); | ||
1068 | set_dma_ops(dev, &dma_iommu_ops); | ||
1069 | pci_dma_dev_setup_pSeriesLP(pdev); | ||
1070 | } | ||
1071 | |||
1072 | check_mask: | ||
1073 | if (!dma_supported(dev, dma_mask)) | ||
1074 | return -EIO; | ||
1075 | |||
1076 | *dev->dma_mask = dma_mask; | ||
1077 | return 0; | ||
1078 | } | ||
1079 | |||
559 | #else /* CONFIG_PCI */ | 1080 | #else /* CONFIG_PCI */ |
560 | #define pci_dma_bus_setup_pSeries NULL | 1081 | #define pci_dma_bus_setup_pSeries NULL |
561 | #define pci_dma_dev_setup_pSeries NULL | 1082 | #define pci_dma_dev_setup_pSeries NULL |
562 | #define pci_dma_bus_setup_pSeriesLP NULL | 1083 | #define pci_dma_bus_setup_pSeriesLP NULL |
563 | #define pci_dma_dev_setup_pSeriesLP NULL | 1084 | #define pci_dma_dev_setup_pSeriesLP NULL |
1085 | #define dma_set_mask_pSeriesLP NULL | ||
564 | #endif /* !CONFIG_PCI */ | 1086 | #endif /* !CONFIG_PCI */ |
565 | 1087 | ||
1088 | static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action, | ||
1089 | void *data) | ||
1090 | { | ||
1091 | struct direct_window *window; | ||
1092 | struct memory_notify *arg = data; | ||
1093 | int ret = 0; | ||
1094 | |||
1095 | switch (action) { | ||
1096 | case MEM_GOING_ONLINE: | ||
1097 | spin_lock(&direct_window_list_lock); | ||
1098 | list_for_each_entry(window, &direct_window_list, list) { | ||
1099 | ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn, | ||
1100 | arg->nr_pages, window->prop); | ||
1101 | /* XXX log error */ | ||
1102 | } | ||
1103 | spin_unlock(&direct_window_list_lock); | ||
1104 | break; | ||
1105 | case MEM_CANCEL_ONLINE: | ||
1106 | case MEM_OFFLINE: | ||
1107 | spin_lock(&direct_window_list_lock); | ||
1108 | list_for_each_entry(window, &direct_window_list, list) { | ||
1109 | ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn, | ||
1110 | arg->nr_pages, window->prop); | ||
1111 | /* XXX log error */ | ||
1112 | } | ||
1113 | spin_unlock(&direct_window_list_lock); | ||
1114 | break; | ||
1115 | default: | ||
1116 | break; | ||
1117 | } | ||
1118 | if (ret && action != MEM_CANCEL_ONLINE) | ||
1119 | return NOTIFY_BAD; | ||
1120 | |||
1121 | return NOTIFY_OK; | ||
1122 | } | ||
1123 | |||
1124 | static struct notifier_block iommu_mem_nb = { | ||
1125 | .notifier_call = iommu_mem_notifier, | ||
1126 | }; | ||
1127 | |||
566 | static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node) | 1128 | static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node) |
567 | { | 1129 | { |
568 | int err = NOTIFY_OK; | 1130 | int err = NOTIFY_OK; |
569 | struct device_node *np = node; | 1131 | struct device_node *np = node; |
570 | struct pci_dn *pci = PCI_DN(np); | 1132 | struct pci_dn *pci = PCI_DN(np); |
1133 | struct direct_window *window; | ||
571 | 1134 | ||
572 | switch (action) { | 1135 | switch (action) { |
573 | case PSERIES_RECONFIG_REMOVE: | 1136 | case PSERIES_RECONFIG_REMOVE: |
574 | if (pci && pci->iommu_table && | 1137 | if (pci && pci->iommu_table) |
575 | of_get_property(np, "ibm,dma-window", NULL)) | ||
576 | iommu_free_table(pci->iommu_table, np->full_name); | 1138 | iommu_free_table(pci->iommu_table, np->full_name); |
1139 | |||
1140 | spin_lock(&direct_window_list_lock); | ||
1141 | list_for_each_entry(window, &direct_window_list, list) { | ||
1142 | if (window->device == np) { | ||
1143 | list_del(&window->list); | ||
1144 | kfree(window); | ||
1145 | break; | ||
1146 | } | ||
1147 | } | ||
1148 | spin_unlock(&direct_window_list_lock); | ||
1149 | |||
1150 | /* | ||
1151 | * Because the notifier runs after isolation of the | ||
1152 | * slot, we are guaranteed any DMA window has already | ||
1153 | * been revoked and the TCEs have been marked invalid, | ||
1154 | * so we don't need a call to remove_ddw(np). However, | ||
1155 | * if an additional notifier action is added before the | ||
1156 | * isolate call, we should update this code for | ||
1157 | * completeness with such a call. | ||
1158 | */ | ||
577 | break; | 1159 | break; |
578 | default: | 1160 | default: |
579 | err = NOTIFY_DONE; | 1161 | err = NOTIFY_DONE; |
@@ -589,13 +1171,8 @@ static struct notifier_block iommu_reconfig_nb = { | |||
589 | /* These are called very early. */ | 1171 | /* These are called very early. */ |
590 | void iommu_init_early_pSeries(void) | 1172 | void iommu_init_early_pSeries(void) |
591 | { | 1173 | { |
592 | if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL)) { | 1174 | if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL)) |
593 | /* Direct I/O, IOMMU off */ | ||
594 | ppc_md.pci_dma_dev_setup = NULL; | ||
595 | ppc_md.pci_dma_bus_setup = NULL; | ||
596 | set_pci_dma_ops(&dma_direct_ops); | ||
597 | return; | 1175 | return; |
598 | } | ||
599 | 1176 | ||
600 | if (firmware_has_feature(FW_FEATURE_LPAR)) { | 1177 | if (firmware_has_feature(FW_FEATURE_LPAR)) { |
601 | if (firmware_has_feature(FW_FEATURE_MULTITCE)) { | 1178 | if (firmware_has_feature(FW_FEATURE_MULTITCE)) { |
@@ -608,6 +1185,7 @@ void iommu_init_early_pSeries(void) | |||
608 | ppc_md.tce_get = tce_get_pSeriesLP; | 1185 | ppc_md.tce_get = tce_get_pSeriesLP; |
609 | ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP; | 1186 | ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP; |
610 | ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP; | 1187 | ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP; |
1188 | ppc_md.dma_set_mask = dma_set_mask_pSeriesLP; | ||
611 | } else { | 1189 | } else { |
612 | ppc_md.tce_build = tce_build_pSeries; | 1190 | ppc_md.tce_build = tce_build_pSeries; |
613 | ppc_md.tce_free = tce_free_pSeries; | 1191 | ppc_md.tce_free = tce_free_pSeries; |
@@ -618,7 +1196,22 @@ void iommu_init_early_pSeries(void) | |||
618 | 1196 | ||
619 | 1197 | ||
620 | pSeries_reconfig_notifier_register(&iommu_reconfig_nb); | 1198 | pSeries_reconfig_notifier_register(&iommu_reconfig_nb); |
1199 | register_memory_notifier(&iommu_mem_nb); | ||
621 | 1200 | ||
622 | set_pci_dma_ops(&dma_iommu_ops); | 1201 | set_pci_dma_ops(&dma_iommu_ops); |
623 | } | 1202 | } |
624 | 1203 | ||
1204 | static int __init disable_multitce(char *str) | ||
1205 | { | ||
1206 | if (strcmp(str, "off") == 0 && | ||
1207 | firmware_has_feature(FW_FEATURE_LPAR) && | ||
1208 | firmware_has_feature(FW_FEATURE_MULTITCE)) { | ||
1209 | printk(KERN_INFO "Disabling MULTITCE firmware feature\n"); | ||
1210 | ppc_md.tce_build = tce_build_pSeriesLP; | ||
1211 | ppc_md.tce_free = tce_free_pSeriesLP; | ||
1212 | powerpc_firmware_features &= ~FW_FEATURE_MULTITCE; | ||
1213 | } | ||
1214 | return 1; | ||
1215 | } | ||
1216 | |||
1217 | __setup("multitce=", disable_multitce); | ||
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c index 53cbd53d8740..54cf3a4aa16b 100644 --- a/arch/powerpc/platforms/pseries/kexec.c +++ b/arch/powerpc/platforms/pseries/kexec.c | |||
@@ -7,15 +7,18 @@ | |||
7 | * 2 of the License, or (at your option) any later version. | 7 | * 2 of the License, or (at your option) any later version. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | |||
10 | #include <asm/machdep.h> | 13 | #include <asm/machdep.h> |
11 | #include <asm/page.h> | 14 | #include <asm/page.h> |
12 | #include <asm/firmware.h> | 15 | #include <asm/firmware.h> |
13 | #include <asm/kexec.h> | 16 | #include <asm/kexec.h> |
14 | #include <asm/mpic.h> | 17 | #include <asm/mpic.h> |
18 | #include <asm/xics.h> | ||
15 | #include <asm/smp.h> | 19 | #include <asm/smp.h> |
16 | 20 | ||
17 | #include "pseries.h" | 21 | #include "pseries.h" |
18 | #include "xics.h" | ||
19 | #include "plpar_wrappers.h" | 22 | #include "plpar_wrappers.h" |
20 | 23 | ||
21 | static void pseries_kexec_cpu_down(int crash_shutdown, int secondary) | 24 | static void pseries_kexec_cpu_down(int crash_shutdown, int secondary) |
@@ -61,13 +64,3 @@ void __init setup_kexec_cpu_down_xics(void) | |||
61 | { | 64 | { |
62 | ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_xics; | 65 | ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_xics; |
63 | } | 66 | } |
64 | |||
65 | static int __init pseries_kexec_setup(void) | ||
66 | { | ||
67 | ppc_md.machine_kexec = default_machine_kexec; | ||
68 | ppc_md.machine_kexec_prepare = default_machine_kexec_prepare; | ||
69 | ppc_md.machine_crash_shutdown = default_machine_crash_shutdown; | ||
70 | |||
71 | return 0; | ||
72 | } | ||
73 | machine_device_initcall(pseries, pseries_kexec_setup); | ||
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index cf79b46d8f88..39e6e0a7b2fa 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c | |||
@@ -248,11 +248,13 @@ void vpa_init(int cpu) | |||
248 | int hwcpu = get_hard_smp_processor_id(cpu); | 248 | int hwcpu = get_hard_smp_processor_id(cpu); |
249 | unsigned long addr; | 249 | unsigned long addr; |
250 | long ret; | 250 | long ret; |
251 | struct paca_struct *pp; | ||
252 | struct dtl_entry *dtl; | ||
251 | 253 | ||
252 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | 254 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) |
253 | lppaca[cpu].vmxregs_in_use = 1; | 255 | lppaca_of(cpu).vmxregs_in_use = 1; |
254 | 256 | ||
255 | addr = __pa(&lppaca[cpu]); | 257 | addr = __pa(&lppaca_of(cpu)); |
256 | ret = register_vpa(hwcpu, addr); | 258 | ret = register_vpa(hwcpu, addr); |
257 | 259 | ||
258 | if (ret) { | 260 | if (ret) { |
@@ -274,6 +276,25 @@ void vpa_init(int cpu) | |||
274 | "registration for cpu %d (hw %d) of area %lx " | 276 | "registration for cpu %d (hw %d) of area %lx " |
275 | "returns %ld\n", cpu, hwcpu, addr, ret); | 277 | "returns %ld\n", cpu, hwcpu, addr, ret); |
276 | } | 278 | } |
279 | |||
280 | /* | ||
281 | * Register dispatch trace log, if one has been allocated. | ||
282 | */ | ||
283 | pp = &paca[cpu]; | ||
284 | dtl = pp->dispatch_log; | ||
285 | if (dtl) { | ||
286 | pp->dtl_ridx = 0; | ||
287 | pp->dtl_curr = dtl; | ||
288 | lppaca_of(cpu).dtl_idx = 0; | ||
289 | |||
290 | /* hypervisor reads buffer length from this field */ | ||
291 | dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES; | ||
292 | ret = register_dtl(hwcpu, __pa(dtl)); | ||
293 | if (ret) | ||
294 | pr_warn("DTL registration failed for cpu %d (%ld)\n", | ||
295 | cpu, ret); | ||
296 | lppaca_of(cpu).dtl_enable_mask = 2; | ||
297 | } | ||
277 | } | 298 | } |
278 | 299 | ||
279 | static long pSeries_lpar_hpte_insert(unsigned long hpte_group, | 300 | static long pSeries_lpar_hpte_insert(unsigned long hpte_group, |
@@ -308,6 +329,8 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group, | |||
308 | /* Make pHyp happy */ | 329 | /* Make pHyp happy */ |
309 | if ((rflags & _PAGE_NO_CACHE) & !(rflags & _PAGE_WRITETHRU)) | 330 | if ((rflags & _PAGE_NO_CACHE) & !(rflags & _PAGE_WRITETHRU)) |
310 | hpte_r &= ~_PAGE_COHERENT; | 331 | hpte_r &= ~_PAGE_COHERENT; |
332 | if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N)) | ||
333 | flags |= H_COALESCE_CAND; | ||
311 | 334 | ||
312 | lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot); | 335 | lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot); |
313 | if (unlikely(lpar_rc == H_PTEG_FULL)) { | 336 | if (unlikely(lpar_rc == H_PTEG_FULL)) { |
@@ -552,7 +575,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local) | |||
552 | unsigned long i, pix, rc; | 575 | unsigned long i, pix, rc; |
553 | unsigned long flags = 0; | 576 | unsigned long flags = 0; |
554 | struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); | 577 | struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); |
555 | int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE); | 578 | int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); |
556 | unsigned long param[9]; | 579 | unsigned long param[9]; |
557 | unsigned long va; | 580 | unsigned long va; |
558 | unsigned long hash, index, shift, hidx, slot; | 581 | unsigned long hash, index, shift, hidx, slot; |
@@ -606,6 +629,18 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local) | |||
606 | spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); | 629 | spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); |
607 | } | 630 | } |
608 | 631 | ||
632 | static int __init disable_bulk_remove(char *str) | ||
633 | { | ||
634 | if (strcmp(str, "off") == 0 && | ||
635 | firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { | ||
636 | printk(KERN_INFO "Disabling BULK_REMOVE firmware feature"); | ||
637 | powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE; | ||
638 | } | ||
639 | return 1; | ||
640 | } | ||
641 | |||
642 | __setup("bulk_remove=", disable_bulk_remove); | ||
643 | |||
609 | void __init hpte_init_lpar(void) | 644 | void __init hpte_init_lpar(void) |
610 | { | 645 | { |
611 | ppc_md.hpte_invalidate = pSeries_lpar_hpte_invalidate; | 646 | ppc_md.hpte_invalidate = pSeries_lpar_hpte_invalidate; |
@@ -680,6 +715,13 @@ EXPORT_SYMBOL(arch_free_page); | |||
680 | /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ | 715 | /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ |
681 | extern long hcall_tracepoint_refcount; | 716 | extern long hcall_tracepoint_refcount; |
682 | 717 | ||
718 | /* | ||
719 | * Since the tracing code might execute hcalls we need to guard against | ||
720 | * recursion. One example of this are spinlocks calling H_YIELD on | ||
721 | * shared processor partitions. | ||
722 | */ | ||
723 | static DEFINE_PER_CPU(unsigned int, hcall_trace_depth); | ||
724 | |||
683 | void hcall_tracepoint_regfunc(void) | 725 | void hcall_tracepoint_regfunc(void) |
684 | { | 726 | { |
685 | hcall_tracepoint_refcount++; | 727 | hcall_tracepoint_refcount++; |
@@ -692,12 +734,86 @@ void hcall_tracepoint_unregfunc(void) | |||
692 | 734 | ||
693 | void __trace_hcall_entry(unsigned long opcode, unsigned long *args) | 735 | void __trace_hcall_entry(unsigned long opcode, unsigned long *args) |
694 | { | 736 | { |
737 | unsigned long flags; | ||
738 | unsigned int *depth; | ||
739 | |||
740 | local_irq_save(flags); | ||
741 | |||
742 | depth = &__get_cpu_var(hcall_trace_depth); | ||
743 | |||
744 | if (*depth) | ||
745 | goto out; | ||
746 | |||
747 | (*depth)++; | ||
695 | trace_hcall_entry(opcode, args); | 748 | trace_hcall_entry(opcode, args); |
749 | (*depth)--; | ||
750 | |||
751 | out: | ||
752 | local_irq_restore(flags); | ||
696 | } | 753 | } |
697 | 754 | ||
698 | void __trace_hcall_exit(long opcode, unsigned long retval, | 755 | void __trace_hcall_exit(long opcode, unsigned long retval, |
699 | unsigned long *retbuf) | 756 | unsigned long *retbuf) |
700 | { | 757 | { |
758 | unsigned long flags; | ||
759 | unsigned int *depth; | ||
760 | |||
761 | local_irq_save(flags); | ||
762 | |||
763 | depth = &__get_cpu_var(hcall_trace_depth); | ||
764 | |||
765 | if (*depth) | ||
766 | goto out; | ||
767 | |||
768 | (*depth)++; | ||
701 | trace_hcall_exit(opcode, retval, retbuf); | 769 | trace_hcall_exit(opcode, retval, retbuf); |
770 | (*depth)--; | ||
771 | |||
772 | out: | ||
773 | local_irq_restore(flags); | ||
702 | } | 774 | } |
703 | #endif | 775 | #endif |
776 | |||
777 | /** | ||
778 | * h_get_mpp | ||
779 | * H_GET_MPP hcall returns info in 7 parms | ||
780 | */ | ||
781 | int h_get_mpp(struct hvcall_mpp_data *mpp_data) | ||
782 | { | ||
783 | int rc; | ||
784 | unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; | ||
785 | |||
786 | rc = plpar_hcall9(H_GET_MPP, retbuf); | ||
787 | |||
788 | mpp_data->entitled_mem = retbuf[0]; | ||
789 | mpp_data->mapped_mem = retbuf[1]; | ||
790 | |||
791 | mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff; | ||
792 | mpp_data->pool_num = retbuf[2] & 0xffff; | ||
793 | |||
794 | mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff; | ||
795 | mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff; | ||
796 | mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff; | ||
797 | |||
798 | mpp_data->pool_size = retbuf[4]; | ||
799 | mpp_data->loan_request = retbuf[5]; | ||
800 | mpp_data->backing_mem = retbuf[6]; | ||
801 | |||
802 | return rc; | ||
803 | } | ||
804 | EXPORT_SYMBOL(h_get_mpp); | ||
805 | |||
806 | int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data) | ||
807 | { | ||
808 | int rc; | ||
809 | unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 }; | ||
810 | |||
811 | rc = plpar_hcall9(H_GET_MPP_X, retbuf); | ||
812 | |||
813 | mpp_x_data->coalesced_bytes = retbuf[0]; | ||
814 | mpp_x_data->pool_coalesced_bytes = retbuf[1]; | ||
815 | mpp_x_data->pool_purr_cycles = retbuf[2]; | ||
816 | mpp_x_data->pool_spurr_cycles = retbuf[3]; | ||
817 | |||
818 | return rc; | ||
819 | } | ||
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c new file mode 100644 index 000000000000..3e7f651e50ac --- /dev/null +++ b/arch/powerpc/platforms/pseries/mobility.c | |||
@@ -0,0 +1,362 @@ | |||
1 | /* | ||
2 | * Support for Partition Mobility/Migration | ||
3 | * | ||
4 | * Copyright (C) 2010 Nathan Fontenot | ||
5 | * Copyright (C) 2010 IBM Corporation | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/kobject.h> | ||
14 | #include <linux/smp.h> | ||
15 | #include <linux/completion.h> | ||
16 | #include <linux/device.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/slab.h> | ||
19 | |||
20 | #include <asm/rtas.h> | ||
21 | #include "pseries.h" | ||
22 | |||
23 | static struct kobject *mobility_kobj; | ||
24 | |||
25 | struct update_props_workarea { | ||
26 | u32 phandle; | ||
27 | u32 state; | ||
28 | u64 reserved; | ||
29 | u32 nprops; | ||
30 | }; | ||
31 | |||
32 | #define NODE_ACTION_MASK 0xff000000 | ||
33 | #define NODE_COUNT_MASK 0x00ffffff | ||
34 | |||
35 | #define DELETE_DT_NODE 0x01000000 | ||
36 | #define UPDATE_DT_NODE 0x02000000 | ||
37 | #define ADD_DT_NODE 0x03000000 | ||
38 | |||
39 | static int mobility_rtas_call(int token, char *buf) | ||
40 | { | ||
41 | int rc; | ||
42 | |||
43 | spin_lock(&rtas_data_buf_lock); | ||
44 | |||
45 | memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE); | ||
46 | rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, 1); | ||
47 | memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE); | ||
48 | |||
49 | spin_unlock(&rtas_data_buf_lock); | ||
50 | return rc; | ||
51 | } | ||
52 | |||
53 | static int delete_dt_node(u32 phandle) | ||
54 | { | ||
55 | struct device_node *dn; | ||
56 | |||
57 | dn = of_find_node_by_phandle(phandle); | ||
58 | if (!dn) | ||
59 | return -ENOENT; | ||
60 | |||
61 | dlpar_detach_node(dn); | ||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | static int update_dt_property(struct device_node *dn, struct property **prop, | ||
66 | const char *name, u32 vd, char *value) | ||
67 | { | ||
68 | struct property *new_prop = *prop; | ||
69 | struct property *old_prop; | ||
70 | int more = 0; | ||
71 | |||
72 | /* A negative 'vd' value indicates that only part of the new property | ||
73 | * value is contained in the buffer and we need to call | ||
74 | * ibm,update-properties again to get the rest of the value. | ||
75 | * | ||
76 | * A negative value is also the two's compliment of the actual value. | ||
77 | */ | ||
78 | if (vd & 0x80000000) { | ||
79 | vd = ~vd + 1; | ||
80 | more = 1; | ||
81 | } | ||
82 | |||
83 | if (new_prop) { | ||
84 | /* partial property fixup */ | ||
85 | char *new_data = kzalloc(new_prop->length + vd, GFP_KERNEL); | ||
86 | if (!new_data) | ||
87 | return -ENOMEM; | ||
88 | |||
89 | memcpy(new_data, new_prop->value, new_prop->length); | ||
90 | memcpy(new_data + new_prop->length, value, vd); | ||
91 | |||
92 | kfree(new_prop->value); | ||
93 | new_prop->value = new_data; | ||
94 | new_prop->length += vd; | ||
95 | } else { | ||
96 | new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL); | ||
97 | if (!new_prop) | ||
98 | return -ENOMEM; | ||
99 | |||
100 | new_prop->name = kstrdup(name, GFP_KERNEL); | ||
101 | if (!new_prop->name) { | ||
102 | kfree(new_prop); | ||
103 | return -ENOMEM; | ||
104 | } | ||
105 | |||
106 | new_prop->length = vd; | ||
107 | new_prop->value = kzalloc(new_prop->length, GFP_KERNEL); | ||
108 | if (!new_prop->value) { | ||
109 | kfree(new_prop->name); | ||
110 | kfree(new_prop); | ||
111 | return -ENOMEM; | ||
112 | } | ||
113 | |||
114 | memcpy(new_prop->value, value, vd); | ||
115 | *prop = new_prop; | ||
116 | } | ||
117 | |||
118 | if (!more) { | ||
119 | old_prop = of_find_property(dn, new_prop->name, NULL); | ||
120 | if (old_prop) | ||
121 | prom_update_property(dn, new_prop, old_prop); | ||
122 | else | ||
123 | prom_add_property(dn, new_prop); | ||
124 | |||
125 | new_prop = NULL; | ||
126 | } | ||
127 | |||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | static int update_dt_node(u32 phandle) | ||
132 | { | ||
133 | struct update_props_workarea *upwa; | ||
134 | struct device_node *dn; | ||
135 | struct property *prop = NULL; | ||
136 | int i, rc; | ||
137 | char *prop_data; | ||
138 | char *rtas_buf; | ||
139 | int update_properties_token; | ||
140 | |||
141 | update_properties_token = rtas_token("ibm,update-properties"); | ||
142 | if (update_properties_token == RTAS_UNKNOWN_SERVICE) | ||
143 | return -EINVAL; | ||
144 | |||
145 | rtas_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL); | ||
146 | if (!rtas_buf) | ||
147 | return -ENOMEM; | ||
148 | |||
149 | dn = of_find_node_by_phandle(phandle); | ||
150 | if (!dn) { | ||
151 | kfree(rtas_buf); | ||
152 | return -ENOENT; | ||
153 | } | ||
154 | |||
155 | upwa = (struct update_props_workarea *)&rtas_buf[0]; | ||
156 | upwa->phandle = phandle; | ||
157 | |||
158 | do { | ||
159 | rc = mobility_rtas_call(update_properties_token, rtas_buf); | ||
160 | if (rc < 0) | ||
161 | break; | ||
162 | |||
163 | prop_data = rtas_buf + sizeof(*upwa); | ||
164 | |||
165 | for (i = 0; i < upwa->nprops; i++) { | ||
166 | char *prop_name; | ||
167 | u32 vd; | ||
168 | |||
169 | prop_name = prop_data + 1; | ||
170 | prop_data += strlen(prop_name) + 1; | ||
171 | vd = *prop_data++; | ||
172 | |||
173 | switch (vd) { | ||
174 | case 0x00000000: | ||
175 | /* name only property, nothing to do */ | ||
176 | break; | ||
177 | |||
178 | case 0x80000000: | ||
179 | prop = of_find_property(dn, prop_name, NULL); | ||
180 | prom_remove_property(dn, prop); | ||
181 | prop = NULL; | ||
182 | break; | ||
183 | |||
184 | default: | ||
185 | rc = update_dt_property(dn, &prop, prop_name, | ||
186 | vd, prop_data); | ||
187 | if (rc) { | ||
188 | printk(KERN_ERR "Could not update %s" | ||
189 | " property\n", prop_name); | ||
190 | } | ||
191 | |||
192 | prop_data += vd; | ||
193 | } | ||
194 | } | ||
195 | } while (rc == 1); | ||
196 | |||
197 | of_node_put(dn); | ||
198 | kfree(rtas_buf); | ||
199 | return 0; | ||
200 | } | ||
201 | |||
202 | static int add_dt_node(u32 parent_phandle, u32 drc_index) | ||
203 | { | ||
204 | struct device_node *dn; | ||
205 | struct device_node *parent_dn; | ||
206 | int rc; | ||
207 | |||
208 | dn = dlpar_configure_connector(drc_index); | ||
209 | if (!dn) | ||
210 | return -ENOENT; | ||
211 | |||
212 | parent_dn = of_find_node_by_phandle(parent_phandle); | ||
213 | if (!parent_dn) { | ||
214 | dlpar_free_cc_nodes(dn); | ||
215 | return -ENOENT; | ||
216 | } | ||
217 | |||
218 | dn->parent = parent_dn; | ||
219 | rc = dlpar_attach_node(dn); | ||
220 | if (rc) | ||
221 | dlpar_free_cc_nodes(dn); | ||
222 | |||
223 | of_node_put(parent_dn); | ||
224 | return rc; | ||
225 | } | ||
226 | |||
227 | static int pseries_devicetree_update(void) | ||
228 | { | ||
229 | char *rtas_buf; | ||
230 | u32 *data; | ||
231 | int update_nodes_token; | ||
232 | int rc; | ||
233 | |||
234 | update_nodes_token = rtas_token("ibm,update-nodes"); | ||
235 | if (update_nodes_token == RTAS_UNKNOWN_SERVICE) | ||
236 | return -EINVAL; | ||
237 | |||
238 | rtas_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL); | ||
239 | if (!rtas_buf) | ||
240 | return -ENOMEM; | ||
241 | |||
242 | do { | ||
243 | rc = mobility_rtas_call(update_nodes_token, rtas_buf); | ||
244 | if (rc && rc != 1) | ||
245 | break; | ||
246 | |||
247 | data = (u32 *)rtas_buf + 4; | ||
248 | while (*data & NODE_ACTION_MASK) { | ||
249 | int i; | ||
250 | u32 action = *data & NODE_ACTION_MASK; | ||
251 | int node_count = *data & NODE_COUNT_MASK; | ||
252 | |||
253 | data++; | ||
254 | |||
255 | for (i = 0; i < node_count; i++) { | ||
256 | u32 phandle = *data++; | ||
257 | u32 drc_index; | ||
258 | |||
259 | switch (action) { | ||
260 | case DELETE_DT_NODE: | ||
261 | delete_dt_node(phandle); | ||
262 | break; | ||
263 | case UPDATE_DT_NODE: | ||
264 | update_dt_node(phandle); | ||
265 | break; | ||
266 | case ADD_DT_NODE: | ||
267 | drc_index = *data++; | ||
268 | add_dt_node(phandle, drc_index); | ||
269 | break; | ||
270 | } | ||
271 | } | ||
272 | } | ||
273 | } while (rc == 1); | ||
274 | |||
275 | kfree(rtas_buf); | ||
276 | return rc; | ||
277 | } | ||
278 | |||
279 | void post_mobility_fixup(void) | ||
280 | { | ||
281 | int rc; | ||
282 | int activate_fw_token; | ||
283 | |||
284 | rc = pseries_devicetree_update(); | ||
285 | if (rc) { | ||
286 | printk(KERN_ERR "Initial post-mobility device tree update " | ||
287 | "failed: %d\n", rc); | ||
288 | return; | ||
289 | } | ||
290 | |||
291 | activate_fw_token = rtas_token("ibm,activate-firmware"); | ||
292 | if (activate_fw_token == RTAS_UNKNOWN_SERVICE) { | ||
293 | printk(KERN_ERR "Could not make post-mobility " | ||
294 | "activate-fw call.\n"); | ||
295 | return; | ||
296 | } | ||
297 | |||
298 | rc = rtas_call(activate_fw_token, 0, 1, NULL); | ||
299 | if (!rc) { | ||
300 | rc = pseries_devicetree_update(); | ||
301 | if (rc) | ||
302 | printk(KERN_ERR "Secondary post-mobility device tree " | ||
303 | "update failed: %d\n", rc); | ||
304 | } else { | ||
305 | printk(KERN_ERR "Post-mobility activate-fw failed: %d\n", rc); | ||
306 | return; | ||
307 | } | ||
308 | |||
309 | return; | ||
310 | } | ||
311 | |||
312 | static ssize_t migrate_store(struct class *class, struct class_attribute *attr, | ||
313 | const char *buf, size_t count) | ||
314 | { | ||
315 | struct rtas_args args; | ||
316 | u64 streamid; | ||
317 | int rc; | ||
318 | |||
319 | rc = strict_strtoull(buf, 0, &streamid); | ||
320 | if (rc) | ||
321 | return rc; | ||
322 | |||
323 | memset(&args, 0, sizeof(args)); | ||
324 | args.token = rtas_token("ibm,suspend-me"); | ||
325 | args.nargs = 2; | ||
326 | args.nret = 1; | ||
327 | |||
328 | args.args[0] = streamid >> 32 ; | ||
329 | args.args[1] = streamid & 0xffffffff; | ||
330 | args.rets = &args.args[args.nargs]; | ||
331 | |||
332 | do { | ||
333 | args.rets[0] = 0; | ||
334 | rc = rtas_ibm_suspend_me(&args); | ||
335 | if (!rc && args.rets[0] == RTAS_NOT_SUSPENDABLE) | ||
336 | ssleep(1); | ||
337 | } while (!rc && args.rets[0] == RTAS_NOT_SUSPENDABLE); | ||
338 | |||
339 | if (rc) | ||
340 | return rc; | ||
341 | else if (args.rets[0]) | ||
342 | return args.rets[0]; | ||
343 | |||
344 | post_mobility_fixup(); | ||
345 | return count; | ||
346 | } | ||
347 | |||
348 | static CLASS_ATTR(migration, S_IWUSR, NULL, migrate_store); | ||
349 | |||
350 | static int __init mobility_sysfs_init(void) | ||
351 | { | ||
352 | int rc; | ||
353 | |||
354 | mobility_kobj = kobject_create_and_add("mobility", kernel_kobj); | ||
355 | if (!mobility_kobj) | ||
356 | return -ENOMEM; | ||
357 | |||
358 | rc = sysfs_create_file(mobility_kobj, &class_attr_migration.attr); | ||
359 | |||
360 | return rc; | ||
361 | } | ||
362 | device_initcall(mobility_sysfs_init); | ||
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index 1164c3430f2c..38d24e7e7bb1 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c | |||
@@ -93,8 +93,18 @@ static void rtas_disable_msi(struct pci_dev *pdev) | |||
93 | if (!pdn) | 93 | if (!pdn) |
94 | return; | 94 | return; |
95 | 95 | ||
96 | if (rtas_change_msi(pdn, RTAS_CHANGE_FN, 0) != 0) | 96 | /* |
97 | pr_debug("rtas_msi: Setting MSIs to 0 failed!\n"); | 97 | * disabling MSI with the explicit interface also disables MSI-X |
98 | */ | ||
99 | if (rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, 0) != 0) { | ||
100 | /* | ||
101 | * may have failed because explicit interface is not | ||
102 | * present | ||
103 | */ | ||
104 | if (rtas_change_msi(pdn, RTAS_CHANGE_FN, 0) != 0) { | ||
105 | pr_debug("rtas_msi: Setting MSIs to 0 failed!\n"); | ||
106 | } | ||
107 | } | ||
98 | } | 108 | } |
99 | 109 | ||
100 | static int rtas_query_irq_number(struct pci_dn *pdn, int offset) | 110 | static int rtas_query_irq_number(struct pci_dn *pdn, int offset) |
@@ -127,7 +137,7 @@ static void rtas_teardown_msi_irqs(struct pci_dev *pdev) | |||
127 | if (entry->irq == NO_IRQ) | 137 | if (entry->irq == NO_IRQ) |
128 | continue; | 138 | continue; |
129 | 139 | ||
130 | set_irq_msi(entry->irq, NULL); | 140 | irq_set_msi_desc(entry->irq, NULL); |
131 | irq_dispose_mapping(entry->irq); | 141 | irq_dispose_mapping(entry->irq); |
132 | } | 142 | } |
133 | 143 | ||
@@ -427,7 +437,7 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) | |||
427 | } | 437 | } |
428 | 438 | ||
429 | dev_dbg(&pdev->dev, "rtas_msi: allocated virq %d\n", virq); | 439 | dev_dbg(&pdev->dev, "rtas_msi: allocated virq %d\n", virq); |
430 | set_irq_msi(virq, entry); | 440 | irq_set_msi_desc(virq, entry); |
431 | 441 | ||
432 | /* Read config space back so we can restore after reset */ | 442 | /* Read config space back so we can restore after reset */ |
433 | read_msi_msg(virq, &msg); | 443 | read_msi_msg(virq, &msg); |
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index bc3c7f2abd79..00cc3a094885 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c | |||
@@ -16,17 +16,70 @@ | |||
16 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/spinlock.h> | 18 | #include <linux/spinlock.h> |
19 | #include <linux/slab.h> | ||
20 | #include <linux/kmsg_dump.h> | ||
19 | #include <asm/uaccess.h> | 21 | #include <asm/uaccess.h> |
20 | #include <asm/nvram.h> | 22 | #include <asm/nvram.h> |
21 | #include <asm/rtas.h> | 23 | #include <asm/rtas.h> |
22 | #include <asm/prom.h> | 24 | #include <asm/prom.h> |
23 | #include <asm/machdep.h> | 25 | #include <asm/machdep.h> |
24 | 26 | ||
27 | /* Max bytes to read/write in one go */ | ||
28 | #define NVRW_CNT 0x20 | ||
29 | |||
25 | static unsigned int nvram_size; | 30 | static unsigned int nvram_size; |
26 | static int nvram_fetch, nvram_store; | 31 | static int nvram_fetch, nvram_store; |
27 | static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */ | 32 | static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */ |
28 | static DEFINE_SPINLOCK(nvram_lock); | 33 | static DEFINE_SPINLOCK(nvram_lock); |
29 | 34 | ||
35 | struct err_log_info { | ||
36 | int error_type; | ||
37 | unsigned int seq_num; | ||
38 | }; | ||
39 | |||
40 | struct nvram_os_partition { | ||
41 | const char *name; | ||
42 | int req_size; /* desired size, in bytes */ | ||
43 | int min_size; /* minimum acceptable size (0 means req_size) */ | ||
44 | long size; /* size of data portion (excluding err_log_info) */ | ||
45 | long index; /* offset of data portion of partition */ | ||
46 | }; | ||
47 | |||
48 | static struct nvram_os_partition rtas_log_partition = { | ||
49 | .name = "ibm,rtas-log", | ||
50 | .req_size = 2079, | ||
51 | .min_size = 1055, | ||
52 | .index = -1 | ||
53 | }; | ||
54 | |||
55 | static struct nvram_os_partition oops_log_partition = { | ||
56 | .name = "lnx,oops-log", | ||
57 | .req_size = 4000, | ||
58 | .min_size = 2000, | ||
59 | .index = -1 | ||
60 | }; | ||
61 | |||
62 | static const char *pseries_nvram_os_partitions[] = { | ||
63 | "ibm,rtas-log", | ||
64 | "lnx,oops-log", | ||
65 | NULL | ||
66 | }; | ||
67 | |||
68 | static void oops_to_nvram(struct kmsg_dumper *dumper, | ||
69 | enum kmsg_dump_reason reason, | ||
70 | const char *old_msgs, unsigned long old_len, | ||
71 | const char *new_msgs, unsigned long new_len); | ||
72 | |||
73 | static struct kmsg_dumper nvram_kmsg_dumper = { | ||
74 | .dump = oops_to_nvram | ||
75 | }; | ||
76 | |||
77 | /* See clobbering_unread_rtas_event() */ | ||
78 | #define NVRAM_RTAS_READ_TIMEOUT 5 /* seconds */ | ||
79 | static unsigned long last_unread_rtas_event; /* timestamp */ | ||
80 | |||
81 | /* We preallocate oops_buf during init to avoid kmalloc during oops/panic. */ | ||
82 | static char *oops_buf; | ||
30 | 83 | ||
31 | static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index) | 84 | static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index) |
32 | { | 85 | { |
@@ -119,6 +172,239 @@ static ssize_t pSeries_nvram_get_size(void) | |||
119 | return nvram_size ? nvram_size : -ENODEV; | 172 | return nvram_size ? nvram_size : -ENODEV; |
120 | } | 173 | } |
121 | 174 | ||
175 | |||
176 | /* nvram_write_os_partition, nvram_write_error_log | ||
177 | * | ||
178 | * We need to buffer the error logs into nvram to ensure that we have | ||
179 | * the failure information to decode. If we have a severe error there | ||
180 | * is no way to guarantee that the OS or the machine is in a state to | ||
181 | * get back to user land and write the error to disk. For example if | ||
182 | * the SCSI device driver causes a Machine Check by writing to a bad | ||
183 | * IO address, there is no way of guaranteeing that the device driver | ||
184 | * is in any state that would also be able to write the error data | ||
185 | * captured to disk, thus we buffer it in NVRAM for analysis on the | ||
186 | * next boot. | ||
187 | * | ||
188 | * In NVRAM the partition containing the error log buffer will look like: | ||
189 | * Header (in bytes): | ||
190 | * +-----------+----------+--------+------------+------------------+ | ||
191 | * | signature | checksum | length | name | data | | ||
192 | * |0 |1 |2 3|4 15|16 length-1| | ||
193 | * +-----------+----------+--------+------------+------------------+ | ||
194 | * | ||
195 | * The 'data' section would look like (in bytes): | ||
196 | * +--------------+------------+-----------------------------------+ | ||
197 | * | event_logged | sequence # | error log | | ||
198 | * |0 3|4 7|8 error_log_size-1| | ||
199 | * +--------------+------------+-----------------------------------+ | ||
200 | * | ||
201 | * event_logged: 0 if event has not been logged to syslog, 1 if it has | ||
202 | * sequence #: The unique sequence # for each event. (until it wraps) | ||
203 | * error log: The error log from event_scan | ||
204 | */ | ||
205 | int nvram_write_os_partition(struct nvram_os_partition *part, char * buff, | ||
206 | int length, unsigned int err_type, unsigned int error_log_cnt) | ||
207 | { | ||
208 | int rc; | ||
209 | loff_t tmp_index; | ||
210 | struct err_log_info info; | ||
211 | |||
212 | if (part->index == -1) { | ||
213 | return -ESPIPE; | ||
214 | } | ||
215 | |||
216 | if (length > part->size) { | ||
217 | length = part->size; | ||
218 | } | ||
219 | |||
220 | info.error_type = err_type; | ||
221 | info.seq_num = error_log_cnt; | ||
222 | |||
223 | tmp_index = part->index; | ||
224 | |||
225 | rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index); | ||
226 | if (rc <= 0) { | ||
227 | pr_err("%s: Failed nvram_write (%d)\n", __FUNCTION__, rc); | ||
228 | return rc; | ||
229 | } | ||
230 | |||
231 | rc = ppc_md.nvram_write(buff, length, &tmp_index); | ||
232 | if (rc <= 0) { | ||
233 | pr_err("%s: Failed nvram_write (%d)\n", __FUNCTION__, rc); | ||
234 | return rc; | ||
235 | } | ||
236 | |||
237 | return 0; | ||
238 | } | ||
239 | |||
240 | int nvram_write_error_log(char * buff, int length, | ||
241 | unsigned int err_type, unsigned int error_log_cnt) | ||
242 | { | ||
243 | int rc = nvram_write_os_partition(&rtas_log_partition, buff, length, | ||
244 | err_type, error_log_cnt); | ||
245 | if (!rc) | ||
246 | last_unread_rtas_event = get_seconds(); | ||
247 | return rc; | ||
248 | } | ||
249 | |||
250 | /* nvram_read_error_log | ||
251 | * | ||
252 | * Reads nvram for error log for at most 'length' | ||
253 | */ | ||
254 | int nvram_read_error_log(char * buff, int length, | ||
255 | unsigned int * err_type, unsigned int * error_log_cnt) | ||
256 | { | ||
257 | int rc; | ||
258 | loff_t tmp_index; | ||
259 | struct err_log_info info; | ||
260 | |||
261 | if (rtas_log_partition.index == -1) | ||
262 | return -1; | ||
263 | |||
264 | if (length > rtas_log_partition.size) | ||
265 | length = rtas_log_partition.size; | ||
266 | |||
267 | tmp_index = rtas_log_partition.index; | ||
268 | |||
269 | rc = ppc_md.nvram_read((char *)&info, sizeof(struct err_log_info), &tmp_index); | ||
270 | if (rc <= 0) { | ||
271 | printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc); | ||
272 | return rc; | ||
273 | } | ||
274 | |||
275 | rc = ppc_md.nvram_read(buff, length, &tmp_index); | ||
276 | if (rc <= 0) { | ||
277 | printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc); | ||
278 | return rc; | ||
279 | } | ||
280 | |||
281 | *error_log_cnt = info.seq_num; | ||
282 | *err_type = info.error_type; | ||
283 | |||
284 | return 0; | ||
285 | } | ||
286 | |||
287 | /* This doesn't actually zero anything, but it sets the event_logged | ||
288 | * word to tell that this event is safely in syslog. | ||
289 | */ | ||
290 | int nvram_clear_error_log(void) | ||
291 | { | ||
292 | loff_t tmp_index; | ||
293 | int clear_word = ERR_FLAG_ALREADY_LOGGED; | ||
294 | int rc; | ||
295 | |||
296 | if (rtas_log_partition.index == -1) | ||
297 | return -1; | ||
298 | |||
299 | tmp_index = rtas_log_partition.index; | ||
300 | |||
301 | rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index); | ||
302 | if (rc <= 0) { | ||
303 | printk(KERN_ERR "nvram_clear_error_log: Failed nvram_write (%d)\n", rc); | ||
304 | return rc; | ||
305 | } | ||
306 | last_unread_rtas_event = 0; | ||
307 | |||
308 | return 0; | ||
309 | } | ||
310 | |||
311 | /* pseries_nvram_init_os_partition | ||
312 | * | ||
313 | * This sets up a partition with an "OS" signature. | ||
314 | * | ||
315 | * The general strategy is the following: | ||
316 | * 1.) If a partition with the indicated name already exists... | ||
317 | * - If it's large enough, use it. | ||
318 | * - Otherwise, recycle it and keep going. | ||
319 | * 2.) Search for a free partition that is large enough. | ||
320 | * 3.) If there's not a free partition large enough, recycle any obsolete | ||
321 | * OS partitions and try again. | ||
322 | * 4.) Will first try getting a chunk that will satisfy the requested size. | ||
323 | * 5.) If a chunk of the requested size cannot be allocated, then try finding | ||
324 | * a chunk that will satisfy the minimum needed. | ||
325 | * | ||
326 | * Returns 0 on success, else -1. | ||
327 | */ | ||
328 | static int __init pseries_nvram_init_os_partition(struct nvram_os_partition | ||
329 | *part) | ||
330 | { | ||
331 | loff_t p; | ||
332 | int size; | ||
333 | |||
334 | /* Scan nvram for partitions */ | ||
335 | nvram_scan_partitions(); | ||
336 | |||
337 | /* Look for ours */ | ||
338 | p = nvram_find_partition(part->name, NVRAM_SIG_OS, &size); | ||
339 | |||
340 | /* Found one but too small, remove it */ | ||
341 | if (p && size < part->min_size) { | ||
342 | pr_info("nvram: Found too small %s partition," | ||
343 | " removing it...\n", part->name); | ||
344 | nvram_remove_partition(part->name, NVRAM_SIG_OS, NULL); | ||
345 | p = 0; | ||
346 | } | ||
347 | |||
348 | /* Create one if we didn't find */ | ||
349 | if (!p) { | ||
350 | p = nvram_create_partition(part->name, NVRAM_SIG_OS, | ||
351 | part->req_size, part->min_size); | ||
352 | if (p == -ENOSPC) { | ||
353 | pr_info("nvram: No room to create %s partition, " | ||
354 | "deleting any obsolete OS partitions...\n", | ||
355 | part->name); | ||
356 | nvram_remove_partition(NULL, NVRAM_SIG_OS, | ||
357 | pseries_nvram_os_partitions); | ||
358 | p = nvram_create_partition(part->name, NVRAM_SIG_OS, | ||
359 | part->req_size, part->min_size); | ||
360 | } | ||
361 | } | ||
362 | |||
363 | if (p <= 0) { | ||
364 | pr_err("nvram: Failed to find or create %s" | ||
365 | " partition, err %d\n", part->name, (int)p); | ||
366 | return -1; | ||
367 | } | ||
368 | |||
369 | part->index = p; | ||
370 | part->size = nvram_get_partition_size(p) - sizeof(struct err_log_info); | ||
371 | |||
372 | return 0; | ||
373 | } | ||
374 | |||
375 | static void __init nvram_init_oops_partition(int rtas_partition_exists) | ||
376 | { | ||
377 | int rc; | ||
378 | |||
379 | rc = pseries_nvram_init_os_partition(&oops_log_partition); | ||
380 | if (rc != 0) { | ||
381 | if (!rtas_partition_exists) | ||
382 | return; | ||
383 | pr_notice("nvram: Using %s partition to log both" | ||
384 | " RTAS errors and oops/panic reports\n", | ||
385 | rtas_log_partition.name); | ||
386 | memcpy(&oops_log_partition, &rtas_log_partition, | ||
387 | sizeof(rtas_log_partition)); | ||
388 | } | ||
389 | oops_buf = kmalloc(oops_log_partition.size, GFP_KERNEL); | ||
390 | rc = kmsg_dump_register(&nvram_kmsg_dumper); | ||
391 | if (rc != 0) { | ||
392 | pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc); | ||
393 | kfree(oops_buf); | ||
394 | return; | ||
395 | } | ||
396 | } | ||
397 | |||
398 | static int __init pseries_nvram_init_log_partitions(void) | ||
399 | { | ||
400 | int rc; | ||
401 | |||
402 | rc = pseries_nvram_init_os_partition(&rtas_log_partition); | ||
403 | nvram_init_oops_partition(rc == 0); | ||
404 | return 0; | ||
405 | } | ||
406 | machine_arch_initcall(pseries, pseries_nvram_init_log_partitions); | ||
407 | |||
122 | int __init pSeries_nvram_init(void) | 408 | int __init pSeries_nvram_init(void) |
123 | { | 409 | { |
124 | struct device_node *nvram; | 410 | struct device_node *nvram; |
@@ -148,3 +434,83 @@ int __init pSeries_nvram_init(void) | |||
148 | 434 | ||
149 | return 0; | 435 | return 0; |
150 | } | 436 | } |
437 | |||
438 | /* | ||
439 | * Try to capture the last capture_len bytes of the printk buffer. Return | ||
440 | * the amount actually captured. | ||
441 | */ | ||
442 | static size_t capture_last_msgs(const char *old_msgs, size_t old_len, | ||
443 | const char *new_msgs, size_t new_len, | ||
444 | char *captured, size_t capture_len) | ||
445 | { | ||
446 | if (new_len >= capture_len) { | ||
447 | memcpy(captured, new_msgs + (new_len - capture_len), | ||
448 | capture_len); | ||
449 | return capture_len; | ||
450 | } else { | ||
451 | /* Grab the end of old_msgs. */ | ||
452 | size_t old_tail_len = min(old_len, capture_len - new_len); | ||
453 | memcpy(captured, old_msgs + (old_len - old_tail_len), | ||
454 | old_tail_len); | ||
455 | memcpy(captured + old_tail_len, new_msgs, new_len); | ||
456 | return old_tail_len + new_len; | ||
457 | } | ||
458 | } | ||
459 | |||
460 | /* | ||
461 | * Are we using the ibm,rtas-log for oops/panic reports? And if so, | ||
462 | * would logging this oops/panic overwrite an RTAS event that rtas_errd | ||
463 | * hasn't had a chance to read and process? Return 1 if so, else 0. | ||
464 | * | ||
465 | * We assume that if rtas_errd hasn't read the RTAS event in | ||
466 | * NVRAM_RTAS_READ_TIMEOUT seconds, it's probably not going to. | ||
467 | */ | ||
468 | static int clobbering_unread_rtas_event(void) | ||
469 | { | ||
470 | return (oops_log_partition.index == rtas_log_partition.index | ||
471 | && last_unread_rtas_event | ||
472 | && get_seconds() - last_unread_rtas_event <= | ||
473 | NVRAM_RTAS_READ_TIMEOUT); | ||
474 | } | ||
475 | |||
476 | /* our kmsg_dump callback */ | ||
477 | static void oops_to_nvram(struct kmsg_dumper *dumper, | ||
478 | enum kmsg_dump_reason reason, | ||
479 | const char *old_msgs, unsigned long old_len, | ||
480 | const char *new_msgs, unsigned long new_len) | ||
481 | { | ||
482 | static unsigned int oops_count = 0; | ||
483 | static bool panicking = false; | ||
484 | size_t text_len; | ||
485 | |||
486 | switch (reason) { | ||
487 | case KMSG_DUMP_RESTART: | ||
488 | case KMSG_DUMP_HALT: | ||
489 | case KMSG_DUMP_POWEROFF: | ||
490 | /* These are almost always orderly shutdowns. */ | ||
491 | return; | ||
492 | case KMSG_DUMP_OOPS: | ||
493 | case KMSG_DUMP_KEXEC: | ||
494 | break; | ||
495 | case KMSG_DUMP_PANIC: | ||
496 | panicking = true; | ||
497 | break; | ||
498 | case KMSG_DUMP_EMERG: | ||
499 | if (panicking) | ||
500 | /* Panic report already captured. */ | ||
501 | return; | ||
502 | break; | ||
503 | default: | ||
504 | pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n", | ||
505 | __FUNCTION__, (int) reason); | ||
506 | return; | ||
507 | } | ||
508 | |||
509 | if (clobbering_unread_rtas_event()) | ||
510 | return; | ||
511 | |||
512 | text_len = capture_last_msgs(old_msgs, old_len, new_msgs, new_len, | ||
513 | oops_buf, oops_log_partition.size); | ||
514 | (void) nvram_write_os_partition(&oops_log_partition, oops_buf, | ||
515 | (int) text_len, ERR_TYPE_KERNEL_PANIC, ++oops_count); | ||
516 | } | ||
diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h index 75a6f480d931..08672d9136ab 100644 --- a/arch/powerpc/platforms/pseries/offline_states.h +++ b/arch/powerpc/platforms/pseries/offline_states.h | |||
@@ -34,6 +34,4 @@ static inline void set_default_offline_state(int cpu) | |||
34 | #endif | 34 | #endif |
35 | 35 | ||
36 | extern enum cpu_state_vals get_preferred_offline_state(int cpu); | 36 | extern enum cpu_state_vals get_preferred_offline_state(int cpu); |
37 | extern int start_secondary(void); | ||
38 | extern void start_secondary_resume(void); | ||
39 | #endif | 37 | #endif |
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c index 4b7a062dee15..3bf4488aaec6 100644 --- a/arch/powerpc/platforms/pseries/pci_dlpar.c +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c | |||
@@ -25,8 +25,6 @@ | |||
25 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 25 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #undef DEBUG | ||
29 | |||
30 | #include <linux/pci.h> | 28 | #include <linux/pci.h> |
31 | #include <asm/pci-bridge.h> | 29 | #include <asm/pci-bridge.h> |
32 | #include <asm/ppc-pci.h> | 30 | #include <asm/ppc-pci.h> |
@@ -151,7 +149,7 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn) | |||
151 | if (dn->child) | 149 | if (dn->child) |
152 | eeh_add_device_tree_early(dn); | 150 | eeh_add_device_tree_early(dn); |
153 | 151 | ||
154 | pcibios_scan_phb(phb, dn); | 152 | pcibios_scan_phb(phb); |
155 | pcibios_finish_adding_to_bus(phb->bus); | 153 | pcibios_finish_adding_to_bus(phb->bus); |
156 | 154 | ||
157 | return phb; | 155 | return phb; |
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h index d9801117124b..4bf21207d7d3 100644 --- a/arch/powerpc/platforms/pseries/plpar_wrappers.h +++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h | |||
@@ -270,31 +270,4 @@ static inline long plpar_put_term_char(unsigned long termno, unsigned long len, | |||
270 | lbuf[1]); | 270 | lbuf[1]); |
271 | } | 271 | } |
272 | 272 | ||
273 | static inline long plpar_eoi(unsigned long xirr) | ||
274 | { | ||
275 | return plpar_hcall_norets(H_EOI, xirr); | ||
276 | } | ||
277 | |||
278 | static inline long plpar_cppr(unsigned long cppr) | ||
279 | { | ||
280 | return plpar_hcall_norets(H_CPPR, cppr); | ||
281 | } | ||
282 | |||
283 | static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr) | ||
284 | { | ||
285 | return plpar_hcall_norets(H_IPI, servernum, mfrr); | ||
286 | } | ||
287 | |||
288 | static inline long plpar_xirr(unsigned long *xirr_ret, unsigned char cppr) | ||
289 | { | ||
290 | long rc; | ||
291 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | ||
292 | |||
293 | rc = plpar_hcall(H_XIRR, retbuf, cppr); | ||
294 | |||
295 | *xirr_ret = retbuf[0]; | ||
296 | |||
297 | return rc; | ||
298 | } | ||
299 | |||
300 | #endif /* _PSERIES_PLPAR_WRAPPERS_H */ | 273 | #endif /* _PSERIES_PLPAR_WRAPPERS_H */ |
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h index 40c93cad91d2..e9f6d2859c3c 100644 --- a/arch/powerpc/platforms/pseries/pseries.h +++ b/arch/powerpc/platforms/pseries/pseries.h | |||
@@ -17,6 +17,8 @@ struct device_node; | |||
17 | extern void request_event_sources_irqs(struct device_node *np, | 17 | extern void request_event_sources_irqs(struct device_node *np, |
18 | irq_handler_t handler, const char *name); | 18 | irq_handler_t handler, const char *name); |
19 | 19 | ||
20 | #include <linux/of.h> | ||
21 | |||
20 | extern void __init fw_feature_init(const char *hypertas, unsigned long len); | 22 | extern void __init fw_feature_init(const char *hypertas, unsigned long len); |
21 | 23 | ||
22 | struct pt_regs; | 24 | struct pt_regs; |
@@ -47,4 +49,11 @@ extern unsigned long rtas_poweron_auto; | |||
47 | 49 | ||
48 | extern void find_udbg_vterm(void); | 50 | extern void find_udbg_vterm(void); |
49 | 51 | ||
52 | /* Dynamic logical Partitioning/Mobility */ | ||
53 | extern void dlpar_free_cc_nodes(struct device_node *); | ||
54 | extern void dlpar_free_cc_property(struct property *); | ||
55 | extern struct device_node *dlpar_configure_connector(u32); | ||
56 | extern int dlpar_attach_node(struct device_node *); | ||
57 | extern int dlpar_detach_node(struct device_node *); | ||
58 | |||
50 | #endif /* _PSERIES_PSERIES_H */ | 59 | #endif /* _PSERIES_PSERIES_H */ |
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c new file mode 100644 index 000000000000..c8b3c69fe891 --- /dev/null +++ b/arch/powerpc/platforms/pseries/pseries_energy.c | |||
@@ -0,0 +1,326 @@ | |||
1 | /* | ||
2 | * POWER platform energy management driver | ||
3 | * Copyright (C) 2010 IBM Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * version 2 as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This pseries platform device driver provides access to | ||
10 | * platform energy management capabilities. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/types.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/seq_file.h> | ||
18 | #include <linux/sysdev.h> | ||
19 | #include <linux/cpu.h> | ||
20 | #include <linux/of.h> | ||
21 | #include <asm/cputhreads.h> | ||
22 | #include <asm/page.h> | ||
23 | #include <asm/hvcall.h> | ||
24 | |||
25 | |||
26 | #define MODULE_VERS "1.0" | ||
27 | #define MODULE_NAME "pseries_energy" | ||
28 | |||
29 | /* Driver flags */ | ||
30 | |||
31 | static int sysfs_entries; | ||
32 | |||
33 | /* Helper routines */ | ||
34 | |||
35 | /* | ||
36 | * Routine to detect firmware support for hcall | ||
37 | * return 1 if H_BEST_ENERGY is supported | ||
38 | * else return 0 | ||
39 | */ | ||
40 | |||
41 | static int check_for_h_best_energy(void) | ||
42 | { | ||
43 | struct device_node *rtas = NULL; | ||
44 | const char *hypertas, *s; | ||
45 | int length; | ||
46 | int rc = 0; | ||
47 | |||
48 | rtas = of_find_node_by_path("/rtas"); | ||
49 | if (!rtas) | ||
50 | return 0; | ||
51 | |||
52 | hypertas = of_get_property(rtas, "ibm,hypertas-functions", &length); | ||
53 | if (!hypertas) { | ||
54 | of_node_put(rtas); | ||
55 | return 0; | ||
56 | } | ||
57 | |||
58 | /* hypertas will have list of strings with hcall names */ | ||
59 | for (s = hypertas; s < hypertas + length; s += strlen(s) + 1) { | ||
60 | if (!strncmp("hcall-best-energy-1", s, 19)) { | ||
61 | rc = 1; /* Found the string */ | ||
62 | break; | ||
63 | } | ||
64 | } | ||
65 | of_node_put(rtas); | ||
66 | return rc; | ||
67 | } | ||
68 | |||
69 | /* Helper Routines to convert between drc_index to cpu numbers */ | ||
70 | |||
71 | static u32 cpu_to_drc_index(int cpu) | ||
72 | { | ||
73 | struct device_node *dn = NULL; | ||
74 | const int *indexes; | ||
75 | int i; | ||
76 | int rc = 1; | ||
77 | u32 ret = 0; | ||
78 | |||
79 | dn = of_find_node_by_path("/cpus"); | ||
80 | if (dn == NULL) | ||
81 | goto err; | ||
82 | indexes = of_get_property(dn, "ibm,drc-indexes", NULL); | ||
83 | if (indexes == NULL) | ||
84 | goto err_of_node_put; | ||
85 | /* Convert logical cpu number to core number */ | ||
86 | i = cpu_core_index_of_thread(cpu); | ||
87 | /* | ||
88 | * The first element indexes[0] is the number of drc_indexes | ||
89 | * returned in the list. Hence i+1 will get the drc_index | ||
90 | * corresponding to core number i. | ||
91 | */ | ||
92 | WARN_ON(i > indexes[0]); | ||
93 | ret = indexes[i + 1]; | ||
94 | rc = 0; | ||
95 | |||
96 | err_of_node_put: | ||
97 | of_node_put(dn); | ||
98 | err: | ||
99 | if (rc) | ||
100 | printk(KERN_WARNING "cpu_to_drc_index(%d) failed", cpu); | ||
101 | return ret; | ||
102 | } | ||
103 | |||
104 | static int drc_index_to_cpu(u32 drc_index) | ||
105 | { | ||
106 | struct device_node *dn = NULL; | ||
107 | const int *indexes; | ||
108 | int i, cpu = 0; | ||
109 | int rc = 1; | ||
110 | |||
111 | dn = of_find_node_by_path("/cpus"); | ||
112 | if (dn == NULL) | ||
113 | goto err; | ||
114 | indexes = of_get_property(dn, "ibm,drc-indexes", NULL); | ||
115 | if (indexes == NULL) | ||
116 | goto err_of_node_put; | ||
117 | /* | ||
118 | * First element in the array is the number of drc_indexes | ||
119 | * returned. Search through the list to find the matching | ||
120 | * drc_index and get the core number | ||
121 | */ | ||
122 | for (i = 0; i < indexes[0]; i++) { | ||
123 | if (indexes[i + 1] == drc_index) | ||
124 | break; | ||
125 | } | ||
126 | /* Convert core number to logical cpu number */ | ||
127 | cpu = cpu_first_thread_of_core(i); | ||
128 | rc = 0; | ||
129 | |||
130 | err_of_node_put: | ||
131 | of_node_put(dn); | ||
132 | err: | ||
133 | if (rc) | ||
134 | printk(KERN_WARNING "drc_index_to_cpu(%d) failed", drc_index); | ||
135 | return cpu; | ||
136 | } | ||
137 | |||
138 | /* | ||
139 | * pseries hypervisor call H_BEST_ENERGY provides hints to OS on | ||
140 | * preferred logical cpus to activate or deactivate for optimized | ||
141 | * energy consumption. | ||
142 | */ | ||
143 | |||
144 | #define FLAGS_MODE1 0x004E200000080E01 | ||
145 | #define FLAGS_MODE2 0x004E200000080401 | ||
146 | #define FLAGS_ACTIVATE 0x100 | ||
147 | |||
148 | static ssize_t get_best_energy_list(char *page, int activate) | ||
149 | { | ||
150 | int rc, cnt, i, cpu; | ||
151 | unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; | ||
152 | unsigned long flags = 0; | ||
153 | u32 *buf_page; | ||
154 | char *s = page; | ||
155 | |||
156 | buf_page = (u32 *) get_zeroed_page(GFP_KERNEL); | ||
157 | if (!buf_page) | ||
158 | return -ENOMEM; | ||
159 | |||
160 | flags = FLAGS_MODE1; | ||
161 | if (activate) | ||
162 | flags |= FLAGS_ACTIVATE; | ||
163 | |||
164 | rc = plpar_hcall9(H_BEST_ENERGY, retbuf, flags, 0, __pa(buf_page), | ||
165 | 0, 0, 0, 0, 0, 0); | ||
166 | if (rc != H_SUCCESS) { | ||
167 | free_page((unsigned long) buf_page); | ||
168 | return -EINVAL; | ||
169 | } | ||
170 | |||
171 | cnt = retbuf[0]; | ||
172 | for (i = 0; i < cnt; i++) { | ||
173 | cpu = drc_index_to_cpu(buf_page[2*i+1]); | ||
174 | if ((cpu_online(cpu) && !activate) || | ||
175 | (!cpu_online(cpu) && activate)) | ||
176 | s += sprintf(s, "%d,", cpu); | ||
177 | } | ||
178 | if (s > page) { /* Something to show */ | ||
179 | s--; /* Suppress last comma */ | ||
180 | s += sprintf(s, "\n"); | ||
181 | } | ||
182 | |||
183 | free_page((unsigned long) buf_page); | ||
184 | return s-page; | ||
185 | } | ||
186 | |||
187 | static ssize_t get_best_energy_data(struct sys_device *dev, | ||
188 | char *page, int activate) | ||
189 | { | ||
190 | int rc; | ||
191 | unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; | ||
192 | unsigned long flags = 0; | ||
193 | |||
194 | flags = FLAGS_MODE2; | ||
195 | if (activate) | ||
196 | flags |= FLAGS_ACTIVATE; | ||
197 | |||
198 | rc = plpar_hcall9(H_BEST_ENERGY, retbuf, flags, | ||
199 | cpu_to_drc_index(dev->id), | ||
200 | 0, 0, 0, 0, 0, 0, 0); | ||
201 | |||
202 | if (rc != H_SUCCESS) | ||
203 | return -EINVAL; | ||
204 | |||
205 | return sprintf(page, "%lu\n", retbuf[1] >> 32); | ||
206 | } | ||
207 | |||
208 | /* Wrapper functions */ | ||
209 | |||
210 | static ssize_t cpu_activate_hint_list_show(struct sysdev_class *class, | ||
211 | struct sysdev_class_attribute *attr, char *page) | ||
212 | { | ||
213 | return get_best_energy_list(page, 1); | ||
214 | } | ||
215 | |||
216 | static ssize_t cpu_deactivate_hint_list_show(struct sysdev_class *class, | ||
217 | struct sysdev_class_attribute *attr, char *page) | ||
218 | { | ||
219 | return get_best_energy_list(page, 0); | ||
220 | } | ||
221 | |||
222 | static ssize_t percpu_activate_hint_show(struct sys_device *dev, | ||
223 | struct sysdev_attribute *attr, char *page) | ||
224 | { | ||
225 | return get_best_energy_data(dev, page, 1); | ||
226 | } | ||
227 | |||
228 | static ssize_t percpu_deactivate_hint_show(struct sys_device *dev, | ||
229 | struct sysdev_attribute *attr, char *page) | ||
230 | { | ||
231 | return get_best_energy_data(dev, page, 0); | ||
232 | } | ||
233 | |||
234 | /* | ||
235 | * Create sysfs interface: | ||
236 | * /sys/devices/system/cpu/pseries_activate_hint_list | ||
237 | * /sys/devices/system/cpu/pseries_deactivate_hint_list | ||
238 | * Comma separated list of cpus to activate or deactivate | ||
239 | * /sys/devices/system/cpu/cpuN/pseries_activate_hint | ||
240 | * /sys/devices/system/cpu/cpuN/pseries_deactivate_hint | ||
241 | * Per-cpu value of the hint | ||
242 | */ | ||
243 | |||
244 | struct sysdev_class_attribute attr_cpu_activate_hint_list = | ||
245 | _SYSDEV_CLASS_ATTR(pseries_activate_hint_list, 0444, | ||
246 | cpu_activate_hint_list_show, NULL); | ||
247 | |||
248 | struct sysdev_class_attribute attr_cpu_deactivate_hint_list = | ||
249 | _SYSDEV_CLASS_ATTR(pseries_deactivate_hint_list, 0444, | ||
250 | cpu_deactivate_hint_list_show, NULL); | ||
251 | |||
252 | struct sysdev_attribute attr_percpu_activate_hint = | ||
253 | _SYSDEV_ATTR(pseries_activate_hint, 0444, | ||
254 | percpu_activate_hint_show, NULL); | ||
255 | |||
256 | struct sysdev_attribute attr_percpu_deactivate_hint = | ||
257 | _SYSDEV_ATTR(pseries_deactivate_hint, 0444, | ||
258 | percpu_deactivate_hint_show, NULL); | ||
259 | |||
260 | static int __init pseries_energy_init(void) | ||
261 | { | ||
262 | int cpu, err; | ||
263 | struct sys_device *cpu_sys_dev; | ||
264 | |||
265 | if (!check_for_h_best_energy()) { | ||
266 | printk(KERN_INFO "Hypercall H_BEST_ENERGY not supported\n"); | ||
267 | return 0; | ||
268 | } | ||
269 | /* Create the sysfs files */ | ||
270 | err = sysfs_create_file(&cpu_sysdev_class.kset.kobj, | ||
271 | &attr_cpu_activate_hint_list.attr); | ||
272 | if (!err) | ||
273 | err = sysfs_create_file(&cpu_sysdev_class.kset.kobj, | ||
274 | &attr_cpu_deactivate_hint_list.attr); | ||
275 | |||
276 | if (err) | ||
277 | return err; | ||
278 | for_each_possible_cpu(cpu) { | ||
279 | cpu_sys_dev = get_cpu_sysdev(cpu); | ||
280 | err = sysfs_create_file(&cpu_sys_dev->kobj, | ||
281 | &attr_percpu_activate_hint.attr); | ||
282 | if (err) | ||
283 | break; | ||
284 | err = sysfs_create_file(&cpu_sys_dev->kobj, | ||
285 | &attr_percpu_deactivate_hint.attr); | ||
286 | if (err) | ||
287 | break; | ||
288 | } | ||
289 | |||
290 | if (err) | ||
291 | return err; | ||
292 | |||
293 | sysfs_entries = 1; /* Removed entries on cleanup */ | ||
294 | return 0; | ||
295 | |||
296 | } | ||
297 | |||
298 | static void __exit pseries_energy_cleanup(void) | ||
299 | { | ||
300 | int cpu; | ||
301 | struct sys_device *cpu_sys_dev; | ||
302 | |||
303 | if (!sysfs_entries) | ||
304 | return; | ||
305 | |||
306 | /* Remove the sysfs files */ | ||
307 | sysfs_remove_file(&cpu_sysdev_class.kset.kobj, | ||
308 | &attr_cpu_activate_hint_list.attr); | ||
309 | |||
310 | sysfs_remove_file(&cpu_sysdev_class.kset.kobj, | ||
311 | &attr_cpu_deactivate_hint_list.attr); | ||
312 | |||
313 | for_each_possible_cpu(cpu) { | ||
314 | cpu_sys_dev = get_cpu_sysdev(cpu); | ||
315 | sysfs_remove_file(&cpu_sys_dev->kobj, | ||
316 | &attr_percpu_activate_hint.attr); | ||
317 | sysfs_remove_file(&cpu_sys_dev->kobj, | ||
318 | &attr_percpu_deactivate_hint.attr); | ||
319 | } | ||
320 | } | ||
321 | |||
322 | module_init(pseries_energy_init); | ||
323 | module_exit(pseries_energy_cleanup); | ||
324 | MODULE_DESCRIPTION("Driver for pSeries platform energy management"); | ||
325 | MODULE_AUTHOR("Vaidyanathan Srinivasan"); | ||
326 | MODULE_LICENSE("GPL"); | ||
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index a4fc6da87c2e..086d2ae4e06a 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c | |||
@@ -54,7 +54,8 @@ | |||
54 | static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX]; | 54 | static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX]; |
55 | static DEFINE_SPINLOCK(ras_log_buf_lock); | 55 | static DEFINE_SPINLOCK(ras_log_buf_lock); |
56 | 56 | ||
57 | static char mce_data_buf[RTAS_ERROR_LOG_MAX]; | 57 | static char global_mce_data_buf[RTAS_ERROR_LOG_MAX]; |
58 | static DEFINE_PER_CPU(__u64, mce_data_buf); | ||
58 | 59 | ||
59 | static int ras_get_sensor_state_token; | 60 | static int ras_get_sensor_state_token; |
60 | static int ras_check_exception_token; | 61 | static int ras_check_exception_token; |
@@ -121,7 +122,7 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id) | |||
121 | 122 | ||
122 | status = rtas_call(ras_check_exception_token, 6, 1, NULL, | 123 | status = rtas_call(ras_check_exception_token, 6, 1, NULL, |
123 | RTAS_VECTOR_EXTERNAL_INTERRUPT, | 124 | RTAS_VECTOR_EXTERNAL_INTERRUPT, |
124 | irq_map[irq].hwirq, | 125 | virq_to_hw(irq), |
125 | RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS, | 126 | RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS, |
126 | critical, __pa(&ras_log_buf), | 127 | critical, __pa(&ras_log_buf), |
127 | rtas_get_error_log_max()); | 128 | rtas_get_error_log_max()); |
@@ -156,7 +157,7 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id) | |||
156 | 157 | ||
157 | status = rtas_call(ras_check_exception_token, 6, 1, NULL, | 158 | status = rtas_call(ras_check_exception_token, 6, 1, NULL, |
158 | RTAS_VECTOR_EXTERNAL_INTERRUPT, | 159 | RTAS_VECTOR_EXTERNAL_INTERRUPT, |
159 | irq_map[irq].hwirq, | 160 | virq_to_hw(irq), |
160 | RTAS_INTERNAL_ERROR, 1 /*Time Critical */, | 161 | RTAS_INTERNAL_ERROR, 1 /*Time Critical */, |
161 | __pa(&ras_log_buf), | 162 | __pa(&ras_log_buf), |
162 | rtas_get_error_log_max()); | 163 | rtas_get_error_log_max()); |
@@ -196,12 +197,24 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id) | |||
196 | return IRQ_HANDLED; | 197 | return IRQ_HANDLED; |
197 | } | 198 | } |
198 | 199 | ||
199 | /* Get the error information for errors coming through the | 200 | /* |
201 | * Some versions of FWNMI place the buffer inside the 4kB page starting at | ||
202 | * 0x7000. Other versions place it inside the rtas buffer. We check both. | ||
203 | */ | ||
204 | #define VALID_FWNMI_BUFFER(A) \ | ||
205 | ((((A) >= 0x7000) && ((A) < 0x7ff0)) || \ | ||
206 | (((A) >= rtas.base) && ((A) < (rtas.base + rtas.size - 16)))) | ||
207 | |||
208 | /* | ||
209 | * Get the error information for errors coming through the | ||
200 | * FWNMI vectors. The pt_regs' r3 will be updated to reflect | 210 | * FWNMI vectors. The pt_regs' r3 will be updated to reflect |
201 | * the actual r3 if possible, and a ptr to the error log entry | 211 | * the actual r3 if possible, and a ptr to the error log entry |
202 | * will be returned if found. | 212 | * will be returned if found. |
203 | * | 213 | * |
204 | * The mce_data_buf does not have any locks or protection around it, | 214 | * If the RTAS error is not of the extended type, then we put it in a per |
215 | * cpu 64bit buffer. If it is the extended type we use global_mce_data_buf. | ||
216 | * | ||
217 | * The global_mce_data_buf does not have any locks or protection around it, | ||
205 | * if a second machine check comes in, or a system reset is done | 218 | * if a second machine check comes in, or a system reset is done |
206 | * before we have logged the error, then we will get corruption in the | 219 | * before we have logged the error, then we will get corruption in the |
207 | * error log. This is preferable over holding off on calling | 220 | * error log. This is preferable over holding off on calling |
@@ -210,20 +223,31 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id) | |||
210 | */ | 223 | */ |
211 | static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs) | 224 | static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs) |
212 | { | 225 | { |
213 | unsigned long errdata = regs->gpr[3]; | ||
214 | struct rtas_error_log *errhdr = NULL; | ||
215 | unsigned long *savep; | 226 | unsigned long *savep; |
227 | struct rtas_error_log *h, *errhdr = NULL; | ||
228 | |||
229 | if (!VALID_FWNMI_BUFFER(regs->gpr[3])) { | ||
230 | printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]); | ||
231 | return NULL; | ||
232 | } | ||
216 | 233 | ||
217 | if ((errdata >= 0x7000 && errdata < 0x7fff0) || | 234 | savep = __va(regs->gpr[3]); |
218 | (errdata >= rtas.base && errdata < rtas.base + rtas.size - 16)) { | 235 | regs->gpr[3] = savep[0]; /* restore original r3 */ |
219 | savep = __va(errdata); | 236 | |
220 | regs->gpr[3] = savep[0]; /* restore original r3 */ | 237 | /* If it isn't an extended log we can use the per cpu 64bit buffer */ |
221 | memset(mce_data_buf, 0, RTAS_ERROR_LOG_MAX); | 238 | h = (struct rtas_error_log *)&savep[1]; |
222 | memcpy(mce_data_buf, (char *)(savep + 1), RTAS_ERROR_LOG_MAX); | 239 | if (!h->extended) { |
223 | errhdr = (struct rtas_error_log *)mce_data_buf; | 240 | memcpy(&__get_cpu_var(mce_data_buf), h, sizeof(__u64)); |
241 | errhdr = (struct rtas_error_log *)&__get_cpu_var(mce_data_buf); | ||
224 | } else { | 242 | } else { |
225 | printk("FWNMI: corrupt r3\n"); | 243 | int len; |
244 | |||
245 | len = max_t(int, 8+h->extended_log_length, RTAS_ERROR_LOG_MAX); | ||
246 | memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX); | ||
247 | memcpy(global_mce_data_buf, h, len); | ||
248 | errhdr = (struct rtas_error_log *)global_mce_data_buf; | ||
226 | } | 249 | } |
250 | |||
227 | return errhdr; | 251 | return errhdr; |
228 | } | 252 | } |
229 | 253 | ||
@@ -235,7 +259,7 @@ static void fwnmi_release_errinfo(void) | |||
235 | { | 259 | { |
236 | int ret = rtas_call(rtas_token("ibm,nmi-interlock"), 0, 1, NULL); | 260 | int ret = rtas_call(rtas_token("ibm,nmi-interlock"), 0, 1, NULL); |
237 | if (ret != 0) | 261 | if (ret != 0) |
238 | printk("FWNMI: nmi-interlock failed: %d\n", ret); | 262 | printk(KERN_ERR "FWNMI: nmi-interlock failed: %d\n", ret); |
239 | } | 263 | } |
240 | 264 | ||
241 | int pSeries_system_reset_exception(struct pt_regs *regs) | 265 | int pSeries_system_reset_exception(struct pt_regs *regs) |
@@ -259,31 +283,43 @@ int pSeries_system_reset_exception(struct pt_regs *regs) | |||
259 | * Return 1 if corrected (or delivered a signal). | 283 | * Return 1 if corrected (or delivered a signal). |
260 | * Return 0 if there is nothing we can do. | 284 | * Return 0 if there is nothing we can do. |
261 | */ | 285 | */ |
262 | static int recover_mce(struct pt_regs *regs, struct rtas_error_log * err) | 286 | static int recover_mce(struct pt_regs *regs, struct rtas_error_log *err) |
263 | { | 287 | { |
264 | int nonfatal = 0; | 288 | int recovered = 0; |
265 | 289 | ||
266 | if (err->disposition == RTAS_DISP_FULLY_RECOVERED) { | 290 | if (!(regs->msr & MSR_RI)) { |
291 | /* If MSR_RI isn't set, we cannot recover */ | ||
292 | recovered = 0; | ||
293 | |||
294 | } else if (err->disposition == RTAS_DISP_FULLY_RECOVERED) { | ||
267 | /* Platform corrected itself */ | 295 | /* Platform corrected itself */ |
268 | nonfatal = 1; | 296 | recovered = 1; |
269 | } else if ((regs->msr & MSR_RI) && | 297 | |
270 | user_mode(regs) && | 298 | } else if (err->disposition == RTAS_DISP_LIMITED_RECOVERY) { |
271 | err->severity == RTAS_SEVERITY_ERROR_SYNC && | 299 | /* Platform corrected itself but could be degraded */ |
272 | err->disposition == RTAS_DISP_NOT_RECOVERED && | 300 | printk(KERN_ERR "MCE: limited recovery, system may " |
273 | err->target == RTAS_TARGET_MEMORY && | 301 | "be degraded\n"); |
274 | err->type == RTAS_TYPE_ECC_UNCORR && | 302 | recovered = 1; |
275 | !(current->pid == 0 || is_global_init(current))) { | 303 | |
276 | /* Kill off a user process with an ECC error */ | 304 | } else if (user_mode(regs) && !is_global_init(current) && |
277 | printk(KERN_ERR "MCE: uncorrectable ecc error for pid %d\n", | 305 | err->severity == RTAS_SEVERITY_ERROR_SYNC) { |
278 | current->pid); | 306 | |
279 | /* XXX something better for ECC error? */ | 307 | /* |
280 | _exception(SIGBUS, regs, BUS_ADRERR, regs->nip); | 308 | * If we received a synchronous error when in userspace |
281 | nonfatal = 1; | 309 | * kill the task. Firmware may report details of the fail |
310 | * asynchronously, so we can't rely on the target and type | ||
311 | * fields being valid here. | ||
312 | */ | ||
313 | printk(KERN_ERR "MCE: uncorrectable error, killing task " | ||
314 | "%s:%d\n", current->comm, current->pid); | ||
315 | |||
316 | _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip); | ||
317 | recovered = 1; | ||
282 | } | 318 | } |
283 | 319 | ||
284 | log_error((char *)err, ERR_TYPE_RTAS_LOG, !nonfatal); | 320 | log_error((char *)err, ERR_TYPE_RTAS_LOG, 0); |
285 | 321 | ||
286 | return nonfatal; | 322 | return recovered; |
287 | } | 323 | } |
288 | 324 | ||
289 | /* | 325 | /* |
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c index 57ddbb43b33a..1de2cbb92303 100644 --- a/arch/powerpc/platforms/pseries/reconfig.c +++ b/arch/powerpc/platforms/pseries/reconfig.c | |||
@@ -539,7 +539,8 @@ out: | |||
539 | } | 539 | } |
540 | 540 | ||
541 | static const struct file_operations ofdt_fops = { | 541 | static const struct file_operations ofdt_fops = { |
542 | .write = ofdt_write | 542 | .write = ofdt_write, |
543 | .llseek = noop_llseek, | ||
543 | }; | 544 | }; |
544 | 545 | ||
545 | /* create /proc/powerpc/ofdt write-only by root */ | 546 | /* create /proc/powerpc/ofdt write-only by root */ |
diff --git a/arch/powerpc/platforms/pseries/scanlog.c b/arch/powerpc/platforms/pseries/scanlog.c index 80e9e7652a4d..554457294a2b 100644 --- a/arch/powerpc/platforms/pseries/scanlog.c +++ b/arch/powerpc/platforms/pseries/scanlog.c | |||
@@ -170,6 +170,7 @@ const struct file_operations scanlog_fops = { | |||
170 | .write = scanlog_write, | 170 | .write = scanlog_write, |
171 | .open = scanlog_open, | 171 | .open = scanlog_open, |
172 | .release = scanlog_release, | 172 | .release = scanlog_release, |
173 | .llseek = noop_llseek, | ||
173 | }; | 174 | }; |
174 | 175 | ||
175 | static int __init scanlog_init(void) | 176 | static int __init scanlog_init(void) |
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index a6d19e3a505e..593acceeff96 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
@@ -53,9 +53,9 @@ | |||
53 | #include <asm/irq.h> | 53 | #include <asm/irq.h> |
54 | #include <asm/time.h> | 54 | #include <asm/time.h> |
55 | #include <asm/nvram.h> | 55 | #include <asm/nvram.h> |
56 | #include "xics.h" | ||
57 | #include <asm/pmc.h> | 56 | #include <asm/pmc.h> |
58 | #include <asm/mpic.h> | 57 | #include <asm/mpic.h> |
58 | #include <asm/xics.h> | ||
59 | #include <asm/ppc-pci.h> | 59 | #include <asm/ppc-pci.h> |
60 | #include <asm/i8259.h> | 60 | #include <asm/i8259.h> |
61 | #include <asm/udbg.h> | 61 | #include <asm/udbg.h> |
@@ -114,10 +114,13 @@ static void __init fwnmi_init(void) | |||
114 | 114 | ||
115 | static void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc) | 115 | static void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc) |
116 | { | 116 | { |
117 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
117 | unsigned int cascade_irq = i8259_irq(); | 118 | unsigned int cascade_irq = i8259_irq(); |
119 | |||
118 | if (cascade_irq != NO_IRQ) | 120 | if (cascade_irq != NO_IRQ) |
119 | generic_handle_irq(cascade_irq); | 121 | generic_handle_irq(cascade_irq); |
120 | desc->chip->eoi(irq); | 122 | |
123 | chip->irq_eoi(&desc->irq_data); | ||
121 | } | 124 | } |
122 | 125 | ||
123 | static void __init pseries_setup_i8259_cascade(void) | 126 | static void __init pseries_setup_i8259_cascade(void) |
@@ -166,7 +169,7 @@ static void __init pseries_setup_i8259_cascade(void) | |||
166 | printk(KERN_DEBUG "pic: PCI 8259 intack at 0x%016lx\n", intack); | 169 | printk(KERN_DEBUG "pic: PCI 8259 intack at 0x%016lx\n", intack); |
167 | i8259_init(found, intack); | 170 | i8259_init(found, intack); |
168 | of_node_put(found); | 171 | of_node_put(found); |
169 | set_irq_chained_handler(cascade, pseries_8259_cascade); | 172 | irq_set_chained_handler(cascade, pseries_8259_cascade); |
170 | } | 173 | } |
171 | 174 | ||
172 | static void __init pseries_mpic_init_IRQ(void) | 175 | static void __init pseries_mpic_init_IRQ(void) |
@@ -202,6 +205,9 @@ static void __init pseries_mpic_init_IRQ(void) | |||
202 | mpic_assign_isu(mpic, n, isuaddr); | 205 | mpic_assign_isu(mpic, n, isuaddr); |
203 | } | 206 | } |
204 | 207 | ||
208 | /* Setup top-level get_irq */ | ||
209 | ppc_md.get_irq = mpic_get_irq; | ||
210 | |||
205 | /* All ISUs are setup, complete initialization */ | 211 | /* All ISUs are setup, complete initialization */ |
206 | mpic_init(mpic); | 212 | mpic_init(mpic); |
207 | 213 | ||
@@ -211,7 +217,7 @@ static void __init pseries_mpic_init_IRQ(void) | |||
211 | 217 | ||
212 | static void __init pseries_xics_init_IRQ(void) | 218 | static void __init pseries_xics_init_IRQ(void) |
213 | { | 219 | { |
214 | xics_init_IRQ(); | 220 | xics_init(); |
215 | pseries_setup_i8259_cascade(); | 221 | pseries_setup_i8259_cascade(); |
216 | } | 222 | } |
217 | 223 | ||
@@ -235,7 +241,6 @@ static void __init pseries_discover_pic(void) | |||
235 | if (strstr(typep, "open-pic")) { | 241 | if (strstr(typep, "open-pic")) { |
236 | pSeries_mpic_node = of_node_get(np); | 242 | pSeries_mpic_node = of_node_get(np); |
237 | ppc_md.init_IRQ = pseries_mpic_init_IRQ; | 243 | ppc_md.init_IRQ = pseries_mpic_init_IRQ; |
238 | ppc_md.get_irq = mpic_get_irq; | ||
239 | setup_kexec_cpu_down_mpic(); | 244 | setup_kexec_cpu_down_mpic(); |
240 | smp_init_pseries_mpic(); | 245 | smp_init_pseries_mpic(); |
241 | return; | 246 | return; |
@@ -273,6 +278,79 @@ static struct notifier_block pci_dn_reconfig_nb = { | |||
273 | .notifier_call = pci_dn_reconfig_notifier, | 278 | .notifier_call = pci_dn_reconfig_notifier, |
274 | }; | 279 | }; |
275 | 280 | ||
281 | struct kmem_cache *dtl_cache; | ||
282 | |||
283 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
284 | /* | ||
285 | * Allocate space for the dispatch trace log for all possible cpus | ||
286 | * and register the buffers with the hypervisor. This is used for | ||
287 | * computing time stolen by the hypervisor. | ||
288 | */ | ||
289 | static int alloc_dispatch_logs(void) | ||
290 | { | ||
291 | int cpu, ret; | ||
292 | struct paca_struct *pp; | ||
293 | struct dtl_entry *dtl; | ||
294 | |||
295 | if (!firmware_has_feature(FW_FEATURE_SPLPAR)) | ||
296 | return 0; | ||
297 | |||
298 | if (!dtl_cache) | ||
299 | return 0; | ||
300 | |||
301 | for_each_possible_cpu(cpu) { | ||
302 | pp = &paca[cpu]; | ||
303 | dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL); | ||
304 | if (!dtl) { | ||
305 | pr_warn("Failed to allocate dispatch trace log for cpu %d\n", | ||
306 | cpu); | ||
307 | pr_warn("Stolen time statistics will be unreliable\n"); | ||
308 | break; | ||
309 | } | ||
310 | |||
311 | pp->dtl_ridx = 0; | ||
312 | pp->dispatch_log = dtl; | ||
313 | pp->dispatch_log_end = dtl + N_DISPATCH_LOG; | ||
314 | pp->dtl_curr = dtl; | ||
315 | } | ||
316 | |||
317 | /* Register the DTL for the current (boot) cpu */ | ||
318 | dtl = get_paca()->dispatch_log; | ||
319 | get_paca()->dtl_ridx = 0; | ||
320 | get_paca()->dtl_curr = dtl; | ||
321 | get_paca()->lppaca_ptr->dtl_idx = 0; | ||
322 | |||
323 | /* hypervisor reads buffer length from this field */ | ||
324 | dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES; | ||
325 | ret = register_dtl(hard_smp_processor_id(), __pa(dtl)); | ||
326 | if (ret) | ||
327 | pr_warn("DTL registration failed for boot cpu %d (%d)\n", | ||
328 | smp_processor_id(), ret); | ||
329 | get_paca()->lppaca_ptr->dtl_enable_mask = 2; | ||
330 | |||
331 | return 0; | ||
332 | } | ||
333 | #else /* !CONFIG_VIRT_CPU_ACCOUNTING */ | ||
334 | static inline int alloc_dispatch_logs(void) | ||
335 | { | ||
336 | return 0; | ||
337 | } | ||
338 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ | ||
339 | |||
340 | static int alloc_dispatch_log_kmem_cache(void) | ||
341 | { | ||
342 | dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES, | ||
343 | DISPATCH_LOG_BYTES, 0, NULL); | ||
344 | if (!dtl_cache) { | ||
345 | pr_warn("Failed to create dispatch trace log buffer cache\n"); | ||
346 | pr_warn("Stolen time statistics will be unreliable\n"); | ||
347 | return 0; | ||
348 | } | ||
349 | |||
350 | return alloc_dispatch_logs(); | ||
351 | } | ||
352 | early_initcall(alloc_dispatch_log_kmem_cache); | ||
353 | |||
276 | static void __init pSeries_setup_arch(void) | 354 | static void __init pSeries_setup_arch(void) |
277 | { | 355 | { |
278 | /* Discover PIC type and setup ppc_md accordingly */ | 356 | /* Discover PIC type and setup ppc_md accordingly */ |
@@ -323,7 +401,7 @@ static int __init pSeries_init_panel(void) | |||
323 | 401 | ||
324 | return 0; | 402 | return 0; |
325 | } | 403 | } |
326 | arch_initcall(pSeries_init_panel); | 404 | machine_arch_initcall(pseries, pSeries_init_panel); |
327 | 405 | ||
328 | static int pseries_set_dabr(unsigned long dabr) | 406 | static int pseries_set_dabr(unsigned long dabr) |
329 | { | 407 | { |
@@ -340,6 +418,16 @@ static int pseries_set_xdabr(unsigned long dabr) | |||
340 | #define CMO_CHARACTERISTICS_TOKEN 44 | 418 | #define CMO_CHARACTERISTICS_TOKEN 44 |
341 | #define CMO_MAXLENGTH 1026 | 419 | #define CMO_MAXLENGTH 1026 |
342 | 420 | ||
421 | void pSeries_coalesce_init(void) | ||
422 | { | ||
423 | struct hvcall_mpp_x_data mpp_x_data; | ||
424 | |||
425 | if (firmware_has_feature(FW_FEATURE_CMO) && !h_get_mpp_x(&mpp_x_data)) | ||
426 | powerpc_firmware_features |= FW_FEATURE_XCMO; | ||
427 | else | ||
428 | powerpc_firmware_features &= ~FW_FEATURE_XCMO; | ||
429 | } | ||
430 | |||
343 | /** | 431 | /** |
344 | * fw_cmo_feature_init - FW_FEATURE_CMO is not stored in ibm,hypertas-functions, | 432 | * fw_cmo_feature_init - FW_FEATURE_CMO is not stored in ibm,hypertas-functions, |
345 | * handle that here. (Stolen from parse_system_parameter_string) | 433 | * handle that here. (Stolen from parse_system_parameter_string) |
@@ -409,6 +497,7 @@ void pSeries_cmo_feature_init(void) | |||
409 | pr_debug("CMO enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP, | 497 | pr_debug("CMO enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP, |
410 | CMO_SecPSP); | 498 | CMO_SecPSP); |
411 | powerpc_firmware_features |= FW_FEATURE_CMO; | 499 | powerpc_firmware_features |= FW_FEATURE_CMO; |
500 | pSeries_coalesce_init(); | ||
412 | } else | 501 | } else |
413 | pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP, | 502 | pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP, |
414 | CMO_SecPSP); | 503 | CMO_SecPSP); |
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 0317cce877c6..fbffd7e47ab8 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c | |||
@@ -44,10 +44,11 @@ | |||
44 | #include <asm/mpic.h> | 44 | #include <asm/mpic.h> |
45 | #include <asm/vdso_datapage.h> | 45 | #include <asm/vdso_datapage.h> |
46 | #include <asm/cputhreads.h> | 46 | #include <asm/cputhreads.h> |
47 | #include <asm/mpic.h> | ||
48 | #include <asm/xics.h> | ||
47 | 49 | ||
48 | #include "plpar_wrappers.h" | 50 | #include "plpar_wrappers.h" |
49 | #include "pseries.h" | 51 | #include "pseries.h" |
50 | #include "xics.h" | ||
51 | #include "offline_states.h" | 52 | #include "offline_states.h" |
52 | 53 | ||
53 | 54 | ||
@@ -64,8 +65,8 @@ int smp_query_cpu_stopped(unsigned int pcpu) | |||
64 | int qcss_tok = rtas_token("query-cpu-stopped-state"); | 65 | int qcss_tok = rtas_token("query-cpu-stopped-state"); |
65 | 66 | ||
66 | if (qcss_tok == RTAS_UNKNOWN_SERVICE) { | 67 | if (qcss_tok == RTAS_UNKNOWN_SERVICE) { |
67 | printk(KERN_INFO "Firmware doesn't support " | 68 | printk_once(KERN_INFO |
68 | "query-cpu-stopped-state\n"); | 69 | "Firmware doesn't support query-cpu-stopped-state\n"); |
69 | return QCSS_HARDWARE_ERROR; | 70 | return QCSS_HARDWARE_ERROR; |
70 | } | 71 | } |
71 | 72 | ||
@@ -112,10 +113,10 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) | |||
112 | 113 | ||
113 | /* Fixup atomic count: it exited inside IRQ handler. */ | 114 | /* Fixup atomic count: it exited inside IRQ handler. */ |
114 | task_thread_info(paca[lcpu].__current)->preempt_count = 0; | 115 | task_thread_info(paca[lcpu].__current)->preempt_count = 0; |
115 | 116 | #ifdef CONFIG_HOTPLUG_CPU | |
116 | if (get_cpu_current_state(lcpu) == CPU_STATE_INACTIVE) | 117 | if (get_cpu_current_state(lcpu) == CPU_STATE_INACTIVE) |
117 | goto out; | 118 | goto out; |
118 | 119 | #endif | |
119 | /* | 120 | /* |
120 | * If the RTAS start-cpu token does not exist then presume the | 121 | * If the RTAS start-cpu token does not exist then presume the |
121 | * cpu is already spinning. | 122 | * cpu is already spinning. |
@@ -130,11 +131,12 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) | |||
130 | return 0; | 131 | return 0; |
131 | } | 132 | } |
132 | 133 | ||
134 | #ifdef CONFIG_HOTPLUG_CPU | ||
133 | out: | 135 | out: |
136 | #endif | ||
134 | return 1; | 137 | return 1; |
135 | } | 138 | } |
136 | 139 | ||
137 | #ifdef CONFIG_XICS | ||
138 | static void __devinit smp_xics_setup_cpu(int cpu) | 140 | static void __devinit smp_xics_setup_cpu(int cpu) |
139 | { | 141 | { |
140 | if (cpu != boot_cpuid) | 142 | if (cpu != boot_cpuid) |
@@ -144,20 +146,18 @@ static void __devinit smp_xics_setup_cpu(int cpu) | |||
144 | vpa_init(cpu); | 146 | vpa_init(cpu); |
145 | 147 | ||
146 | cpumask_clear_cpu(cpu, of_spin_mask); | 148 | cpumask_clear_cpu(cpu, of_spin_mask); |
149 | #ifdef CONFIG_HOTPLUG_CPU | ||
147 | set_cpu_current_state(cpu, CPU_STATE_ONLINE); | 150 | set_cpu_current_state(cpu, CPU_STATE_ONLINE); |
148 | set_default_offline_state(cpu); | 151 | set_default_offline_state(cpu); |
149 | 152 | #endif | |
150 | } | 153 | } |
151 | #endif /* CONFIG_XICS */ | ||
152 | 154 | ||
153 | static void __devinit smp_pSeries_kick_cpu(int nr) | 155 | static int __devinit smp_pSeries_kick_cpu(int nr) |
154 | { | 156 | { |
155 | long rc; | ||
156 | unsigned long hcpuid; | ||
157 | BUG_ON(nr < 0 || nr >= NR_CPUS); | 157 | BUG_ON(nr < 0 || nr >= NR_CPUS); |
158 | 158 | ||
159 | if (!smp_startup_cpu(nr)) | 159 | if (!smp_startup_cpu(nr)) |
160 | return; | 160 | return -ENOENT; |
161 | 161 | ||
162 | /* | 162 | /* |
163 | * The processor is currently spinning, waiting for the | 163 | * The processor is currently spinning, waiting for the |
@@ -165,16 +165,22 @@ static void __devinit smp_pSeries_kick_cpu(int nr) | |||
165 | * the processor will continue on to secondary_start | 165 | * the processor will continue on to secondary_start |
166 | */ | 166 | */ |
167 | paca[nr].cpu_start = 1; | 167 | paca[nr].cpu_start = 1; |
168 | 168 | #ifdef CONFIG_HOTPLUG_CPU | |
169 | set_preferred_offline_state(nr, CPU_STATE_ONLINE); | 169 | set_preferred_offline_state(nr, CPU_STATE_ONLINE); |
170 | 170 | ||
171 | if (get_cpu_current_state(nr) == CPU_STATE_INACTIVE) { | 171 | if (get_cpu_current_state(nr) == CPU_STATE_INACTIVE) { |
172 | long rc; | ||
173 | unsigned long hcpuid; | ||
174 | |||
172 | hcpuid = get_hard_smp_processor_id(nr); | 175 | hcpuid = get_hard_smp_processor_id(nr); |
173 | rc = plpar_hcall_norets(H_PROD, hcpuid); | 176 | rc = plpar_hcall_norets(H_PROD, hcpuid); |
174 | if (rc != H_SUCCESS) | 177 | if (rc != H_SUCCESS) |
175 | printk(KERN_ERR "Error: Prod to wake up processor %d " | 178 | printk(KERN_ERR "Error: Prod to wake up processor %d " |
176 | "Ret= %ld\n", nr, rc); | 179 | "Ret= %ld\n", nr, rc); |
177 | } | 180 | } |
181 | #endif | ||
182 | |||
183 | return 0; | ||
178 | } | 184 | } |
179 | 185 | ||
180 | static int smp_pSeries_cpu_bootable(unsigned int nr) | 186 | static int smp_pSeries_cpu_bootable(unsigned int nr) |
@@ -192,23 +198,22 @@ static int smp_pSeries_cpu_bootable(unsigned int nr) | |||
192 | 198 | ||
193 | return 1; | 199 | return 1; |
194 | } | 200 | } |
195 | #ifdef CONFIG_MPIC | 201 | |
196 | static struct smp_ops_t pSeries_mpic_smp_ops = { | 202 | static struct smp_ops_t pSeries_mpic_smp_ops = { |
197 | .message_pass = smp_mpic_message_pass, | 203 | .message_pass = smp_mpic_message_pass, |
198 | .probe = smp_mpic_probe, | 204 | .probe = smp_mpic_probe, |
199 | .kick_cpu = smp_pSeries_kick_cpu, | 205 | .kick_cpu = smp_pSeries_kick_cpu, |
200 | .setup_cpu = smp_mpic_setup_cpu, | 206 | .setup_cpu = smp_mpic_setup_cpu, |
201 | }; | 207 | }; |
202 | #endif | 208 | |
203 | #ifdef CONFIG_XICS | ||
204 | static struct smp_ops_t pSeries_xics_smp_ops = { | 209 | static struct smp_ops_t pSeries_xics_smp_ops = { |
205 | .message_pass = smp_xics_message_pass, | 210 | .message_pass = smp_muxed_ipi_message_pass, |
206 | .probe = smp_xics_probe, | 211 | .cause_ipi = NULL, /* Filled at runtime by xics_smp_probe() */ |
212 | .probe = xics_smp_probe, | ||
207 | .kick_cpu = smp_pSeries_kick_cpu, | 213 | .kick_cpu = smp_pSeries_kick_cpu, |
208 | .setup_cpu = smp_xics_setup_cpu, | 214 | .setup_cpu = smp_xics_setup_cpu, |
209 | .cpu_bootable = smp_pSeries_cpu_bootable, | 215 | .cpu_bootable = smp_pSeries_cpu_bootable, |
210 | }; | 216 | }; |
211 | #endif | ||
212 | 217 | ||
213 | /* This is called very early */ | 218 | /* This is called very early */ |
214 | static void __init smp_init_pseries(void) | 219 | static void __init smp_init_pseries(void) |
@@ -240,14 +245,12 @@ static void __init smp_init_pseries(void) | |||
240 | pr_debug(" <- smp_init_pSeries()\n"); | 245 | pr_debug(" <- smp_init_pSeries()\n"); |
241 | } | 246 | } |
242 | 247 | ||
243 | #ifdef CONFIG_MPIC | ||
244 | void __init smp_init_pseries_mpic(void) | 248 | void __init smp_init_pseries_mpic(void) |
245 | { | 249 | { |
246 | smp_ops = &pSeries_mpic_smp_ops; | 250 | smp_ops = &pSeries_mpic_smp_ops; |
247 | 251 | ||
248 | smp_init_pseries(); | 252 | smp_init_pseries(); |
249 | } | 253 | } |
250 | #endif | ||
251 | 254 | ||
252 | void __init smp_init_pseries_xics(void) | 255 | void __init smp_init_pseries_xics(void) |
253 | { | 256 | { |
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c index ed72098bb4e3..a8ca289ff267 100644 --- a/arch/powerpc/platforms/pseries/suspend.c +++ b/arch/powerpc/platforms/pseries/suspend.c | |||
@@ -153,7 +153,7 @@ static struct sysdev_class suspend_sysdev_class = { | |||
153 | .name = "power", | 153 | .name = "power", |
154 | }; | 154 | }; |
155 | 155 | ||
156 | static struct platform_suspend_ops pseries_suspend_ops = { | 156 | static const struct platform_suspend_ops pseries_suspend_ops = { |
157 | .valid = suspend_valid_only_mem, | 157 | .valid = suspend_valid_only_mem, |
158 | .begin = pseries_suspend_begin, | 158 | .begin = pseries_suspend_begin, |
159 | .prepare_late = pseries_prepare_late, | 159 | .prepare_late = pseries_prepare_late, |
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c deleted file mode 100644 index 93834b0d8272..000000000000 --- a/arch/powerpc/platforms/pseries/xics.c +++ /dev/null | |||
@@ -1,943 +0,0 @@ | |||
1 | /* | ||
2 | * arch/powerpc/platforms/pseries/xics.c | ||
3 | * | ||
4 | * Copyright 2000 IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/types.h> | ||
13 | #include <linux/threads.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/irq.h> | ||
16 | #include <linux/smp.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/radix-tree.h> | ||
20 | #include <linux/cpu.h> | ||
21 | #include <linux/msi.h> | ||
22 | #include <linux/of.h> | ||
23 | #include <linux/percpu.h> | ||
24 | |||
25 | #include <asm/firmware.h> | ||
26 | #include <asm/io.h> | ||
27 | #include <asm/pgtable.h> | ||
28 | #include <asm/smp.h> | ||
29 | #include <asm/rtas.h> | ||
30 | #include <asm/hvcall.h> | ||
31 | #include <asm/machdep.h> | ||
32 | |||
33 | #include "xics.h" | ||
34 | #include "plpar_wrappers.h" | ||
35 | |||
36 | static struct irq_host *xics_host; | ||
37 | |||
38 | #define XICS_IPI 2 | ||
39 | #define XICS_IRQ_SPURIOUS 0 | ||
40 | |||
41 | /* Want a priority other than 0. Various HW issues require this. */ | ||
42 | #define DEFAULT_PRIORITY 5 | ||
43 | |||
44 | /* | ||
45 | * Mark IPIs as higher priority so we can take them inside interrupts that | ||
46 | * arent marked IRQF_DISABLED | ||
47 | */ | ||
48 | #define IPI_PRIORITY 4 | ||
49 | |||
50 | /* The least favored priority */ | ||
51 | #define LOWEST_PRIORITY 0xFF | ||
52 | |||
53 | /* The number of priorities defined above */ | ||
54 | #define MAX_NUM_PRIORITIES 3 | ||
55 | |||
56 | static unsigned int default_server = 0xFF; | ||
57 | static unsigned int default_distrib_server = 0; | ||
58 | static unsigned int interrupt_server_size = 8; | ||
59 | |||
60 | /* RTAS service tokens */ | ||
61 | static int ibm_get_xive; | ||
62 | static int ibm_set_xive; | ||
63 | static int ibm_int_on; | ||
64 | static int ibm_int_off; | ||
65 | |||
66 | struct xics_cppr { | ||
67 | unsigned char stack[MAX_NUM_PRIORITIES]; | ||
68 | int index; | ||
69 | }; | ||
70 | |||
71 | static DEFINE_PER_CPU(struct xics_cppr, xics_cppr); | ||
72 | |||
73 | /* Direct hardware low level accessors */ | ||
74 | |||
75 | /* The part of the interrupt presentation layer that we care about */ | ||
76 | struct xics_ipl { | ||
77 | union { | ||
78 | u32 word; | ||
79 | u8 bytes[4]; | ||
80 | } xirr_poll; | ||
81 | union { | ||
82 | u32 word; | ||
83 | u8 bytes[4]; | ||
84 | } xirr; | ||
85 | u32 dummy; | ||
86 | union { | ||
87 | u32 word; | ||
88 | u8 bytes[4]; | ||
89 | } qirr; | ||
90 | }; | ||
91 | |||
92 | static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS]; | ||
93 | |||
94 | static inline unsigned int direct_xirr_info_get(void) | ||
95 | { | ||
96 | int cpu = smp_processor_id(); | ||
97 | |||
98 | return in_be32(&xics_per_cpu[cpu]->xirr.word); | ||
99 | } | ||
100 | |||
101 | static inline void direct_xirr_info_set(unsigned int value) | ||
102 | { | ||
103 | int cpu = smp_processor_id(); | ||
104 | |||
105 | out_be32(&xics_per_cpu[cpu]->xirr.word, value); | ||
106 | } | ||
107 | |||
108 | static inline void direct_cppr_info(u8 value) | ||
109 | { | ||
110 | int cpu = smp_processor_id(); | ||
111 | |||
112 | out_8(&xics_per_cpu[cpu]->xirr.bytes[0], value); | ||
113 | } | ||
114 | |||
115 | static inline void direct_qirr_info(int n_cpu, u8 value) | ||
116 | { | ||
117 | out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value); | ||
118 | } | ||
119 | |||
120 | |||
121 | /* LPAR low level accessors */ | ||
122 | |||
123 | static inline unsigned int lpar_xirr_info_get(unsigned char cppr) | ||
124 | { | ||
125 | unsigned long lpar_rc; | ||
126 | unsigned long return_value; | ||
127 | |||
128 | lpar_rc = plpar_xirr(&return_value, cppr); | ||
129 | if (lpar_rc != H_SUCCESS) | ||
130 | panic(" bad return code xirr - rc = %lx\n", lpar_rc); | ||
131 | return (unsigned int)return_value; | ||
132 | } | ||
133 | |||
134 | static inline void lpar_xirr_info_set(unsigned int value) | ||
135 | { | ||
136 | unsigned long lpar_rc; | ||
137 | |||
138 | lpar_rc = plpar_eoi(value); | ||
139 | if (lpar_rc != H_SUCCESS) | ||
140 | panic("bad return code EOI - rc = %ld, value=%x\n", lpar_rc, | ||
141 | value); | ||
142 | } | ||
143 | |||
144 | static inline void lpar_cppr_info(u8 value) | ||
145 | { | ||
146 | unsigned long lpar_rc; | ||
147 | |||
148 | lpar_rc = plpar_cppr(value); | ||
149 | if (lpar_rc != H_SUCCESS) | ||
150 | panic("bad return code cppr - rc = %lx\n", lpar_rc); | ||
151 | } | ||
152 | |||
153 | static inline void lpar_qirr_info(int n_cpu , u8 value) | ||
154 | { | ||
155 | unsigned long lpar_rc; | ||
156 | |||
157 | lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value); | ||
158 | if (lpar_rc != H_SUCCESS) | ||
159 | panic("bad return code qirr - rc = %lx\n", lpar_rc); | ||
160 | } | ||
161 | |||
162 | |||
163 | /* Interface to generic irq subsystem */ | ||
164 | |||
165 | #ifdef CONFIG_SMP | ||
166 | /* | ||
167 | * For the moment we only implement delivery to all cpus or one cpu. | ||
168 | * | ||
169 | * If the requested affinity is cpu_all_mask, we set global affinity. | ||
170 | * If not we set it to the first cpu in the mask, even if multiple cpus | ||
171 | * are set. This is so things like irqbalance (which set core and package | ||
172 | * wide affinities) do the right thing. | ||
173 | */ | ||
174 | static int get_irq_server(unsigned int virq, const struct cpumask *cpumask, | ||
175 | unsigned int strict_check) | ||
176 | { | ||
177 | |||
178 | if (!distribute_irqs) | ||
179 | return default_server; | ||
180 | |||
181 | if (!cpumask_equal(cpumask, cpu_all_mask)) { | ||
182 | int server = cpumask_first_and(cpu_online_mask, cpumask); | ||
183 | |||
184 | if (server < nr_cpu_ids) | ||
185 | return get_hard_smp_processor_id(server); | ||
186 | |||
187 | if (strict_check) | ||
188 | return -1; | ||
189 | } | ||
190 | |||
191 | /* | ||
192 | * Workaround issue with some versions of JS20 firmware that | ||
193 | * deliver interrupts to cpus which haven't been started. This | ||
194 | * happens when using the maxcpus= boot option. | ||
195 | */ | ||
196 | if (cpumask_equal(cpu_online_mask, cpu_present_mask)) | ||
197 | return default_distrib_server; | ||
198 | |||
199 | return default_server; | ||
200 | } | ||
201 | #else | ||
202 | #define get_irq_server(virq, cpumask, strict_check) (default_server) | ||
203 | #endif | ||
204 | |||
205 | static void xics_unmask_irq(unsigned int virq) | ||
206 | { | ||
207 | unsigned int irq; | ||
208 | int call_status; | ||
209 | int server; | ||
210 | |||
211 | pr_devel("xics: unmask virq %d\n", virq); | ||
212 | |||
213 | irq = (unsigned int)irq_map[virq].hwirq; | ||
214 | pr_devel(" -> map to hwirq 0x%x\n", irq); | ||
215 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) | ||
216 | return; | ||
217 | |||
218 | server = get_irq_server(virq, irq_to_desc(virq)->affinity, 0); | ||
219 | |||
220 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, | ||
221 | DEFAULT_PRIORITY); | ||
222 | if (call_status != 0) { | ||
223 | printk(KERN_ERR | ||
224 | "%s: ibm_set_xive irq %u server %x returned %d\n", | ||
225 | __func__, irq, server, call_status); | ||
226 | return; | ||
227 | } | ||
228 | |||
229 | /* Now unmask the interrupt (often a no-op) */ | ||
230 | call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq); | ||
231 | if (call_status != 0) { | ||
232 | printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n", | ||
233 | __func__, irq, call_status); | ||
234 | return; | ||
235 | } | ||
236 | } | ||
237 | |||
238 | static unsigned int xics_startup(unsigned int virq) | ||
239 | { | ||
240 | /* | ||
241 | * The generic MSI code returns with the interrupt disabled on the | ||
242 | * card, using the MSI mask bits. Firmware doesn't appear to unmask | ||
243 | * at that level, so we do it here by hand. | ||
244 | */ | ||
245 | if (irq_to_desc(virq)->msi_desc) | ||
246 | unmask_msi_irq(virq); | ||
247 | |||
248 | /* unmask it */ | ||
249 | xics_unmask_irq(virq); | ||
250 | return 0; | ||
251 | } | ||
252 | |||
253 | static void xics_mask_real_irq(unsigned int irq) | ||
254 | { | ||
255 | int call_status; | ||
256 | |||
257 | if (irq == XICS_IPI) | ||
258 | return; | ||
259 | |||
260 | call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq); | ||
261 | if (call_status != 0) { | ||
262 | printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n", | ||
263 | __func__, irq, call_status); | ||
264 | return; | ||
265 | } | ||
266 | |||
267 | /* Have to set XIVE to 0xff to be able to remove a slot */ | ||
268 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, | ||
269 | default_server, 0xff); | ||
270 | if (call_status != 0) { | ||
271 | printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n", | ||
272 | __func__, irq, call_status); | ||
273 | return; | ||
274 | } | ||
275 | } | ||
276 | |||
277 | static void xics_mask_irq(unsigned int virq) | ||
278 | { | ||
279 | unsigned int irq; | ||
280 | |||
281 | pr_devel("xics: mask virq %d\n", virq); | ||
282 | |||
283 | irq = (unsigned int)irq_map[virq].hwirq; | ||
284 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) | ||
285 | return; | ||
286 | xics_mask_real_irq(irq); | ||
287 | } | ||
288 | |||
289 | static void xics_mask_unknown_vec(unsigned int vec) | ||
290 | { | ||
291 | printk(KERN_ERR "Interrupt %u (real) is invalid, disabling it.\n", vec); | ||
292 | xics_mask_real_irq(vec); | ||
293 | } | ||
294 | |||
295 | static inline unsigned int xics_xirr_vector(unsigned int xirr) | ||
296 | { | ||
297 | /* | ||
298 | * The top byte is the old cppr, to be restored on EOI. | ||
299 | * The remaining 24 bits are the vector. | ||
300 | */ | ||
301 | return xirr & 0x00ffffff; | ||
302 | } | ||
303 | |||
304 | static void push_cppr(unsigned int vec) | ||
305 | { | ||
306 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
307 | |||
308 | if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1)) | ||
309 | return; | ||
310 | |||
311 | if (vec == XICS_IPI) | ||
312 | os_cppr->stack[++os_cppr->index] = IPI_PRIORITY; | ||
313 | else | ||
314 | os_cppr->stack[++os_cppr->index] = DEFAULT_PRIORITY; | ||
315 | } | ||
316 | |||
317 | static unsigned int xics_get_irq_direct(void) | ||
318 | { | ||
319 | unsigned int xirr = direct_xirr_info_get(); | ||
320 | unsigned int vec = xics_xirr_vector(xirr); | ||
321 | unsigned int irq; | ||
322 | |||
323 | if (vec == XICS_IRQ_SPURIOUS) | ||
324 | return NO_IRQ; | ||
325 | |||
326 | irq = irq_radix_revmap_lookup(xics_host, vec); | ||
327 | if (likely(irq != NO_IRQ)) { | ||
328 | push_cppr(vec); | ||
329 | return irq; | ||
330 | } | ||
331 | |||
332 | /* We don't have a linux mapping, so have rtas mask it. */ | ||
333 | xics_mask_unknown_vec(vec); | ||
334 | |||
335 | /* We might learn about it later, so EOI it */ | ||
336 | direct_xirr_info_set(xirr); | ||
337 | return NO_IRQ; | ||
338 | } | ||
339 | |||
340 | static unsigned int xics_get_irq_lpar(void) | ||
341 | { | ||
342 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
343 | unsigned int xirr = lpar_xirr_info_get(os_cppr->stack[os_cppr->index]); | ||
344 | unsigned int vec = xics_xirr_vector(xirr); | ||
345 | unsigned int irq; | ||
346 | |||
347 | if (vec == XICS_IRQ_SPURIOUS) | ||
348 | return NO_IRQ; | ||
349 | |||
350 | irq = irq_radix_revmap_lookup(xics_host, vec); | ||
351 | if (likely(irq != NO_IRQ)) { | ||
352 | push_cppr(vec); | ||
353 | return irq; | ||
354 | } | ||
355 | |||
356 | /* We don't have a linux mapping, so have RTAS mask it. */ | ||
357 | xics_mask_unknown_vec(vec); | ||
358 | |||
359 | /* We might learn about it later, so EOI it */ | ||
360 | lpar_xirr_info_set(xirr); | ||
361 | return NO_IRQ; | ||
362 | } | ||
363 | |||
364 | static unsigned char pop_cppr(void) | ||
365 | { | ||
366 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
367 | |||
368 | if (WARN_ON(os_cppr->index < 1)) | ||
369 | return LOWEST_PRIORITY; | ||
370 | |||
371 | return os_cppr->stack[--os_cppr->index]; | ||
372 | } | ||
373 | |||
374 | static void xics_eoi_direct(unsigned int virq) | ||
375 | { | ||
376 | unsigned int irq = (unsigned int)irq_map[virq].hwirq; | ||
377 | |||
378 | iosync(); | ||
379 | direct_xirr_info_set((pop_cppr() << 24) | irq); | ||
380 | } | ||
381 | |||
382 | static void xics_eoi_lpar(unsigned int virq) | ||
383 | { | ||
384 | unsigned int irq = (unsigned int)irq_map[virq].hwirq; | ||
385 | |||
386 | iosync(); | ||
387 | lpar_xirr_info_set((pop_cppr() << 24) | irq); | ||
388 | } | ||
389 | |||
390 | static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask) | ||
391 | { | ||
392 | unsigned int irq; | ||
393 | int status; | ||
394 | int xics_status[2]; | ||
395 | int irq_server; | ||
396 | |||
397 | irq = (unsigned int)irq_map[virq].hwirq; | ||
398 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) | ||
399 | return -1; | ||
400 | |||
401 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); | ||
402 | |||
403 | if (status) { | ||
404 | printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", | ||
405 | __func__, irq, status); | ||
406 | return -1; | ||
407 | } | ||
408 | |||
409 | irq_server = get_irq_server(virq, cpumask, 1); | ||
410 | if (irq_server == -1) { | ||
411 | char cpulist[128]; | ||
412 | cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); | ||
413 | printk(KERN_WARNING | ||
414 | "%s: No online cpus in the mask %s for irq %d\n", | ||
415 | __func__, cpulist, virq); | ||
416 | return -1; | ||
417 | } | ||
418 | |||
419 | status = rtas_call(ibm_set_xive, 3, 1, NULL, | ||
420 | irq, irq_server, xics_status[1]); | ||
421 | |||
422 | if (status) { | ||
423 | printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n", | ||
424 | __func__, irq, status); | ||
425 | return -1; | ||
426 | } | ||
427 | |||
428 | return 0; | ||
429 | } | ||
430 | |||
431 | static struct irq_chip xics_pic_direct = { | ||
432 | .name = "XICS", | ||
433 | .startup = xics_startup, | ||
434 | .mask = xics_mask_irq, | ||
435 | .unmask = xics_unmask_irq, | ||
436 | .eoi = xics_eoi_direct, | ||
437 | .set_affinity = xics_set_affinity | ||
438 | }; | ||
439 | |||
440 | static struct irq_chip xics_pic_lpar = { | ||
441 | .name = "XICS", | ||
442 | .startup = xics_startup, | ||
443 | .mask = xics_mask_irq, | ||
444 | .unmask = xics_unmask_irq, | ||
445 | .eoi = xics_eoi_lpar, | ||
446 | .set_affinity = xics_set_affinity | ||
447 | }; | ||
448 | |||
449 | |||
450 | /* Interface to arch irq controller subsystem layer */ | ||
451 | |||
452 | /* Points to the irq_chip we're actually using */ | ||
453 | static struct irq_chip *xics_irq_chip; | ||
454 | |||
455 | static int xics_host_match(struct irq_host *h, struct device_node *node) | ||
456 | { | ||
457 | /* IBM machines have interrupt parents of various funky types for things | ||
458 | * like vdevices, events, etc... The trick we use here is to match | ||
459 | * everything here except the legacy 8259 which is compatible "chrp,iic" | ||
460 | */ | ||
461 | return !of_device_is_compatible(node, "chrp,iic"); | ||
462 | } | ||
463 | |||
464 | static int xics_host_map(struct irq_host *h, unsigned int virq, | ||
465 | irq_hw_number_t hw) | ||
466 | { | ||
467 | pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw); | ||
468 | |||
469 | /* Insert the interrupt mapping into the radix tree for fast lookup */ | ||
470 | irq_radix_revmap_insert(xics_host, virq, hw); | ||
471 | |||
472 | irq_to_desc(virq)->status |= IRQ_LEVEL; | ||
473 | set_irq_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq); | ||
474 | return 0; | ||
475 | } | ||
476 | |||
477 | static int xics_host_xlate(struct irq_host *h, struct device_node *ct, | ||
478 | const u32 *intspec, unsigned int intsize, | ||
479 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) | ||
480 | |||
481 | { | ||
482 | /* Current xics implementation translates everything | ||
483 | * to level. It is not technically right for MSIs but this | ||
484 | * is irrelevant at this point. We might get smarter in the future | ||
485 | */ | ||
486 | *out_hwirq = intspec[0]; | ||
487 | *out_flags = IRQ_TYPE_LEVEL_LOW; | ||
488 | |||
489 | return 0; | ||
490 | } | ||
491 | |||
492 | static struct irq_host_ops xics_host_ops = { | ||
493 | .match = xics_host_match, | ||
494 | .map = xics_host_map, | ||
495 | .xlate = xics_host_xlate, | ||
496 | }; | ||
497 | |||
498 | static void __init xics_init_host(void) | ||
499 | { | ||
500 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
501 | xics_irq_chip = &xics_pic_lpar; | ||
502 | else | ||
503 | xics_irq_chip = &xics_pic_direct; | ||
504 | |||
505 | xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops, | ||
506 | XICS_IRQ_SPURIOUS); | ||
507 | BUG_ON(xics_host == NULL); | ||
508 | irq_set_default_host(xics_host); | ||
509 | } | ||
510 | |||
511 | |||
512 | /* Inter-processor interrupt support */ | ||
513 | |||
514 | #ifdef CONFIG_SMP | ||
515 | /* | ||
516 | * XICS only has a single IPI, so encode the messages per CPU | ||
517 | */ | ||
518 | static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message); | ||
519 | |||
520 | static inline void smp_xics_do_message(int cpu, int msg) | ||
521 | { | ||
522 | unsigned long *tgt = &per_cpu(xics_ipi_message, cpu); | ||
523 | |||
524 | set_bit(msg, tgt); | ||
525 | mb(); | ||
526 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
527 | lpar_qirr_info(cpu, IPI_PRIORITY); | ||
528 | else | ||
529 | direct_qirr_info(cpu, IPI_PRIORITY); | ||
530 | } | ||
531 | |||
532 | void smp_xics_message_pass(int target, int msg) | ||
533 | { | ||
534 | unsigned int i; | ||
535 | |||
536 | if (target < NR_CPUS) { | ||
537 | smp_xics_do_message(target, msg); | ||
538 | } else { | ||
539 | for_each_online_cpu(i) { | ||
540 | if (target == MSG_ALL_BUT_SELF | ||
541 | && i == smp_processor_id()) | ||
542 | continue; | ||
543 | smp_xics_do_message(i, msg); | ||
544 | } | ||
545 | } | ||
546 | } | ||
547 | |||
548 | static irqreturn_t xics_ipi_dispatch(int cpu) | ||
549 | { | ||
550 | unsigned long *tgt = &per_cpu(xics_ipi_message, cpu); | ||
551 | |||
552 | mb(); /* order mmio clearing qirr */ | ||
553 | while (*tgt) { | ||
554 | if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, tgt)) { | ||
555 | smp_message_recv(PPC_MSG_CALL_FUNCTION); | ||
556 | } | ||
557 | if (test_and_clear_bit(PPC_MSG_RESCHEDULE, tgt)) { | ||
558 | smp_message_recv(PPC_MSG_RESCHEDULE); | ||
559 | } | ||
560 | if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, tgt)) { | ||
561 | smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE); | ||
562 | } | ||
563 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) | ||
564 | if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, tgt)) { | ||
565 | smp_message_recv(PPC_MSG_DEBUGGER_BREAK); | ||
566 | } | ||
567 | #endif | ||
568 | } | ||
569 | return IRQ_HANDLED; | ||
570 | } | ||
571 | |||
572 | static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id) | ||
573 | { | ||
574 | int cpu = smp_processor_id(); | ||
575 | |||
576 | direct_qirr_info(cpu, 0xff); | ||
577 | |||
578 | return xics_ipi_dispatch(cpu); | ||
579 | } | ||
580 | |||
581 | static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id) | ||
582 | { | ||
583 | int cpu = smp_processor_id(); | ||
584 | |||
585 | lpar_qirr_info(cpu, 0xff); | ||
586 | |||
587 | return xics_ipi_dispatch(cpu); | ||
588 | } | ||
589 | |||
590 | static void xics_request_ipi(void) | ||
591 | { | ||
592 | unsigned int ipi; | ||
593 | int rc; | ||
594 | |||
595 | ipi = irq_create_mapping(xics_host, XICS_IPI); | ||
596 | BUG_ON(ipi == NO_IRQ); | ||
597 | |||
598 | /* | ||
599 | * IPIs are marked IRQF_DISABLED as they must run with irqs | ||
600 | * disabled | ||
601 | */ | ||
602 | set_irq_handler(ipi, handle_percpu_irq); | ||
603 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
604 | rc = request_irq(ipi, xics_ipi_action_lpar, | ||
605 | IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL); | ||
606 | else | ||
607 | rc = request_irq(ipi, xics_ipi_action_direct, | ||
608 | IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL); | ||
609 | BUG_ON(rc); | ||
610 | } | ||
611 | |||
612 | int __init smp_xics_probe(void) | ||
613 | { | ||
614 | xics_request_ipi(); | ||
615 | |||
616 | return cpumask_weight(cpu_possible_mask); | ||
617 | } | ||
618 | |||
619 | #endif /* CONFIG_SMP */ | ||
620 | |||
621 | |||
622 | /* Initialization */ | ||
623 | |||
624 | static void xics_update_irq_servers(void) | ||
625 | { | ||
626 | int i, j; | ||
627 | struct device_node *np; | ||
628 | u32 ilen; | ||
629 | const u32 *ireg; | ||
630 | u32 hcpuid; | ||
631 | |||
632 | /* Find the server numbers for the boot cpu. */ | ||
633 | np = of_get_cpu_node(boot_cpuid, NULL); | ||
634 | BUG_ON(!np); | ||
635 | |||
636 | ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen); | ||
637 | if (!ireg) { | ||
638 | of_node_put(np); | ||
639 | return; | ||
640 | } | ||
641 | |||
642 | i = ilen / sizeof(int); | ||
643 | hcpuid = get_hard_smp_processor_id(boot_cpuid); | ||
644 | |||
645 | /* Global interrupt distribution server is specified in the last | ||
646 | * entry of "ibm,ppc-interrupt-gserver#s" property. Get the last | ||
647 | * entry fom this property for current boot cpu id and use it as | ||
648 | * default distribution server | ||
649 | */ | ||
650 | for (j = 0; j < i; j += 2) { | ||
651 | if (ireg[j] == hcpuid) { | ||
652 | default_server = hcpuid; | ||
653 | default_distrib_server = ireg[j+1]; | ||
654 | } | ||
655 | } | ||
656 | |||
657 | of_node_put(np); | ||
658 | } | ||
659 | |||
660 | static void __init xics_map_one_cpu(int hw_id, unsigned long addr, | ||
661 | unsigned long size) | ||
662 | { | ||
663 | int i; | ||
664 | |||
665 | /* This may look gross but it's good enough for now, we don't quite | ||
666 | * have a hard -> linux processor id matching. | ||
667 | */ | ||
668 | for_each_possible_cpu(i) { | ||
669 | if (!cpu_present(i)) | ||
670 | continue; | ||
671 | if (hw_id == get_hard_smp_processor_id(i)) { | ||
672 | xics_per_cpu[i] = ioremap(addr, size); | ||
673 | return; | ||
674 | } | ||
675 | } | ||
676 | } | ||
677 | |||
678 | static void __init xics_init_one_node(struct device_node *np, | ||
679 | unsigned int *indx) | ||
680 | { | ||
681 | unsigned int ilen; | ||
682 | const u32 *ireg; | ||
683 | |||
684 | /* This code does the theorically broken assumption that the interrupt | ||
685 | * server numbers are the same as the hard CPU numbers. | ||
686 | * This happens to be the case so far but we are playing with fire... | ||
687 | * should be fixed one of these days. -BenH. | ||
688 | */ | ||
689 | ireg = of_get_property(np, "ibm,interrupt-server-ranges", NULL); | ||
690 | |||
691 | /* Do that ever happen ? we'll know soon enough... but even good'old | ||
692 | * f80 does have that property .. | ||
693 | */ | ||
694 | WARN_ON(ireg == NULL); | ||
695 | if (ireg) { | ||
696 | /* | ||
697 | * set node starting index for this node | ||
698 | */ | ||
699 | *indx = *ireg; | ||
700 | } | ||
701 | ireg = of_get_property(np, "reg", &ilen); | ||
702 | if (!ireg) | ||
703 | panic("xics_init_IRQ: can't find interrupt reg property"); | ||
704 | |||
705 | while (ilen >= (4 * sizeof(u32))) { | ||
706 | unsigned long addr, size; | ||
707 | |||
708 | /* XXX Use proper OF parsing code here !!! */ | ||
709 | addr = (unsigned long)*ireg++ << 32; | ||
710 | ilen -= sizeof(u32); | ||
711 | addr |= *ireg++; | ||
712 | ilen -= sizeof(u32); | ||
713 | size = (unsigned long)*ireg++ << 32; | ||
714 | ilen -= sizeof(u32); | ||
715 | size |= *ireg++; | ||
716 | ilen -= sizeof(u32); | ||
717 | xics_map_one_cpu(*indx, addr, size); | ||
718 | (*indx)++; | ||
719 | } | ||
720 | } | ||
721 | |||
722 | void __init xics_init_IRQ(void) | ||
723 | { | ||
724 | struct device_node *np; | ||
725 | u32 indx = 0; | ||
726 | int found = 0; | ||
727 | const u32 *isize; | ||
728 | |||
729 | ppc64_boot_msg(0x20, "XICS Init"); | ||
730 | |||
731 | ibm_get_xive = rtas_token("ibm,get-xive"); | ||
732 | ibm_set_xive = rtas_token("ibm,set-xive"); | ||
733 | ibm_int_on = rtas_token("ibm,int-on"); | ||
734 | ibm_int_off = rtas_token("ibm,int-off"); | ||
735 | |||
736 | for_each_node_by_type(np, "PowerPC-External-Interrupt-Presentation") { | ||
737 | found = 1; | ||
738 | if (firmware_has_feature(FW_FEATURE_LPAR)) { | ||
739 | of_node_put(np); | ||
740 | break; | ||
741 | } | ||
742 | xics_init_one_node(np, &indx); | ||
743 | } | ||
744 | if (found == 0) | ||
745 | return; | ||
746 | |||
747 | /* get the bit size of server numbers */ | ||
748 | found = 0; | ||
749 | |||
750 | for_each_compatible_node(np, NULL, "ibm,ppc-xics") { | ||
751 | isize = of_get_property(np, "ibm,interrupt-server#-size", NULL); | ||
752 | |||
753 | if (!isize) | ||
754 | continue; | ||
755 | |||
756 | if (!found) { | ||
757 | interrupt_server_size = *isize; | ||
758 | found = 1; | ||
759 | } else if (*isize != interrupt_server_size) { | ||
760 | printk(KERN_WARNING "XICS: " | ||
761 | "mismatched ibm,interrupt-server#-size\n"); | ||
762 | interrupt_server_size = max(*isize, | ||
763 | interrupt_server_size); | ||
764 | } | ||
765 | } | ||
766 | |||
767 | xics_update_irq_servers(); | ||
768 | xics_init_host(); | ||
769 | |||
770 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
771 | ppc_md.get_irq = xics_get_irq_lpar; | ||
772 | else | ||
773 | ppc_md.get_irq = xics_get_irq_direct; | ||
774 | |||
775 | xics_setup_cpu(); | ||
776 | |||
777 | ppc64_boot_msg(0x21, "XICS Done"); | ||
778 | } | ||
779 | |||
780 | /* Cpu startup, shutdown, and hotplug */ | ||
781 | |||
782 | static void xics_set_cpu_priority(unsigned char cppr) | ||
783 | { | ||
784 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
785 | |||
786 | /* | ||
787 | * we only really want to set the priority when there's | ||
788 | * just one cppr value on the stack | ||
789 | */ | ||
790 | WARN_ON(os_cppr->index != 0); | ||
791 | |||
792 | os_cppr->stack[0] = cppr; | ||
793 | |||
794 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
795 | lpar_cppr_info(cppr); | ||
796 | else | ||
797 | direct_cppr_info(cppr); | ||
798 | iosync(); | ||
799 | } | ||
800 | |||
801 | /* Have the calling processor join or leave the specified global queue */ | ||
802 | static void xics_set_cpu_giq(unsigned int gserver, unsigned int join) | ||
803 | { | ||
804 | int index; | ||
805 | int status; | ||
806 | |||
807 | if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL)) | ||
808 | return; | ||
809 | |||
810 | index = (1UL << interrupt_server_size) - 1 - gserver; | ||
811 | |||
812 | status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join); | ||
813 | |||
814 | WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n", | ||
815 | GLOBAL_INTERRUPT_QUEUE, index, join, status); | ||
816 | } | ||
817 | |||
818 | void xics_setup_cpu(void) | ||
819 | { | ||
820 | xics_set_cpu_priority(LOWEST_PRIORITY); | ||
821 | |||
822 | xics_set_cpu_giq(default_distrib_server, 1); | ||
823 | } | ||
824 | |||
825 | void xics_teardown_cpu(void) | ||
826 | { | ||
827 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
828 | int cpu = smp_processor_id(); | ||
829 | |||
830 | /* | ||
831 | * we have to reset the cppr index to 0 because we're | ||
832 | * not going to return from the IPI | ||
833 | */ | ||
834 | os_cppr->index = 0; | ||
835 | xics_set_cpu_priority(0); | ||
836 | |||
837 | /* Clear any pending IPI request */ | ||
838 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
839 | lpar_qirr_info(cpu, 0xff); | ||
840 | else | ||
841 | direct_qirr_info(cpu, 0xff); | ||
842 | } | ||
843 | |||
844 | void xics_kexec_teardown_cpu(int secondary) | ||
845 | { | ||
846 | xics_teardown_cpu(); | ||
847 | |||
848 | /* | ||
849 | * we take the ipi irq but and never return so we | ||
850 | * need to EOI the IPI, but want to leave our priority 0 | ||
851 | * | ||
852 | * should we check all the other interrupts too? | ||
853 | * should we be flagging idle loop instead? | ||
854 | * or creating some task to be scheduled? | ||
855 | */ | ||
856 | |||
857 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
858 | lpar_xirr_info_set((0x00 << 24) | XICS_IPI); | ||
859 | else | ||
860 | direct_xirr_info_set((0x00 << 24) | XICS_IPI); | ||
861 | |||
862 | /* | ||
863 | * Some machines need to have at least one cpu in the GIQ, | ||
864 | * so leave the master cpu in the group. | ||
865 | */ | ||
866 | if (secondary) | ||
867 | xics_set_cpu_giq(default_distrib_server, 0); | ||
868 | } | ||
869 | |||
870 | #ifdef CONFIG_HOTPLUG_CPU | ||
871 | |||
872 | /* Interrupts are disabled. */ | ||
873 | void xics_migrate_irqs_away(void) | ||
874 | { | ||
875 | int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id(); | ||
876 | unsigned int irq, virq; | ||
877 | |||
878 | /* If we used to be the default server, move to the new "boot_cpuid" */ | ||
879 | if (hw_cpu == default_server) | ||
880 | xics_update_irq_servers(); | ||
881 | |||
882 | /* Reject any interrupt that was queued to us... */ | ||
883 | xics_set_cpu_priority(0); | ||
884 | |||
885 | /* Remove ourselves from the global interrupt queue */ | ||
886 | xics_set_cpu_giq(default_distrib_server, 0); | ||
887 | |||
888 | /* Allow IPIs again... */ | ||
889 | xics_set_cpu_priority(DEFAULT_PRIORITY); | ||
890 | |||
891 | for_each_irq(virq) { | ||
892 | struct irq_desc *desc; | ||
893 | int xics_status[2]; | ||
894 | int status; | ||
895 | unsigned long flags; | ||
896 | |||
897 | /* We cant set affinity on ISA interrupts */ | ||
898 | if (virq < NUM_ISA_INTERRUPTS) | ||
899 | continue; | ||
900 | if (irq_map[virq].host != xics_host) | ||
901 | continue; | ||
902 | irq = (unsigned int)irq_map[virq].hwirq; | ||
903 | /* We need to get IPIs still. */ | ||
904 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) | ||
905 | continue; | ||
906 | desc = irq_to_desc(virq); | ||
907 | |||
908 | /* We only need to migrate enabled IRQS */ | ||
909 | if (desc == NULL || desc->chip == NULL | ||
910 | || desc->action == NULL | ||
911 | || desc->chip->set_affinity == NULL) | ||
912 | continue; | ||
913 | |||
914 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
915 | |||
916 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); | ||
917 | if (status) { | ||
918 | printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", | ||
919 | __func__, irq, status); | ||
920 | goto unlock; | ||
921 | } | ||
922 | |||
923 | /* | ||
924 | * We only support delivery to all cpus or to one cpu. | ||
925 | * The irq has to be migrated only in the single cpu | ||
926 | * case. | ||
927 | */ | ||
928 | if (xics_status[0] != hw_cpu) | ||
929 | goto unlock; | ||
930 | |||
931 | /* This is expected during cpu offline. */ | ||
932 | if (cpu_online(cpu)) | ||
933 | printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n", | ||
934 | virq, cpu); | ||
935 | |||
936 | /* Reset affinity to all cpus */ | ||
937 | cpumask_setall(irq_to_desc(virq)->affinity); | ||
938 | desc->chip->set_affinity(virq, cpu_all_mask); | ||
939 | unlock: | ||
940 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
941 | } | ||
942 | } | ||
943 | #endif | ||
diff --git a/arch/powerpc/platforms/pseries/xics.h b/arch/powerpc/platforms/pseries/xics.h deleted file mode 100644 index d1d5a83039ae..000000000000 --- a/arch/powerpc/platforms/pseries/xics.h +++ /dev/null | |||
@@ -1,23 +0,0 @@ | |||
1 | /* | ||
2 | * arch/powerpc/platforms/pseries/xics.h | ||
3 | * | ||
4 | * Copyright 2000 IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _POWERPC_KERNEL_XICS_H | ||
13 | #define _POWERPC_KERNEL_XICS_H | ||
14 | |||
15 | extern void xics_init_IRQ(void); | ||
16 | extern void xics_setup_cpu(void); | ||
17 | extern void xics_teardown_cpu(void); | ||
18 | extern void xics_kexec_teardown_cpu(int secondary); | ||
19 | extern void xics_migrate_irqs_away(void); | ||
20 | extern int smp_xics_probe(void); | ||
21 | extern void smp_xics_message_pass(int target, int msg); | ||
22 | |||
23 | #endif /* _POWERPC_KERNEL_XICS_H */ | ||
diff --git a/arch/powerpc/platforms/wsp/Kconfig b/arch/powerpc/platforms/wsp/Kconfig new file mode 100644 index 000000000000..c3c48eb62cc1 --- /dev/null +++ b/arch/powerpc/platforms/wsp/Kconfig | |||
@@ -0,0 +1,28 @@ | |||
1 | config PPC_WSP | ||
2 | bool | ||
3 | default n | ||
4 | |||
5 | menu "WSP platform selection" | ||
6 | depends on PPC_BOOK3E_64 | ||
7 | |||
8 | config PPC_PSR2 | ||
9 | bool "PSR-2 platform" | ||
10 | select PPC_A2 | ||
11 | select GENERIC_TBSYNC | ||
12 | select PPC_SCOM | ||
13 | select EPAPR_BOOT | ||
14 | select PPC_WSP | ||
15 | select PPC_XICS | ||
16 | select PPC_ICP_NATIVE | ||
17 | default y | ||
18 | |||
19 | endmenu | ||
20 | |||
21 | config PPC_A2_DD2 | ||
22 | bool "Support for DD2 based A2/WSP systems" | ||
23 | depends on PPC_A2 | ||
24 | |||
25 | config WORKAROUND_ERRATUM_463 | ||
26 | depends on PPC_A2_DD2 | ||
27 | bool "Workaround erratum 463" | ||
28 | default y | ||
diff --git a/arch/powerpc/platforms/wsp/Makefile b/arch/powerpc/platforms/wsp/Makefile new file mode 100644 index 000000000000..095be73d6cd4 --- /dev/null +++ b/arch/powerpc/platforms/wsp/Makefile | |||
@@ -0,0 +1,6 @@ | |||
1 | ccflags-y += -mno-minimal-toc | ||
2 | |||
3 | obj-y += setup.o ics.o | ||
4 | obj-$(CONFIG_PPC_PSR2) += psr2.o opb_pic.o | ||
5 | obj-$(CONFIG_PPC_WSP) += scom_wsp.o | ||
6 | obj-$(CONFIG_SMP) += smp.o scom_smp.o | ||
diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c new file mode 100644 index 000000000000..e53bd9e7b125 --- /dev/null +++ b/arch/powerpc/platforms/wsp/ics.c | |||
@@ -0,0 +1,712 @@ | |||
1 | /* | ||
2 | * Copyright 2008-2011 IBM Corporation. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <linux/cpu.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/irq.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/msi.h> | ||
16 | #include <linux/of.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/smp.h> | ||
19 | #include <linux/spinlock.h> | ||
20 | #include <linux/types.h> | ||
21 | |||
22 | #include <asm/io.h> | ||
23 | #include <asm/irq.h> | ||
24 | #include <asm/xics.h> | ||
25 | |||
26 | #include "wsp.h" | ||
27 | #include "ics.h" | ||
28 | |||
29 | |||
30 | /* WSP ICS */ | ||
31 | |||
32 | struct wsp_ics { | ||
33 | struct ics ics; | ||
34 | struct device_node *dn; | ||
35 | void __iomem *regs; | ||
36 | spinlock_t lock; | ||
37 | unsigned long *bitmap; | ||
38 | u32 chip_id; | ||
39 | u32 lsi_base; | ||
40 | u32 lsi_count; | ||
41 | u64 hwirq_start; | ||
42 | u64 count; | ||
43 | #ifdef CONFIG_SMP | ||
44 | int *hwirq_cpu_map; | ||
45 | #endif | ||
46 | }; | ||
47 | |||
48 | #define to_wsp_ics(ics) container_of(ics, struct wsp_ics, ics) | ||
49 | |||
50 | #define INT_SRC_LAYER_BUID_REG(base) ((base) + 0x00) | ||
51 | #define IODA_TBL_ADDR_REG(base) ((base) + 0x18) | ||
52 | #define IODA_TBL_DATA_REG(base) ((base) + 0x20) | ||
53 | #define XIVE_UPDATE_REG(base) ((base) + 0x28) | ||
54 | #define ICS_INT_CAPS_REG(base) ((base) + 0x30) | ||
55 | |||
56 | #define TBL_AUTO_INCREMENT ((1UL << 63) | (1UL << 15)) | ||
57 | #define TBL_SELECT_XIST (1UL << 48) | ||
58 | #define TBL_SELECT_XIVT (1UL << 49) | ||
59 | |||
60 | #define IODA_IRQ(irq) ((irq) & (0x7FFULL)) /* HRM 5.1.3.4 */ | ||
61 | |||
62 | #define XIST_REQUIRED 0x8 | ||
63 | #define XIST_REJECTED 0x4 | ||
64 | #define XIST_PRESENTED 0x2 | ||
65 | #define XIST_PENDING 0x1 | ||
66 | |||
67 | #define XIVE_SERVER_SHIFT 42 | ||
68 | #define XIVE_SERVER_MASK 0xFFFFULL | ||
69 | #define XIVE_PRIORITY_MASK 0xFFULL | ||
70 | #define XIVE_PRIORITY_SHIFT 32 | ||
71 | #define XIVE_WRITE_ENABLE (1ULL << 63) | ||
72 | |||
73 | /* | ||
74 | * The docs refer to a 6 bit field called ChipID, which consists of a | ||
75 | * 3 bit NodeID and a 3 bit ChipID. On WSP the ChipID is always zero | ||
76 | * so we ignore it, and every where we use "chip id" in this code we | ||
77 | * mean the NodeID. | ||
78 | */ | ||
79 | #define WSP_ICS_CHIP_SHIFT 17 | ||
80 | |||
81 | |||
82 | static struct wsp_ics *ics_list; | ||
83 | static int num_ics; | ||
84 | |||
85 | /* ICS Source controller accessors */ | ||
86 | |||
87 | static u64 wsp_ics_get_xive(struct wsp_ics *ics, unsigned int irq) | ||
88 | { | ||
89 | unsigned long flags; | ||
90 | u64 xive; | ||
91 | |||
92 | spin_lock_irqsave(&ics->lock, flags); | ||
93 | out_be64(IODA_TBL_ADDR_REG(ics->regs), TBL_SELECT_XIVT | IODA_IRQ(irq)); | ||
94 | xive = in_be64(IODA_TBL_DATA_REG(ics->regs)); | ||
95 | spin_unlock_irqrestore(&ics->lock, flags); | ||
96 | |||
97 | return xive; | ||
98 | } | ||
99 | |||
100 | static void wsp_ics_set_xive(struct wsp_ics *ics, unsigned int irq, u64 xive) | ||
101 | { | ||
102 | xive &= ~XIVE_ADDR_MASK; | ||
103 | xive |= (irq & XIVE_ADDR_MASK); | ||
104 | xive |= XIVE_WRITE_ENABLE; | ||
105 | |||
106 | out_be64(XIVE_UPDATE_REG(ics->regs), xive); | ||
107 | } | ||
108 | |||
109 | static u64 xive_set_server(u64 xive, unsigned int server) | ||
110 | { | ||
111 | u64 mask = ~(XIVE_SERVER_MASK << XIVE_SERVER_SHIFT); | ||
112 | |||
113 | xive &= mask; | ||
114 | xive |= (server & XIVE_SERVER_MASK) << XIVE_SERVER_SHIFT; | ||
115 | |||
116 | return xive; | ||
117 | } | ||
118 | |||
119 | static u64 xive_set_priority(u64 xive, unsigned int priority) | ||
120 | { | ||
121 | u64 mask = ~(XIVE_PRIORITY_MASK << XIVE_PRIORITY_SHIFT); | ||
122 | |||
123 | xive &= mask; | ||
124 | xive |= (priority & XIVE_PRIORITY_MASK) << XIVE_PRIORITY_SHIFT; | ||
125 | |||
126 | return xive; | ||
127 | } | ||
128 | |||
129 | |||
130 | #ifdef CONFIG_SMP | ||
131 | /* Find logical CPUs within mask on a given chip and store result in ret */ | ||
132 | void cpus_on_chip(int chip_id, cpumask_t *mask, cpumask_t *ret) | ||
133 | { | ||
134 | int cpu, chip; | ||
135 | struct device_node *cpu_dn, *dn; | ||
136 | const u32 *prop; | ||
137 | |||
138 | cpumask_clear(ret); | ||
139 | for_each_cpu(cpu, mask) { | ||
140 | cpu_dn = of_get_cpu_node(cpu, NULL); | ||
141 | if (!cpu_dn) | ||
142 | continue; | ||
143 | |||
144 | prop = of_get_property(cpu_dn, "at-node", NULL); | ||
145 | if (!prop) { | ||
146 | of_node_put(cpu_dn); | ||
147 | continue; | ||
148 | } | ||
149 | |||
150 | dn = of_find_node_by_phandle(*prop); | ||
151 | of_node_put(cpu_dn); | ||
152 | |||
153 | chip = wsp_get_chip_id(dn); | ||
154 | if (chip == chip_id) | ||
155 | cpumask_set_cpu(cpu, ret); | ||
156 | |||
157 | of_node_put(dn); | ||
158 | } | ||
159 | } | ||
160 | |||
161 | /* Store a suitable CPU to handle a hwirq in the ics->hwirq_cpu_map cache */ | ||
162 | static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq, | ||
163 | const cpumask_t *affinity) | ||
164 | { | ||
165 | cpumask_var_t avail, newmask; | ||
166 | int ret = -ENOMEM, cpu, cpu_rover = 0, target; | ||
167 | int index = hwirq - ics->hwirq_start; | ||
168 | unsigned int nodeid; | ||
169 | |||
170 | BUG_ON(index < 0 || index >= ics->count); | ||
171 | |||
172 | if (!ics->hwirq_cpu_map) | ||
173 | return -ENOMEM; | ||
174 | |||
175 | if (!distribute_irqs) { | ||
176 | ics->hwirq_cpu_map[hwirq - ics->hwirq_start] = xics_default_server; | ||
177 | return 0; | ||
178 | } | ||
179 | |||
180 | /* Allocate needed CPU masks */ | ||
181 | if (!alloc_cpumask_var(&avail, GFP_KERNEL)) | ||
182 | goto ret; | ||
183 | if (!alloc_cpumask_var(&newmask, GFP_KERNEL)) | ||
184 | goto freeavail; | ||
185 | |||
186 | /* Find PBus attached to the source of this IRQ */ | ||
187 | nodeid = (hwirq >> WSP_ICS_CHIP_SHIFT) & 0x3; /* 12:14 */ | ||
188 | |||
189 | /* Find CPUs that could handle this IRQ */ | ||
190 | if (affinity) | ||
191 | cpumask_and(avail, cpu_online_mask, affinity); | ||
192 | else | ||
193 | cpumask_copy(avail, cpu_online_mask); | ||
194 | |||
195 | /* Narrow selection down to logical CPUs on the same chip */ | ||
196 | cpus_on_chip(nodeid, avail, newmask); | ||
197 | |||
198 | /* Ensure we haven't narrowed it down to 0 */ | ||
199 | if (unlikely(cpumask_empty(newmask))) { | ||
200 | if (unlikely(cpumask_empty(avail))) { | ||
201 | ret = -1; | ||
202 | goto out; | ||
203 | } | ||
204 | cpumask_copy(newmask, avail); | ||
205 | } | ||
206 | |||
207 | /* Choose a CPU out of those we narrowed it down to in round robin */ | ||
208 | target = hwirq % cpumask_weight(newmask); | ||
209 | for_each_cpu(cpu, newmask) { | ||
210 | if (cpu_rover++ >= target) { | ||
211 | ics->hwirq_cpu_map[index] = get_hard_smp_processor_id(cpu); | ||
212 | ret = 0; | ||
213 | goto out; | ||
214 | } | ||
215 | } | ||
216 | |||
217 | /* Shouldn't happen */ | ||
218 | WARN_ON(1); | ||
219 | |||
220 | out: | ||
221 | free_cpumask_var(newmask); | ||
222 | freeavail: | ||
223 | free_cpumask_var(avail); | ||
224 | ret: | ||
225 | if (ret < 0) { | ||
226 | ics->hwirq_cpu_map[index] = cpumask_first(cpu_online_mask); | ||
227 | pr_warning("Error, falling hwirq 0x%x routing back to CPU %i\n", | ||
228 | hwirq, ics->hwirq_cpu_map[index]); | ||
229 | } | ||
230 | return ret; | ||
231 | } | ||
232 | |||
233 | static void alloc_irq_map(struct wsp_ics *ics) | ||
234 | { | ||
235 | int i; | ||
236 | |||
237 | ics->hwirq_cpu_map = kmalloc(sizeof(int) * ics->count, GFP_KERNEL); | ||
238 | if (!ics->hwirq_cpu_map) { | ||
239 | pr_warning("Allocate hwirq_cpu_map failed, " | ||
240 | "IRQ balancing disabled\n"); | ||
241 | return; | ||
242 | } | ||
243 | |||
244 | for (i=0; i < ics->count; i++) | ||
245 | ics->hwirq_cpu_map[i] = xics_default_server; | ||
246 | } | ||
247 | |||
248 | static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq) | ||
249 | { | ||
250 | int index = hwirq - ics->hwirq_start; | ||
251 | |||
252 | BUG_ON(index < 0 || index >= ics->count); | ||
253 | |||
254 | if (!ics->hwirq_cpu_map) | ||
255 | return xics_default_server; | ||
256 | |||
257 | return ics->hwirq_cpu_map[index]; | ||
258 | } | ||
259 | #else /* !CONFIG_SMP */ | ||
260 | static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq, | ||
261 | const cpumask_t *affinity) | ||
262 | { | ||
263 | return 0; | ||
264 | } | ||
265 | |||
266 | static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq) | ||
267 | { | ||
268 | return xics_default_server; | ||
269 | } | ||
270 | |||
271 | static void alloc_irq_map(struct wsp_ics *ics) { } | ||
272 | #endif | ||
273 | |||
274 | static void wsp_chip_unmask_irq(struct irq_data *d) | ||
275 | { | ||
276 | unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); | ||
277 | struct wsp_ics *ics; | ||
278 | int server; | ||
279 | u64 xive; | ||
280 | |||
281 | if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) | ||
282 | return; | ||
283 | |||
284 | ics = d->chip_data; | ||
285 | if (WARN_ON(!ics)) | ||
286 | return; | ||
287 | |||
288 | server = get_irq_server(ics, hw_irq); | ||
289 | |||
290 | xive = wsp_ics_get_xive(ics, hw_irq); | ||
291 | xive = xive_set_server(xive, server); | ||
292 | xive = xive_set_priority(xive, DEFAULT_PRIORITY); | ||
293 | wsp_ics_set_xive(ics, hw_irq, xive); | ||
294 | } | ||
295 | |||
296 | static unsigned int wsp_chip_startup(struct irq_data *d) | ||
297 | { | ||
298 | /* unmask it */ | ||
299 | wsp_chip_unmask_irq(d); | ||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | static void wsp_mask_real_irq(unsigned int hw_irq, struct wsp_ics *ics) | ||
304 | { | ||
305 | u64 xive; | ||
306 | |||
307 | if (hw_irq == XICS_IPI) | ||
308 | return; | ||
309 | |||
310 | if (WARN_ON(!ics)) | ||
311 | return; | ||
312 | xive = wsp_ics_get_xive(ics, hw_irq); | ||
313 | xive = xive_set_server(xive, xics_default_server); | ||
314 | xive = xive_set_priority(xive, LOWEST_PRIORITY); | ||
315 | wsp_ics_set_xive(ics, hw_irq, xive); | ||
316 | } | ||
317 | |||
318 | static void wsp_chip_mask_irq(struct irq_data *d) | ||
319 | { | ||
320 | unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); | ||
321 | struct wsp_ics *ics = d->chip_data; | ||
322 | |||
323 | if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) | ||
324 | return; | ||
325 | |||
326 | wsp_mask_real_irq(hw_irq, ics); | ||
327 | } | ||
328 | |||
329 | static int wsp_chip_set_affinity(struct irq_data *d, | ||
330 | const struct cpumask *cpumask, bool force) | ||
331 | { | ||
332 | unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); | ||
333 | struct wsp_ics *ics; | ||
334 | int ret; | ||
335 | u64 xive; | ||
336 | |||
337 | if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) | ||
338 | return -1; | ||
339 | |||
340 | ics = d->chip_data; | ||
341 | if (WARN_ON(!ics)) | ||
342 | return -1; | ||
343 | xive = wsp_ics_get_xive(ics, hw_irq); | ||
344 | |||
345 | /* | ||
346 | * For the moment only implement delivery to all cpus or one cpu. | ||
347 | * Get current irq_server for the given irq | ||
348 | */ | ||
349 | ret = cache_hwirq_map(ics, d->irq, cpumask); | ||
350 | if (ret == -1) { | ||
351 | char cpulist[128]; | ||
352 | cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); | ||
353 | pr_warning("%s: No online cpus in the mask %s for irq %d\n", | ||
354 | __func__, cpulist, d->irq); | ||
355 | return -1; | ||
356 | } else if (ret == -ENOMEM) { | ||
357 | pr_warning("%s: Out of memory\n", __func__); | ||
358 | return -1; | ||
359 | } | ||
360 | |||
361 | xive = xive_set_server(xive, get_irq_server(ics, hw_irq)); | ||
362 | wsp_ics_set_xive(ics, hw_irq, xive); | ||
363 | |||
364 | return 0; | ||
365 | } | ||
366 | |||
367 | static struct irq_chip wsp_irq_chip = { | ||
368 | .name = "WSP ICS", | ||
369 | .irq_startup = wsp_chip_startup, | ||
370 | .irq_mask = wsp_chip_mask_irq, | ||
371 | .irq_unmask = wsp_chip_unmask_irq, | ||
372 | .irq_set_affinity = wsp_chip_set_affinity | ||
373 | }; | ||
374 | |||
375 | static int wsp_ics_host_match(struct ics *ics, struct device_node *dn) | ||
376 | { | ||
377 | /* All ICSs in the system implement a global irq number space, | ||
378 | * so match against them all. */ | ||
379 | return of_device_is_compatible(dn, "ibm,ppc-xics"); | ||
380 | } | ||
381 | |||
382 | static int wsp_ics_match_hwirq(struct wsp_ics *wsp_ics, unsigned int hwirq) | ||
383 | { | ||
384 | if (hwirq >= wsp_ics->hwirq_start && | ||
385 | hwirq < wsp_ics->hwirq_start + wsp_ics->count) | ||
386 | return 1; | ||
387 | |||
388 | return 0; | ||
389 | } | ||
390 | |||
391 | static int wsp_ics_map(struct ics *ics, unsigned int virq) | ||
392 | { | ||
393 | struct wsp_ics *wsp_ics = to_wsp_ics(ics); | ||
394 | unsigned int hw_irq = virq_to_hw(virq); | ||
395 | unsigned long flags; | ||
396 | |||
397 | if (!wsp_ics_match_hwirq(wsp_ics, hw_irq)) | ||
398 | return -ENOENT; | ||
399 | |||
400 | irq_set_chip_and_handler(virq, &wsp_irq_chip, handle_fasteoi_irq); | ||
401 | |||
402 | irq_set_chip_data(virq, wsp_ics); | ||
403 | |||
404 | spin_lock_irqsave(&wsp_ics->lock, flags); | ||
405 | bitmap_allocate_region(wsp_ics->bitmap, hw_irq - wsp_ics->hwirq_start, 0); | ||
406 | spin_unlock_irqrestore(&wsp_ics->lock, flags); | ||
407 | |||
408 | return 0; | ||
409 | } | ||
410 | |||
411 | static void wsp_ics_mask_unknown(struct ics *ics, unsigned long hw_irq) | ||
412 | { | ||
413 | struct wsp_ics *wsp_ics = to_wsp_ics(ics); | ||
414 | |||
415 | if (!wsp_ics_match_hwirq(wsp_ics, hw_irq)) | ||
416 | return; | ||
417 | |||
418 | pr_err("%s: IRQ %lu (real) is invalid, disabling it.\n", __func__, hw_irq); | ||
419 | wsp_mask_real_irq(hw_irq, wsp_ics); | ||
420 | } | ||
421 | |||
422 | static long wsp_ics_get_server(struct ics *ics, unsigned long hw_irq) | ||
423 | { | ||
424 | struct wsp_ics *wsp_ics = to_wsp_ics(ics); | ||
425 | |||
426 | if (!wsp_ics_match_hwirq(wsp_ics, hw_irq)) | ||
427 | return -ENOENT; | ||
428 | |||
429 | return get_irq_server(wsp_ics, hw_irq); | ||
430 | } | ||
431 | |||
432 | /* HW Number allocation API */ | ||
433 | |||
434 | static struct wsp_ics *wsp_ics_find_dn_ics(struct device_node *dn) | ||
435 | { | ||
436 | struct device_node *iparent; | ||
437 | int i; | ||
438 | |||
439 | iparent = of_irq_find_parent(dn); | ||
440 | if (!iparent) { | ||
441 | pr_err("wsp_ics: Failed to find interrupt parent!\n"); | ||
442 | return NULL; | ||
443 | } | ||
444 | |||
445 | for(i = 0; i < num_ics; i++) { | ||
446 | if(ics_list[i].dn == iparent) | ||
447 | break; | ||
448 | } | ||
449 | |||
450 | if (i >= num_ics) { | ||
451 | pr_err("wsp_ics: Unable to find parent bitmap!\n"); | ||
452 | return NULL; | ||
453 | } | ||
454 | |||
455 | return &ics_list[i]; | ||
456 | } | ||
457 | |||
458 | int wsp_ics_alloc_irq(struct device_node *dn, int num) | ||
459 | { | ||
460 | struct wsp_ics *ics; | ||
461 | int order, offset; | ||
462 | |||
463 | ics = wsp_ics_find_dn_ics(dn); | ||
464 | if (!ics) | ||
465 | return -ENODEV; | ||
466 | |||
467 | /* Fast, but overly strict if num isn't a power of two */ | ||
468 | order = get_count_order(num); | ||
469 | |||
470 | spin_lock_irq(&ics->lock); | ||
471 | offset = bitmap_find_free_region(ics->bitmap, ics->count, order); | ||
472 | spin_unlock_irq(&ics->lock); | ||
473 | |||
474 | if (offset < 0) | ||
475 | return offset; | ||
476 | |||
477 | return offset + ics->hwirq_start; | ||
478 | } | ||
479 | |||
480 | void wsp_ics_free_irq(struct device_node *dn, unsigned int irq) | ||
481 | { | ||
482 | struct wsp_ics *ics; | ||
483 | |||
484 | ics = wsp_ics_find_dn_ics(dn); | ||
485 | if (WARN_ON(!ics)) | ||
486 | return; | ||
487 | |||
488 | spin_lock_irq(&ics->lock); | ||
489 | bitmap_release_region(ics->bitmap, irq, 0); | ||
490 | spin_unlock_irq(&ics->lock); | ||
491 | } | ||
492 | |||
493 | /* Initialisation */ | ||
494 | |||
495 | static int __init wsp_ics_bitmap_setup(struct wsp_ics *ics, | ||
496 | struct device_node *dn) | ||
497 | { | ||
498 | int len, i, j, size; | ||
499 | u32 start, count; | ||
500 | const u32 *p; | ||
501 | |||
502 | size = BITS_TO_LONGS(ics->count) * sizeof(long); | ||
503 | ics->bitmap = kzalloc(size, GFP_KERNEL); | ||
504 | if (!ics->bitmap) { | ||
505 | pr_err("wsp_ics: ENOMEM allocating IRQ bitmap!\n"); | ||
506 | return -ENOMEM; | ||
507 | } | ||
508 | |||
509 | spin_lock_init(&ics->lock); | ||
510 | |||
511 | p = of_get_property(dn, "available-ranges", &len); | ||
512 | if (!p || !len) { | ||
513 | /* FIXME this should be a WARN() once mambo is updated */ | ||
514 | pr_err("wsp_ics: No available-ranges defined for %s\n", | ||
515 | dn->full_name); | ||
516 | return 0; | ||
517 | } | ||
518 | |||
519 | if (len % (2 * sizeof(u32)) != 0) { | ||
520 | /* FIXME this should be a WARN() once mambo is updated */ | ||
521 | pr_err("wsp_ics: Invalid available-ranges for %s\n", | ||
522 | dn->full_name); | ||
523 | return 0; | ||
524 | } | ||
525 | |||
526 | bitmap_fill(ics->bitmap, ics->count); | ||
527 | |||
528 | for (i = 0; i < len / sizeof(u32); i += 2) { | ||
529 | start = of_read_number(p + i, 1); | ||
530 | count = of_read_number(p + i + 1, 1); | ||
531 | |||
532 | pr_devel("%s: start: %d count: %d\n", __func__, start, count); | ||
533 | |||
534 | if ((start + count) > (ics->hwirq_start + ics->count) || | ||
535 | start < ics->hwirq_start) { | ||
536 | pr_err("wsp_ics: Invalid range! -> %d to %d\n", | ||
537 | start, start + count); | ||
538 | break; | ||
539 | } | ||
540 | |||
541 | for (j = 0; j < count; j++) | ||
542 | bitmap_release_region(ics->bitmap, | ||
543 | (start + j) - ics->hwirq_start, 0); | ||
544 | } | ||
545 | |||
546 | /* Ensure LSIs are not available for allocation */ | ||
547 | bitmap_allocate_region(ics->bitmap, ics->lsi_base, | ||
548 | get_count_order(ics->lsi_count)); | ||
549 | |||
550 | return 0; | ||
551 | } | ||
552 | |||
553 | static int __init wsp_ics_setup(struct wsp_ics *ics, struct device_node *dn) | ||
554 | { | ||
555 | u32 lsi_buid, msi_buid, msi_base, msi_count; | ||
556 | void __iomem *regs; | ||
557 | const u32 *p; | ||
558 | int rc, len, i; | ||
559 | u64 caps, buid; | ||
560 | |||
561 | p = of_get_property(dn, "interrupt-ranges", &len); | ||
562 | if (!p || len < (2 * sizeof(u32))) { | ||
563 | pr_err("wsp_ics: No/bad interrupt-ranges found on %s\n", | ||
564 | dn->full_name); | ||
565 | return -ENOENT; | ||
566 | } | ||
567 | |||
568 | if (len > (2 * sizeof(u32))) { | ||
569 | pr_err("wsp_ics: Multiple ics ranges not supported.\n"); | ||
570 | return -EINVAL; | ||
571 | } | ||
572 | |||
573 | regs = of_iomap(dn, 0); | ||
574 | if (!regs) { | ||
575 | pr_err("wsp_ics: of_iomap(%s) failed\n", dn->full_name); | ||
576 | return -ENXIO; | ||
577 | } | ||
578 | |||
579 | ics->hwirq_start = of_read_number(p, 1); | ||
580 | ics->count = of_read_number(p + 1, 1); | ||
581 | ics->regs = regs; | ||
582 | |||
583 | ics->chip_id = wsp_get_chip_id(dn); | ||
584 | if (WARN_ON(ics->chip_id < 0)) | ||
585 | ics->chip_id = 0; | ||
586 | |||
587 | /* Get some informations about the critter */ | ||
588 | caps = in_be64(ICS_INT_CAPS_REG(ics->regs)); | ||
589 | buid = in_be64(INT_SRC_LAYER_BUID_REG(ics->regs)); | ||
590 | ics->lsi_count = caps >> 56; | ||
591 | msi_count = (caps >> 44) & 0x7ff; | ||
592 | |||
593 | /* Note: LSI BUID is 9 bits, but really only 3 are BUID and the | ||
594 | * rest is mixed in the interrupt number. We store the whole | ||
595 | * thing though | ||
596 | */ | ||
597 | lsi_buid = (buid >> 48) & 0x1ff; | ||
598 | ics->lsi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | lsi_buid << 5; | ||
599 | msi_buid = (buid >> 37) & 0x7; | ||
600 | msi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | msi_buid << 11; | ||
601 | |||
602 | pr_info("wsp_ics: Found %s\n", dn->full_name); | ||
603 | pr_info("wsp_ics: irq range : 0x%06llx..0x%06llx\n", | ||
604 | ics->hwirq_start, ics->hwirq_start + ics->count - 1); | ||
605 | pr_info("wsp_ics: %4d LSIs : 0x%06x..0x%06x\n", | ||
606 | ics->lsi_count, ics->lsi_base, | ||
607 | ics->lsi_base + ics->lsi_count - 1); | ||
608 | pr_info("wsp_ics: %4d MSIs : 0x%06x..0x%06x\n", | ||
609 | msi_count, msi_base, | ||
610 | msi_base + msi_count - 1); | ||
611 | |||
612 | /* Let's check the HW config is sane */ | ||
613 | if (ics->lsi_base < ics->hwirq_start || | ||
614 | (ics->lsi_base + ics->lsi_count) > (ics->hwirq_start + ics->count)) | ||
615 | pr_warning("wsp_ics: WARNING ! LSIs out of interrupt-ranges !\n"); | ||
616 | if (msi_base < ics->hwirq_start || | ||
617 | (msi_base + msi_count) > (ics->hwirq_start + ics->count)) | ||
618 | pr_warning("wsp_ics: WARNING ! MSIs out of interrupt-ranges !\n"); | ||
619 | |||
620 | /* We don't check for overlap between LSI and MSI, which will happen | ||
621 | * if we use the same BUID, I'm not sure yet how legit that is. | ||
622 | */ | ||
623 | |||
624 | rc = wsp_ics_bitmap_setup(ics, dn); | ||
625 | if (rc) { | ||
626 | iounmap(regs); | ||
627 | return rc; | ||
628 | } | ||
629 | |||
630 | ics->dn = of_node_get(dn); | ||
631 | alloc_irq_map(ics); | ||
632 | |||
633 | for(i = 0; i < ics->count; i++) | ||
634 | wsp_mask_real_irq(ics->hwirq_start + i, ics); | ||
635 | |||
636 | ics->ics.map = wsp_ics_map; | ||
637 | ics->ics.mask_unknown = wsp_ics_mask_unknown; | ||
638 | ics->ics.get_server = wsp_ics_get_server; | ||
639 | ics->ics.host_match = wsp_ics_host_match; | ||
640 | |||
641 | xics_register_ics(&ics->ics); | ||
642 | |||
643 | return 0; | ||
644 | } | ||
645 | |||
646 | static void __init wsp_ics_set_default_server(void) | ||
647 | { | ||
648 | struct device_node *np; | ||
649 | u32 hwid; | ||
650 | |||
651 | /* Find the server number for the boot cpu. */ | ||
652 | np = of_get_cpu_node(boot_cpuid, NULL); | ||
653 | BUG_ON(!np); | ||
654 | |||
655 | hwid = get_hard_smp_processor_id(boot_cpuid); | ||
656 | |||
657 | pr_info("wsp_ics: default server is %#x, CPU %s\n", hwid, np->full_name); | ||
658 | xics_default_server = hwid; | ||
659 | |||
660 | of_node_put(np); | ||
661 | } | ||
662 | |||
663 | static int __init wsp_ics_init(void) | ||
664 | { | ||
665 | struct device_node *dn; | ||
666 | struct wsp_ics *ics; | ||
667 | int rc, found; | ||
668 | |||
669 | wsp_ics_set_default_server(); | ||
670 | |||
671 | found = 0; | ||
672 | for_each_compatible_node(dn, NULL, "ibm,ppc-xics") | ||
673 | found++; | ||
674 | |||
675 | if (found == 0) { | ||
676 | pr_err("wsp_ics: No ICS's found!\n"); | ||
677 | return -ENODEV; | ||
678 | } | ||
679 | |||
680 | ics_list = kmalloc(sizeof(*ics) * found, GFP_KERNEL); | ||
681 | if (!ics_list) { | ||
682 | pr_err("wsp_ics: No memory for structs.\n"); | ||
683 | return -ENOMEM; | ||
684 | } | ||
685 | |||
686 | num_ics = 0; | ||
687 | ics = ics_list; | ||
688 | for_each_compatible_node(dn, NULL, "ibm,wsp-xics") { | ||
689 | rc = wsp_ics_setup(ics, dn); | ||
690 | if (rc == 0) { | ||
691 | ics++; | ||
692 | num_ics++; | ||
693 | } | ||
694 | } | ||
695 | |||
696 | if (found != num_ics) { | ||
697 | pr_err("wsp_ics: Failed setting up %d ICS's\n", | ||
698 | found - num_ics); | ||
699 | return -1; | ||
700 | } | ||
701 | |||
702 | return 0; | ||
703 | } | ||
704 | |||
705 | void __init wsp_init_irq(void) | ||
706 | { | ||
707 | wsp_ics_init(); | ||
708 | xics_init(); | ||
709 | |||
710 | /* We need to patch our irq chip's EOI to point to the right ICP */ | ||
711 | wsp_irq_chip.irq_eoi = icp_ops->eoi; | ||
712 | } | ||
diff --git a/arch/powerpc/platforms/wsp/ics.h b/arch/powerpc/platforms/wsp/ics.h new file mode 100644 index 000000000000..e34d53102640 --- /dev/null +++ b/arch/powerpc/platforms/wsp/ics.h | |||
@@ -0,0 +1,20 @@ | |||
/*
 * Copyright 2009 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/* Interface to the WSP interrupt source controller (ICS) code. */

#ifndef __ICS_H
#define __ICS_H

/* Low-order bits of an XIVE address */
#define XIVE_ADDR_MASK		0x7FFULL

/* Bring up the WSP ICS units and the XICS layer */
extern void wsp_init_irq(void);

/* Allocate @num hardware IRQs on the ICS owning device node @dn */
extern int wsp_ics_alloc_irq(struct device_node *dn, int num);
/* Release hardware IRQ @irq previously allocated from @dn's ICS */
extern void wsp_ics_free_irq(struct device_node *dn, unsigned int irq);

#endif /* __ICS_H */
diff --git a/arch/powerpc/platforms/wsp/opb_pic.c b/arch/powerpc/platforms/wsp/opb_pic.c new file mode 100644 index 000000000000..be05631a3c1c --- /dev/null +++ b/arch/powerpc/platforms/wsp/opb_pic.c | |||
@@ -0,0 +1,332 @@ | |||
1 | /* | ||
2 | * IBM Onboard Peripheral Bus Interrupt Controller | ||
3 | * | ||
4 | * Copyright 2010 Jack Miller, IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | * option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/io.h> | ||
14 | #include <linux/irq.h> | ||
15 | #include <linux/of.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/time.h> | ||
18 | |||
19 | #include <asm/reg_a2.h> | ||
20 | #include <asm/irq.h> | ||
21 | |||
22 | #define OPB_NR_IRQS 32 | ||
23 | |||
24 | #define OPB_MLSASIER 0x04 /* MLS Accumulated Status IER */ | ||
25 | #define OPB_MLSIR 0x50 /* MLS Interrupt Register */ | ||
26 | #define OPB_MLSIER 0x54 /* MLS Interrupt Enable Register */ | ||
27 | #define OPB_MLSIPR 0x58 /* MLS Interrupt Polarity Register */ | ||
28 | #define OPB_MLSIIR 0x5c /* MLS Interrupt Inputs Register */ | ||
29 | |||
/* Number of OPB PICs initialised so far; gives each instance an index */
static int opb_index = 0;

/* Per-instance state for one OPB interrupt controller */
struct opb_pic {
	struct irq_host *host;	/* linear revmap host for our sources */
	void *regs;		/* ioremapped MLS register block */
	int index;		/* instance number (from opb_index) */
	spinlock_t lock;	/* serialises register read-modify-write */
};
38 | |||
/* Read the 32-bit big-endian OPB MLS register at @offset */
static u32 opb_in(struct opb_pic *opb, int offset)
{
	return in_be32(opb->regs + offset);
}
43 | |||
/* Write @val to the 32-bit big-endian OPB MLS register at @offset */
static void opb_out(struct opb_pic *opb, int offset, u32 val)
{
	out_be32(opb->regs + offset, val);
}
48 | |||
49 | static void opb_unmask_irq(struct irq_data *d) | ||
50 | { | ||
51 | struct opb_pic *opb; | ||
52 | unsigned long flags; | ||
53 | u32 ier, bitset; | ||
54 | |||
55 | opb = d->chip_data; | ||
56 | bitset = (1 << (31 - irqd_to_hwirq(d))); | ||
57 | |||
58 | spin_lock_irqsave(&opb->lock, flags); | ||
59 | |||
60 | ier = opb_in(opb, OPB_MLSIER); | ||
61 | opb_out(opb, OPB_MLSIER, ier | bitset); | ||
62 | ier = opb_in(opb, OPB_MLSIER); | ||
63 | |||
64 | spin_unlock_irqrestore(&opb->lock, flags); | ||
65 | } | ||
66 | |||
67 | static void opb_mask_irq(struct irq_data *d) | ||
68 | { | ||
69 | struct opb_pic *opb; | ||
70 | unsigned long flags; | ||
71 | u32 ier, mask; | ||
72 | |||
73 | opb = d->chip_data; | ||
74 | mask = ~(1 << (31 - irqd_to_hwirq(d))); | ||
75 | |||
76 | spin_lock_irqsave(&opb->lock, flags); | ||
77 | |||
78 | ier = opb_in(opb, OPB_MLSIER); | ||
79 | opb_out(opb, OPB_MLSIER, ier & mask); | ||
80 | ier = opb_in(opb, OPB_MLSIER); // Flush posted writes | ||
81 | |||
82 | spin_unlock_irqrestore(&opb->lock, flags); | ||
83 | } | ||
84 | |||
85 | static void opb_ack_irq(struct irq_data *d) | ||
86 | { | ||
87 | struct opb_pic *opb; | ||
88 | unsigned long flags; | ||
89 | u32 bitset; | ||
90 | |||
91 | opb = d->chip_data; | ||
92 | bitset = (1 << (31 - irqd_to_hwirq(d))); | ||
93 | |||
94 | spin_lock_irqsave(&opb->lock, flags); | ||
95 | |||
96 | opb_out(opb, OPB_MLSIR, bitset); | ||
97 | opb_in(opb, OPB_MLSIR); // Flush posted writes | ||
98 | |||
99 | spin_unlock_irqrestore(&opb->lock, flags); | ||
100 | } | ||
101 | |||
102 | static void opb_mask_ack_irq(struct irq_data *d) | ||
103 | { | ||
104 | struct opb_pic *opb; | ||
105 | unsigned long flags; | ||
106 | u32 bitset; | ||
107 | u32 ier, ir; | ||
108 | |||
109 | opb = d->chip_data; | ||
110 | bitset = (1 << (31 - irqd_to_hwirq(d))); | ||
111 | |||
112 | spin_lock_irqsave(&opb->lock, flags); | ||
113 | |||
114 | ier = opb_in(opb, OPB_MLSIER); | ||
115 | opb_out(opb, OPB_MLSIER, ier & ~bitset); | ||
116 | ier = opb_in(opb, OPB_MLSIER); // Flush posted writes | ||
117 | |||
118 | opb_out(opb, OPB_MLSIR, bitset); | ||
119 | ir = opb_in(opb, OPB_MLSIR); // Flush posted writes | ||
120 | |||
121 | spin_unlock_irqrestore(&opb->lock, flags); | ||
122 | } | ||
123 | |||
124 | static int opb_set_irq_type(struct irq_data *d, unsigned int flow) | ||
125 | { | ||
126 | struct opb_pic *opb; | ||
127 | unsigned long flags; | ||
128 | int invert, ipr, mask, bit; | ||
129 | |||
130 | opb = d->chip_data; | ||
131 | |||
132 | /* The only information we're interested in in the type is whether it's | ||
133 | * a high or low trigger. For high triggered interrupts, the polarity | ||
134 | * set for it in the MLS Interrupt Polarity Register is 0, for low | ||
135 | * interrupts it's 1 so that the proper input in the MLS Interrupt Input | ||
136 | * Register is interrupted as asserting the interrupt. */ | ||
137 | |||
138 | switch (flow) { | ||
139 | case IRQ_TYPE_NONE: | ||
140 | opb_mask_irq(d); | ||
141 | return 0; | ||
142 | |||
143 | case IRQ_TYPE_LEVEL_HIGH: | ||
144 | invert = 0; | ||
145 | break; | ||
146 | |||
147 | case IRQ_TYPE_LEVEL_LOW: | ||
148 | invert = 1; | ||
149 | break; | ||
150 | |||
151 | default: | ||
152 | return -EINVAL; | ||
153 | } | ||
154 | |||
155 | bit = (1 << (31 - irqd_to_hwirq(d))); | ||
156 | mask = ~bit; | ||
157 | |||
158 | spin_lock_irqsave(&opb->lock, flags); | ||
159 | |||
160 | ipr = opb_in(opb, OPB_MLSIPR); | ||
161 | ipr = (ipr & mask) | (invert ? bit : 0); | ||
162 | opb_out(opb, OPB_MLSIPR, ipr); | ||
163 | ipr = opb_in(opb, OPB_MLSIPR); // Flush posted writes | ||
164 | |||
165 | spin_unlock_irqrestore(&opb->lock, flags); | ||
166 | |||
167 | /* Record the type in the interrupt descriptor */ | ||
168 | irqd_set_trigger_type(d, flow); | ||
169 | |||
170 | return 0; | ||
171 | } | ||
172 | |||
/* irq_chip shared by all sources on every OPB PIC instance */
static struct irq_chip opb_irq_chip = {
	.name		= "OPB",
	.irq_mask	= opb_mask_irq,
	.irq_unmask	= opb_unmask_irq,
	.irq_mask_ack	= opb_mask_ack_irq,
	.irq_ack	= opb_ack_irq,
	.irq_set_type	= opb_set_irq_type
};
181 | |||
182 | static int opb_host_map(struct irq_host *host, unsigned int virq, | ||
183 | irq_hw_number_t hwirq) | ||
184 | { | ||
185 | struct opb_pic *opb; | ||
186 | |||
187 | opb = host->host_data; | ||
188 | |||
189 | /* Most of the important stuff is handled by the generic host code, like | ||
190 | * the lookup, so just attach some info to the virtual irq */ | ||
191 | |||
192 | irq_set_chip_data(virq, opb); | ||
193 | irq_set_chip_and_handler(virq, &opb_irq_chip, handle_level_irq); | ||
194 | irq_set_irq_type(virq, IRQ_TYPE_NONE); | ||
195 | |||
196 | return 0; | ||
197 | } | ||
198 | |||
199 | static int opb_host_xlate(struct irq_host *host, struct device_node *dn, | ||
200 | const u32 *intspec, unsigned int intsize, | ||
201 | irq_hw_number_t *out_hwirq, unsigned int *out_type) | ||
202 | { | ||
203 | /* Interrupt size must == 2 */ | ||
204 | BUG_ON(intsize != 2); | ||
205 | *out_hwirq = intspec[0]; | ||
206 | *out_type = intspec[1]; | ||
207 | return 0; | ||
208 | } | ||
209 | |||
/* irq_host operations for the OPB PIC's linear revmap host */
static struct irq_host_ops opb_host_ops = {
	.map = opb_host_map,
	.xlate = opb_host_xlate,
};
214 | |||
215 | irqreturn_t opb_irq_handler(int irq, void *private) | ||
216 | { | ||
217 | struct opb_pic *opb; | ||
218 | u32 ir, src, subvirq; | ||
219 | |||
220 | opb = (struct opb_pic *) private; | ||
221 | |||
222 | /* Read the OPB MLS Interrupt Register for | ||
223 | * asserted interrupts */ | ||
224 | ir = opb_in(opb, OPB_MLSIR); | ||
225 | if (!ir) | ||
226 | return IRQ_NONE; | ||
227 | |||
228 | do { | ||
229 | /* Get 1 - 32 source, *NOT* bit */ | ||
230 | src = 32 - ffs(ir); | ||
231 | |||
232 | /* Translate from the OPB's conception of interrupt number to | ||
233 | * Linux's virtual IRQ */ | ||
234 | |||
235 | subvirq = irq_linear_revmap(opb->host, src); | ||
236 | |||
237 | generic_handle_irq(subvirq); | ||
238 | } while ((ir = opb_in(opb, OPB_MLSIR))); | ||
239 | |||
240 | return IRQ_HANDLED; | ||
241 | } | ||
242 | |||
/*
 * Map and initialise one OPB PIC described by device node @dn.
 * Returns the new opb_pic on success, NULL on any failure (everything
 * acquired so far is released via the goto cleanup chain).
 */
struct opb_pic *opb_pic_init_one(struct device_node *dn)
{
	struct opb_pic *opb;
	struct resource res;

	if (of_address_to_resource(dn, 0, &res)) {
		printk(KERN_ERR "opb: Couldn't translate resource\n");
		return  NULL;
	}

	opb = kzalloc(sizeof(struct opb_pic), GFP_KERNEL);
	if (!opb) {
		printk(KERN_ERR "opb: Failed to allocate opb struct!\n");
		return NULL;
	}

	/* Get access to the OPB MMIO registers */
	/* NOTE(review): the MLS register block is mapped at +0x10000 from
	 * the node's base address -- confirm offset against OPB docs. */
	opb->regs = ioremap(res.start + 0x10000, 0x1000);
	if (!opb->regs) {
		printk(KERN_ERR "opb: Failed to allocate register space!\n");
		goto free_opb;
	}

	/* Allocate an irq host so that Linux knows that despite only
	 * having one interrupt to issue, we're the controller for multiple
	 * hardware IRQs, so later we can lookup their virtual IRQs. */

	opb->host = irq_alloc_host(dn, IRQ_HOST_MAP_LINEAR,
			OPB_NR_IRQS, &opb_host_ops, -1);

	if (!opb->host) {
		printk(KERN_ERR "opb: Failed to allocate IRQ host!\n");
		goto free_regs;
	}

	opb->index = opb_index++;
	spin_lock_init(&opb->lock);
	opb->host->host_data = opb;

	/* Disable all interrupts by default */
	opb_out(opb, OPB_MLSASIER, 0);
	opb_out(opb, OPB_MLSIER, 0);

	/* ACK any interrupts left by FW */
	opb_out(opb, OPB_MLSIR, 0xFFFFFFFF);

	return opb;

free_regs:
	iounmap(opb->regs);
free_opb:
	kfree(opb);
	return NULL;
}
297 | |||
298 | void __init opb_pic_init(void) | ||
299 | { | ||
300 | struct device_node *dn; | ||
301 | struct opb_pic *opb; | ||
302 | int virq; | ||
303 | int rc; | ||
304 | |||
305 | /* Call init_one for each OPB device */ | ||
306 | for_each_compatible_node(dn, NULL, "ibm,opb") { | ||
307 | |||
308 | /* Fill in an OPB struct */ | ||
309 | opb = opb_pic_init_one(dn); | ||
310 | if (!opb) { | ||
311 | printk(KERN_WARNING "opb: Failed to init node, skipped!\n"); | ||
312 | continue; | ||
313 | } | ||
314 | |||
315 | /* Map / get opb's hardware virtual irq */ | ||
316 | virq = irq_of_parse_and_map(dn, 0); | ||
317 | if (virq <= 0) { | ||
318 | printk("opb: irq_op_parse_and_map failed!\n"); | ||
319 | continue; | ||
320 | } | ||
321 | |||
322 | /* Attach opb interrupt handler to new virtual IRQ */ | ||
323 | rc = request_irq(virq, opb_irq_handler, 0, "OPB LS Cascade", opb); | ||
324 | if (rc) { | ||
325 | printk("opb: request_irq failed: %d\n", rc); | ||
326 | continue; | ||
327 | } | ||
328 | |||
329 | printk("OPB%d init with %d IRQs at %p\n", opb->index, | ||
330 | OPB_NR_IRQS, opb->regs); | ||
331 | } | ||
332 | } | ||
diff --git a/arch/powerpc/platforms/wsp/psr2.c b/arch/powerpc/platforms/wsp/psr2.c new file mode 100644 index 000000000000..40f28916ff6c --- /dev/null +++ b/arch/powerpc/platforms/wsp/psr2.c | |||
@@ -0,0 +1,95 @@ | |||
1 | /* | ||
2 | * Copyright 2008-2011, IBM Corporation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <linux/delay.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/irq.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/of.h> | ||
16 | #include <linux/smp.h> | ||
17 | |||
18 | #include <asm/machdep.h> | ||
19 | #include <asm/system.h> | ||
20 | #include <asm/time.h> | ||
21 | #include <asm/udbg.h> | ||
22 | |||
23 | #include "ics.h" | ||
24 | #include "wsp.h" | ||
25 | |||
26 | |||
/* Park the CPU: hard-disable interrupts and spin forever.  Used as the
 * power_off/halt hook since there is no hardware method here. */
static void psr2_spin(void)
{
	hard_irq_disable();
	for (;;) ;
}
32 | |||
/* Restart hook: no hardware restart available, just park the CPU */
static void psr2_restart(char *cmd)
{
	psr2_spin();
}
37 | |||
38 | static int psr2_probe_devices(void) | ||
39 | { | ||
40 | struct device_node *np; | ||
41 | |||
42 | /* Our RTC is a ds1500. It seems to be programatically compatible | ||
43 | * with the ds1511 for which we have a driver so let's use that | ||
44 | */ | ||
45 | np = of_find_compatible_node(NULL, NULL, "dallas,ds1500"); | ||
46 | if (np != NULL) { | ||
47 | struct resource res; | ||
48 | if (of_address_to_resource(np, 0, &res) == 0) | ||
49 | platform_device_register_simple("ds1511", 0, &res, 1); | ||
50 | } | ||
51 | return 0; | ||
52 | } | ||
53 | machine_arch_initcall(psr2_md, psr2_probe_devices); | ||
54 | |||
/* Early arch setup: SCOM backend, then SMP bring-up hooks */
static void __init psr2_setup_arch(void)
{
	/* init to some ~sane value until calibrate_delay() runs */
	loops_per_jiffy = 50000000;

	scom_init_wsp();

	/* Setup SMP callback */
#ifdef CONFIG_SMP
	a2_setup_smp();
#endif
}
67 | |||
68 | static int __init psr2_probe(void) | ||
69 | { | ||
70 | unsigned long root = of_get_flat_dt_root(); | ||
71 | |||
72 | if (!of_flat_dt_is_compatible(root, "ibm,psr2")) | ||
73 | return 0; | ||
74 | |||
75 | return 1; | ||
76 | } | ||
77 | |||
/* IRQ init: bring up the XICS-based core PIC, then the OPB cascades */
static void __init psr2_init_irq(void)
{
	wsp_init_irq();
	opb_pic_init();
}
83 | |||
/* Machine description for the PSR2 A2 board */
define_machine(psr2_md) {
	.name			= "PSR2 A2",
	.probe			= psr2_probe,
	.setup_arch		= psr2_setup_arch,
	.restart		= psr2_restart,
	.power_off		= psr2_spin,	/* no HW power-off: spin */
	.halt			= psr2_spin,
	.calibrate_decr		= generic_calibrate_decr,
	.init_IRQ		= psr2_init_irq,
	.progress		= udbg_progress,
	.power_save		= book3e_idle,
};
diff --git a/arch/powerpc/platforms/wsp/scom_smp.c b/arch/powerpc/platforms/wsp/scom_smp.c new file mode 100644 index 000000000000..141e78032097 --- /dev/null +++ b/arch/powerpc/platforms/wsp/scom_smp.c | |||
@@ -0,0 +1,427 @@ | |||
1 | /* | ||
2 | * SCOM support for A2 platforms | ||
3 | * | ||
4 | * Copyright 2007-2011 Benjamin Herrenschmidt, David Gibson, | ||
5 | * Michael Ellerman, IBM Corp. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #include <linux/cpumask.h> | ||
14 | #include <linux/io.h> | ||
15 | #include <linux/of.h> | ||
16 | #include <linux/spinlock.h> | ||
17 | #include <linux/types.h> | ||
18 | |||
19 | #include <asm/cputhreads.h> | ||
20 | #include <asm/reg_a2.h> | ||
21 | #include <asm/scom.h> | ||
22 | #include <asm/udbg.h> | ||
23 | |||
24 | #include "wsp.h" | ||
25 | |||
26 | #define SCOM_RAMC 0x2a /* Ram Command */ | ||
27 | #define SCOM_RAMC_TGT1_EXT 0x80000000 | ||
28 | #define SCOM_RAMC_SRC1_EXT 0x40000000 | ||
29 | #define SCOM_RAMC_SRC2_EXT 0x20000000 | ||
30 | #define SCOM_RAMC_SRC3_EXT 0x10000000 | ||
31 | #define SCOM_RAMC_ENABLE 0x00080000 | ||
32 | #define SCOM_RAMC_THREADSEL 0x00060000 | ||
33 | #define SCOM_RAMC_EXECUTE 0x00010000 | ||
34 | #define SCOM_RAMC_MSR_OVERRIDE 0x00008000 | ||
35 | #define SCOM_RAMC_MSR_PR 0x00004000 | ||
36 | #define SCOM_RAMC_MSR_GS 0x00002000 | ||
37 | #define SCOM_RAMC_FORCE 0x00001000 | ||
38 | #define SCOM_RAMC_FLUSH 0x00000800 | ||
39 | #define SCOM_RAMC_INTERRUPT 0x00000004 | ||
40 | #define SCOM_RAMC_ERROR 0x00000002 | ||
41 | #define SCOM_RAMC_DONE 0x00000001 | ||
42 | #define SCOM_RAMI 0x29 /* Ram Instruction */ | ||
43 | #define SCOM_RAMIC 0x28 /* Ram Instruction and Command */ | ||
44 | #define SCOM_RAMIC_INSN 0xffffffff00000000 | ||
45 | #define SCOM_RAMD 0x2d /* Ram Data */ | ||
46 | #define SCOM_RAMDH 0x2e /* Ram Data High */ | ||
47 | #define SCOM_RAMDL 0x2f /* Ram Data Low */ | ||
48 | #define SCOM_PCCR0 0x33 /* PC Configuration Register 0 */ | ||
49 | #define SCOM_PCCR0_ENABLE_DEBUG 0x80000000 | ||
50 | #define SCOM_PCCR0_ENABLE_RAM 0x40000000 | ||
51 | #define SCOM_THRCTL 0x30 /* Thread Control and Status */ | ||
52 | #define SCOM_THRCTL_T0_STOP 0x80000000 | ||
53 | #define SCOM_THRCTL_T1_STOP 0x40000000 | ||
54 | #define SCOM_THRCTL_T2_STOP 0x20000000 | ||
55 | #define SCOM_THRCTL_T3_STOP 0x10000000 | ||
56 | #define SCOM_THRCTL_T0_STEP 0x08000000 | ||
57 | #define SCOM_THRCTL_T1_STEP 0x04000000 | ||
58 | #define SCOM_THRCTL_T2_STEP 0x02000000 | ||
59 | #define SCOM_THRCTL_T3_STEP 0x01000000 | ||
60 | #define SCOM_THRCTL_T0_RUN 0x00800000 | ||
61 | #define SCOM_THRCTL_T1_RUN 0x00400000 | ||
62 | #define SCOM_THRCTL_T2_RUN 0x00200000 | ||
63 | #define SCOM_THRCTL_T3_RUN 0x00100000 | ||
64 | #define SCOM_THRCTL_T0_PM 0x00080000 | ||
65 | #define SCOM_THRCTL_T1_PM 0x00040000 | ||
66 | #define SCOM_THRCTL_T2_PM 0x00020000 | ||
67 | #define SCOM_THRCTL_T3_PM 0x00010000 | ||
68 | #define SCOM_THRCTL_T0_UDE 0x00008000 | ||
69 | #define SCOM_THRCTL_T1_UDE 0x00004000 | ||
70 | #define SCOM_THRCTL_T2_UDE 0x00002000 | ||
71 | #define SCOM_THRCTL_T3_UDE 0x00001000 | ||
72 | #define SCOM_THRCTL_ASYNC_DIS 0x00000800 | ||
73 | #define SCOM_THRCTL_TB_DIS 0x00000400 | ||
74 | #define SCOM_THRCTL_DEC_DIS 0x00000200 | ||
75 | #define SCOM_THRCTL_AND 0x31 /* Thread Control and Status */ | ||
76 | #define SCOM_THRCTL_OR 0x32 /* Thread Control and Status */ | ||
77 | |||
78 | |||
/* One SCOM mapping shared by all threads of a core */
static DEFINE_PER_CPU(scom_map_t, scom_ptrs);

/*
 * Return the SCOM mapping for @cpu's core, creating it on first use
 * from device node @np.  *first_thread is set to 1 when this call
 * created the mapping, i.e. @cpu is the first thread of its core to
 * come up (with a boot-core exception, see below).
 */
static scom_map_t get_scom(int cpu, struct device_node *np, int *first_thread)
{
	scom_map_t scom = per_cpu(scom_ptrs, cpu);
	int tcpu;

	if (scom_map_ok(scom)) {
		*first_thread = 0;
		return scom;
	}

	*first_thread = 1;

	scom = scom_map_device(np, 0);

	/* Share the new mapping with every sibling thread of this core */
	for (tcpu = cpu_first_thread_sibling(cpu);
	     tcpu <= cpu_last_thread_sibling(cpu); tcpu++)
		per_cpu(scom_ptrs, tcpu) = scom;

	/* Hack: for the boot core, this will actually get called on
	 * the second thread up, not the first so our test above will
	 * set first_thread incorrectly. */
	if (cpu_first_thread_sibling(cpu) == 0)
		*first_thread = 0;

	return scom;
}
107 | |||
/*
 * RAM (execute) a single instruction @insn on @thread via the SCOM RAM
 * facility.  @extmask selects which of the TGT1/SRC1-3 extension bits
 * (RAMC bits 28-31) to set in the command.
 *
 * Returns 0 on success, -SCOM_RAMC_INTERRUPT / -SCOM_RAMC_ERROR on a
 * reported interrupt/error, or -1 on poll timeout.
 */
static int a2_scom_ram(scom_map_t scom, int thread, u32 insn, int extmask)
{
	u64 cmd, mask, val;
	int n = 0;

	cmd = ((u64)insn << 32) | (((u64)extmask & 0xf) << 28)
		| ((u64)thread << 17) | SCOM_RAMC_ENABLE | SCOM_RAMC_EXECUTE;
	mask = SCOM_RAMC_DONE | SCOM_RAMC_INTERRUPT | SCOM_RAMC_ERROR;

	/* Single 64-bit write of instruction + command via RAMIC */
	scom_write(scom, SCOM_RAMIC, cmd);

	/* Poll RAMC for done/interrupt/error.
	 * NOTE(review): gives up after only 3 reads with no delay between
	 * them -- confirm this is sufficient for the hardware. */
	while (!((val = scom_read(scom, SCOM_RAMC)) & mask)) {
		pr_devel("Waiting on RAMC = 0x%llx\n", val);
		if (++n == 3) {
			pr_err("RAMC timeout on instruction 0x%08x, thread %d\n",
			       insn, thread);
			return -1;
		}
	}

	if (val & SCOM_RAMC_INTERRUPT) {
		pr_err("RAMC interrupt on instruction 0x%08x, thread %d\n",
		       insn, thread);
		return -SCOM_RAMC_INTERRUPT;
	}

	if (val & SCOM_RAMC_ERROR) {
		pr_err("RAMC error on instruction 0x%08x, thread %d\n",
		       insn, thread);
		return -SCOM_RAMC_ERROR;
	}

	return 0;
}
142 | |||
/*
 * Read GPR @gpr of @thread: ram "or rN,rN,rN" then read the value back
 * from the SCOM RAM Data register.  @alt nonzero sets all extension
 * bits (selects the alternate register set).  Returns 0 or the
 * a2_scom_ram() error.
 */
static int a2_scom_getgpr(scom_map_t scom, int thread, int gpr, int alt,
			  u64 *out_gpr)
{
	int rc;

	/* or rN, rN, rN */
	u32 insn = 0x7c000378 | (gpr << 21) | (gpr << 16) | (gpr << 11);
	rc = a2_scom_ram(scom, thread, insn, alt ? 0xf : 0x0);
	if (rc)
		return rc;

	*out_gpr = scom_read(scom, SCOM_RAMD);

	return 0;
}
158 | |||
/*
 * Read SPR @spr of @thread: ram "mfspr r1,spr" (or "mfmsr r1" for the
 * magic MSR value 0x0ff0) then fetch r1 via a2_scom_getgpr().
 * Clobbers the target thread's r1.
 */
static int a2_scom_getspr(scom_map_t scom, int thread, int spr, u64 *out_spr)
{
	int rc, sprhi, sprlo;
	u32 insn;

	/* The SPR field in the instruction is split hi/lo 5 bits */
	sprhi = spr >> 5;
	sprlo = spr & 0x1f;
	insn = 0x7c2002a6 | (sprlo << 16) | (sprhi << 11); /* mfspr r1,spr */

	if (spr == 0x0ff0)
		insn = 0x7c2000a6; /* mfmsr r1 */

	rc = a2_scom_ram(scom, thread, insn, 0xf);
	if (rc)
		return rc;
	return a2_scom_getgpr(scom, thread, 1, 1, out_spr);
}
176 | |||
/*
 * Load 64-bit @val into GPR @gpr of @thread by ramming a minimal
 * sequence of immediate-load instructions (lis/li/oris/ori plus a
 * 32-bit left shift), skipping pieces of the constant that are zero.
 * @alt selects the alternate register set via the extension bits.
 * Returns 0, or the OR of a2_scom_ram() error codes on failure.
 */
static int a2_scom_setgpr(scom_map_t scom, int thread, int gpr,
			  int alt, u64 val)
{
	/* Instruction templates with the target register fields filled in */
	u32 lis = 0x3c000000 | (gpr << 21);
	u32 li = 0x38000000 | (gpr << 21);
	u32 oris = 0x64000000 | (gpr << 21) | (gpr << 16);
	u32 ori = 0x60000000 | (gpr << 21) | (gpr << 16);
	u32 rldicr32 = 0x780007c6 | (gpr << 21) | (gpr << 16);
	/* The four 16-bit pieces of the value, high to low */
	u32 highest = val >> 48;
	u32 higher = (val >> 32) & 0xffff;
	u32 high = (val >> 16) & 0xffff;
	u32 low = val & 0xffff;
	int lext = alt ? 0x8 : 0x0;
	int oext = alt ? 0xf : 0x0;
	int rc = 0;

	if (highest)
		rc |= a2_scom_ram(scom, thread, lis | highest, lext);

	if (higher) {
		if (highest)
			rc |= a2_scom_ram(scom, thread, oris | higher, oext);
		else
			rc |= a2_scom_ram(scom, thread, li | higher, lext);
	}

	/* Shift the upper 32 bits into place before loading the lower */
	if (highest || higher)
		rc |= a2_scom_ram(scom, thread, rldicr32, oext);

	if (high) {
		if (highest || higher)
			rc |= a2_scom_ram(scom, thread, oris | high, oext);
		else
			rc |= a2_scom_ram(scom, thread, lis | high, lext);
	}

	if (highest || higher || high)
		rc |= a2_scom_ram(scom, thread, ori | low, oext);
	else
		rc |= a2_scom_ram(scom, thread, li | low, lext);

	return rc;
}
220 | |||
/*
 * Write @val to SPR @spr of @thread: load @val into r1 then ram
 * "mtspr spr,r1" (or "mtmsr r1" for the magic MSR value 0x0ff0).
 * Clobbers the target thread's r1.  Returns 0 or a negative error.
 */
static int a2_scom_setspr(scom_map_t scom, int thread, int spr, u64 val)
{
	int sprhi = spr >> 5;
	int sprlo = spr & 0x1f;
	/* mtspr spr, r1 */
	u32 insn = 0x7c2003a6 | (sprlo << 16) | (sprhi << 11);

	if (spr == 0x0ff0)
		insn = 0x7c200124; /* mtmsr r1 */

	if (a2_scom_setgpr(scom, thread, 1, 1, val))
		return -1;

	return a2_scom_ram(scom, thread, insn, 0xf);
}
236 | |||
237 | static int a2_scom_initial_tlb(scom_map_t scom, int thread) | ||
238 | { | ||
239 | extern u32 a2_tlbinit_code_start[], a2_tlbinit_code_end[]; | ||
240 | extern u32 a2_tlbinit_after_iprot_flush[]; | ||
241 | extern u32 a2_tlbinit_after_linear_map[]; | ||
242 | u32 assoc, entries, i; | ||
243 | u64 epn, tlbcfg; | ||
244 | u32 *p; | ||
245 | int rc; | ||
246 | |||
247 | /* Invalidate all entries (including iprot) */ | ||
248 | |||
249 | rc = a2_scom_getspr(scom, thread, SPRN_TLB0CFG, &tlbcfg); | ||
250 | if (rc) | ||
251 | goto scom_fail; | ||
252 | entries = tlbcfg & TLBnCFG_N_ENTRY; | ||
253 | assoc = (tlbcfg & TLBnCFG_ASSOC) >> 24; | ||
254 | epn = 0; | ||
255 | |||
256 | /* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */ | ||
257 | a2_scom_setspr(scom, thread, SPRN_MMUCR2, 0x000a7531); | ||
258 | /* Set MMUCR3 to write all thids bit to the TLB */ | ||
259 | a2_scom_setspr(scom, thread, SPRN_MMUCR3, 0x0000000f); | ||
260 | |||
261 | /* Set MAS1 for 1G page size, and MAS2 to our initial EPN */ | ||
262 | a2_scom_setspr(scom, thread, SPRN_MAS1, MAS1_TSIZE(BOOK3E_PAGESZ_1GB)); | ||
263 | a2_scom_setspr(scom, thread, SPRN_MAS2, epn); | ||
264 | for (i = 0; i < entries; i++) { | ||
265 | |||
266 | a2_scom_setspr(scom, thread, SPRN_MAS0, MAS0_ESEL(i % assoc)); | ||
267 | |||
268 | /* tlbwe */ | ||
269 | rc = a2_scom_ram(scom, thread, 0x7c0007a4, 0); | ||
270 | if (rc) | ||
271 | goto scom_fail; | ||
272 | |||
273 | /* Next entry is new address? */ | ||
274 | if((i + 1) % assoc == 0) { | ||
275 | epn += (1 << 30); | ||
276 | a2_scom_setspr(scom, thread, SPRN_MAS2, epn); | ||
277 | } | ||
278 | } | ||
279 | |||
280 | /* Setup args for linear mapping */ | ||
281 | rc = a2_scom_setgpr(scom, thread, 3, 0, MAS0_TLBSEL(0)); | ||
282 | if (rc) | ||
283 | goto scom_fail; | ||
284 | |||
285 | /* Linear mapping */ | ||
286 | for (p = a2_tlbinit_code_start; p < a2_tlbinit_after_linear_map; p++) { | ||
287 | rc = a2_scom_ram(scom, thread, *p, 0); | ||
288 | if (rc) | ||
289 | goto scom_fail; | ||
290 | } | ||
291 | |||
292 | /* | ||
293 | * For the boot thread, between the linear mapping and the debug | ||
294 | * mappings there is a loop to flush iprot mappings. Ramming doesn't do | ||
295 | * branches, but the secondary threads don't need to be nearly as smart | ||
296 | * (i.e. we don't need to worry about invalidating the mapping we're | ||
297 | * standing on). | ||
298 | */ | ||
299 | |||
300 | /* Debug mappings. Expects r11 = MAS0 from linear map (set above) */ | ||
301 | for (p = a2_tlbinit_after_iprot_flush; p < a2_tlbinit_code_end; p++) { | ||
302 | rc = a2_scom_ram(scom, thread, *p, 0); | ||
303 | if (rc) | ||
304 | goto scom_fail; | ||
305 | } | ||
306 | |||
307 | scom_fail: | ||
308 | if (rc) | ||
309 | pr_err("Setting up initial TLB failed, err %d\n", rc); | ||
310 | |||
311 | if (rc == -SCOM_RAMC_INTERRUPT) { | ||
312 | /* Interrupt, dump some status */ | ||
313 | int rc[10]; | ||
314 | u64 iar, srr0, srr1, esr, mas0, mas1, mas2, mas7_3, mas8, ccr2; | ||
315 | rc[0] = a2_scom_getspr(scom, thread, SPRN_IAR, &iar); | ||
316 | rc[1] = a2_scom_getspr(scom, thread, SPRN_SRR0, &srr0); | ||
317 | rc[2] = a2_scom_getspr(scom, thread, SPRN_SRR1, &srr1); | ||
318 | rc[3] = a2_scom_getspr(scom, thread, SPRN_ESR, &esr); | ||
319 | rc[4] = a2_scom_getspr(scom, thread, SPRN_MAS0, &mas0); | ||
320 | rc[5] = a2_scom_getspr(scom, thread, SPRN_MAS1, &mas1); | ||
321 | rc[6] = a2_scom_getspr(scom, thread, SPRN_MAS2, &mas2); | ||
322 | rc[7] = a2_scom_getspr(scom, thread, SPRN_MAS7_MAS3, &mas7_3); | ||
323 | rc[8] = a2_scom_getspr(scom, thread, SPRN_MAS8, &mas8); | ||
324 | rc[9] = a2_scom_getspr(scom, thread, SPRN_A2_CCR2, &ccr2); | ||
325 | pr_err(" -> retreived IAR =0x%llx (err %d)\n", iar, rc[0]); | ||
326 | pr_err(" retreived SRR0=0x%llx (err %d)\n", srr0, rc[1]); | ||
327 | pr_err(" retreived SRR1=0x%llx (err %d)\n", srr1, rc[2]); | ||
328 | pr_err(" retreived ESR =0x%llx (err %d)\n", esr, rc[3]); | ||
329 | pr_err(" retreived MAS0=0x%llx (err %d)\n", mas0, rc[4]); | ||
330 | pr_err(" retreived MAS1=0x%llx (err %d)\n", mas1, rc[5]); | ||
331 | pr_err(" retreived MAS2=0x%llx (err %d)\n", mas2, rc[6]); | ||
332 | pr_err(" retreived MS73=0x%llx (err %d)\n", mas7_3, rc[7]); | ||
333 | pr_err(" retreived MAS8=0x%llx (err %d)\n", mas8, rc[8]); | ||
334 | pr_err(" retreived CCR2=0x%llx (err %d)\n", ccr2, rc[9]); | ||
335 | } | ||
336 | |||
337 | return rc; | ||
338 | } | ||
339 | |||
/*
 * Bring up logical CPU @lcpu (hardware thread @thr_idx of the core
 * described by @np) through the SCOM RAM facility: stop the thread,
 * set MSR/TLB/entry point, then release it into the generic secondary
 * start code.  Returns 0 on success, negative on error.
 */
int __devinit a2_scom_startup_cpu(unsigned int lcpu, int thr_idx,
				  struct device_node *np)
{
	u64 init_iar, init_msr, init_ccr2;
	unsigned long start_here;
	int rc, core_setup;
	scom_map_t scom;
	u64 pccr0;

	/* core_setup is set when we're the first thread of the core up */
	scom = get_scom(lcpu, np, &core_setup);
	if (!scom) {
		printk(KERN_ERR "Couldn't map SCOM for CPU%d\n", lcpu);
		return -1;
	}

	pr_devel("Bringing up CPU%d using SCOM...\n", lcpu);

	/* Enable the debug/RAM facilities; restored before returning */
	pccr0 = scom_read(scom, SCOM_PCCR0);
	scom_write(scom, SCOM_PCCR0, pccr0 | SCOM_PCCR0_ENABLE_DEBUG |
				     SCOM_PCCR0_ENABLE_RAM);

	/* Stop the thread with THRCTL. If we are setting up the TLB we stop all
	 * threads. We also disable asynchronous interrupts while RAMing.
	 */
	if (core_setup)
		scom_write(scom, SCOM_THRCTL_OR,
			      SCOM_THRCTL_T0_STOP |
			      SCOM_THRCTL_T1_STOP |
			      SCOM_THRCTL_T2_STOP |
			      SCOM_THRCTL_T3_STOP |
			      SCOM_THRCTL_ASYNC_DIS);
	else
		scom_write(scom, SCOM_THRCTL_OR, SCOM_THRCTL_T0_STOP >> thr_idx);

	/* Flush its pipeline just in case */
	scom_write(scom, SCOM_RAMC, ((u64)thr_idx << 17) |
		      SCOM_RAMC_FLUSH | SCOM_RAMC_ENABLE);

	/* Save original state for the failure report at the bottom */
	a2_scom_getspr(scom, thr_idx, SPRN_IAR, &init_iar);
	a2_scom_getspr(scom, thr_idx, 0x0ff0, &init_msr);
	a2_scom_getspr(scom, thr_idx, SPRN_A2_CCR2, &init_ccr2);

	/* Set MSR to MSR_CM (0x0ff0 is magic value for MSR_CM) */
	rc = a2_scom_setspr(scom, thr_idx, 0x0ff0, MSR_CM);
	if (rc) {
		/* NOTE(review): this early return skips the THRCTL/PCCR0
		 * restore done on the success path below -- confirm the
		 * thread is meant to stay stopped on this failure. */
		pr_err("Failed to set MSR ! err %d\n", rc);
		return rc;
	}

	/* RAM in an sync/isync for the sake of it */
	a2_scom_ram(scom, thr_idx, 0x7c0004ac, 0);
	a2_scom_ram(scom, thr_idx, 0x4c00012c, 0);

	if (core_setup) {
		pr_devel("CPU%d is first thread in core, initializing TLB...\n",
			 lcpu);
		rc = a2_scom_initial_tlb(scom, thr_idx);
		if (rc)
			goto fail;
	}

	/* Dereference the function descriptor to get the entry address */
	start_here = *(unsigned long *)(core_setup ? generic_secondary_smp_init
					: generic_secondary_thread_init);
	pr_devel("CPU%d entry point at 0x%lx...\n", lcpu, start_here);

	/* Entry point in IAR, hard CPU id in r3 (start code argument) */
	rc |= a2_scom_setspr(scom, thr_idx, SPRN_IAR, start_here);
	rc |= a2_scom_setgpr(scom, thr_idx, 3, 0,
			     get_hard_smp_processor_id(lcpu));
	/*
	 * Tell book3e_secondary_core_init not to set up the TLB, we've
	 * already done that.
	 */
	rc |= a2_scom_setgpr(scom, thr_idx, 4, 0, 1);

	/* Enable the thread in TENS */
	rc |= a2_scom_setspr(scom, thr_idx, SPRN_TENS, 0x1 << thr_idx);

	/* Release the thread and restore the original PCCR0 state */
	scom_write(scom, SCOM_RAMC, 0);
	scom_write(scom, SCOM_THRCTL_AND, ~(SCOM_THRCTL_T0_STOP >> thr_idx));
	scom_write(scom, SCOM_PCCR0, pccr0);
fail:
	pr_devel("  SCOM initialization %s\n", rc ? "failed" : "succeeded");
	if (rc) {
		pr_err("Old IAR=0x%08llx MSR=0x%08llx CCR2=0x%08llx\n",
		       init_iar, init_msr, init_ccr2);
	}

	return rc;
}
diff --git a/arch/powerpc/platforms/wsp/scom_wsp.c b/arch/powerpc/platforms/wsp/scom_wsp.c new file mode 100644 index 000000000000..4052e2259f30 --- /dev/null +++ b/arch/powerpc/platforms/wsp/scom_wsp.c | |||
@@ -0,0 +1,77 @@ | |||
1 | /* | ||
2 | * SCOM backend for WSP | ||
3 | * | ||
4 | * Copyright 2010 Benjamin Herrenschmidt, IBM Corp. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/cpumask.h> | ||
13 | #include <linux/io.h> | ||
14 | #include <linux/of.h> | ||
15 | #include <linux/spinlock.h> | ||
16 | #include <linux/types.h> | ||
17 | |||
18 | #include <asm/cputhreads.h> | ||
19 | #include <asm/reg_a2.h> | ||
20 | #include <asm/scom.h> | ||
21 | #include <asm/udbg.h> | ||
22 | |||
23 | #include "wsp.h" | ||
24 | |||
25 | |||
26 | static scom_map_t wsp_scom_map(struct device_node *dev, u64 reg, u64 count) | ||
27 | { | ||
28 | struct resource r; | ||
29 | u64 xscom_addr; | ||
30 | |||
31 | if (!of_get_property(dev, "scom-controller", NULL)) { | ||
32 | pr_err("%s: device %s is not a SCOM controller\n", | ||
33 | __func__, dev->full_name); | ||
34 | return SCOM_MAP_INVALID; | ||
35 | } | ||
36 | |||
37 | if (of_address_to_resource(dev, 0, &r)) { | ||
38 | pr_debug("Failed to find SCOM controller address\n"); | ||
39 | return 0; | ||
40 | } | ||
41 | |||
42 | /* Transform the SCOM address into an XSCOM offset */ | ||
43 | xscom_addr = ((reg & 0x7f000000) >> 1) | ((reg & 0xfffff) << 3); | ||
44 | |||
45 | return (scom_map_t)ioremap(r.start + xscom_addr, count << 3); | ||
46 | } | ||
47 | |||
48 | static void wsp_scom_unmap(scom_map_t map) | ||
49 | { | ||
50 | iounmap((void *)map); | ||
51 | } | ||
52 | |||
53 | static u64 wsp_scom_read(scom_map_t map, u32 reg) | ||
54 | { | ||
55 | u64 __iomem *addr = (u64 __iomem *)map; | ||
56 | |||
57 | return in_be64(addr + reg); | ||
58 | } | ||
59 | |||
60 | static void wsp_scom_write(scom_map_t map, u32 reg, u64 value) | ||
61 | { | ||
62 | u64 __iomem *addr = (u64 __iomem *)map; | ||
63 | |||
64 | return out_be64(addr + reg, value); | ||
65 | } | ||
66 | |||
67 | static const struct scom_controller wsp_scom_controller = { | ||
68 | .map = wsp_scom_map, | ||
69 | .unmap = wsp_scom_unmap, | ||
70 | .read = wsp_scom_read, | ||
71 | .write = wsp_scom_write | ||
72 | }; | ||
73 | |||
74 | void scom_init_wsp(void) | ||
75 | { | ||
76 | scom_init(&wsp_scom_controller); | ||
77 | } | ||
diff --git a/arch/powerpc/platforms/wsp/setup.c b/arch/powerpc/platforms/wsp/setup.c new file mode 100644 index 000000000000..11ac2f05e01c --- /dev/null +++ b/arch/powerpc/platforms/wsp/setup.c | |||
@@ -0,0 +1,36 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Michael Ellerman, IBM Corporation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/of_platform.h> | ||
12 | |||
13 | #include "wsp.h" | ||
14 | |||
15 | /* | ||
16 | * Find chip-id by walking up device tree looking for ibm,wsp-chip-id property. | ||
17 | * Won't work for nodes that are not a descendant of a wsp node. | ||
18 | */ | ||
19 | int wsp_get_chip_id(struct device_node *dn) | ||
20 | { | ||
21 | const u32 *p; | ||
22 | int rc; | ||
23 | |||
24 | /* Start looking at the specified node, not its parent */ | ||
25 | dn = of_node_get(dn); | ||
26 | while (dn && !(p = of_get_property(dn, "ibm,wsp-chip-id", NULL))) | ||
27 | dn = of_get_next_parent(dn); | ||
28 | |||
29 | if (!dn) | ||
30 | return -1; | ||
31 | |||
32 | rc = *p; | ||
33 | of_node_put(dn); | ||
34 | |||
35 | return rc; | ||
36 | } | ||
diff --git a/arch/powerpc/platforms/wsp/smp.c b/arch/powerpc/platforms/wsp/smp.c new file mode 100644 index 000000000000..9d20fa9d3710 --- /dev/null +++ b/arch/powerpc/platforms/wsp/smp.c | |||
@@ -0,0 +1,88 @@ | |||
1 | /* | ||
2 | * SMP Support for A2 platforms | ||
3 | * | ||
4 | * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/cpumask.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/of.h> | ||
17 | #include <linux/smp.h> | ||
18 | |||
19 | #include <asm/dbell.h> | ||
20 | #include <asm/machdep.h> | ||
21 | #include <asm/xics.h> | ||
22 | |||
23 | #include "ics.h" | ||
24 | #include "wsp.h" | ||
25 | |||
26 | static void __devinit smp_a2_setup_cpu(int cpu) | ||
27 | { | ||
28 | doorbell_setup_this_cpu(); | ||
29 | |||
30 | if (cpu != boot_cpuid) | ||
31 | xics_setup_cpu(); | ||
32 | } | ||
33 | |||
34 | int __devinit smp_a2_kick_cpu(int nr) | ||
35 | { | ||
36 | const char *enable_method; | ||
37 | struct device_node *np; | ||
38 | int thr_idx; | ||
39 | |||
40 | if (nr < 0 || nr >= NR_CPUS) | ||
41 | return -ENOENT; | ||
42 | |||
43 | np = of_get_cpu_node(nr, &thr_idx); | ||
44 | if (!np) | ||
45 | return -ENODEV; | ||
46 | |||
47 | enable_method = of_get_property(np, "enable-method", NULL); | ||
48 | pr_devel("CPU%d has enable-method: \"%s\"\n", nr, enable_method); | ||
49 | |||
50 | if (!enable_method) { | ||
51 | printk(KERN_ERR "CPU%d has no enable-method\n", nr); | ||
52 | return -ENOENT; | ||
53 | } else if (strcmp(enable_method, "ibm,a2-scom") == 0) { | ||
54 | if (a2_scom_startup_cpu(nr, thr_idx, np)) | ||
55 | return -1; | ||
56 | } else { | ||
57 | printk(KERN_ERR "CPU%d: Don't understand enable-method \"%s\"\n", | ||
58 | nr, enable_method); | ||
59 | return -EINVAL; | ||
60 | } | ||
61 | |||
62 | /* | ||
63 | * The processor is currently spinning, waiting for the | ||
64 | * cpu_start field to become non-zero. After we set cpu_start, | ||
65 | * the processor will continue on to secondary_start | ||
66 | */ | ||
67 | paca[nr].cpu_start = 1; | ||
68 | |||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static int __init smp_a2_probe(void) | ||
73 | { | ||
74 | return cpus_weight(cpu_possible_map); | ||
75 | } | ||
76 | |||
77 | static struct smp_ops_t a2_smp_ops = { | ||
78 | .message_pass = smp_muxed_ipi_message_pass, | ||
79 | .cause_ipi = doorbell_cause_ipi, | ||
80 | .probe = smp_a2_probe, | ||
81 | .kick_cpu = smp_a2_kick_cpu, | ||
82 | .setup_cpu = smp_a2_setup_cpu, | ||
83 | }; | ||
84 | |||
85 | void __init a2_setup_smp(void) | ||
86 | { | ||
87 | smp_ops = &a2_smp_ops; | ||
88 | } | ||
diff --git a/arch/powerpc/platforms/wsp/wsp.h b/arch/powerpc/platforms/wsp/wsp.h new file mode 100644 index 000000000000..7c3e087fd2f2 --- /dev/null +++ b/arch/powerpc/platforms/wsp/wsp.h | |||
@@ -0,0 +1,17 @@ | |||
1 | #ifndef __WSP_H | ||
2 | #define __WSP_H | ||
3 | |||
4 | #include <asm/wsp.h> | ||
5 | |||
6 | extern void wsp_setup_pci(void); | ||
7 | extern void scom_init_wsp(void); | ||
8 | |||
9 | extern void a2_setup_smp(void); | ||
10 | extern int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx, | ||
11 | struct device_node *np); | ||
12 | int smp_a2_cpu_bootable(unsigned int nr); | ||
13 | int __devinit smp_a2_kick_cpu(int nr); | ||
14 | |||
15 | void opb_pic_init(void); | ||
16 | |||
17 | #endif /* __WSP_H */ | ||