author     Paul Mundt <lethal@linux-sh.org>  2011-05-23 23:05:26 -0400
committer  Paul Mundt <lethal@linux-sh.org>  2011-05-23 23:05:26 -0400
commit     8b1aaeaf54f1bcaa0bbec6bb170db367c998d27c (patch)
tree       2478e1708f5a3da261597f4aa1011d4d41c2cc84 /arch/powerpc/platforms
parent     bca606a646a2b1f4fa1ae2b22a0ac707505d7297 (diff)
parent     5e152b4c9e0fce6149c74406346a7ae7e7a17727 (diff)

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into rmobile-latest
Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--  arch/powerpc/platforms/44x/iss4xx.c | 6
-rw-r--r--  arch/powerpc/platforms/512x/mpc5121_ads_cpld.c | 10
-rw-r--r--  arch/powerpc/platforms/52xx/media5200.c | 4
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pic.c | 83
-rw-r--r--  arch/powerpc/platforms/82xx/pq2ads-pci-pic.c | 12
-rw-r--r--  arch/powerpc/platforms/83xx/suspend.c | 7
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c | 12
-rw-r--r--  arch/powerpc/platforms/85xx/socrates_fpga_pic.c | 26
-rw-r--r--  arch/powerpc/platforms/86xx/gef_pic.c | 10
-rw-r--r--  arch/powerpc/platforms/86xx/mpc8610_hpcd.c | 99
-rw-r--r--  arch/powerpc/platforms/86xx/mpc86xx_smp.c | 6
-rw-r--r--  arch/powerpc/platforms/8xx/m8xx_setup.c | 2
-rw-r--r--  arch/powerpc/platforms/Kconfig | 31
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 24
-rw-r--r--  arch/powerpc/platforms/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/cell/Kconfig | 4
-rw-r--r--  arch/powerpc/platforms/cell/Makefile | 9
-rw-r--r--  arch/powerpc/platforms/cell/axon_msi.c | 3
-rw-r--r--  arch/powerpc/platforms/cell/beat_interrupt.c | 27
-rw-r--r--  arch/powerpc/platforms/cell/beat_interrupt.h | 3
-rw-r--r--  arch/powerpc/platforms/cell/beat_smp.c | 124
-rw-r--r--  arch/powerpc/platforms/cell/cbe_regs.c | 11
-rw-r--r--  arch/powerpc/platforms/cell/celleb_pci.c | 25
-rw-r--r--  arch/powerpc/platforms/cell/celleb_pci.h | 3
-rw-r--r--  arch/powerpc/platforms/cell/celleb_setup.c | 4
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c | 16
-rw-r--r--  arch/powerpc/platforms/cell/io-workarounds.c | 185
-rw-r--r--  arch/powerpc/platforms/cell/io-workarounds.h | 49
-rw-r--r--  arch/powerpc/platforms/cell/qpace_setup.c | 1
-rw-r--r--  arch/powerpc/platforms/cell/setup.c | 4
-rw-r--r--  arch/powerpc/platforms/cell/smp.c | 37
-rw-r--r--  arch/powerpc/platforms/cell/spider-pci.c | 3
-rw-r--r--  arch/powerpc/platforms/cell/spider-pic.c | 21
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c | 28
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c | 2
-rw-r--r--  arch/powerpc/platforms/chrp/smp.c | 4
-rw-r--r--  arch/powerpc/platforms/embedded6xx/flipper-pic.c | 15
-rw-r--r--  arch/powerpc/platforms/embedded6xx/hlwd-pic.c | 15
-rw-r--r--  arch/powerpc/platforms/iseries/Kconfig | 4
-rw-r--r--  arch/powerpc/platforms/iseries/exception.S | 62
-rw-r--r--  arch/powerpc/platforms/iseries/irq.c | 13
-rw-r--r--  arch/powerpc/platforms/iseries/setup.c | 9
-rw-r--r--  arch/powerpc/platforms/iseries/smp.c | 45
-rw-r--r--  arch/powerpc/platforms/iseries/smp.h | 6
-rw-r--r--  arch/powerpc/platforms/powermac/Kconfig | 11
-rw-r--r--  arch/powerpc/platforms/powermac/pic.c | 67
-rw-r--r--  arch/powerpc/platforms/powermac/pic.h | 11
-rw-r--r--  arch/powerpc/platforms/powermac/pmac.h | 1
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c | 105
-rw-r--r--  arch/powerpc/platforms/ps3/interrupt.c | 8
-rw-r--r--  arch/powerpc/platforms/ps3/smp.c | 22
-rw-r--r--  arch/powerpc/platforms/ps3/spu.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig | 23
-rw-r--r--  arch/powerpc/platforms/pseries/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/pseries/dtl.c | 20
-rw-r--r--  arch/powerpc/platforms/pseries/eeh.c | 82
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_driver.c | 22
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c | 5
-rw-r--r--  arch/powerpc/platforms/pseries/io_event_irq.c | 231
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c | 117
-rw-r--r--  arch/powerpc/platforms/pseries/kexec.c | 5
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 48
-rw-r--r--  arch/powerpc/platforms/pseries/plpar_wrappers.h | 27
-rw-r--r--  arch/powerpc/platforms/pseries/ras.c | 6
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 48
-rw-r--r--  arch/powerpc/platforms/pseries/smp.c | 24
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c | 949
-rw-r--r--  arch/powerpc/platforms/pseries/xics.h | 23
-rw-r--r--  arch/powerpc/platforms/wsp/Kconfig | 28
-rw-r--r--  arch/powerpc/platforms/wsp/Makefile | 6
-rw-r--r--  arch/powerpc/platforms/wsp/ics.c | 712
-rw-r--r--  arch/powerpc/platforms/wsp/ics.h | 20
-rw-r--r--  arch/powerpc/platforms/wsp/opb_pic.c | 332
-rw-r--r--  arch/powerpc/platforms/wsp/psr2.c | 95
-rw-r--r--  arch/powerpc/platforms/wsp/scom_smp.c | 427
-rw-r--r--  arch/powerpc/platforms/wsp/scom_wsp.c | 77
-rw-r--r--  arch/powerpc/platforms/wsp/setup.c | 36
-rw-r--r--  arch/powerpc/platforms/wsp/smp.c | 88
-rw-r--r--  arch/powerpc/platforms/wsp/wsp.h | 17
79 files changed, 2780 insertions, 1984 deletions
diff --git a/arch/powerpc/platforms/44x/iss4xx.c b/arch/powerpc/platforms/44x/iss4xx.c
index aa46e9d1e771..19395f18b1db 100644
--- a/arch/powerpc/platforms/44x/iss4xx.c
+++ b/arch/powerpc/platforms/44x/iss4xx.c
@@ -87,7 +87,7 @@ static void __cpuinit smp_iss4xx_setup_cpu(int cpu)
         mpic_setup_this_cpu();
 }
 
-static void __cpuinit smp_iss4xx_kick_cpu(int cpu)
+static int __cpuinit smp_iss4xx_kick_cpu(int cpu)
 {
         struct device_node *cpunode = of_get_cpu_node(cpu, NULL);
         const u64 *spin_table_addr_prop;
@@ -104,7 +104,7 @@ static void __cpuinit smp_iss4xx_kick_cpu(int cpu)
                                                NULL);
         if (spin_table_addr_prop == NULL) {
                 pr_err("CPU%d: Can't start, missing cpu-release-addr !\n", cpu);
-                return;
+                return -ENOENT;
         }
 
         /* Assume it's mapped as part of the linear mapping. This is a bit
@@ -117,6 +117,8 @@ static void __cpuinit smp_iss4xx_kick_cpu(int cpu)
         smp_wmb();
         spin_table[1] = __pa(start_secondary_47x);
         mb();
+
+        return 0;
 }
 
 static struct smp_ops_t iss_smp_ops = {
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
index cfc4b2009982..9f09319352c0 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
@@ -61,7 +61,7 @@ irq_to_pic_bit(unsigned int irq)
 static void
 cpld_mask_irq(struct irq_data *d)
 {
-        unsigned int cpld_irq = (unsigned int)irq_map[d->irq].hwirq;
+        unsigned int cpld_irq = (unsigned int)irqd_to_hwirq(d);
         void __iomem *pic_mask = irq_to_pic_mask(cpld_irq);
 
         out_8(pic_mask,
@@ -71,7 +71,7 @@ cpld_mask_irq(struct irq_data *d)
 static void
 cpld_unmask_irq(struct irq_data *d)
 {
-        unsigned int cpld_irq = (unsigned int)irq_map[d->irq].hwirq;
+        unsigned int cpld_irq = (unsigned int)irqd_to_hwirq(d);
         void __iomem *pic_mask = irq_to_pic_mask(cpld_irq);
 
         out_8(pic_mask,
@@ -97,7 +97,7 @@ cpld_pic_get_irq(int offset, u8 ignore, u8 __iomem *statusp,
         status |= (ignore | mask);
 
         if (status == 0xff)
-                return NO_IRQ_IGNORE;
+                return NO_IRQ;
 
         cpld_irq = ffz(status) + offset;
 
@@ -109,14 +109,14 @@ cpld_pic_cascade(unsigned int irq, struct irq_desc *desc)
 {
         irq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status,
                                &cpld_regs->pci_mask);
-        if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
+        if (irq != NO_IRQ) {
                 generic_handle_irq(irq);
                 return;
         }
 
         irq = cpld_pic_get_irq(8, MISC_IGNORE, &cpld_regs->misc_status,
                                &cpld_regs->misc_mask);
-        if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
+        if (irq != NO_IRQ) {
                 generic_handle_irq(irq);
                 return;
         }
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 57a6a349e932..96f85e5e0cd3 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -56,7 +56,7 @@ static void media5200_irq_unmask(struct irq_data *d)
 
         spin_lock_irqsave(&media5200_irq.lock, flags);
         val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE);
-        val |= 1 << (MEDIA5200_IRQ_SHIFT + irq_map[d->irq].hwirq);
+        val |= 1 << (MEDIA5200_IRQ_SHIFT + irqd_to_hwirq(d));
         out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val);
         spin_unlock_irqrestore(&media5200_irq.lock, flags);
 }
@@ -68,7 +68,7 @@ static void media5200_irq_mask(struct irq_data *d)
 
         spin_lock_irqsave(&media5200_irq.lock, flags);
         val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE);
-        val &= ~(1 << (MEDIA5200_IRQ_SHIFT + irq_map[d->irq].hwirq));
+        val &= ~(1 << (MEDIA5200_IRQ_SHIFT + irqd_to_hwirq(d)));
         out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val);
         spin_unlock_irqrestore(&media5200_irq.lock, flags);
 }
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 1dd15400f6f0..1a9a49570579 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -157,48 +157,30 @@ static inline void io_be_clrbit(u32 __iomem *addr, int bitno)
  */
 static void mpc52xx_extirq_mask(struct irq_data *d)
 {
-        int irq;
-        int l2irq;
-
-        irq = irq_map[d->irq].hwirq;
-        l2irq = irq & MPC52xx_IRQ_L2_MASK;
-
+        int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
         io_be_clrbit(&intr->ctrl, 11 - l2irq);
 }
 
 static void mpc52xx_extirq_unmask(struct irq_data *d)
 {
-        int irq;
-        int l2irq;
-
-        irq = irq_map[d->irq].hwirq;
-        l2irq = irq & MPC52xx_IRQ_L2_MASK;
-
+        int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
         io_be_setbit(&intr->ctrl, 11 - l2irq);
 }
 
 static void mpc52xx_extirq_ack(struct irq_data *d)
 {
-        int irq;
-        int l2irq;
-
-        irq = irq_map[d->irq].hwirq;
-        l2irq = irq & MPC52xx_IRQ_L2_MASK;
-
+        int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
         io_be_setbit(&intr->ctrl, 27-l2irq);
 }
 
 static int mpc52xx_extirq_set_type(struct irq_data *d, unsigned int flow_type)
 {
         u32 ctrl_reg, type;
-        int irq;
-        int l2irq;
+        int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
         void *handler = handle_level_irq;
 
-        irq = irq_map[d->irq].hwirq;
-        l2irq = irq & MPC52xx_IRQ_L2_MASK;
-
-        pr_debug("%s: irq=%x. l2=%d flow_type=%d\n", __func__, irq, l2irq, flow_type);
+        pr_debug("%s: irq=%x. l2=%d flow_type=%d\n", __func__,
+                 (int) irqd_to_hwirq(d), l2irq, flow_type);
 
         switch (flow_type) {
         case IRQF_TRIGGER_HIGH: type = 0; break;
@@ -237,23 +219,13 @@ static int mpc52xx_null_set_type(struct irq_data *d, unsigned int flow_type)
 
 static void mpc52xx_main_mask(struct irq_data *d)
 {
-        int irq;
-        int l2irq;
-
-        irq = irq_map[d->irq].hwirq;
-        l2irq = irq & MPC52xx_IRQ_L2_MASK;
-
+        int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
         io_be_setbit(&intr->main_mask, 16 - l2irq);
 }
 
 static void mpc52xx_main_unmask(struct irq_data *d)
 {
-        int irq;
-        int l2irq;
-
-        irq = irq_map[d->irq].hwirq;
-        l2irq = irq & MPC52xx_IRQ_L2_MASK;
-
+        int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
         io_be_clrbit(&intr->main_mask, 16 - l2irq);
 }
 
@@ -270,23 +242,13 @@ static struct irq_chip mpc52xx_main_irqchip = {
  */
 static void mpc52xx_periph_mask(struct irq_data *d)
 {
-        int irq;
-        int l2irq;
-
-        irq = irq_map[d->irq].hwirq;
-        l2irq = irq & MPC52xx_IRQ_L2_MASK;
-
+        int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
         io_be_setbit(&intr->per_mask, 31 - l2irq);
 }
 
 static void mpc52xx_periph_unmask(struct irq_data *d)
 {
-        int irq;
-        int l2irq;
-
-        irq = irq_map[d->irq].hwirq;
-        l2irq = irq & MPC52xx_IRQ_L2_MASK;
-
+        int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
         io_be_clrbit(&intr->per_mask, 31 - l2irq);
 }
 
@@ -303,34 +265,19 @@ static struct irq_chip mpc52xx_periph_irqchip = {
  */
 static void mpc52xx_sdma_mask(struct irq_data *d)
 {
-        int irq;
-        int l2irq;
-
-        irq = irq_map[d->irq].hwirq;
-        l2irq = irq & MPC52xx_IRQ_L2_MASK;
-
+        int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
         io_be_setbit(&sdma->IntMask, l2irq);
 }
 
 static void mpc52xx_sdma_unmask(struct irq_data *d)
 {
-        int irq;
-        int l2irq;
-
-        irq = irq_map[d->irq].hwirq;
-        l2irq = irq & MPC52xx_IRQ_L2_MASK;
-
+        int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
         io_be_clrbit(&sdma->IntMask, l2irq);
 }
 
 static void mpc52xx_sdma_ack(struct irq_data *d)
 {
-        int irq;
-        int l2irq;
-
-        irq = irq_map[d->irq].hwirq;
-        l2irq = irq & MPC52xx_IRQ_L2_MASK;
-
+        int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
         out_be32(&sdma->IntPend, 1 << l2irq);
 }
 
@@ -539,7 +486,7 @@ void __init mpc52xx_init_irq(void)
 unsigned int mpc52xx_get_irq(void)
 {
         u32 status;
-        int irq = NO_IRQ_IGNORE;
+        int irq;
 
         status = in_be32(&intr->enc_status);
         if (status & 0x00000400) {      /* critical */
@@ -562,6 +509,8 @@ unsigned int mpc52xx_get_irq(void)
                 } else {
                         irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET);
                 }
+        } else {
+                return NO_IRQ;
         }
 
         return irq_linear_revmap(mpc52xx_irqhost, irq);
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
index 4a4eb6ffa12f..8ccf9ed62fe2 100644
--- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
+++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
@@ -42,7 +42,7 @@ struct pq2ads_pci_pic {
 static void pq2ads_pci_mask_irq(struct irq_data *d)
 {
         struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d);
-        int irq = NUM_IRQS - virq_to_hw(d->irq) - 1;
+        int irq = NUM_IRQS - irqd_to_hwirq(d) - 1;
 
         if (irq != -1) {
                 unsigned long flags;
@@ -58,7 +58,7 @@ static void pq2ads_pci_mask_irq(struct irq_data *d)
 static void pq2ads_pci_unmask_irq(struct irq_data *d)
 {
         struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d);
-        int irq = NUM_IRQS - virq_to_hw(d->irq) - 1;
+        int irq = NUM_IRQS - irqd_to_hwirq(d) - 1;
 
         if (irq != -1) {
                 unsigned long flags;
@@ -112,16 +112,8 @@ static int pci_pic_host_map(struct irq_host *h, unsigned int virq,
         return 0;
 }
 
-static void pci_host_unmap(struct irq_host *h, unsigned int virq)
-{
-        /* remove chip and handler */
-        irq_set_chip_data(virq, NULL);
-        irq_set_chip(virq, NULL);
-}
-
 static struct irq_host_ops pci_pic_host_ops = {
         .map = pci_pic_host_map,
-        .unmap = pci_host_unmap,
 };
 
 int __init pq2ads_pci_init_irq(void)
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 188272934cfb..104faa8aa23c 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -318,17 +318,20 @@ static const struct platform_suspend_ops mpc83xx_suspend_ops = {
         .end = mpc83xx_suspend_end,
 };
 
+static struct of_device_id pmc_match[];
 static int pmc_probe(struct platform_device *ofdev)
 {
+        const struct of_device_id *match;
         struct device_node *np = ofdev->dev.of_node;
         struct resource res;
         struct pmc_type *type;
         int ret = 0;
 
-        if (!ofdev->dev.of_match)
+        match = of_match_device(pmc_match, &ofdev->dev);
+        if (!match)
                 return -EINVAL;
 
-        type = ofdev->dev.of_match->data;
+        type = match->data;
 
         if (!of_device_is_available(np))
                 return -ENODEV;
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 0d00ff9d05a0..d6a93a10c0f5 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -41,7 +41,7 @@ extern void __early_start(void);
 #define NUM_BOOT_ENTRY          8
 #define SIZE_BOOT_ENTRY         (NUM_BOOT_ENTRY * sizeof(u32))
 
-static void __init
+static int __init
 smp_85xx_kick_cpu(int nr)
 {
         unsigned long flags;
@@ -60,7 +60,7 @@ smp_85xx_kick_cpu(int nr)
 
         if (cpu_rel_addr == NULL) {
                 printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
-                return;
+                return -ENOENT;
         }
 
         /*
@@ -107,6 +107,8 @@ smp_85xx_kick_cpu(int nr)
         iounmap(bptr_vaddr);
 
         pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
+
+        return 0;
 }
 
 static void __init
@@ -233,8 +235,10 @@ void __init mpc85xx_smp_init(void)
                 smp_85xx_ops.message_pass = smp_mpic_message_pass;
         }
 
-        if (cpu_has_feature(CPU_FTR_DBELL))
-                smp_85xx_ops.message_pass = doorbell_message_pass;
+        if (cpu_has_feature(CPU_FTR_DBELL)) {
+                smp_85xx_ops.message_pass = smp_muxed_ipi_message_pass;
+                smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
+        }
 
         BUG_ON(!smp_85xx_ops.message_pass);
 
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
index db864623b4ae..12cb9bb2cc68 100644
--- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
+++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
@@ -48,8 +48,6 @@ static struct socrates_fpga_irq_info fpga_irqs[SOCRATES_FPGA_NUM_IRQS] = {
         [8] = {0, IRQ_TYPE_LEVEL_HIGH},
 };
 
-#define socrates_fpga_irq_to_hw(virq)    ((unsigned int)irq_map[virq].hwirq)
-
 static DEFINE_RAW_SPINLOCK(socrates_fpga_pic_lock);
 
 static void __iomem *socrates_fpga_pic_iobase;
@@ -110,11 +108,9 @@ void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc)
 static void socrates_fpga_pic_ack(struct irq_data *d)
 {
         unsigned long flags;
-        unsigned int hwirq, irq_line;
+        unsigned int irq_line, hwirq = irqd_to_hwirq(d);
         uint32_t mask;
 
-        hwirq = socrates_fpga_irq_to_hw(d->irq);
-
         irq_line = fpga_irqs[hwirq].irq_line;
         raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
         mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
@@ -127,12 +123,10 @@ static void socrates_fpga_pic_ack(struct irq_data *d)
 static void socrates_fpga_pic_mask(struct irq_data *d)
 {
         unsigned long flags;
-        unsigned int hwirq;
+        unsigned int hwirq = irqd_to_hwirq(d);
         int irq_line;
         u32 mask;
 
-        hwirq = socrates_fpga_irq_to_hw(d->irq);
-
         irq_line = fpga_irqs[hwirq].irq_line;
         raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
         mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
@@ -145,12 +139,10 @@ static void socrates_fpga_pic_mask(struct irq_data *d)
 static void socrates_fpga_pic_mask_ack(struct irq_data *d)
 {
         unsigned long flags;
-        unsigned int hwirq;
+        unsigned int hwirq = irqd_to_hwirq(d);
         int irq_line;
         u32 mask;
 
-        hwirq = socrates_fpga_irq_to_hw(d->irq);
-
         irq_line = fpga_irqs[hwirq].irq_line;
         raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
         mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
@@ -164,12 +156,10 @@ static void socrates_fpga_pic_mask_ack(struct irq_data *d)
 static void socrates_fpga_pic_unmask(struct irq_data *d)
 {
         unsigned long flags;
-        unsigned int hwirq;
+        unsigned int hwirq = irqd_to_hwirq(d);
         int irq_line;
         u32 mask;
 
-        hwirq = socrates_fpga_irq_to_hw(d->irq);
-
         irq_line = fpga_irqs[hwirq].irq_line;
         raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
         mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
@@ -182,12 +172,10 @@ static void socrates_fpga_pic_unmask(struct irq_data *d)
 static void socrates_fpga_pic_eoi(struct irq_data *d)
 {
         unsigned long flags;
-        unsigned int hwirq;
+        unsigned int hwirq = irqd_to_hwirq(d);
         int irq_line;
         u32 mask;
 
-        hwirq = socrates_fpga_irq_to_hw(d->irq);
-
         irq_line = fpga_irqs[hwirq].irq_line;
         raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
         mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
@@ -201,12 +189,10 @@ static int socrates_fpga_pic_set_type(struct irq_data *d,
                 unsigned int flow_type)
 {
         unsigned long flags;
-        unsigned int hwirq;
+        unsigned int hwirq = irqd_to_hwirq(d);
         int polarity;
         u32 mask;
 
-        hwirq = socrates_fpga_irq_to_hw(d->irq);
-
         if (fpga_irqs[hwirq].type != IRQ_TYPE_NONE)
                 return -EINVAL;
 
diff --git a/arch/powerpc/platforms/86xx/gef_pic.c b/arch/powerpc/platforms/86xx/gef_pic.c
index 0beec7d5566b..94594e58594c 100644
--- a/arch/powerpc/platforms/86xx/gef_pic.c
+++ b/arch/powerpc/platforms/86xx/gef_pic.c
@@ -46,8 +46,6 @@
 #define GEF_PIC_CPU0_MCP_MASK   GEF_PIC_MCP_MASK(0)
 #define GEF_PIC_CPU1_MCP_MASK   GEF_PIC_MCP_MASK(1)
 
-#define gef_irq_to_hw(virq)    ((unsigned int)irq_map[virq].hwirq)
-
 
 static DEFINE_RAW_SPINLOCK(gef_pic_lock);
 
@@ -113,11 +111,9 @@ void gef_pic_cascade(unsigned int irq, struct irq_desc *desc)
 static void gef_pic_mask(struct irq_data *d)
 {
         unsigned long flags;
-        unsigned int hwirq;
+        unsigned int hwirq = irqd_to_hwirq(d);
         u32 mask;
 
-        hwirq = gef_irq_to_hw(d->irq);
-
         raw_spin_lock_irqsave(&gef_pic_lock, flags);
         mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0));
         mask &= ~(1 << hwirq);
@@ -136,11 +132,9 @@ static void gef_pic_mask_ack(struct irq_data *d)
 static void gef_pic_unmask(struct irq_data *d)
 {
         unsigned long flags;
-        unsigned int hwirq;
+        unsigned int hwirq = irqd_to_hwirq(d);
         u32 mask;
 
-        hwirq = gef_irq_to_hw(d->irq);
-
         raw_spin_lock_irqsave(&gef_pic_lock, flags);
         mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0));
         mask |= (1 << hwirq);
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index 018cc67be426..a896511690c2 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -66,7 +66,7 @@ static void __init mpc8610_suspend_init(void)
                 return;
         }
 
-        ret = request_irq(irq, mpc8610_sw9_irq, 0, "sw9/wakeup", NULL);
+        ret = request_irq(irq, mpc8610_sw9_irq, 0, "sw9:wakeup", NULL);
         if (ret) {
                 pr_err("%s: can't request pixis event IRQ: %d\n",
                        __func__, ret);
@@ -105,45 +105,77 @@ machine_device_initcall(mpc86xx_hpcd, mpc8610_declare_of_platform_devices);
 
 #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
 
-static u32 get_busfreq(void)
-{
-        struct device_node *node;
-
-        u32 fs_busfreq = 0;
-        node = of_find_node_by_type(NULL, "cpu");
-        if (node) {
-                unsigned int size;
-                const unsigned int *prop =
-                        of_get_property(node, "bus-frequency", &size);
-                if (prop)
-                        fs_busfreq = *prop;
-                of_node_put(node);
-        };
-        return fs_busfreq;
-}
+/*
+ * DIU Area Descriptor
+ *
+ * The MPC8610 reference manual shows the bits of the AD register in
+ * little-endian order, which causes the BLUE_C field to be split into two
+ * parts. To simplify the definition of the MAKE_AD() macro, we define the
+ * fields in big-endian order and byte-swap the result.
+ *
+ * So even though the registers don't look like they're in the
+ * same bit positions as they are on the P1022, the same value is written to
+ * the AD register on the MPC8610 and on the P1022.
+ */
+#define AD_BYTE_F               0x10000000
+#define AD_ALPHA_C_MASK         0x0E000000
+#define AD_ALPHA_C_SHIFT        25
+#define AD_BLUE_C_MASK          0x01800000
+#define AD_BLUE_C_SHIFT         23
+#define AD_GREEN_C_MASK         0x00600000
+#define AD_GREEN_C_SHIFT        21
+#define AD_RED_C_MASK           0x00180000
+#define AD_RED_C_SHIFT          19
+#define AD_PALETTE              0x00040000
+#define AD_PIXEL_S_MASK         0x00030000
+#define AD_PIXEL_S_SHIFT        16
+#define AD_COMP_3_MASK          0x0000F000
+#define AD_COMP_3_SHIFT         12
+#define AD_COMP_2_MASK          0x00000F00
+#define AD_COMP_2_SHIFT         8
+#define AD_COMP_1_MASK          0x000000F0
+#define AD_COMP_1_SHIFT         4
+#define AD_COMP_0_MASK          0x0000000F
+#define AD_COMP_0_SHIFT         0
+
+#define MAKE_AD(alpha, red, blue, green, size, c0, c1, c2, c3) \
+        cpu_to_le32(AD_BYTE_F | (alpha << AD_ALPHA_C_SHIFT) | \
+        (blue << AD_BLUE_C_SHIFT) | (green << AD_GREEN_C_SHIFT) | \
+        (red << AD_RED_C_SHIFT) | (c3 << AD_COMP_3_SHIFT) | \
+        (c2 << AD_COMP_2_SHIFT) | (c1 << AD_COMP_1_SHIFT) | \
+        (c0 << AD_COMP_0_SHIFT) | (size << AD_PIXEL_S_SHIFT))
 
 unsigned int mpc8610hpcd_get_pixel_format(unsigned int bits_per_pixel,
                                           int monitor_port)
 {
         static const unsigned long pixelformat[][3] = {
-                {0x88882317, 0x88083218, 0x65052119},
-                {0x88883316, 0x88082219, 0x65053118},
+                {
+                        MAKE_AD(3, 0, 2, 1, 3, 8, 8, 8, 8),
+                        MAKE_AD(4, 2, 0, 1, 2, 8, 8, 8, 0),
+                        MAKE_AD(4, 0, 2, 1, 1, 5, 6, 5, 0)
+                },
+                {
+                        MAKE_AD(3, 2, 0, 1, 3, 8, 8, 8, 8),
+                        MAKE_AD(4, 0, 2, 1, 2, 8, 8, 8, 0),
+                        MAKE_AD(4, 2, 0, 1, 1, 5, 6, 5, 0)
+                },
         };
-        unsigned int pix_fmt, arch_monitor;
+        unsigned int arch_monitor;
 
+        /* The DVI port is mis-wired on revision 1 of this board. */
         arch_monitor = ((*pixis_arch == 0x01) && (monitor_port == 0))? 0 : 1;
-                /* DVI port for board version 0x01 */
-
-        if (bits_per_pixel == 32)
-                pix_fmt = pixelformat[arch_monitor][0];
-        else if (bits_per_pixel == 24)
-                pix_fmt = pixelformat[arch_monitor][1];
-        else if (bits_per_pixel == 16)
-                pix_fmt = pixelformat[arch_monitor][2];
-        else
-                pix_fmt = pixelformat[1][0];
-
-        return pix_fmt;
+
+        switch (bits_per_pixel) {
+        case 32:
+                return pixelformat[arch_monitor][0];
+        case 24:
+                return pixelformat[arch_monitor][1];
+        case 16:
+                return pixelformat[arch_monitor][2];
+        default:
+                pr_err("fsl-diu: unsupported pixel depth %u\n", bits_per_pixel);
+                return 0;
        }
 }
 
 void mpc8610hpcd_set_gamma_table(int monitor_port, char *gamma_table_base)
@@ -190,8 +222,7 @@ void mpc8610hpcd_set_pixel_clock(unsigned int pixclock)
         }
 
         /* Pixel Clock configuration */
-        pr_debug("DIU: Bus Frequency = %d\n", get_busfreq());
-        speed_ccb = get_busfreq();
+        speed_ccb = fsl_get_sys_freq();
 
         /* Calculate the pixel clock with the smallest error */
         /* calculate the following in steps to avoid overflow */
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
index eacea0e3fcc8..af09baee22cb 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
@@ -56,7 +56,7 @@ smp_86xx_release_core(int nr)
 }
 
 
-static void __init
+static int __init
 smp_86xx_kick_cpu(int nr)
 {
         unsigned int save_vector;
@@ -65,7 +65,7 @@ smp_86xx_kick_cpu(int nr)
         unsigned int *vector = (unsigned int *)(KERNELBASE + 0x100);
 
         if (nr < 0 || nr >= NR_CPUS)
-                return;
+                return -ENOENT;
 
         pr_debug("smp_86xx_kick_cpu: kick CPU #%d\n", nr);
 
@@ -92,6 +92,8 @@ smp_86xx_kick_cpu(int nr)
         local_irq_restore(flags);
 
         pr_debug("wait CPU #%d for %d msecs.\n", nr, n);
+
+        return 0;
 }
 
 
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c
index 9ecce995dd4b..1e121088826f 100644
--- a/arch/powerpc/platforms/8xx/m8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/m8xx_setup.c
@@ -150,7 +150,7 @@ void __init mpc8xx_calibrate_decr(void)
          */
         cpu = of_find_node_by_type(NULL, "cpu");
         virq= irq_of_parse_and_map(cpu, 0);
-        irq = irq_map[virq].hwirq;
+        irq = virq_to_hw(virq);
 
         sys_tmr2 = immr_map(im_sit);
         out_be16(&sys_tmr2->sit_tbscr, ((1 << (7 - (irq/2))) << 8) |
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index f7b07720aa30..f970ca2b180c 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -20,6 +20,7 @@ source "arch/powerpc/platforms/embedded6xx/Kconfig"
 source "arch/powerpc/platforms/44x/Kconfig"
 source "arch/powerpc/platforms/40x/Kconfig"
 source "arch/powerpc/platforms/amigaone/Kconfig"
+source "arch/powerpc/platforms/wsp/Kconfig"
 
 config KVM_GUEST
         bool "KVM Guest support"
@@ -56,16 +57,19 @@ config UDBG_RTAS_CONSOLE
         depends on PPC_RTAS
         default n
 
+config PPC_SMP_MUXED_IPI
+        bool
+        help
+          Select this opton if your platform supports SMP and your
+          interrupt controller provides less than 4 interrupts to each
+          cpu. This will enable the generic code to multiplex the 4
+          messages on to one ipi.
+
 config PPC_UDBG_BEAT
         bool "BEAT based debug console"
         depends on PPC_CELLEB
         default n
 
-config XICS
-        depends on PPC_PSERIES
-        bool
-        default y
-
 config IPIC
         bool
         default n
@@ -147,14 +151,27 @@ config PPC_970_NAP
         bool
         default n
 
+config PPC_P7_NAP
+        bool
+        default n
+
 config PPC_INDIRECT_IO
         bool
         select GENERIC_IOMAP
-        default n
+
+config PPC_INDIRECT_PIO
+        bool
+        select PPC_INDIRECT_IO
+
+config PPC_INDIRECT_MMIO
+        bool
+        select PPC_INDIRECT_IO
+
+config PPC_IO_WORKAROUNDS
+        bool
 
 config GENERIC_IOMAP
         bool
-        default n
 
 source "drivers/cpufreq/Kconfig"
 
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 111138c55f9c..2165b65876f9 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -73,6 +73,7 @@ config PPC_BOOK3S_64
 config PPC_BOOK3E_64
         bool "Embedded processors"
         select PPC_FPU # Make it a choice ?
+        select PPC_SMP_MUXED_IPI
 
 endchoice
 
@@ -107,6 +108,10 @@ config POWER4
         depends on PPC64 && PPC_BOOK3S
         def_bool y
 
+config PPC_A2
+        bool
+        depends on PPC_BOOK3E_64
+
 config TUNE_CELL
         bool "Optimize for Cell Broadband Engine"
         depends on PPC64 && PPC_BOOK3S
@@ -174,6 +179,7 @@ config FSL_BOOKE
 config PPC_FSL_BOOK3E
         bool
         select FSL_EMB_PERFMON
+        select PPC_SMP_MUXED_IPI
         default y if FSL_BOOKE
 
 config PTE_64BIT
@@ -226,6 +232,24 @@ config VSX
 
           If in doubt, say Y here.
 
+config PPC_ICSWX
+        bool "Support for PowerPC icswx coprocessor instruction"
+        depends on POWER4
+        default n
+        ---help---
+
+          This option enables kernel support for the PowerPC Initiate
+          Coprocessor Store Word (icswx) coprocessor instruction on POWER7
+          or newer processors.
+
+          This option is only useful if you have a processor that supports
+          the icswx coprocessor instruction. It does not have any effect
+          on processors without the icswx coprocessor instruction.
+
+          This option slightly increases kernel memory usage.
+
+          If in doubt, say N here.
+
 config SPE
         bool "SPE Support"
         depends on E200 || (E500 && !PPC_E500MC)
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index fdb9f0b0d7a8..73e2116cfeed 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -22,3 +22,4 @@ obj-$(CONFIG_PPC_CELL) += cell/
 obj-$(CONFIG_PPC_PS3)           += ps3/
 obj-$(CONFIG_EMBEDDED6xx)       += embedded6xx/
 obj-$(CONFIG_AMIGAONE)          += amigaone/
+obj-$(CONFIG_PPC_WSP)           += wsp/
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 81239ebed83f..67d5009b4e86 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -6,7 +6,8 @@ config PPC_CELL_COMMON
         bool
         select PPC_CELL
         select PPC_DCR_MMIO
-        select PPC_INDIRECT_IO
+        select PPC_INDIRECT_PIO
+        select PPC_INDIRECT_MMIO
         select PPC_NATIVE
         select PPC_RTAS
         select IRQ_EDGE_EOI_HANDLER
@@ -15,6 +16,7 @@ config PPC_CELL_NATIVE
         bool
         select PPC_CELL_COMMON
         select MPIC
+        select PPC_IO_WORKAROUNDS
         select IBM_NEW_EMAC_EMAC4
         select IBM_NEW_EMAC_RGMII
         select IBM_NEW_EMAC_ZMII #test only
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index 83fafe922641..a4a89350bcfc 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -1,7 +1,7 @@
 obj-$(CONFIG_PPC_CELL_COMMON)           += cbe_regs.o interrupt.o pervasive.o
 
 obj-$(CONFIG_PPC_CELL_NATIVE)           += iommu.o setup.o spider-pic.o \
-                                           pmu.o io-workarounds.o spider-pci.o
+                                           pmu.o spider-pci.o
 obj-$(CONFIG_CBE_RAS)                   += ras.o
 
 obj-$(CONFIG_CBE_THERM)                 += cbe_thermal.o
@@ -39,11 +39,10 @@ obj-y += celleb_setup.o \
                                            celleb_pci.o celleb_scc_epci.o \
                                            celleb_scc_pciex.o \
                                            celleb_scc_uhc.o \
-                                           io-workarounds.o spider-pci.o \
-                                           beat.o beat_htab.o beat_hvCall.o \
-                                           beat_interrupt.o beat_iommu.o
+                                           spider-pci.o beat.o beat_htab.o \
+                                           beat_hvCall.o beat_interrupt.o \
+                                           beat_iommu.o
 
-obj-$(CONFIG_SMP)                       += beat_smp.o
 obj-$(CONFIG_PPC_UDBG_BEAT)             += beat_udbg.o
 obj-$(CONFIG_SERIAL_TXX9)               += celleb_scc_sio.o
 obj-$(CONFIG_SPU_BASE)                  += beat_spu_priv1.o
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index bb5ebf8fa80b..ac06903e136a 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -113,7 +113,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
         pr_devel("axon_msi: woff %x roff %x msi %x\n",
                  write_offset, msic->read_offset, msi);
 
-        if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host) {
+        if (msi < NR_IRQS && irq_get_chip_data(msi) == msic) {
                 generic_handle_irq(msi);
                 msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
         } else {
@@ -320,6 +320,7 @@ static struct irq_chip msic_irq_chip = {
 static int msic_host_map(struct irq_host *h, unsigned int virq,
                          irq_hw_number_t hw)
 {
+        irq_set_chip_data(virq, h->host_data);
         irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);
 
         return 0;
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
index 4cb9e147c307..55015e1f6939 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -148,16 +148,6 @@ static int beatic_pic_host_map(struct irq_host *h, unsigned int virq,
 }
 
 /*
- * Update binding hardware IRQ number (hw) and Virtuql
- * IRQ number (virq). This is called only once for a given mapping.
- */
-static void beatic_pic_host_remap(struct irq_host *h, unsigned int virq,
-                                  irq_hw_number_t hw)
-{
-        beat_construct_and_connect_irq_plug(virq, hw);
-}
-
-/*
  * Translate device-tree interrupt spec to irq_hw_number_t style (ulong),
  * to pass away to irq_create_mapping().
  *
@@ -184,7 +174,6 @@ static int beatic_pic_host_match(struct irq_host *h, struct device_node *np)
 
 static struct irq_host_ops beatic_pic_host_ops = {
         .map = beatic_pic_host_map,
-        .remap = beatic_pic_host_remap,
         .unmap = beatic_pic_host_unmap,
         .xlate = beatic_pic_host_xlate,
         .match = beatic_pic_host_match,
@@ -257,22 +246,6 @@ void __init beatic_init_IRQ(void)
         irq_set_default_host(beatic_host);
 }
 
-#ifdef CONFIG_SMP
-
-/* Nullified to compile with SMP mode */
-void beatic_setup_cpu(int cpu)
-{
-}
-
-void beatic_cause_IPI(int cpu, int mesg)
-{
-}
-
-void beatic_request_IPIs(void)
-{
-}
-#endif /* CONFIG_SMP */
-
 void beatic_deinit_IRQ(void)
 {
         int i;
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.h b/arch/powerpc/platforms/cell/beat_interrupt.h
index b470fd0051f1..a7e52f91a078 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.h
+++ b/arch/powerpc/platforms/cell/beat_interrupt.h
@@ -24,9 +24,6 @@
 
 extern void beatic_init_IRQ(void);
 extern unsigned int beatic_get_irq(void);
-extern void beatic_cause_IPI(int cpu, int mesg);
-extern void beatic_request_IPIs(void);
-extern void beatic_setup_cpu(int);
 extern void beatic_deinit_IRQ(void);
 
 #endif
diff --git a/arch/powerpc/platforms/cell/beat_smp.c b/arch/powerpc/platforms/cell/beat_smp.c
deleted file mode 100644
index 26efc204c47f..000000000000
--- a/arch/powerpc/platforms/cell/beat_smp.c
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * SMP support for Celleb platform. (Incomplete)
- *
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * This code is based on arch/powerpc/platforms/cell/smp.c:
- *  Dave Engebretsen, Peter Bergner, and
- *  Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
- *  Plus various changes from other IBM teams...
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
- */
-
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/threads.h>
-#include <linux/cpu.h>
-
-#include <asm/irq.h>
-#include <asm/smp.h>
-#include <asm/machdep.h>
-#include <asm/udbg.h>
-
-#include "beat_interrupt.h"
-
-#ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
-/*
- * The primary thread of each non-boot processor is recorded here before
- * smp init.
- */
-/* static cpumask_t of_spin_map; */
-
-/**
- * smp_startup_cpu() - start the given cpu
- *
- * At boot time, there is nothing to do for primary threads which were
- * started from Open Firmware. For anything else, call RTAS with the
- * appropriate start location.
- *
- * Returns:
- *      0       - failure
- *      1       - success
- */
-static inline int __devinit smp_startup_cpu(unsigned int lcpu)
-{
-        return 0;
-}
-
-static void smp_beatic_message_pass(int target, int msg)
-{
-        unsigned int i;
-
-        if (target < NR_CPUS) {
-                beatic_cause_IPI(target, msg);
-        } else {
-                for_each_online_cpu(i) {
-                        if (target == MSG_ALL_BUT_SELF
-                            && i == smp_processor_id())
-                                continue;
-                        beatic_cause_IPI(i, msg);
-                }
-        }
-}
-
-static int __init smp_beatic_probe(void)
-{
-        return cpus_weight(cpu_possible_map);
-}
-
-static void __devinit smp_beatic_setup_cpu(int cpu)
-{
-        beatic_setup_cpu(cpu);
-}
-
-static void __devinit smp_celleb_kick_cpu(int nr)
-{
-        BUG_ON(nr < 0 || nr >= NR_CPUS);
-
-        if (!smp_startup_cpu(nr))
-                return;
-}
-
-static int smp_celleb_cpu_bootable(unsigned int nr)
-{
-        return 1;
-}
-static struct smp_ops_t bpa_beatic_smp_ops = {
-        .message_pass   = smp_beatic_message_pass,
-        .probe          = smp_beatic_probe,
-        .kick_cpu       = smp_celleb_kick_cpu,
-        .setup_cpu      = smp_beatic_setup_cpu,
-        .cpu_bootable   = smp_celleb_cpu_bootable,
-};
-
-/* This is called very early */
-void __init smp_init_celleb(void)
-{
-        DBG(" -> smp_init_celleb()\n");
-
-        smp_ops = &bpa_beatic_smp_ops;
-
-        DBG(" <- smp_init_celleb()\n");
-}
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
index dbc338f187a2..f3917e7a5b44 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -45,8 +45,8 @@ static struct cbe_thread_map
         unsigned int cbe_id;
 } cbe_thread_map[NR_CPUS];
 
-static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = CPU_MASK_NONE };
-static cpumask_t cbe_first_online_cpu = CPU_MASK_NONE;
+static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = {CPU_BITS_NONE} };
+static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE };
 
 static struct cbe_regs_map *cbe_find_map(struct device_node *np)
 {
@@ -159,7 +159,8 @@ EXPORT_SYMBOL_GPL(cbe_cpu_to_node);
 
 u32 cbe_node_to_cpu(int node)
 {
-        return find_first_bit( (unsigned long *) &cbe_local_mask[node], sizeof(cpumask_t));
+        return cpumask_first(&cbe_local_mask[node]);
+
 }
 EXPORT_SYMBOL_GPL(cbe_node_to_cpu);
 
@@ -268,9 +269,9 @@ void __init cbe_regs_init(void)
                         thread->regs = map;
                         thread->cbe_id = cbe_id;
                         map->be_node = thread->be_node;
-                        cpu_set(i, cbe_local_mask[cbe_id]);
+                        cpumask_set_cpu(i, &cbe_local_mask[cbe_id]);
                         if(thread->thread_id == 0)
-                                cpu_set(i, cbe_first_online_cpu);
+                                cpumask_set_cpu(i, &cbe_first_online_cpu);
                 }
         }
 
diff --git a/arch/powerpc/platforms/cell/celleb_pci.c b/arch/powerpc/platforms/cell/celleb_pci.c
index 404d1fc04d59..5822141aa63f 100644
--- a/arch/powerpc/platforms/cell/celleb_pci.c
+++ b/arch/powerpc/platforms/cell/celleb_pci.c
@@ -41,7 +41,6 @@
 #include <asm/pci-bridge.h>
 #include <asm/ppc-pci.h>
 
-#include "io-workarounds.h"
 #include "celleb_pci.h"
 
 #define MAX_PCI_DEVICES 32
@@ -320,7 +319,7 @@ static int __init celleb_setup_fake_pci_device(struct device_node *node,
 
         size = 256;
         config = &private->fake_config[devno][fn];
-        *config = alloc_maybe_bootmem(size, GFP_KERNEL);
+        *config = zalloc_maybe_bootmem(size, GFP_KERNEL);
         if (*config == NULL) {
                 printk(KERN_ERR "PCI: "
                        "not enough memory for fake configuration space\n");
@@ -331,7 +330,7 @@ static int __init celleb_setup_fake_pci_device(struct device_node *node,
 
         size = sizeof(struct celleb_pci_resource);
         res = &private->res[devno][fn];
-        *res = alloc_maybe_bootmem(size, GFP_KERNEL);
+        *res = zalloc_maybe_bootmem(size, GFP_KERNEL);
         if (*res == NULL) {
                 printk(KERN_ERR
                        "PCI: not enough memory for resource data space\n");
@@ -432,7 +431,7 @@ static int __init phb_set_bus_ranges(struct device_node *dev,
 static void __init celleb_alloc_private_mem(struct pci_controller *hose)
 {
         hose->private_data =
-                alloc_maybe_bootmem(sizeof(struct celleb_pci_private),
+                zalloc_maybe_bootmem(sizeof(struct celleb_pci_private),
                         GFP_KERNEL);
 }
 
@@ -469,18 +468,6 @@ static struct of_device_id celleb_phb_match[] __initdata = {
         },
 };
 
-static int __init celleb_io_workaround_init(struct pci_controller *phb,
-                                            struct celleb_phb_spec *phb_spec)
-{
-        if (phb_spec->ops) {
-                iowa_register_bus(phb, phb_spec->ops, phb_spec->iowa_init,
-                                  phb_spec->iowa_data);
-                io_workaround_init();
-        }
-
-        return 0;
-}
-
 int __init celleb_setup_phb(struct pci_controller *phb)
 {
         struct device_node *dev = phb->dn;
@@ -500,7 +487,11 @@ int __init celleb_setup_phb(struct pci_controller *phb)
         if (rc)
                 return 1;
 
-        return celleb_io_workaround_init(phb, phb_spec);
+        if (phb_spec->ops)
+                iowa_register_bus(phb, phb_spec->ops,
+                                  phb_spec->iowa_init,
+                                  phb_spec->iowa_data);
+        return 0;
 }
 
 int celleb_pci_probe_mode(struct pci_bus *bus)
diff --git a/arch/powerpc/platforms/cell/celleb_pci.h b/arch/powerpc/platforms/cell/celleb_pci.h
index 4cba1523ec50..a801fcc5f389 100644
--- a/arch/powerpc/platforms/cell/celleb_pci.h
+++ b/arch/powerpc/platforms/cell/celleb_pci.h
@@ -26,8 +26,9 @@
 #include <asm/pci-bridge.h>
 #include <asm/prom.h>
 #include <asm/ppc-pci.h>
+#include <asm/io-workarounds.h>
 
-#include "io-workarounds.h"
+struct iowa_bus;
 
 struct celleb_phb_spec {
         int (*setup)(struct device_node *, struct pci_controller *);
diff --git a/arch/powerpc/platforms/cell/celleb_setup.c b/arch/powerpc/platforms/cell/celleb_setup.c
index e53845579770..d58d9bae4b9b 100644
--- a/arch/powerpc/platforms/cell/celleb_setup.c
+++ b/arch/powerpc/platforms/cell/celleb_setup.c
@@ -128,10 +128,6 @@ static void __init celleb_setup_arch_beat(void)
         spu_management_ops = &spu_management_of_ops;
 #endif
 
-#ifdef CONFIG_SMP
-        smp_init_celleb();
-#endif
-
         celleb_setup_arch_common();
 }
 
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 44cfd1bef89b..449c08c15862 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -196,8 +196,20 @@ static irqreturn_t iic_ipi_action(int irq, void *dev_id)
 {
         int ipi = (int)(long)dev_id;
 
-        smp_message_recv(ipi);
-
+        switch(ipi) {
+        case PPC_MSG_CALL_FUNCTION:
+                generic_smp_call_function_interrupt();
+                break;
+        case PPC_MSG_RESCHEDULE:
+                scheduler_ipi();
+                break;
+        case PPC_MSG_CALL_FUNC_SINGLE:
+                generic_smp_call_function_single_interrupt();
+                break;
+        case PPC_MSG_DEBUGGER_BREAK:
+                debug_ipi_action(0, NULL);
+                break;
+        }
         return IRQ_HANDLED;
 }
 static void iic_request_ipi(int ipi, const char *name)
diff --git a/arch/powerpc/platforms/cell/io-workarounds.c b/arch/powerpc/platforms/cell/io-workarounds.c
deleted file mode 100644
index 5c1118e31940..000000000000
--- a/arch/powerpc/platforms/cell/io-workarounds.c
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Support PCI IO workaround
- *
- *  Copyright (C) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>
- *		       IBM, Corp.
- * (C) Copyright 2007-2008 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#undef DEBUG
-
-#include <linux/kernel.h>
-
-#include <asm/io.h>
-#include <asm/machdep.h>
-#include <asm/pgtable.h>
-#include <asm/ppc-pci.h>
-
-#include "io-workarounds.h"
-
-#define IOWA_MAX_BUS	8
-
-static struct iowa_bus iowa_busses[IOWA_MAX_BUS];
-static unsigned int iowa_bus_count;
-
-static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr)
-{
-	int i, j;
-	struct resource *res;
-	unsigned long vstart, vend;
-
-	for (i = 0; i < iowa_bus_count; i++) {
-		struct iowa_bus *bus = &iowa_busses[i];
-		struct pci_controller *phb = bus->phb;
-
-		if (vaddr) {
-			vstart = (unsigned long)phb->io_base_virt;
-			vend = vstart + phb->pci_io_size - 1;
-			if ((vaddr >= vstart) && (vaddr <= vend))
-				return bus;
-		}
-
-		if (paddr)
-			for (j = 0; j < 3; j++) {
-				res = &phb->mem_resources[j];
-				if (paddr >= res->start && paddr <= res->end)
-					return bus;
-			}
-	}
-
-	return NULL;
-}
-
-struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
-{
-	struct iowa_bus *bus;
-	int token;
-
-	token = PCI_GET_ADDR_TOKEN(addr);
-
-	if (token && token <= iowa_bus_count)
-		bus = &iowa_busses[token - 1];
-	else {
-		unsigned long vaddr, paddr;
-		pte_t *ptep;
-
-		vaddr = (unsigned long)PCI_FIX_ADDR(addr);
-		if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
-			return NULL;
-
-		ptep = find_linux_pte(init_mm.pgd, vaddr);
-		if (ptep == NULL)
-			paddr = 0;
-		else
-			paddr = pte_pfn(*ptep) << PAGE_SHIFT;
-		bus = iowa_pci_find(vaddr, paddr);
-
-		if (bus == NULL)
-			return NULL;
-	}
-
-	return bus;
-}
-
-struct iowa_bus *iowa_pio_find_bus(unsigned long port)
-{
-	unsigned long vaddr = (unsigned long)pci_io_base + port;
-	return iowa_pci_find(vaddr, 0);
-}
-
-
-#define DEF_PCI_AC_RET(name, ret, at, al, space, aa)	\
-static ret iowa_##name at				\
-{							\
-	struct iowa_bus *bus;				\
-	bus = iowa_##space##_find_bus(aa);		\
-	if (bus && bus->ops && bus->ops->name)		\
-		return bus->ops->name al;		\
-	return __do_##name al;				\
-}
-
-#define DEF_PCI_AC_NORET(name, at, al, space, aa)	\
-static void iowa_##name at				\
-{							\
-	struct iowa_bus *bus;				\
-	bus = iowa_##space##_find_bus(aa);		\
-	if (bus && bus->ops && bus->ops->name) {	\
-		bus->ops->name al;			\
-		return;					\
-	}						\
-	__do_##name al;					\
-}
-
-#include <asm/io-defs.h>
-
-#undef DEF_PCI_AC_RET
-#undef DEF_PCI_AC_NORET
-
-static const struct ppc_pci_io __devinitconst iowa_pci_io = {
-
-#define DEF_PCI_AC_RET(name, ret, at, al, space, aa)	.name = iowa_##name,
-#define DEF_PCI_AC_NORET(name, at, al, space, aa)	.name = iowa_##name,
-
-#include <asm/io-defs.h>
-
-#undef DEF_PCI_AC_RET
-#undef DEF_PCI_AC_NORET
-
-};
-
-static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size,
-				  unsigned long flags, void *caller)
-{
-	struct iowa_bus *bus;
-	void __iomem *res = __ioremap_caller(addr, size, flags, caller);
-	int busno;
-
-	bus = iowa_pci_find(0, (unsigned long)addr);
-	if (bus != NULL) {
-		busno = bus - iowa_busses;
-		PCI_SET_ADDR_TOKEN(res, busno + 1);
-	}
-	return res;
-}
-
-/* Regist new bus to support workaround */
-void __devinit iowa_register_bus(struct pci_controller *phb,
-			struct ppc_pci_io *ops,
-			int (*initfunc)(struct iowa_bus *, void *), void *data)
-{
-	struct iowa_bus *bus;
-	struct device_node *np = phb->dn;
-
-	if (iowa_bus_count >= IOWA_MAX_BUS) {
-		pr_err("IOWA:Too many pci bridges, "
-		       "workarounds disabled for %s\n", np->full_name);
-		return;
-	}
-
-	bus = &iowa_busses[iowa_bus_count];
-	bus->phb = phb;
-	bus->ops = ops;
-
-	if (initfunc)
-		if ((*initfunc)(bus, data))
-			return;
-
-	iowa_bus_count++;
-
-	pr_debug("IOWA:[%d]Add bus, %s.\n", iowa_bus_count-1, np->full_name);
-}
-
-/* enable IO workaround */
-void __devinit io_workaround_init(void)
-{
-	static int io_workaround_inited;
-
-	if (io_workaround_inited)
-		return;
-	ppc_pci_io = iowa_pci_io;
-	ppc_md.ioremap = iowa_ioremap;
-	io_workaround_inited = 1;
-}
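
Note: the bulk of the file deleted above is machine-generated at compile time. <asm/io-defs.h> is included twice under different definitions of DEF_PCI_AC_RET/DEF_PCI_AC_NORET, first to emit an iowa_* wrapper for every PCI accessor and then to collect those wrappers into the ppc_pci_io table. The same X-macro technique in a self-contained toy (plain C, nothing kernel-specific):

	#include <stdio.h>

	/* stands in for io-defs.h: one entry per accessor */
	#define ACC_LIST \
		DEF_ACC(readb) \
		DEF_ACC(readw) \
		DEF_ACC(readl)

	/* first expansion: emit one wrapper function per entry */
	#define DEF_ACC(name) \
		static void wrap_##name(void) { printf("%s\n", #name); }
	ACC_LIST
	#undef DEF_ACC

	/* second expansion: build a table of the wrappers just emitted */
	#define DEF_ACC(name) wrap_##name,
	static void (*const acc_table[])(void) = { ACC_LIST };
	#undef DEF_ACC

	int main(void)
	{
		for (unsigned i = 0; i < sizeof(acc_table)/sizeof(acc_table[0]); i++)
			acc_table[i]();
		return 0;
	}

The deleted code itself does not appear to be gone from the kernel: the celleb, spider and cell setup files in this same merge begin including <asm/io-workarounds.h>, which suggests the implementation moved into common powerpc code rather than being removed outright.
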
diff --git a/arch/powerpc/platforms/cell/io-workarounds.h b/arch/powerpc/platforms/cell/io-workarounds.h
deleted file mode 100644
index 6efc7782ebf2..000000000000
--- a/arch/powerpc/platforms/cell/io-workarounds.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Support PCI IO workaround
- *
- * (C) Copyright 2007-2008 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
- */
-
-#ifndef _IO_WORKAROUNDS_H
-#define _IO_WORKAROUNDS_H
-
-#include <linux/io.h>
-#include <asm/pci-bridge.h>
-
-/* Bus info */
-struct iowa_bus {
-	struct pci_controller *phb;
-	struct ppc_pci_io *ops;
-	void *private;
-};
-
-void __devinit io_workaround_init(void);
-void __devinit iowa_register_bus(struct pci_controller *, struct ppc_pci_io *,
-				 int (*)(struct iowa_bus *, void *), void *);
-struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR);
-struct iowa_bus *iowa_pio_find_bus(unsigned long);
-
-extern struct ppc_pci_io spiderpci_ops;
-extern int spiderpci_iowa_init(struct iowa_bus *, void *);
-
-#define SPIDER_PCI_REG_BASE		0xd000
-#define SPIDER_PCI_REG_SIZE		0x1000
-#define SPIDER_PCI_VCI_CNTL_STAT	0x0110
-#define SPIDER_PCI_DUMMY_READ		0x0810
-#define SPIDER_PCI_DUMMY_READ_BASE	0x0814
-
-#endif /* _IO_WORKAROUNDS_H */
diff --git a/arch/powerpc/platforms/cell/qpace_setup.c b/arch/powerpc/platforms/cell/qpace_setup.c
index d31c594cfdf3..51e290126bc1 100644
--- a/arch/powerpc/platforms/cell/qpace_setup.c
+++ b/arch/powerpc/platforms/cell/qpace_setup.c
@@ -42,7 +42,6 @@
 #include "interrupt.h"
 #include "pervasive.h"
 #include "ras.h"
-#include "io-workarounds.h"
 
 static void qpace_show_cpuinfo(struct seq_file *m)
 {
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index fd57bfe00edf..c73cf4c43fc2 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -51,11 +51,11 @@
 #include <asm/udbg.h>
 #include <asm/mpic.h>
 #include <asm/cell-regs.h>
+#include <asm/io-workarounds.h>
 
 #include "interrupt.h"
 #include "pervasive.h"
 #include "ras.h"
-#include "io-workarounds.h"
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -136,8 +136,6 @@ static int __devinit cell_setup_phb(struct pci_controller *phb)
 
 	iowa_register_bus(phb, &spiderpci_ops, &spiderpci_iowa_init,
 			  (void *)SPIDER_PCI_REG_BASE);
-	io_workaround_init();
-
 	return 0;
 }
 
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index f774530075b7..d176e6148e3f 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -77,7 +77,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
 	unsigned int pcpu;
 	int start_cpu;
 
-	if (cpu_isset(lcpu, of_spin_map))
+	if (cpumask_test_cpu(lcpu, &of_spin_map))
 		/* Already started by OF and sitting in spin loop */
 		return 1;
 
@@ -103,27 +103,11 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
 	return 1;
 }
 
-static void smp_iic_message_pass(int target, int msg)
-{
-	unsigned int i;
-
-	if (target < NR_CPUS) {
-		iic_cause_IPI(target, msg);
-	} else {
-		for_each_online_cpu(i) {
-			if (target == MSG_ALL_BUT_SELF
-			    && i == smp_processor_id())
-				continue;
-			iic_cause_IPI(i, msg);
-		}
-	}
-}
-
 static int __init smp_iic_probe(void)
 {
 	iic_request_IPIs();
 
-	return cpus_weight(cpu_possible_map);
+	return cpumask_weight(cpu_possible_mask);
 }
 
 static void __devinit smp_cell_setup_cpu(int cpu)
@@ -137,12 +121,12 @@ static void __devinit smp_cell_setup_cpu(int cpu)
 	mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER);
 }
 
-static void __devinit smp_cell_kick_cpu(int nr)
+static int __devinit smp_cell_kick_cpu(int nr)
 {
 	BUG_ON(nr < 0 || nr >= NR_CPUS);
 
 	if (!smp_startup_cpu(nr))
-		return;
+		return -ENOENT;
 
 	/*
 	 * The processor is currently spinning, waiting for the
@@ -150,6 +134,8 @@ static void __devinit smp_cell_kick_cpu(int nr)
 	 * the processor will continue on to secondary_start
 	 */
 	paca[nr].cpu_start = 1;
+
+	return 0;
 }
 
 static int smp_cell_cpu_bootable(unsigned int nr)
@@ -166,7 +152,7 @@ static int smp_cell_cpu_bootable(unsigned int nr)
 	return 1;
 }
 static struct smp_ops_t bpa_iic_smp_ops = {
-	.message_pass	= smp_iic_message_pass,
+	.message_pass	= iic_cause_IPI,
 	.probe		= smp_iic_probe,
 	.kick_cpu	= smp_cell_kick_cpu,
 	.setup_cpu	= smp_cell_setup_cpu,
@@ -186,13 +172,12 @@ void __init smp_init_cell(void)
 	if (cpu_has_feature(CPU_FTR_SMT)) {
 		for_each_present_cpu(i) {
 			if (cpu_thread_in_core(i) == 0)
-				cpu_set(i, of_spin_map);
+				cpumask_set_cpu(i, &of_spin_map);
 		}
-	} else {
-		of_spin_map = cpu_present_map;
-	}
+	} else
+		cpumask_copy(&of_spin_map, cpu_present_mask);
 
-	cpu_clear(boot_cpuid, of_spin_map);
+	cpumask_clear_cpu(boot_cpuid, &of_spin_map);
 
 	/* Non-lpar has additional take/give timebase */
 	if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
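
Note: kick_cpu switches from void to int here and in every other platform touched by this merge, so a secondary that cannot be started is reported as -ENOENT instead of failing silently. The shape the converted implementations share, reduced to a hedged outline (the my_* names are hypothetical):

	static int my_kick_cpu(int nr)
	{
		if (nr < 0 || nr >= NR_CPUS)
			return -ENOENT;		/* no such cpu */

		if (!my_startup_cpu(nr))	/* platform-specific check */
			return -ENOENT;		/* cpu cannot be started */

		paca[nr].cpu_start = 1;		/* release it from its spin loop */
		return 0;
	}
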
diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c
index ca7731c0b595..f1f7878893f3 100644
--- a/arch/powerpc/platforms/cell/spider-pci.c
+++ b/arch/powerpc/platforms/cell/spider-pci.c
@@ -27,8 +27,7 @@
 
 #include <asm/ppc-pci.h>
 #include <asm/pci-bridge.h>
-
-#include "io-workarounds.h"
+#include <asm/io-workarounds.h>
 
 #define SPIDER_PCI_DISABLE_PREFETCH
 
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index c5cf50e6b45a..442c28c00f88 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -68,9 +68,9 @@ struct spider_pic {
 };
 static struct spider_pic spider_pics[SPIDER_CHIP_COUNT];
 
-static struct spider_pic *spider_virq_to_pic(unsigned int virq)
+static struct spider_pic *spider_irq_data_to_pic(struct irq_data *d)
 {
-	return irq_map[virq].host->host_data;
+	return irq_data_get_irq_chip_data(d);
 }
 
 static void __iomem *spider_get_irq_config(struct spider_pic *pic,
@@ -81,24 +81,24 @@ static void __iomem *spider_get_irq_config(struct spider_pic *pic,
 
 static void spider_unmask_irq(struct irq_data *d)
 {
-	struct spider_pic *pic = spider_virq_to_pic(d->irq);
-	void __iomem *cfg = spider_get_irq_config(pic, irq_map[d->irq].hwirq);
+	struct spider_pic *pic = spider_irq_data_to_pic(d);
+	void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d));
 
 	out_be32(cfg, in_be32(cfg) | 0x30000000u);
 }
 
 static void spider_mask_irq(struct irq_data *d)
 {
-	struct spider_pic *pic = spider_virq_to_pic(d->irq);
-	void __iomem *cfg = spider_get_irq_config(pic, irq_map[d->irq].hwirq);
+	struct spider_pic *pic = spider_irq_data_to_pic(d);
+	void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d));
 
 	out_be32(cfg, in_be32(cfg) & ~0x30000000u);
 }
 
 static void spider_ack_irq(struct irq_data *d)
 {
-	struct spider_pic *pic = spider_virq_to_pic(d->irq);
-	unsigned int src = irq_map[d->irq].hwirq;
+	struct spider_pic *pic = spider_irq_data_to_pic(d);
+	unsigned int src = irqd_to_hwirq(d);
 
 	/* Reset edge detection logic if necessary
 	 */
@@ -116,8 +116,8 @@ static void spider_ack_irq(struct irq_data *d)
 static int spider_set_irq_type(struct irq_data *d, unsigned int type)
 {
 	unsigned int sense = type & IRQ_TYPE_SENSE_MASK;
-	struct spider_pic *pic = spider_virq_to_pic(d->irq);
-	unsigned int hw = irq_map[d->irq].hwirq;
+	struct spider_pic *pic = spider_irq_data_to_pic(d);
+	unsigned int hw = irqd_to_hwirq(d);
 	void __iomem *cfg = spider_get_irq_config(pic, hw);
 	u32 old_mask;
 	u32 ic;
@@ -171,6 +171,7 @@ static struct irq_chip spider_pic = {
 static int spider_host_map(struct irq_host *h, unsigned int virq,
 			   irq_hw_number_t hw)
 {
+	irq_set_chip_data(virq, h->host_data);
 	irq_set_chip_and_handler(virq, &spider_pic, handle_level_irq);
 
 	/* Set default irq type */
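
Note: with the irq_map[] reverse table going away, the spider PIC stores its per-chip pointer in the descriptor at map time, and every flow handler reads it, together with the hardware number, back out of the irq_data it receives. The idiom in isolation (the example_* names are hypothetical):

	static int example_host_map(struct irq_host *h, unsigned int virq,
				    irq_hw_number_t hw)
	{
		irq_set_chip_data(virq, h->host_data);
		irq_set_chip_and_handler(virq, &example_chip, handle_level_irq);
		return 0;
	}

	static void example_mask(struct irq_data *d)
	{
		struct example_pic *pic = irq_data_get_irq_chip_data(d);
		irq_hw_number_t hw = irqd_to_hwirq(d);

		/* mask bit 'hw' in pic's registers */
	}
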
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index acfaccea5f4f..3675da73623f 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -32,6 +32,7 @@
 #include <linux/io.h>
 #include <linux/mutex.h>
 #include <linux/linux_logo.h>
+#include <linux/syscore_ops.h>
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
 #include <asm/spu_csa.h>
@@ -521,18 +522,8 @@ void spu_init_channels(struct spu *spu)
 }
 EXPORT_SYMBOL_GPL(spu_init_channels);
 
-static int spu_shutdown(struct sys_device *sysdev)
-{
-	struct spu *spu = container_of(sysdev, struct spu, sysdev);
-
-	spu_free_irqs(spu);
-	spu_destroy_spu(spu);
-	return 0;
-}
-
 static struct sysdev_class spu_sysdev_class = {
 	.name = "spu",
-	.shutdown = spu_shutdown,
 };
 
 int spu_add_sysdev_attr(struct sysdev_attribute *attr)
@@ -797,6 +788,22 @@ static inline void crash_register_spus(struct list_head *list)
 }
 #endif
 
+static void spu_shutdown(void)
+{
+	struct spu *spu;
+
+	mutex_lock(&spu_full_list_mutex);
+	list_for_each_entry(spu, &spu_full_list, full_list) {
+		spu_free_irqs(spu);
+		spu_destroy_spu(spu);
+	}
+	mutex_unlock(&spu_full_list_mutex);
+}
+
+static struct syscore_ops spu_syscore_ops = {
+	.shutdown = spu_shutdown,
+};
+
 static int __init init_spu_base(void)
 {
 	int i, ret = 0;
@@ -830,6 +837,7 @@ static int __init init_spu_base(void)
 	crash_register_spus(&spu_full_list);
 	mutex_unlock(&spu_full_list_mutex);
 	spu_add_sysdev_attr(&attr_stat);
+	register_syscore_ops(&spu_syscore_ops);
 
 	spu_init_affinity();
 
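
Note: a sysdev-class shutdown callback ran once per registered device, while the replacement syscore shutdown hook runs once, late in the power-off/reboot path, which is why the SPU code now walks its own list under spu_full_list_mutex. The boilerplate the new scheme needs is only this (sketch, my_* names hypothetical):

	#include <linux/syscore_ops.h>

	static void my_shutdown(void)
	{
		/* quiesce hardware; invoked once at shutdown, not per device */
	}

	static struct syscore_ops my_syscore_ops = {
		.shutdown = my_shutdown,
	};

	static int __init my_init(void)
	{
		register_syscore_ops(&my_syscore_ops);	/* cannot fail */
		return 0;
	}
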
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 65203857b0ce..32cb4e66d2cd 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -141,7 +141,7 @@ void __spu_update_sched_info(struct spu_context *ctx)
 	 * runqueue. The context will be rescheduled on the proper node
 	 * if it is timesliced or preempted.
 	 */
-	ctx->cpus_allowed = current->cpus_allowed;
+	cpumask_copy(&ctx->cpus_allowed, tsk_cpus_allowed(current));
 
 	/* Save the current cpu id for spu interrupt routing. */
 	ctx->last_ran = raw_smp_processor_id();
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c
index 02cafecc90e3..a800122e4dda 100644
--- a/arch/powerpc/platforms/chrp/smp.c
+++ b/arch/powerpc/platforms/chrp/smp.c
@@ -30,10 +30,12 @@
 #include <asm/mpic.h>
 #include <asm/rtas.h>
 
-static void __devinit smp_chrp_kick_cpu(int nr)
+static int __devinit smp_chrp_kick_cpu(int nr)
 {
 	*(unsigned long *)KERNELBASE = nr;
 	asm volatile("dcbf 0,%0"::"r"(KERNELBASE):"memory");
+
+	return 0;
 }
 
 static void __devinit smp_chrp_setup_cpu(int cpu_nr)
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
index 12aa62b6f227..f61a2dd96b99 100644
--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
@@ -48,7 +48,7 @@
 
 static void flipper_pic_mask_and_ack(struct irq_data *d)
 {
-	int irq = virq_to_hw(d->irq);
+	int irq = irqd_to_hwirq(d);
 	void __iomem *io_base = irq_data_get_irq_chip_data(d);
 	u32 mask = 1 << irq;
 
@@ -59,7 +59,7 @@ static void flipper_pic_mask_and_ack(struct irq_data *d)
 
 static void flipper_pic_ack(struct irq_data *d)
 {
-	int irq = virq_to_hw(d->irq);
+	int irq = irqd_to_hwirq(d);
 	void __iomem *io_base = irq_data_get_irq_chip_data(d);
 
 	/* this is at least needed for RSW */
@@ -68,7 +68,7 @@ static void flipper_pic_ack(struct irq_data *d)
 
 static void flipper_pic_mask(struct irq_data *d)
 {
-	int irq = virq_to_hw(d->irq);
+	int irq = irqd_to_hwirq(d);
 	void __iomem *io_base = irq_data_get_irq_chip_data(d);
 
 	clrbits32(io_base + FLIPPER_IMR, 1 << irq);
@@ -76,7 +76,7 @@ static void flipper_pic_mask(struct irq_data *d)
 
 static void flipper_pic_unmask(struct irq_data *d)
 {
-	int irq = virq_to_hw(d->irq);
+	int irq = irqd_to_hwirq(d);
 	void __iomem *io_base = irq_data_get_irq_chip_data(d);
 
 	setbits32(io_base + FLIPPER_IMR, 1 << irq);
@@ -107,12 +107,6 @@ static int flipper_pic_map(struct irq_host *h, unsigned int virq,
 	return 0;
 }
 
-static void flipper_pic_unmap(struct irq_host *h, unsigned int irq)
-{
-	irq_set_chip_data(irq, NULL);
-	irq_set_chip(irq, NULL);
-}
-
 static int flipper_pic_match(struct irq_host *h, struct device_node *np)
 {
 	return 1;
@@ -121,7 +115,6 @@ static int flipper_pic_match(struct irq_host *h, struct device_node *np)
 
 static struct irq_host_ops flipper_irq_host_ops = {
 	.map = flipper_pic_map,
-	.unmap = flipper_pic_unmap,
 	.match = flipper_pic_match,
 };
 
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index 2bdddfc9d520..e4919170c6bc 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -43,7 +43,7 @@
 
 static void hlwd_pic_mask_and_ack(struct irq_data *d)
 {
-	int irq = virq_to_hw(d->irq);
+	int irq = irqd_to_hwirq(d);
 	void __iomem *io_base = irq_data_get_irq_chip_data(d);
 	u32 mask = 1 << irq;
 
@@ -53,7 +53,7 @@ static void hlwd_pic_mask_and_ack(struct irq_data *d)
 
 static void hlwd_pic_ack(struct irq_data *d)
 {
-	int irq = virq_to_hw(d->irq);
+	int irq = irqd_to_hwirq(d);
 	void __iomem *io_base = irq_data_get_irq_chip_data(d);
 
 	out_be32(io_base + HW_BROADWAY_ICR, 1 << irq);
@@ -61,7 +61,7 @@ static void hlwd_pic_ack(struct irq_data *d)
 
 static void hlwd_pic_mask(struct irq_data *d)
 {
-	int irq = virq_to_hw(d->irq);
+	int irq = irqd_to_hwirq(d);
 	void __iomem *io_base = irq_data_get_irq_chip_data(d);
 
 	clrbits32(io_base + HW_BROADWAY_IMR, 1 << irq);
@@ -69,7 +69,7 @@ static void hlwd_pic_mask(struct irq_data *d)
 
 static void hlwd_pic_unmask(struct irq_data *d)
 {
-	int irq = virq_to_hw(d->irq);
+	int irq = irqd_to_hwirq(d);
 	void __iomem *io_base = irq_data_get_irq_chip_data(d);
 
 	setbits32(io_base + HW_BROADWAY_IMR, 1 << irq);
@@ -100,15 +100,8 @@ static int hlwd_pic_map(struct irq_host *h, unsigned int virq,
 	return 0;
 }
 
-static void hlwd_pic_unmap(struct irq_host *h, unsigned int irq)
-{
-	irq_set_chip_data(irq, NULL);
-	irq_set_chip(irq, NULL);
-}
-
 static struct irq_host_ops hlwd_irq_host_ops = {
 	.map = hlwd_pic_map,
-	.unmap = hlwd_pic_unmap,
 };
 
 static unsigned int __hlwd_pic_get_irq(struct irq_host *h)
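
Note: both the Flipper and Hollywood PICs drop their .unmap callbacks here, apparently because the teardown they performed is now handled by the generic irq code when a mapping is disposed. After this change a host that only needs default behaviour can supply .map alone (sketch, tiny_* names hypothetical):

	static int tiny_pic_map(struct irq_host *h, unsigned int virq,
				irq_hw_number_t hwirq)
	{
		irq_set_chip_data(virq, h->host_data);
		irq_set_chip_and_handler(virq, &tiny_chip, handle_level_irq);
		return 0;
	}

	static struct irq_host_ops tiny_irq_host_ops = {
		.map = tiny_pic_map,	/* no .unmap required any more */
	};
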
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig
index e5bc9f75d474..b57cda3a0817 100644
--- a/arch/powerpc/platforms/iseries/Kconfig
+++ b/arch/powerpc/platforms/iseries/Kconfig
@@ -1,7 +1,9 @@
 config PPC_ISERIES
 	bool "IBM Legacy iSeries"
 	depends on PPC64 && PPC_BOOK3S
-	select PPC_INDIRECT_IO
+	select PPC_SMP_MUXED_IPI
+	select PPC_INDIRECT_PIO
+	select PPC_INDIRECT_MMIO
 	select PPC_PCI_CHOICE if EXPERT
 
 menu "iSeries device drivers"
diff --git a/arch/powerpc/platforms/iseries/exception.S b/arch/powerpc/platforms/iseries/exception.S
index 32a56c6dfa72..29c02f36b32f 100644
--- a/arch/powerpc/platforms/iseries/exception.S
+++ b/arch/powerpc/platforms/iseries/exception.S
@@ -31,6 +31,7 @@
 #include <asm/thread_info.h>
 #include <asm/ptrace.h>
 #include <asm/cputable.h>
+#include <asm/mmu.h>
 
 #include "exception.h"
 
@@ -60,29 +61,31 @@ system_reset_iSeries:
 /* Spin on __secondary_hold_spinloop until it is updated by the boot cpu. */
 /* In the UP case we'll yield() later, and we will not access the paca anyway */
 #ifdef CONFIG_SMP
-1:
+iSeries_secondary_wait_paca:
 	HMT_LOW
 	LOAD_REG_ADDR(r23, __secondary_hold_spinloop)
 	ld	r23,0(r23)
-	sync
-	LOAD_REG_ADDR(r3,current_set)
-	sldi	r28,r24,3		/* get current_set[cpu#] */
-	ldx	r3,r3,r28
-	addi	r1,r3,THREAD_SIZE
-	subi	r1,r1,STACK_FRAME_OVERHEAD
 
-	cmpwi	0,r23,0		/* Keep poking the Hypervisor until */
-	bne	2f		/* we're released */
-	/* Let the Hypervisor know we are alive */
+	cmpdi	0,r23,0
+	bne	2f		/* go on when the master is ready */
+
+	/* Keep poking the Hypervisor until we're released */
 	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
 	lis	r3,0x8002
 	rldicr	r3,r3,32,15	/* r0 = (r3 << 32) & 0xffff000000000000 */
 	li	r0,-1		/* r0=-1 indicates a Hypervisor call */
 	sc			/* Invoke the hypervisor via a system call */
-	b	1b
-#endif
+	b	iSeries_secondary_wait_paca
 
 2:
+	HMT_MEDIUM
+	sync
+
+	LOAD_REG_ADDR(r3, nr_cpu_ids)	/* get number of pacas allocated */
+	lwz	r3,0(r3)		/* nr_cpus= or NR_CPUS can limit */
+	cmpld	0,r24,r3		/* is our cpu number allocated? */
+	bge	iSeries_secondary_yield	/* no, yield forever */
+
 	/* Load our paca now that it's been allocated */
 	LOAD_REG_ADDR(r13, paca)
 	ld	r13,0(r13)
@@ -93,10 +96,24 @@ system_reset_iSeries:
 	ori	r23,r23,MSR_RI
 	mtmsrd	r23			/* RI on */
 
-	HMT_LOW
-#ifdef CONFIG_SMP
+iSeries_secondary_smp_loop:
 	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor
 					 * should start */
+	cmpwi	0,r23,0
+	bne	3f			/* go on when we are told */
+
+	HMT_LOW
+	/* Let the Hypervisor know we are alive */
+	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
+	lis	r3,0x8002
+	rldicr	r3,r3,32,15	/* r0 = (r3 << 32) & 0xffff000000000000 */
+	li	r0,-1		/* r0=-1 indicates a Hypervisor call */
+	sc			/* Invoke the hypervisor via a system call */
+	mfspr	r13,SPRN_SPRG_PACA	/* Put r13 back ???? */
+	b	iSeries_secondary_smp_loop	/* wait for signal to start */
+
+3:
+	HMT_MEDIUM
 	sync
 	LOAD_REG_ADDR(r3,current_set)
 	sldi	r28,r24,3		/* get current_set[cpu#] */
@@ -104,27 +121,22 @@ system_reset_iSeries:
 	addi	r1,r3,THREAD_SIZE
 	subi	r1,r1,STACK_FRAME_OVERHEAD
 
-	cmpwi	0,r23,0
-	beq	iSeries_secondary_smp_loop	/* Loop until told to go */
 	b	__secondary_start		/* Loop until told to go */
-iSeries_secondary_smp_loop:
-	/* Let the Hypervisor know we are alive */
-	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
-	lis	r3,0x8002
-	rldicr	r3,r3,32,15	/* r0 = (r3 << 32) & 0xffff000000000000 */
-#else /* CONFIG_SMP */
+#endif /* CONFIG_SMP */
+
+iSeries_secondary_yield:
 	/* Yield the processor.  This is required for non-SMP kernels
 	   which are running on multi-threaded machines. */
+	HMT_LOW
 	lis	r3,0x8000
 	rldicr	r3,r3,32,15	/* r3 = (r3 << 32) & 0xffff000000000000 */
 	addi	r3,r3,18	/* r3 = 0x8000000000000012 which is "yield" */
 	li	r4,0		/* "yield timed" */
 	li	r5,-1		/* "yield forever" */
-#endif /* CONFIG_SMP */
 	li	r0,-1		/* r0=-1 indicates a Hypervisor call */
 	sc			/* Invoke the hypervisor via a system call */
 	mfspr	r13,SPRN_SPRG_PACA	/* Put r13 back ???? */
-	b	2b			/* If SMP not configured, secondaries
+	b	iSeries_secondary_yield	/* If SMP not configured, secondaries
 					 * loop forever */
 
 /***  ISeries-LPAR  interrupt handlers ***/
@@ -157,7 +169,7 @@ BEGIN_FTR_SECTION
 FTR_SECTION_ELSE
 	EXCEPTION_PROLOG_1(PACA_EXGEN)
 	EXCEPTION_PROLOG_ISERIES_1
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB)
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
 	b	data_access_common
 
 .do_stab_bolted_iSeries:
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index 52a6889832c7..b2103453eb01 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -42,7 +42,6 @@
 #include "irq.h"
 #include "pci.h"
 #include "call_pci.h"
-#include "smp.h"
 
 #ifdef CONFIG_PCI
 
@@ -171,7 +170,7 @@ static void iseries_enable_IRQ(struct irq_data *d)
 {
 	u32 bus, dev_id, function, mask;
 	const u32 sub_bus = 0;
-	unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq;
+	unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
 
 	/* The IRQ has already been locked by the caller */
 	bus = REAL_IRQ_TO_BUS(rirq);
@@ -188,7 +187,7 @@ static unsigned int iseries_startup_IRQ(struct irq_data *d)
 {
 	u32 bus, dev_id, function, mask;
 	const u32 sub_bus = 0;
-	unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq;
+	unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
 
 	bus = REAL_IRQ_TO_BUS(rirq);
 	function = REAL_IRQ_TO_FUNC(rirq);
@@ -234,7 +233,7 @@ static void iseries_shutdown_IRQ(struct irq_data *d)
 {
 	u32 bus, dev_id, function, mask;
 	const u32 sub_bus = 0;
-	unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq;
+	unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
 
 	/* irq should be locked by the caller */
 	bus = REAL_IRQ_TO_BUS(rirq);
@@ -257,7 +256,7 @@ static void iseries_disable_IRQ(struct irq_data *d)
 {
 	u32 bus, dev_id, function, mask;
 	const u32 sub_bus = 0;
-	unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq;
+	unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
 
 	/* The IRQ has already been locked by the caller */
 	bus = REAL_IRQ_TO_BUS(rirq);
@@ -271,7 +270,7 @@ static void iseries_disable_IRQ(struct irq_data *d)
 
 static void iseries_end_IRQ(struct irq_data *d)
 {
-	unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq;
+	unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
 
 	HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq),
 		(REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq));
@@ -316,7 +315,7 @@ unsigned int iSeries_get_irq(void)
 #ifdef CONFIG_SMP
 	if (get_lppaca()->int_dword.fields.ipi_cnt) {
 		get_lppaca()->int_dword.fields.ipi_cnt = 0;
-		iSeries_smp_message_recv();
+		smp_ipi_demux();
 	}
 #endif /* CONFIG_SMP */
 	if (hvlpevent_is_pending())
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index 2946ae10fbfd..c25a0815c26b 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -249,7 +249,7 @@ static unsigned long iSeries_process_mainstore_vpd(struct MemoryBlock *mb_array,
 	unsigned long i;
 	unsigned long mem_blocks = 0;
 
-	if (cpu_has_feature(CPU_FTR_SLB))
+	if (mmu_has_feature(MMU_FTR_SLB))
 		mem_blocks = iSeries_process_Regatta_mainstore_vpd(mb_array,
 				max_entries);
 	else
@@ -634,7 +634,7 @@ static int __init iseries_probe(void)
 
 	hpte_init_iSeries();
 	/* iSeries does not support 16M pages */
-	cur_cpu_spec->cpu_features &= ~CPU_FTR_16M_PAGE;
+	cur_cpu_spec->mmu_features &= ~MMU_FTR_16M_PAGE;
 
 	return 1;
 }
@@ -685,6 +685,11 @@ void * __init iSeries_early_setup(void)
 	powerpc_firmware_features |= FW_FEATURE_ISERIES;
 	powerpc_firmware_features |= FW_FEATURE_LPAR;
 
+#ifdef CONFIG_SMP
+	/* On iSeries we know we can never have more than 64 cpus */
+	nr_cpu_ids = max(nr_cpu_ids, 64);
+#endif
+
 	iSeries_fixup_klimit();
 
 	/*
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c
index 6c6029914dbc..e3265adde5d3 100644
--- a/arch/powerpc/platforms/iseries/smp.c
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -42,57 +42,23 @@
 #include <asm/cputable.h>
 #include <asm/system.h>
 
-#include "smp.h"
-
-static unsigned long iSeries_smp_message[NR_CPUS];
-
-void iSeries_smp_message_recv(void)
-{
-	int cpu = smp_processor_id();
-	int msg;
-
-	if (num_online_cpus() < 2)
-		return;
-
-	for (msg = 0; msg < 4; msg++)
-		if (test_and_clear_bit(msg, &iSeries_smp_message[cpu]))
-			smp_message_recv(msg);
-}
-
-static inline void smp_iSeries_do_message(int cpu, int msg)
+static void smp_iSeries_cause_ipi(int cpu, unsigned long data)
 {
-	set_bit(msg, &iSeries_smp_message[cpu]);
 	HvCall_sendIPI(&(paca[cpu]));
 }
 
-static void smp_iSeries_message_pass(int target, int msg)
-{
-	int i;
-
-	if (target < NR_CPUS)
-		smp_iSeries_do_message(target, msg);
-	else {
-		for_each_online_cpu(i) {
-			if ((target == MSG_ALL_BUT_SELF) &&
-					(i == smp_processor_id()))
-				continue;
-			smp_iSeries_do_message(i, msg);
-		}
-	}
-}
-
 static int smp_iSeries_probe(void)
 {
 	return cpumask_weight(cpu_possible_mask);
 }
 
-static void smp_iSeries_kick_cpu(int nr)
+static int smp_iSeries_kick_cpu(int nr)
 {
 	BUG_ON((nr < 0) || (nr >= NR_CPUS));
 
 	/* Verify that our partition has a processor nr */
 	if (lppaca_of(nr).dyn_proc_status >= 2)
-		return;
+		return -ENOENT;
 
 	/* The processor is currently spinning, waiting
 	 * for the cpu_start field to become non-zero
@@ -100,6 +66,8 @@ static void smp_iSeries_kick_cpu(int nr)
 	 * continue on to secondary_start in iSeries_head.S
 	 */
 	paca[nr].cpu_start = 1;
+
+	return 0;
 }
 
 static void __devinit smp_iSeries_setup_cpu(int nr)
@@ -107,7 +75,8 @@ static void __devinit smp_iSeries_setup_cpu(int nr)
 }
 
 static struct smp_ops_t iSeries_smp_ops = {
-	.message_pass = smp_iSeries_message_pass,
+	.message_pass = smp_muxed_ipi_message_pass,
+	.cause_ipi    = smp_iSeries_cause_ipi,
 	.probe        = smp_iSeries_probe,
 	.kick_cpu     = smp_iSeries_kick_cpu,
 	.setup_cpu    = smp_iSeries_setup_cpu,
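
Note: iSeries has exactly one hypervisor doorbell, so it adopts the new muxed-IPI core selected by PPC_SMP_MUXED_IPI above: smp_muxed_ipi_message_pass() records the message in per-cpu state and invokes the platform's cause_ipi hook, and the interrupted cpu drains everything through smp_ipi_demux(), as the irq.c hunk earlier shows. What a platform still owes the core shrinks to roughly this sketch, reusing only calls visible in this diff:

	/* send side: the core does the bookkeeping, the platform only
	 * rings the one physical doorbell */
	static void my_cause_ipi(int cpu, unsigned long data)
	{
		HvCall_sendIPI(&(paca[cpu]));
	}

	static struct smp_ops_t my_smp_ops = {
		.message_pass = smp_muxed_ipi_message_pass,	/* generic */
		.cause_ipi    = my_cause_ipi,			/* platform hook */
		/* probe/kick_cpu/setup_cpu as before */
	};

	/* receive side, from the platform interrupt path */
	static void my_ipi_interrupt(void)
	{
		smp_ipi_demux();	/* replays every queued message */
	}
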
diff --git a/arch/powerpc/platforms/iseries/smp.h b/arch/powerpc/platforms/iseries/smp.h
deleted file mode 100644
index d501f7de01e7..000000000000
--- a/arch/powerpc/platforms/iseries/smp.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _PLATFORMS_ISERIES_SMP_H
-#define _PLATFORMS_ISERIES_SMP_H
-
-extern void iSeries_smp_message_recv(void);
-
-#endif	/* _PLATFORMS_ISERIES_SMP_H */
diff --git a/arch/powerpc/platforms/powermac/Kconfig b/arch/powerpc/platforms/powermac/Kconfig
index 1e1a0873e1dd..1afd10f67858 100644
--- a/arch/powerpc/platforms/powermac/Kconfig
+++ b/arch/powerpc/platforms/powermac/Kconfig
@@ -18,4 +18,13 @@ config PPC_PMAC64
 	select PPC_970_NAP
 	default y
 
-
+config PPC_PMAC32_PSURGE
+	bool "Support for powersurge upgrade cards" if EXPERT
+	depends on SMP && PPC32 && PPC_PMAC
+	select PPC_SMP_MUXED_IPI
+	default y
+	help
+	  The powersurge cpu boards can be used in the generation
+	  of powermacs that have a socket for an upgradeable cpu card,
+	  including the 7500, 8500, 9500, 9600.  Support exists for
+	  both dual and quad socket upgrade cards.
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 023f24086a0a..9089b0421191 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -21,7 +21,7 @@
 #include <linux/signal.h>
 #include <linux/pci.h>
 #include <linux/interrupt.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 #include <linux/adb.h>
 #include <linux/pmu.h>
 #include <linux/module.h>
@@ -84,7 +84,7 @@ static void __pmac_retrigger(unsigned int irq_nr)
 
 static void pmac_mask_and_ack_irq(struct irq_data *d)
 {
-	unsigned int src = irq_map[d->irq].hwirq;
+	unsigned int src = irqd_to_hwirq(d);
 	unsigned long bit = 1UL << (src & 0x1f);
 	int i = src >> 5;
 	unsigned long flags;
@@ -106,7 +106,7 @@ static void pmac_mask_and_ack_irq(struct irq_data *d)
 
 static void pmac_ack_irq(struct irq_data *d)
 {
-	unsigned int src = irq_map[d->irq].hwirq;
+	unsigned int src = irqd_to_hwirq(d);
 	unsigned long bit = 1UL << (src & 0x1f);
 	int i = src >> 5;
 	unsigned long flags;
@@ -152,7 +152,7 @@ static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
 static unsigned int pmac_startup_irq(struct irq_data *d)
 {
 	unsigned long flags;
-	unsigned int src = irq_map[d->irq].hwirq;
+	unsigned int src = irqd_to_hwirq(d);
 	unsigned long bit = 1UL << (src & 0x1f);
 	int i = src >> 5;
 
@@ -169,7 +169,7 @@ static unsigned int pmac_startup_irq(struct irq_data *d)
 static void pmac_mask_irq(struct irq_data *d)
 {
 	unsigned long flags;
-	unsigned int src = irq_map[d->irq].hwirq;
+	unsigned int src = irqd_to_hwirq(d);
 
 	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
 	__clear_bit(src, ppc_cached_irq_mask);
@@ -180,7 +180,7 @@ static void pmac_mask_irq(struct irq_data *d)
 static void pmac_unmask_irq(struct irq_data *d)
 {
 	unsigned long flags;
-	unsigned int src = irq_map[d->irq].hwirq;
+	unsigned int src = irqd_to_hwirq(d);
 
 	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
 	__set_bit(src, ppc_cached_irq_mask);
@@ -193,7 +193,7 @@ static int pmac_retrigger(struct irq_data *d)
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
-	__pmac_retrigger(irq_map[d->irq].hwirq);
+	__pmac_retrigger(irqd_to_hwirq(d));
 	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
 	return 1;
 }
@@ -239,15 +239,12 @@ static unsigned int pmac_pic_get_irq(void)
 	unsigned long bits = 0;
 	unsigned long flags;
 
-#ifdef CONFIG_SMP
-	void psurge_smp_message_recv(void);
-
+#ifdef CONFIG_PPC_PMAC32_PSURGE
 	/* IPI's are a hack on the powersurge -- Cort */
-	if ( smp_processor_id() != 0 ) {
-		psurge_smp_message_recv();
-		return NO_IRQ_IGNORE;	/* ignore, already handled */
+	if (smp_processor_id() != 0) {
+		return psurge_secondary_virq;
 	}
-#endif /* CONFIG_SMP */
+#endif /* CONFIG_PPC_PMAC32_PSURGE */
 	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
 	for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
 		int i = irq >> 5;
@@ -677,7 +674,7 @@ not_found:
 	return viaint;
 }
 
-static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state)
+static int pmacpic_suspend(void)
 {
 	int viaint = pmacpic_find_viaint();
 
@@ -698,7 +695,7 @@ static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state)
 	return 0;
 }
 
-static int pmacpic_resume(struct sys_device *sysdev)
+static void pmacpic_resume(void)
 {
 	int i;
 
@@ -709,39 +706,19 @@ static int pmacpic_resume(struct sys_device *sysdev)
 	for (i = 0; i < max_real_irqs; ++i)
 		if (test_bit(i, sleep_save_mask))
 			pmac_unmask_irq(irq_get_irq_data(i));
-
-	return 0;
 }
 
-#endif /* CONFIG_PM && CONFIG_PPC32 */
-
-static struct sysdev_class pmacpic_sysclass = {
-	.name = "pmac_pic",
-};
-
-static struct sys_device device_pmacpic = {
-	.id		= 0,
-	.cls		= &pmacpic_sysclass,
-};
-
-static struct sysdev_driver driver_pmacpic = {
-#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
-	.suspend	= &pmacpic_suspend,
-	.resume		= &pmacpic_resume,
-#endif /* CONFIG_PM && CONFIG_PPC32 */
+static struct syscore_ops pmacpic_syscore_ops = {
+	.suspend	= pmacpic_suspend,
+	.resume		= pmacpic_resume,
 };
 
-static int __init init_pmacpic_sysfs(void)
+static int __init init_pmacpic_syscore(void)
 {
-#ifdef CONFIG_PPC32
-	if (max_irqs == 0)
-		return -ENODEV;
-#endif
-	printk(KERN_DEBUG "Registering pmac pic with sysfs...\n");
-	sysdev_class_register(&pmacpic_sysclass);
-	sysdev_register(&device_pmacpic);
-	sysdev_driver_register(&pmacpic_sysclass, &driver_pmacpic);
+	register_syscore_ops(&pmacpic_syscore_ops);
 	return 0;
 }
-machine_subsys_initcall(powermac, init_pmacpic_sysfs);
 
+machine_subsys_initcall(powermac, init_pmacpic_syscore);
+
+#endif /* CONFIG_PM && CONFIG_PPC32 */
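
Note: the PowerMac PIC's power-management plumbing collapses from a sysdev class/device/driver triple into one syscore_ops. The hunk above also changes signatures: syscore suspend takes no arguments, resume returns void, and registration cannot fail, which is why the old -ENODEV path and the sysfs messages disappear. In outline (my_* names hypothetical):

	static int my_pic_suspend(void)		/* was (struct sys_device *, pm_message_t) */
	{
		/* save mask/level state */
		return 0;
	}

	static void my_pic_resume(void)		/* was int (struct sys_device *) */
	{
		/* restore the saved state */
	}

	static struct syscore_ops my_pic_syscore_ops = {
		.suspend = my_pic_suspend,
		.resume  = my_pic_resume,
	};
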
diff --git a/arch/powerpc/platforms/powermac/pic.h b/arch/powerpc/platforms/powermac/pic.h
deleted file mode 100644
index d622a8345aaa..000000000000
--- a/arch/powerpc/platforms/powermac/pic.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __PPC_PLATFORMS_PMAC_PIC_H
-#define __PPC_PLATFORMS_PMAC_PIC_H
-
-#include <linux/irq.h>
-
-extern struct irq_chip pmac_pic;
-
-extern void pmac_pic_init(void);
-extern int pmac_get_irq(void);
-
-#endif /* __PPC_PLATFORMS_PMAC_PIC_H */
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index 20468f49aec0..8327cce2bdb0 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -33,6 +33,7 @@ extern void pmac_setup_pci_dma(void);
 extern void pmac_check_ht_link(void);
 
 extern void pmac_setup_smp(void);
+extern int psurge_secondary_virq;
 extern void low_cpu_die(void) __attribute__((noreturn));
 
 extern int pmac_nvram_init(void);
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index a830c5e80657..db092d7c4c5b 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -70,7 +70,7 @@ static void (*pmac_tb_freeze)(int freeze);
 static u64 timebase;
 static int tb_req;
 
-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_PMAC32_PSURGE
 
 /*
  * Powersurge (old powermac SMP) support.
@@ -124,6 +124,10 @@ static volatile u32 __iomem *psurge_start;
 /* what sort of powersurge board we have */
 static int psurge_type = PSURGE_NONE;
 
+/* irq for secondary cpus to report */
+static struct irq_host *psurge_host;
+int psurge_secondary_virq;
+
 /*
  * Set and clear IPIs for powersurge.
  */
@@ -156,51 +160,52 @@ static inline void psurge_clr_ipi(int cpu)
 /*
  * On powersurge (old SMP powermac architecture) we don't have
  * separate IPIs for separate messages like openpic does.  Instead
- * we have a bitmap for each processor, where a 1 bit means that
- * the corresponding message is pending for that processor.
- * Ideally each cpu's entry would be in a different cache line.
+ * use the generic demux helpers
  *  -- paulus.
  */
-static unsigned long psurge_smp_message[NR_CPUS];
-
-void psurge_smp_message_recv(void)
+static irqreturn_t psurge_ipi_intr(int irq, void *d)
 {
-	int cpu = smp_processor_id();
-	int msg;
-
-	/* clear interrupt */
-	psurge_clr_ipi(cpu);
+	psurge_clr_ipi(smp_processor_id());
+	smp_ipi_demux();
 
-	if (num_online_cpus() < 2)
-		return;
+	return IRQ_HANDLED;
+}
 
-	/* make sure there is a message there */
-	for (msg = 0; msg < 4; msg++)
-		if (test_and_clear_bit(msg, &psurge_smp_message[cpu]))
-			smp_message_recv(msg);
+static void smp_psurge_cause_ipi(int cpu, unsigned long data)
+{
+	psurge_set_ipi(cpu);
 }
 
-irqreturn_t psurge_primary_intr(int irq, void *d)
+static int psurge_host_map(struct irq_host *h, unsigned int virq,
+			   irq_hw_number_t hw)
 {
-	psurge_smp_message_recv();
-	return IRQ_HANDLED;
+	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_percpu_irq);
+
+	return 0;
 }
 
-static void smp_psurge_message_pass(int target, int msg)
+struct irq_host_ops psurge_host_ops = {
+	.map = psurge_host_map,
+};
+
+static int psurge_secondary_ipi_init(void)
 {
-	int i;
+	int rc = -ENOMEM;
 
-	if (num_online_cpus() < 2)
-		return;
+	psurge_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0,
+				     &psurge_host_ops, 0);
 
-	for_each_online_cpu(i) {
-		if (target == MSG_ALL
-		    || (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
-		    || target == i) {
-			set_bit(msg, &psurge_smp_message[i]);
-			psurge_set_ipi(i);
-		}
-	}
+	if (psurge_host)
+		psurge_secondary_virq = irq_create_direct_mapping(psurge_host);
+
+	if (psurge_secondary_virq)
+		rc = request_irq(psurge_secondary_virq, psurge_ipi_intr,
+				 IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL);
+
+	if (rc)
+		pr_err("Failed to setup secondary cpu IPI\n");
+
+	return rc;
 }
 
 /*
@@ -311,6 +316,9 @@ static int __init smp_psurge_probe(void)
 		ncpus = 2;
 	}
 
+	if (psurge_secondary_ipi_init())
+		return 1;
+
 	psurge_start = ioremap(PSURGE_START, 4);
 	psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
 
@@ -329,7 +337,7 @@ static int __init smp_psurge_probe(void)
 	return ncpus;
 }
 
-static void __init smp_psurge_kick_cpu(int nr)
+static int __init smp_psurge_kick_cpu(int nr)
 {
 	unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
 	unsigned long a, flags;
@@ -394,11 +402,13 @@ static void __init smp_psurge_kick_cpu(int nr)
 	psurge_set_ipi(1);
 
 	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
+
+	return 0;
 }
 
 static struct irqaction psurge_irqaction = {
-	.handler = psurge_primary_intr,
-	.flags = IRQF_DISABLED,
+	.handler = psurge_ipi_intr,
+	.flags = IRQF_DISABLED|IRQF_PERCPU,
 	.name = "primary IPI",
 };
 
@@ -437,14 +447,15 @@ void __init smp_psurge_give_timebase(void)
 
 /* PowerSurge-style Macs */
 struct smp_ops_t psurge_smp_ops = {
-	.message_pass	= smp_psurge_message_pass,
+	.message_pass	= smp_muxed_ipi_message_pass,
+	.cause_ipi	= smp_psurge_cause_ipi,
 	.probe		= smp_psurge_probe,
 	.kick_cpu	= smp_psurge_kick_cpu,
 	.setup_cpu	= smp_psurge_setup_cpu,
 	.give_timebase	= smp_psurge_give_timebase,
 	.take_timebase	= smp_psurge_take_timebase,
 };
-#endif /* CONFIG_PPC32 - actually powersurge support */
+#endif /* CONFIG_PPC_PMAC32_PSURGE */
 
 /*
  * Core 99 and later support
@@ -791,14 +802,14 @@ static int __init smp_core99_probe(void)
 	return ncpus;
 }
 
-static void __devinit smp_core99_kick_cpu(int nr)
+static int __devinit smp_core99_kick_cpu(int nr)
 {
 	unsigned int save_vector;
 	unsigned long target, flags;
 	unsigned int *vector = (unsigned int *)(PAGE_OFFSET+0x100);
 
 	if (nr < 0 || nr > 3)
-		return;
+		return -ENOENT;
 
 	if (ppc_md.progress)
 		ppc_md.progress("smp_core99_kick_cpu", 0x346);
@@ -830,6 +841,8 @@ static void __devinit smp_core99_kick_cpu(int nr)
 
 	local_irq_restore(flags);
 	if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
+
+	return 0;
 }
 
 static void __devinit smp_core99_setup_cpu(int cpu_nr)
@@ -842,6 +855,7 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
 	mpic_setup_this_cpu();
 }
 
+#ifdef CONFIG_PPC64
 #ifdef CONFIG_HOTPLUG_CPU
 static int smp_core99_cpu_notify(struct notifier_block *self,
 				 unsigned long action, void *hcpu)
@@ -879,7 +893,6 @@ static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
 
 static void __init smp_core99_bringup_done(void)
 {
-#ifdef CONFIG_PPC64
 	extern void g5_phy_disable_cpu1(void);
 
 	/* Close i2c bus if it was used for tb sync */
@@ -894,14 +907,14 @@ static void __init smp_core99_bringup_done(void)
 		set_cpu_present(1, false);
 		g5_phy_disable_cpu1();
 	}
-#endif /* CONFIG_PPC64 */
-
 #ifdef CONFIG_HOTPLUG_CPU
 	register_cpu_notifier(&smp_core99_cpu_nb);
 #endif
+
 	if (ppc_md.progress)
 		ppc_md.progress("smp_core99_bringup_done", 0x349);
 }
+#endif /* CONFIG_PPC64 */
 
 #ifdef CONFIG_HOTPLUG_CPU
 
@@ -975,7 +988,9 @@ static void pmac_cpu_die(void)
 struct smp_ops_t core99_smp_ops = {
 	.message_pass	= smp_mpic_message_pass,
 	.probe		= smp_core99_probe,
+#ifdef CONFIG_PPC64
 	.bringup_done	= smp_core99_bringup_done,
+#endif
 	.kick_cpu	= smp_core99_kick_cpu,
 	.setup_cpu	= smp_core99_setup_cpu,
 	.give_timebase	= smp_core99_give_timebase,
@@ -1000,7 +1015,7 @@ void __init pmac_setup_smp(void)
 		of_node_put(np);
 		smp_ops = &core99_smp_ops;
 	}
-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_PMAC32_PSURGE
 	else {
 		/* We have to set bits in cpu_possible_mask here since the
 		 * secondary CPU(s) aren't in the device tree. Various
@@ -1013,7 +1028,7 @@ void __init pmac_setup_smp(void)
 			set_cpu_possible(cpu, true);
 		smp_ops = &psurge_smp_ops;
 	}
-#endif /* CONFIG_PPC32 */
+#endif /* CONFIG_PPC_PMAC32_PSURGE */
 
 #ifdef CONFIG_HOTPLUG_CPU
 	ppc_md.cpu_die = pmac_cpu_die;
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index f2f6413b81d3..600ed2c0ed59 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -197,7 +197,7 @@ static int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
197 result = irq_set_chip_data(*virq, pd); 197 result = irq_set_chip_data(*virq, pd);
198 198
199 if (result) { 199 if (result) {
200 pr_debug("%s:%d: set_irq_chip_data failed\n", 200 pr_debug("%s:%d: irq_set_chip_data failed\n",
201 __func__, __LINE__); 201 __func__, __LINE__);
202 goto fail_set; 202 goto fail_set;
203 } 203 }
@@ -659,11 +659,6 @@ static void __maybe_unused _dump_mask(struct ps3_private *pd,
659static void dump_bmp(struct ps3_private* pd) {}; 659static void dump_bmp(struct ps3_private* pd) {};
660#endif /* defined(DEBUG) */ 660#endif /* defined(DEBUG) */
661 661
662static void ps3_host_unmap(struct irq_host *h, unsigned int virq)
663{
664 irq_set_chip_data(virq, NULL);
665}
666
667static int ps3_host_map(struct irq_host *h, unsigned int virq, 662static int ps3_host_map(struct irq_host *h, unsigned int virq,
668 irq_hw_number_t hwirq) 663 irq_hw_number_t hwirq)
669{ 664{
@@ -683,7 +678,6 @@ static int ps3_host_match(struct irq_host *h, struct device_node *np)
683 678
684static struct irq_host_ops ps3_host_ops = { 679static struct irq_host_ops ps3_host_ops = {
685 .map = ps3_host_map, 680 .map = ps3_host_map,
686 .unmap = ps3_host_unmap,
687 .match = ps3_host_match, 681 .match = ps3_host_match,
688}; 682};
689 683
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index 51ffde40af2b..4c44794faac0 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -39,7 +39,7 @@
39#define MSG_COUNT 4 39#define MSG_COUNT 4
40static DEFINE_PER_CPU(unsigned int [MSG_COUNT], ps3_ipi_virqs); 40static DEFINE_PER_CPU(unsigned int [MSG_COUNT], ps3_ipi_virqs);
41 41
42static void do_message_pass(int target, int msg) 42static void ps3_smp_message_pass(int cpu, int msg)
43{ 43{
44 int result; 44 int result;
45 unsigned int virq; 45 unsigned int virq;
@@ -49,28 +49,12 @@ static void do_message_pass(int target, int msg)
49 return; 49 return;
50 } 50 }
51 51
52 virq = per_cpu(ps3_ipi_virqs, target)[msg]; 52 virq = per_cpu(ps3_ipi_virqs, cpu)[msg];
53 result = ps3_send_event_locally(virq); 53 result = ps3_send_event_locally(virq);
54 54
55 if (result) 55 if (result)
56 DBG("%s:%d: ps3_send_event_locally(%d, %d) failed" 56 DBG("%s:%d: ps3_send_event_locally(%d, %d) failed"
57 " (%d)\n", __func__, __LINE__, target, msg, result); 57 " (%d)\n", __func__, __LINE__, cpu, msg, result);
58}
59
60static void ps3_smp_message_pass(int target, int msg)
61{
62 int cpu;
63
64 if (target < NR_CPUS)
65 do_message_pass(target, msg);
66 else if (target == MSG_ALL_BUT_SELF) {
67 for_each_online_cpu(cpu)
68 if (cpu != smp_processor_id())
69 do_message_pass(cpu, msg);
70 } else {
71 for_each_online_cpu(cpu)
72 do_message_pass(cpu, msg);
73 }
74} 58}
75 59
76static int ps3_smp_probe(void) 60static int ps3_smp_probe(void)
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
index 39a472e9e80f..375a9f92158d 100644
--- a/arch/powerpc/platforms/ps3/spu.c
+++ b/arch/powerpc/platforms/ps3/spu.c
@@ -197,7 +197,7 @@ static void spu_unmap(struct spu *spu)
197 * The current HV requires the spu shadow regs to be mapped with the 197 * The current HV requires the spu shadow regs to be mapped with the
198 * PTE page protection bits set as read-only (PP=3). This implementation 198 * PTE page protection bits set as read-only (PP=3). This implementation
199 * uses the low level __ioremap() to bypass the page protection settings 199 * uses the low level __ioremap() to bypass the page protection settings
200 * enforced by ioremap_flags() to get the needed PTE bits set for the 200 * enforced by ioremap_prot() to get the needed PTE bits set for the
201 * shadow regs. 201 * shadow regs.
202 */ 202 */
203 203
@@ -214,7 +214,7 @@ static int __init setup_areas(struct spu *spu)
214 goto fail_ioremap; 214 goto fail_ioremap;
215 } 215 }
216 216
217 spu->local_store = (__force void *)ioremap_flags(spu->local_store_phys, 217 spu->local_store = (__force void *)ioremap_prot(spu->local_store_phys,
218 LS_SIZE, _PAGE_NO_CACHE); 218 LS_SIZE, _PAGE_NO_CACHE);
219 219
220 if (!spu->local_store) { 220 if (!spu->local_store) {
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 5b3da4b4ea79..71af4c5d6c05 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -3,7 +3,10 @@ config PPC_PSERIES
3 bool "IBM pSeries & new (POWER5-based) iSeries" 3 bool "IBM pSeries & new (POWER5-based) iSeries"
4 select MPIC 4 select MPIC
5 select PCI_MSI 5 select PCI_MSI
6 select XICS 6 select PPC_XICS
7 select PPC_ICP_NATIVE
8 select PPC_ICP_HV
9 select PPC_ICS_RTAS
7 select PPC_I8259 10 select PPC_I8259
8 select PPC_RTAS 11 select PPC_RTAS
9 select PPC_RTAS_DAEMON 12 select PPC_RTAS_DAEMON
@@ -47,6 +50,24 @@ config SCANLOG
47 tristate "Scanlog dump interface" 50 tristate "Scanlog dump interface"
48 depends on RTAS_PROC && PPC_PSERIES 51 depends on RTAS_PROC && PPC_PSERIES
49 52
53config IO_EVENT_IRQ
54 bool "IO Event Interrupt support"
55 depends on PPC_PSERIES
56 default y
57 help
58 Select this option if you want to enable support for IO Event
59 interrupts. An IO event interrupt is a mechanism provided by RTAS
60 to return information about hardware error and non-error events
61 which may need OS attention. RTAS returns events for multiple
62 event types and scopes. Device drivers can register their handlers
63 to receive events.
64
65 This option will only enable the IO event platform code. You
66 will still need to enable or compile the actual drivers
67 that use this infrastructure to handle IO event interrupts.
68
69 Say Y if you are unsure.
70
50config LPARCFG 71config LPARCFG
51 bool "LPAR Configuration Data" 72 bool "LPAR Configuration Data"
52 depends on PPC_PSERIES || PPC_ISERIES 73 depends on PPC_PSERIES || PPC_ISERIES
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index fc5237810ece..3556e402cbf5 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -5,7 +5,6 @@ obj-y := lpar.o hvCall.o nvram.o reconfig.o \
5 setup.o iommu.o event_sources.o ras.o \ 5 setup.o iommu.o event_sources.o ras.o \
6 firmware.o power.o dlpar.o mobility.o 6 firmware.o power.o dlpar.o mobility.o
7obj-$(CONFIG_SMP) += smp.o 7obj-$(CONFIG_SMP) += smp.o
8obj-$(CONFIG_XICS) += xics.o
9obj-$(CONFIG_SCANLOG) += scanlog.o 8obj-$(CONFIG_SCANLOG) += scanlog.o
10obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o eeh_sysfs.o 9obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o eeh_sysfs.o
11obj-$(CONFIG_KEXEC) += kexec.o 10obj-$(CONFIG_KEXEC) += kexec.o
@@ -22,6 +21,7 @@ obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o
22obj-$(CONFIG_PHYP_DUMP) += phyp_dump.o 21obj-$(CONFIG_PHYP_DUMP) += phyp_dump.o
23obj-$(CONFIG_CMM) += cmm.o 22obj-$(CONFIG_CMM) += cmm.o
24obj-$(CONFIG_DTL) += dtl.o 23obj-$(CONFIG_DTL) += dtl.o
24obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o
25 25
26ifeq ($(CONFIG_PPC_PSERIES),y) 26ifeq ($(CONFIG_PPC_PSERIES),y)
27obj-$(CONFIG_SUSPEND) += suspend.o 27obj-$(CONFIG_SUSPEND) += suspend.o
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index c371bc06434b..e9190073bb97 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -52,10 +52,10 @@ static u8 dtl_event_mask = 0x7;
52 52
53 53
54/* 54/*
55 * Size of per-cpu log buffers. Default is just under 16 pages worth. 55 * Size of per-cpu log buffers. Firmware requires that the buffer does
56 * not cross a 4k boundary.
56 */ 57 */
57static int dtl_buf_entries = (16 * 85); 58static int dtl_buf_entries = N_DISPATCH_LOG;
58
59 59
60#ifdef CONFIG_VIRT_CPU_ACCOUNTING 60#ifdef CONFIG_VIRT_CPU_ACCOUNTING
61struct dtl_ring { 61struct dtl_ring {
@@ -151,7 +151,7 @@ static int dtl_start(struct dtl *dtl)
151 151
152 /* Register our dtl buffer with the hypervisor. The HV expects the 152 /* Register our dtl buffer with the hypervisor. The HV expects the
153 * buffer size to be passed in the second word of the buffer */ 153 * buffer size to be passed in the second word of the buffer */
154 ((u32 *)dtl->buf)[1] = dtl->buf_entries * sizeof(struct dtl_entry); 154 ((u32 *)dtl->buf)[1] = DISPATCH_LOG_BYTES;
155 155
156 hwcpu = get_hard_smp_processor_id(dtl->cpu); 156 hwcpu = get_hard_smp_processor_id(dtl->cpu);
157 addr = __pa(dtl->buf); 157 addr = __pa(dtl->buf);
@@ -196,13 +196,15 @@ static int dtl_enable(struct dtl *dtl)
196 long int rc; 196 long int rc;
197 struct dtl_entry *buf = NULL; 197 struct dtl_entry *buf = NULL;
198 198
199 if (!dtl_cache)
200 return -ENOMEM;
201
199 /* only allow one reader */ 202 /* only allow one reader */
200 if (dtl->buf) 203 if (dtl->buf)
201 return -EBUSY; 204 return -EBUSY;
202 205
203 n_entries = dtl_buf_entries; 206 n_entries = dtl_buf_entries;
204 buf = kmalloc_node(n_entries * sizeof(struct dtl_entry), 207 buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
205 GFP_KERNEL, cpu_to_node(dtl->cpu));
206 if (!buf) { 208 if (!buf) {
207 printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n", 209 printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
208 __func__, dtl->cpu); 210 __func__, dtl->cpu);
@@ -223,7 +225,7 @@ static int dtl_enable(struct dtl *dtl)
223 spin_unlock(&dtl->lock); 225 spin_unlock(&dtl->lock);
224 226
225 if (rc) 227 if (rc)
226 kfree(buf); 228 kmem_cache_free(dtl_cache, buf);
227 return rc; 229 return rc;
228} 230}
229 231
@@ -231,7 +233,7 @@ static void dtl_disable(struct dtl *dtl)
231{ 233{
232 spin_lock(&dtl->lock); 234 spin_lock(&dtl->lock);
233 dtl_stop(dtl); 235 dtl_stop(dtl);
234 kfree(dtl->buf); 236 kmem_cache_free(dtl_cache, dtl->buf);
235 dtl->buf = NULL; 237 dtl->buf = NULL;
236 dtl->buf_entries = 0; 238 dtl->buf_entries = 0;
237 spin_unlock(&dtl->lock); 239 spin_unlock(&dtl->lock);
@@ -365,7 +367,7 @@ static int dtl_init(void)
365 367
366 event_mask_file = debugfs_create_x8("dtl_event_mask", 0600, 368 event_mask_file = debugfs_create_x8("dtl_event_mask", 0600,
367 dtl_dir, &dtl_event_mask); 369 dtl_dir, &dtl_event_mask);
368 buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0600, 370 buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0400,
369 dtl_dir, &dtl_buf_entries); 371 dtl_dir, &dtl_buf_entries);
370 372
371 if (!event_mask_file || !buf_entries_file) { 373 if (!event_mask_file || !buf_entries_file) {
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 89649173d3a3..46b55cf563e3 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -93,6 +93,7 @@ static int ibm_slot_error_detail;
93static int ibm_get_config_addr_info; 93static int ibm_get_config_addr_info;
94static int ibm_get_config_addr_info2; 94static int ibm_get_config_addr_info2;
95static int ibm_configure_bridge; 95static int ibm_configure_bridge;
96static int ibm_configure_pe;
96 97
97int eeh_subsystem_enabled; 98int eeh_subsystem_enabled;
98EXPORT_SYMBOL(eeh_subsystem_enabled); 99EXPORT_SYMBOL(eeh_subsystem_enabled);
@@ -261,6 +262,8 @@ void eeh_slot_error_detail(struct pci_dn *pdn, int severity)
261 pci_regs_buf[0] = 0; 262 pci_regs_buf[0] = 0;
262 263
263 rtas_pci_enable(pdn, EEH_THAW_MMIO); 264 rtas_pci_enable(pdn, EEH_THAW_MMIO);
265 rtas_configure_bridge(pdn);
266 eeh_restore_bars(pdn);
264 loglen = gather_pci_data(pdn, pci_regs_buf, EEH_PCI_REGS_LOG_LEN); 267 loglen = gather_pci_data(pdn, pci_regs_buf, EEH_PCI_REGS_LOG_LEN);
265 268
266 rtas_slot_error_detail(pdn, severity, pci_regs_buf, loglen); 269 rtas_slot_error_detail(pdn, severity, pci_regs_buf, loglen);
@@ -448,6 +451,39 @@ void eeh_clear_slot (struct device_node *dn, int mode_flag)
448 raw_spin_unlock_irqrestore(&confirm_error_lock, flags); 451 raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
449} 452}
450 453
454void __eeh_set_pe_freset(struct device_node *parent, unsigned int *freset)
455{
456 struct device_node *dn;
457
458 for_each_child_of_node(parent, dn) {
459 if (PCI_DN(dn)) {
460
461 struct pci_dev *dev = PCI_DN(dn)->pcidev;
462
463 if (dev && dev->driver)
464 *freset |= dev->needs_freset;
465
466 __eeh_set_pe_freset(dn, freset);
467 }
468 }
469}
470
471void eeh_set_pe_freset(struct device_node *dn, unsigned int *freset)
472{
473 struct pci_dev *dev;
474 dn = find_device_pe(dn);
475
476 /* Back up one, since config addrs might be shared */
477 if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent))
478 dn = dn->parent;
479
480 dev = PCI_DN(dn)->pcidev;
481 if (dev)
482 *freset |= dev->needs_freset;
483
484 __eeh_set_pe_freset(dn, freset);
485}
486
451/** 487/**
452 * eeh_dn_check_failure - check if all 1's data is due to EEH slot freeze 488 * eeh_dn_check_failure - check if all 1's data is due to EEH slot freeze
453 * @dn device node 489 * @dn device node
@@ -692,15 +728,24 @@ rtas_pci_slot_reset(struct pci_dn *pdn, int state)
692 if (pdn->eeh_pe_config_addr) 728 if (pdn->eeh_pe_config_addr)
693 config_addr = pdn->eeh_pe_config_addr; 729 config_addr = pdn->eeh_pe_config_addr;
694 730
695 rc = rtas_call(ibm_set_slot_reset,4,1, NULL, 731 rc = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
696 config_addr, 732 config_addr,
697 BUID_HI(pdn->phb->buid), 733 BUID_HI(pdn->phb->buid),
698 BUID_LO(pdn->phb->buid), 734 BUID_LO(pdn->phb->buid),
699 state); 735 state);
700 if (rc) 736
701 printk (KERN_WARNING "EEH: Unable to reset the failed slot," 737 /* Fundamental-reset not supported on this PE, try hot-reset */
702 " (%d) #RST=%d dn=%s\n", 738 if (rc == -8 && state == 3) {
703 rc, state, pdn->node->full_name); 739 rc = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
740 config_addr,
741 BUID_HI(pdn->phb->buid),
742 BUID_LO(pdn->phb->buid), 1);
743 if (rc)
744 printk(KERN_WARNING
745 "EEH: Unable to reset the failed slot,"
746 " #RST=%d dn=%s\n",
747 rc, pdn->node->full_name);
748 }
704} 749}
705 750
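
The retry above hinges on magic numbers: state 3 requests a fundamental reset, state 1 a hot reset, and firmware answers -8 when fundamental reset is not supported on the PE. Named constants would read better; the definitions below are illustrative assumptions inferred from this hunk, not part of the patch:

/* Illustrative names, inferred from the hunk above (assumptions): */
#define EEH_RESET_HOT			1	/* hot-reset the slot */
#define EEH_RESET_FUNDAMENTAL		3	/* fundamental reset */
#define RTAS_RC_FRESET_UNSUPPORTED	(-8)	/* PE lacks fundamental reset */

/* ...letting the fallback test read:
 *	if (rc == RTAS_RC_FRESET_UNSUPPORTED && state == EEH_RESET_FUNDAMENTAL)
 */
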
706/** 751/**
@@ -736,18 +781,21 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
736/** 781/**
737 * rtas_set_slot_reset -- assert the pci #RST line for 1/4 second 782 * rtas_set_slot_reset -- assert the pci #RST line for 1/4 second
738 * @pdn: pci device node to be reset. 783 * @pdn: pci device node to be reset.
739 *
740 * Return 0 if success, else a non-zero value.
741 */ 784 */
742 785
743static void __rtas_set_slot_reset(struct pci_dn *pdn) 786static void __rtas_set_slot_reset(struct pci_dn *pdn)
744{ 787{
745 struct pci_dev *dev = pdn->pcidev; 788 unsigned int freset = 0;
746 789
747 /* Determine type of EEH reset required by device, 790 /* Determine type of EEH reset required for
748 * default hot reset or fundamental reset 791 * Partitionable Endpoint, a hot-reset (1)
749 */ 792 * or a fundamental reset (3).
750 if (dev && dev->needs_freset) 793 * A fundamental reset required by any device under
794 * the Partitionable Endpoint trumps hot-reset.
795 */
796 eeh_set_pe_freset(pdn->node, &freset);
797
798 if (freset)
751 rtas_pci_slot_reset(pdn, 3); 799 rtas_pci_slot_reset(pdn, 3);
752 else 800 else
753 rtas_pci_slot_reset(pdn, 1); 801 rtas_pci_slot_reset(pdn, 1);
@@ -895,13 +943,20 @@ rtas_configure_bridge(struct pci_dn *pdn)
895{ 943{
896 int config_addr; 944 int config_addr;
897 int rc; 945 int rc;
946 int token;
898 947
899 /* Use PE configuration address, if present */ 948 /* Use PE configuration address, if present */
900 config_addr = pdn->eeh_config_addr; 949 config_addr = pdn->eeh_config_addr;
901 if (pdn->eeh_pe_config_addr) 950 if (pdn->eeh_pe_config_addr)
902 config_addr = pdn->eeh_pe_config_addr; 951 config_addr = pdn->eeh_pe_config_addr;
903 952
904 rc = rtas_call(ibm_configure_bridge,3,1, NULL, 953 /* Use new configure-pe function, if supported */
954 if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE)
955 token = ibm_configure_pe;
956 else
957 token = ibm_configure_bridge;
958
959 rc = rtas_call(token, 3, 1, NULL,
905 config_addr, 960 config_addr,
906 BUID_HI(pdn->phb->buid), 961 BUID_HI(pdn->phb->buid),
907 BUID_LO(pdn->phb->buid)); 962 BUID_LO(pdn->phb->buid));
@@ -1077,6 +1132,7 @@ void __init eeh_init(void)
1077 ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info"); 1132 ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
1078 ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2"); 1133 ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2");
1079 ibm_configure_bridge = rtas_token ("ibm,configure-bridge"); 1134 ibm_configure_bridge = rtas_token ("ibm,configure-bridge");
1135 ibm_configure_pe = rtas_token("ibm,configure-pe");
1080 1136
1081 if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) 1137 if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE)
1082 return; 1138 return;
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index b8d70f5d9aa9..1b6cb10589e0 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -328,7 +328,7 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
328 struct pci_bus *frozen_bus; 328 struct pci_bus *frozen_bus;
329 int rc = 0; 329 int rc = 0;
330 enum pci_ers_result result = PCI_ERS_RESULT_NONE; 330 enum pci_ers_result result = PCI_ERS_RESULT_NONE;
331 const char *location, *pci_str, *drv_str; 331 const char *location, *pci_str, *drv_str, *bus_pci_str, *bus_drv_str;
332 332
333 frozen_dn = find_device_pe(event->dn); 333 frozen_dn = find_device_pe(event->dn);
334 if (!frozen_dn) { 334 if (!frozen_dn) {
@@ -364,13 +364,8 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
364 frozen_pdn = PCI_DN(frozen_dn); 364 frozen_pdn = PCI_DN(frozen_dn);
365 frozen_pdn->eeh_freeze_count++; 365 frozen_pdn->eeh_freeze_count++;
366 366
367 if (frozen_pdn->pcidev) { 367 pci_str = eeh_pci_name(event->dev);
368 pci_str = pci_name (frozen_pdn->pcidev); 368 drv_str = pcid_name(event->dev);
369 drv_str = pcid_name (frozen_pdn->pcidev);
370 } else {
371 pci_str = eeh_pci_name(event->dev);
372 drv_str = pcid_name (event->dev);
373 }
374 369
375 if (frozen_pdn->eeh_freeze_count > EEH_MAX_ALLOWED_FREEZES) 370 if (frozen_pdn->eeh_freeze_count > EEH_MAX_ALLOWED_FREEZES)
376 goto excess_failures; 371 goto excess_failures;
@@ -378,8 +373,17 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
378 printk(KERN_WARNING 373 printk(KERN_WARNING
379 "EEH: This PCI device has failed %d times in the last hour:\n", 374 "EEH: This PCI device has failed %d times in the last hour:\n",
380 frozen_pdn->eeh_freeze_count); 375 frozen_pdn->eeh_freeze_count);
376
377 if (frozen_pdn->pcidev) {
378 bus_pci_str = pci_name(frozen_pdn->pcidev);
379 bus_drv_str = pcid_name(frozen_pdn->pcidev);
380 printk(KERN_WARNING
381 "EEH: Bus location=%s driver=%s pci addr=%s\n",
382 location, bus_drv_str, bus_pci_str);
383 }
384
381 printk(KERN_WARNING 385 printk(KERN_WARNING
382 "EEH: location=%s driver=%s pci addr=%s\n", 386 "EEH: Device location=%s driver=%s pci addr=%s\n",
383 location, drv_str, pci_str); 387 location, drv_str, pci_str);
384 388
385 /* Walk the various device drivers attached to this slot through 389 /* Walk the various device drivers attached to this slot through
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index ef8c45489e20..46f13a3c5d09 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -19,6 +19,7 @@
19 */ 19 */
20 20
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/interrupt.h>
22#include <linux/delay.h> 23#include <linux/delay.h>
23#include <linux/cpu.h> 24#include <linux/cpu.h>
24#include <asm/system.h> 25#include <asm/system.h>
@@ -28,7 +29,7 @@
28#include <asm/machdep.h> 29#include <asm/machdep.h>
29#include <asm/vdso_datapage.h> 30#include <asm/vdso_datapage.h>
30#include <asm/pSeries_reconfig.h> 31#include <asm/pSeries_reconfig.h>
31#include "xics.h" 32#include <asm/xics.h>
32#include "plpar_wrappers.h" 33#include "plpar_wrappers.h"
33#include "offline_states.h" 34#include "offline_states.h"
34 35
@@ -280,7 +281,7 @@ static int pseries_add_processor(struct device_node *np)
280 } 281 }
281 282
282 for_each_cpu(cpu, tmp) { 283 for_each_cpu(cpu, tmp) {
283 BUG_ON(cpumask_test_cpu(cpu, cpu_present_mask)); 284 BUG_ON(cpu_present(cpu));
284 set_cpu_present(cpu, true); 285 set_cpu_present(cpu, true);
285 set_hard_smp_processor_id(cpu, *intserv++); 286 set_hard_smp_processor_id(cpu, *intserv++);
286 } 287 }
diff --git a/arch/powerpc/platforms/pseries/io_event_irq.c b/arch/powerpc/platforms/pseries/io_event_irq.c
new file mode 100644
index 000000000000..c829e6067d54
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/io_event_irq.c
@@ -0,0 +1,231 @@
1/*
2 * Copyright 2010, 2011 Mark Nelson and Tseng-Hui (Frank) Lin, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/errno.h>
11#include <linux/slab.h>
12#include <linux/module.h>
13#include <linux/irq.h>
14#include <linux/interrupt.h>
15#include <linux/of.h>
16#include <linux/list.h>
17#include <linux/notifier.h>
18
19#include <asm/machdep.h>
20#include <asm/rtas.h>
21#include <asm/irq.h>
22#include <asm/io_event_irq.h>
23
24#include "pseries.h"
25
26/*
27 * An IO event interrupt is a mechanism provided by RTAS to return
28 * information about hardware error and non-error events. Device
29 * drivers can register their event handlers to receive events.
30 * Device drivers are expected to use atomic_notifier_chain_register()
31 * and atomic_notifier_chain_unregister() to register and unregister
32 * their event handlers. Since multiple IO event types and scopes
33 * share an IO event interrupt, the event handlers are called one
34 * by one until the IO event is claimed by one of the handlers.
35 * The event handlers are expected to return NOTIFY_OK if the
36 * event was handled, or NOTIFY_DONE if the event does not
37 * belong to the handler.
38 *
39 * Usage:
40 *
41 * Notifier function:
42 * #include <asm/io_event_irq.h>
43 * int event_handler(struct notifier_block *nb, unsigned long val, void *data) {
44 * p = (struct pseries_io_event *) data;
45 * if (! is_my_event(p->scope, p->event_type)) return NOTIFY_DONE;
46 * :
47 * :
48 * return NOTIFY_OK;
49 * }
50 * struct notifier_block event_nb = {
51 * .notifier_call = event_handler,
52 * };
53 *
54 * Registration:
55 * atomic_notifier_chain_register(&pseries_ioei_notifier_list, &event_nb);
56 *
57 * Unregistration:
58 * atomic_notifier_chain_unregister(&pseries_ioei_notifier_list, &event_nb);
59 */
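
Expanding the skeleton above into something closer to a buildable client — note this is a hedged sketch: is_my_event() is hypothetical, and the scope/event_type field names on struct pseries_io_event are assumed from the usage comment rather than taken from the header:

#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/io_event_irq.h>

/* Hypothetical ownership test; the field types and values here are
 * placeholders, not taken from <asm/io_event_irq.h>. */
static bool is_my_event(unsigned int scope, unsigned int event_type)
{
	return scope == 0x3B && event_type == 0x01;
}

static int my_event_handler(struct notifier_block *nb,
			    unsigned long val, void *data)
{
	struct pseries_io_event *p = data;

	if (!is_my_event(p->scope, p->event_type))
		return NOTIFY_DONE;	/* not ours; let other handlers look */

	/* ... act on the event ... */
	return NOTIFY_OK;		/* event handled */
}

static struct notifier_block my_event_nb = {
	.notifier_call = my_event_handler,
};

static int __init my_client_init(void)
{
	return atomic_notifier_chain_register(&pseries_ioei_notifier_list,
					      &my_event_nb);
}

static void __exit my_client_exit(void)
{
	atomic_notifier_chain_unregister(&pseries_ioei_notifier_list,
					 &my_event_nb);
}

module_init(my_client_init);
module_exit(my_client_exit);
MODULE_LICENSE("GPL");
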
60
61ATOMIC_NOTIFIER_HEAD(pseries_ioei_notifier_list);
62EXPORT_SYMBOL_GPL(pseries_ioei_notifier_list);
63
64static int ioei_check_exception_token;
65
66/* pSeries event log format */
67
68/* Two bytes ASCII section IDs */
69#define PSERIES_ELOG_SECT_ID_PRIV_HDR (('P' << 8) | 'H')
70#define PSERIES_ELOG_SECT_ID_USER_HDR (('U' << 8) | 'H')
71#define PSERIES_ELOG_SECT_ID_PRIMARY_SRC (('P' << 8) | 'S')
72#define PSERIES_ELOG_SECT_ID_EXTENDED_UH (('E' << 8) | 'H')
73#define PSERIES_ELOG_SECT_ID_FAILING_MTMS (('M' << 8) | 'T')
74#define PSERIES_ELOG_SECT_ID_SECONDARY_SRC (('S' << 8) | 'S')
75#define PSERIES_ELOG_SECT_ID_DUMP_LOCATOR (('D' << 8) | 'H')
76#define PSERIES_ELOG_SECT_ID_FW_ERROR (('S' << 8) | 'W')
77#define PSERIES_ELOG_SECT_ID_IMPACT_PART_ID (('L' << 8) | 'P')
78#define PSERIES_ELOG_SECT_ID_LOGIC_RESOURCE_ID (('L' << 8) | 'R')
79#define PSERIES_ELOG_SECT_ID_HMC_ID (('H' << 8) | 'M')
80#define PSERIES_ELOG_SECT_ID_EPOW (('E' << 8) | 'P')
81#define PSERIES_ELOG_SECT_ID_IO_EVENT (('I' << 8) | 'E')
82#define PSERIES_ELOG_SECT_ID_MANUFACT_INFO (('M' << 8) | 'I')
83#define PSERIES_ELOG_SECT_ID_CALL_HOME (('C' << 8) | 'H')
84#define PSERIES_ELOG_SECT_ID_USER_DEF (('U' << 8) | 'D')
85
86/* Vendor specific Platform Event Log Format, Version 6, section header */
87struct pseries_elog_section {
88 uint16_t id; /* 0x00 2-byte ASCII section ID */
89 uint16_t length; /* 0x02 Section length in bytes */
90 uint8_t version; /* 0x04 Section version */
91 uint8_t subtype; /* 0x05 Section subtype */
92 uint16_t creator_component; /* 0x06 Creator component ID */
93 uint8_t data[]; /* 0x08 Start of section data */
94};
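
A quick worked example of the ID encoding above: the high byte holds the first ASCII character, so the IO Event ID "IE" becomes ('I' << 8) | 'E' = (0x49 << 8) | 0x45 = 0x4945. A tiny standalone check (illustrative only, not part of the driver):

#include <assert.h>
#include <stdint.h>

/* Same packing rule as the PSERIES_ELOG_SECT_ID_* macros above. */
#define SECT_ID(a, b)	((uint16_t)(((a) << 8) | (b)))

int main(void)
{
	/* "IE" = IO Event section: 'I' = 0x49, 'E' = 0x45 */
	assert(SECT_ID('I', 'E') == 0x4945);
	return 0;
}
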
95
96static char ioei_rtas_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
97
98/**
99 * Find the data portion of a specific section in an RTAS extended event log.
100 * @elog: RTAS error/event log.
101 * @sect_id: section ID.
102 *
103 * Return:
104 * pointer to the section data of the specified section
105 * NULL if not found
106 */
107static struct pseries_elog_section *find_xelog_section(struct rtas_error_log *elog,
108 uint16_t sect_id)
109{
110 struct rtas_ext_event_log_v6 *xelog =
111 (struct rtas_ext_event_log_v6 *) elog->buffer;
112 struct pseries_elog_section *sect;
113 unsigned char *p, *log_end;
114
115 /* Check that we understand the format */
116 if (elog->extended_log_length < sizeof(struct rtas_ext_event_log_v6) ||
117 xelog->log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
118 xelog->company_id != RTAS_V6EXT_COMPANY_ID_IBM)
119 return NULL;
120
121 log_end = elog->buffer + elog->extended_log_length;
122 p = xelog->vendor_log;
123 while (p < log_end) {
124 sect = (struct pseries_elog_section *)p;
125 if (sect->id == sect_id)
126 return sect;
127 p += sect->length;
128 }
129 return NULL;
130}
131
132/**
133 * Find the data portion of an IO Event section from the event log.
134 * @elog: RTAS error/event log.
135 *
136 * Return:
137 * pointer to a valid IO event section data. NULL if not found.
138 */
139static struct pseries_io_event *ioei_find_event(struct rtas_error_log *elog)
140{
141 struct pseries_elog_section *sect;
142
143 /* We should only ever get called for io-event interrupts, but if
144 * we do get called for another type then something went wrong, so
145 * make some noise about it.
146 * RTAS_TYPE_IO only exists in extended event log version 6 or later.
147 * No need to check event log version.
148 */
149 if (unlikely(elog->type != RTAS_TYPE_IO)) {
150 printk_once(KERN_WARNING "io_event_irq: Unexpected event type %d\n",
151 elog->type);
152 return NULL;
153 }
154
155 sect = find_xelog_section(elog, PSERIES_ELOG_SECT_ID_IO_EVENT);
156 if (unlikely(!sect)) {
157 printk_once(KERN_WARNING "io_event_irq: RTAS extended event "
158 "log does not contain an IO Event section. "
159 "Could be a bug in system firmware!\n");
160 return NULL;
161 }
162 return (struct pseries_io_event *) &sect->data;
163}
164
165/*
166 * PAPR:
167 * - check-exception returns the first found error or event and clears that
168 * error or event so it is reported only once.
169 * - Each interrupt returns one event. If a platform chooses to report
170 * multiple events through a single interrupt, it must ensure that the
171 * interrupt remains asserted until check-exception has been used to
172 * process all outstanding events for that interrupt.
173 *
174 * Implementation notes:
175 * - Events must be processed in the order they are returned. Hence,
176 * sequential in nature.
177 * - The owner of an event is determined by combinations of scope,
178 * event type, and sub-type. There is no easy way to pre-sort clients
179 * by scope or event type alone. For example, Torrent ISR route change
180 * event is reported with scope 0x00 (Not Applicable) rather than
181 * 0x3B (Torrent-hub). It is better to let the clients identify
182 * who owns the event.
183 */
184
185static irqreturn_t ioei_interrupt(int irq, void *dev_id)
186{
187 struct pseries_io_event *event;
188 int rtas_rc;
189
190 for (;;) {
191 rtas_rc = rtas_call(ioei_check_exception_token, 6, 1, NULL,
192 RTAS_VECTOR_EXTERNAL_INTERRUPT,
193 virq_to_hw(irq),
194 RTAS_IO_EVENTS, 1 /* Time Critical */,
195 __pa(ioei_rtas_buf),
196 RTAS_DATA_BUF_SIZE);
197 if (rtas_rc != 0)
198 break;
199
200 event = ioei_find_event((struct rtas_error_log *)ioei_rtas_buf);
201 if (!event)
202 continue;
203
204 atomic_notifier_call_chain(&pseries_ioei_notifier_list,
205 0, event);
206 }
207 return IRQ_HANDLED;
208}
209
210static int __init ioei_init(void)
211{
212 struct device_node *np;
213
214 ioei_check_exception_token = rtas_token("check-exception");
215 if (ioei_check_exception_token == RTAS_UNKNOWN_SERVICE) {
216 pr_warning("IO Event IRQ not supported on this system !\n");
217 return -ENODEV;
218 }
219 np = of_find_node_by_path("/event-sources/ibm,io-events");
220 if (np) {
221 request_event_sources_irqs(np, ioei_interrupt, "IO_EVENT");
222 of_node_put(np);
223 } else {
224 pr_err("io_event_irq: No ibm,io-events on system! "
225 "IO Event interrupt disabled.\n");
226 return -ENODEV;
227 }
228 return 0;
229}
230machine_subsys_initcall(pseries, ioei_init);
231
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 6d5412a18b26..01faab9456ca 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -659,15 +659,18 @@ static void remove_ddw(struct device_node *np)
659{ 659{
660 struct dynamic_dma_window_prop *dwp; 660 struct dynamic_dma_window_prop *dwp;
661 struct property *win64; 661 struct property *win64;
662 const u32 *ddr_avail; 662 const u32 *ddw_avail;
663 u64 liobn; 663 u64 liobn;
664 int len, ret; 664 int len, ret;
665 665
666 ddr_avail = of_get_property(np, "ibm,ddw-applicable", &len); 666 ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len);
667 win64 = of_find_property(np, DIRECT64_PROPNAME, NULL); 667 win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
668 if (!win64 || !ddr_avail || len < 3 * sizeof(u32)) 668 if (!win64)
669 return; 669 return;
670 670
671 if (!ddw_avail || len < 3 * sizeof(u32) || win64->length < sizeof(*dwp))
672 goto delprop;
673
671 dwp = win64->value; 674 dwp = win64->value;
672 liobn = (u64)be32_to_cpu(dwp->liobn); 675 liobn = (u64)be32_to_cpu(dwp->liobn);
673 676
@@ -681,28 +684,29 @@ static void remove_ddw(struct device_node *np)
681 pr_debug("%s successfully cleared tces in window.\n", 684 pr_debug("%s successfully cleared tces in window.\n",
682 np->full_name); 685 np->full_name);
683 686
684 ret = rtas_call(ddr_avail[2], 1, 1, NULL, liobn); 687 ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
685 if (ret) 688 if (ret)
686 pr_warning("%s: failed to remove direct window: rtas returned " 689 pr_warning("%s: failed to remove direct window: rtas returned "
687 "%d to ibm,remove-pe-dma-window(%x) %llx\n", 690 "%d to ibm,remove-pe-dma-window(%x) %llx\n",
688 np->full_name, ret, ddr_avail[2], liobn); 691 np->full_name, ret, ddw_avail[2], liobn);
689 else 692 else
690 pr_debug("%s: successfully removed direct window: rtas returned " 693 pr_debug("%s: successfully removed direct window: rtas returned "
691 "%d to ibm,remove-pe-dma-window(%x) %llx\n", 694 "%d to ibm,remove-pe-dma-window(%x) %llx\n",
692 np->full_name, ret, ddr_avail[2], liobn); 695 np->full_name, ret, ddw_avail[2], liobn);
693}
694 696
697delprop:
698 ret = prom_remove_property(np, win64);
699 if (ret)
700 pr_warning("%s: failed to remove direct window property: %d\n",
701 np->full_name, ret);
702}
695 703
696static int dupe_ddw_if_already_created(struct pci_dev *dev, struct device_node *pdn) 704static u64 find_existing_ddw(struct device_node *pdn)
697{ 705{
698 struct device_node *dn;
699 struct pci_dn *pcidn;
700 struct direct_window *window; 706 struct direct_window *window;
701 const struct dynamic_dma_window_prop *direct64; 707 const struct dynamic_dma_window_prop *direct64;
702 u64 dma_addr = 0; 708 u64 dma_addr = 0;
703 709
704 dn = pci_device_to_OF_node(dev);
705 pcidn = PCI_DN(dn);
706 spin_lock(&direct_window_list_lock); 710 spin_lock(&direct_window_list_lock);
707 /* check if we already created a window and dupe that config if so */ 711 /* check if we already created a window and dupe that config if so */
708 list_for_each_entry(window, &direct_window_list, list) { 712 list_for_each_entry(window, &direct_window_list, list) {
@@ -717,36 +721,40 @@ static int dupe_ddw_if_already_created(struct pci_dev *dev, struct device_node *
717 return dma_addr; 721 return dma_addr;
718} 722}
719 723
720static u64 dupe_ddw_if_kexec(struct pci_dev *dev, struct device_node *pdn) 724static int find_existing_ddw_windows(void)
721{ 725{
722 struct device_node *dn;
723 struct pci_dn *pcidn;
724 int len; 726 int len;
727 struct device_node *pdn;
725 struct direct_window *window; 728 struct direct_window *window;
726 const struct dynamic_dma_window_prop *direct64; 729 const struct dynamic_dma_window_prop *direct64;
727 u64 dma_addr = 0;
728 730
729 dn = pci_device_to_OF_node(dev); 731 if (!firmware_has_feature(FW_FEATURE_LPAR))
730 pcidn = PCI_DN(dn); 732 return 0;
731 direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len); 733
732 if (direct64) { 734 for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
735 direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
736 if (!direct64)
737 continue;
738
733 window = kzalloc(sizeof(*window), GFP_KERNEL); 739 window = kzalloc(sizeof(*window), GFP_KERNEL);
734 if (!window) { 740 if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
741 kfree(window);
735 remove_ddw(pdn); 742 remove_ddw(pdn);
736 } else { 743 continue;
737 window->device = pdn;
738 window->prop = direct64;
739 spin_lock(&direct_window_list_lock);
740 list_add(&window->list, &direct_window_list);
741 spin_unlock(&direct_window_list_lock);
742 dma_addr = direct64->dma_base;
743 } 744 }
745
746 window->device = pdn;
747 window->prop = direct64;
748 spin_lock(&direct_window_list_lock);
749 list_add(&window->list, &direct_window_list);
750 spin_unlock(&direct_window_list_lock);
744 } 751 }
745 752
746 return dma_addr; 753 return 0;
747} 754}
755machine_arch_initcall(pseries, find_existing_ddw_windows);
748 756
749static int query_ddw(struct pci_dev *dev, const u32 *ddr_avail, 757static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
750 struct ddw_query_response *query) 758 struct ddw_query_response *query)
751{ 759{
752 struct device_node *dn; 760 struct device_node *dn;
@@ -767,15 +775,15 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddr_avail,
767 if (pcidn->eeh_pe_config_addr) 775 if (pcidn->eeh_pe_config_addr)
768 cfg_addr = pcidn->eeh_pe_config_addr; 776 cfg_addr = pcidn->eeh_pe_config_addr;
769 buid = pcidn->phb->buid; 777 buid = pcidn->phb->buid;
770 ret = rtas_call(ddr_avail[0], 3, 5, (u32 *)query, 778 ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
771 cfg_addr, BUID_HI(buid), BUID_LO(buid)); 779 cfg_addr, BUID_HI(buid), BUID_LO(buid));
772 dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x" 780 dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x"
773 " returned %d\n", ddr_avail[0], cfg_addr, BUID_HI(buid), 781 " returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid),
774 BUID_LO(buid), ret); 782 BUID_LO(buid), ret);
775 return ret; 783 return ret;
776} 784}
777 785
778static int create_ddw(struct pci_dev *dev, const u32 *ddr_avail, 786static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
779 struct ddw_create_response *create, int page_shift, 787 struct ddw_create_response *create, int page_shift,
780 int window_shift) 788 int window_shift)
781{ 789{
@@ -800,12 +808,12 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddr_avail,
800 808
801 do { 809 do {
802 /* extra outputs are LIOBN and dma-addr (hi, lo) */ 810 /* extra outputs are LIOBN and dma-addr (hi, lo) */
803 ret = rtas_call(ddr_avail[1], 5, 4, (u32 *)create, cfg_addr, 811 ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create, cfg_addr,
804 BUID_HI(buid), BUID_LO(buid), page_shift, window_shift); 812 BUID_HI(buid), BUID_LO(buid), page_shift, window_shift);
805 } while (rtas_busy_delay(ret)); 813 } while (rtas_busy_delay(ret));
806 dev_info(&dev->dev, 814 dev_info(&dev->dev,
807 "ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d " 815 "ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
808 "(liobn = 0x%x starting addr = %x %x)\n", ddr_avail[1], 816 "(liobn = 0x%x starting addr = %x %x)\n", ddw_avail[1],
809 cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift, 817 cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift,
810 window_shift, ret, create->liobn, create->addr_hi, create->addr_lo); 818 window_shift, ret, create->liobn, create->addr_hi, create->addr_lo);
811 819
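
One readability note on the hunks above: query_ddw() calls ddw_avail[0], create_ddw() calls ddw_avail[1], and remove_ddw() calls ddw_avail[2], so the "ibm,ddw-applicable" property evidently carries the three RTAS tokens in that order. An illustrative enum (an assumption inferred from those call sites, not part of this patch) would make the indices self-documenting:

/* Inferred layout of the "ibm,ddw-applicable" property (assumption
 * based on the ddw_avail[] indices used in this file): */
enum ddw_applicable {
	DDW_QUERY_PE_DMA_WIN  = 0,	/* ibm,query-pe-dma-windows */
	DDW_CREATE_PE_DMA_WIN = 1,	/* ibm,create-pe-dma-window */
	DDW_REMOVE_PE_DMA_WIN = 2,	/* ibm,remove-pe-dma-window */
};
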
@@ -831,18 +839,14 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
831 int page_shift; 839 int page_shift;
832 u64 dma_addr, max_addr; 840 u64 dma_addr, max_addr;
833 struct device_node *dn; 841 struct device_node *dn;
834 const u32 *uninitialized_var(ddr_avail); 842 const u32 *uninitialized_var(ddw_avail);
835 struct direct_window *window; 843 struct direct_window *window;
836 struct property *uninitialized_var(win64); 844 struct property *win64;
837 struct dynamic_dma_window_prop *ddwprop; 845 struct dynamic_dma_window_prop *ddwprop;
838 846
839 mutex_lock(&direct_window_init_mutex); 847 mutex_lock(&direct_window_init_mutex);
840 848
841 dma_addr = dupe_ddw_if_already_created(dev, pdn); 849 dma_addr = find_existing_ddw(pdn);
842 if (dma_addr != 0)
843 goto out_unlock;
844
845 dma_addr = dupe_ddw_if_kexec(dev, pdn);
846 if (dma_addr != 0) 850 if (dma_addr != 0)
847 goto out_unlock; 851 goto out_unlock;
848 852
@@ -854,8 +858,8 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
854 * for the given node in that order. 858 * for the given node in that order.
855 * the property is actually in the parent, not the PE 859 * the property is actually in the parent, not the PE
856 */ 860 */
857 ddr_avail = of_get_property(pdn, "ibm,ddw-applicable", &len); 861 ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
858 if (!ddr_avail || len < 3 * sizeof(u32)) 862 if (!ddw_avail || len < 3 * sizeof(u32))
859 goto out_unlock; 863 goto out_unlock;
860 864
861 /* 865 /*
@@ -865,7 +869,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
865 * of page sizes: supported and supported for migrate-dma. 869 * of page sizes: supported and supported for migrate-dma.
866 */ 870 */
867 dn = pci_device_to_OF_node(dev); 871 dn = pci_device_to_OF_node(dev);
868 ret = query_ddw(dev, ddr_avail, &query); 872 ret = query_ddw(dev, ddw_avail, &query);
869 if (ret != 0) 873 if (ret != 0)
870 goto out_unlock; 874 goto out_unlock;
871 875
@@ -907,13 +911,14 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
907 } 911 }
908 win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL); 912 win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
909 win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL); 913 win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
914 win64->length = sizeof(*ddwprop);
910 if (!win64->name || !win64->value) { 915 if (!win64->name || !win64->value) {
911 dev_info(&dev->dev, 916 dev_info(&dev->dev,
912 "couldn't allocate property name and value\n"); 917 "couldn't allocate property name and value\n");
913 goto out_free_prop; 918 goto out_free_prop;
914 } 919 }
915 920
916 ret = create_ddw(dev, ddr_avail, &create, page_shift, len); 921 ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
917 if (ret != 0) 922 if (ret != 0)
918 goto out_free_prop; 923 goto out_free_prop;
919 924
@@ -1021,13 +1026,16 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
1021 const void *dma_window = NULL; 1026 const void *dma_window = NULL;
1022 u64 dma_offset; 1027 u64 dma_offset;
1023 1028
1024 if (!dev->dma_mask || !dma_supported(dev, dma_mask)) 1029 if (!dev->dma_mask)
1025 return -EIO; 1030 return -EIO;
1026 1031
1032 if (!dev_is_pci(dev))
1033 goto check_mask;
1034
1035 pdev = to_pci_dev(dev);
1036
1027 /* only attempt to use a new window if 64-bit DMA is requested */ 1037 /* only attempt to use a new window if 64-bit DMA is requested */
1028 if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) { 1038 if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) {
1029 pdev = to_pci_dev(dev);
1030
1031 dn = pci_device_to_OF_node(pdev); 1039 dn = pci_device_to_OF_node(pdev);
1032 dev_dbg(dev, "node is %s\n", dn->full_name); 1040 dev_dbg(dev, "node is %s\n", dn->full_name);
1033 1041
@@ -1054,12 +1062,17 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
1054 } 1062 }
1055 } 1063 }
1056 1064
1057 /* fall-through to iommu ops */ 1065 /* fall back on iommu ops, restore table pointer with ops */
1058 if (!ddw_enabled) { 1066 if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) {
1059 dev_info(dev, "Using 32-bit DMA via iommu\n"); 1067 dev_info(dev, "Restoring 32-bit DMA via iommu\n");
1060 set_dma_ops(dev, &dma_iommu_ops); 1068 set_dma_ops(dev, &dma_iommu_ops);
1069 pci_dma_dev_setup_pSeriesLP(pdev);
1061 } 1070 }
1062 1071
1072check_mask:
1073 if (!dma_supported(dev, dma_mask))
1074 return -EIO;
1075
1063 *dev->dma_mask = dma_mask; 1076 *dev->dma_mask = dma_mask;
1064 return 0; 1077 return 0;
1065} 1078}
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index 77d38a5e2ff9..54cf3a4aa16b 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -7,15 +7,18 @@
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 */ 8 */
9 9
10#include <linux/kernel.h>
11#include <linux/interrupt.h>
12
10#include <asm/machdep.h> 13#include <asm/machdep.h>
11#include <asm/page.h> 14#include <asm/page.h>
12#include <asm/firmware.h> 15#include <asm/firmware.h>
13#include <asm/kexec.h> 16#include <asm/kexec.h>
14#include <asm/mpic.h> 17#include <asm/mpic.h>
18#include <asm/xics.h>
15#include <asm/smp.h> 19#include <asm/smp.h>
16 20
17#include "pseries.h" 21#include "pseries.h"
18#include "xics.h"
19#include "plpar_wrappers.h" 22#include "plpar_wrappers.h"
20 23
21static void pseries_kexec_cpu_down(int crash_shutdown, int secondary) 24static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index ca5d5898d320..39e6e0a7b2fa 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -329,6 +329,8 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
329 /* Make pHyp happy */ 329 /* Make pHyp happy */
330 if ((rflags & _PAGE_NO_CACHE) & !(rflags & _PAGE_WRITETHRU)) 330 if ((rflags & _PAGE_NO_CACHE) & !(rflags & _PAGE_WRITETHRU))
331 hpte_r &= ~_PAGE_COHERENT; 331 hpte_r &= ~_PAGE_COHERENT;
332 if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
333 flags |= H_COALESCE_CAND;
332 334
333 lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot); 335 lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
334 if (unlikely(lpar_rc == H_PTEG_FULL)) { 336 if (unlikely(lpar_rc == H_PTEG_FULL)) {
@@ -573,7 +575,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
573 unsigned long i, pix, rc; 575 unsigned long i, pix, rc;
574 unsigned long flags = 0; 576 unsigned long flags = 0;
575 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 577 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
576 int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE); 578 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
577 unsigned long param[9]; 579 unsigned long param[9];
578 unsigned long va; 580 unsigned long va;
579 unsigned long hash, index, shift, hidx, slot; 581 unsigned long hash, index, shift, hidx, slot;
@@ -771,3 +773,47 @@ out:
771 local_irq_restore(flags); 773 local_irq_restore(flags);
772} 774}
773#endif 775#endif
776
777/**
778 * h_get_mpp
779 * The H_GET_MPP hcall returns info in 7 parameters.
780 */
781int h_get_mpp(struct hvcall_mpp_data *mpp_data)
782{
783 int rc;
784 unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
785
786 rc = plpar_hcall9(H_GET_MPP, retbuf);
787
788 mpp_data->entitled_mem = retbuf[0];
789 mpp_data->mapped_mem = retbuf[1];
790
791 mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
792 mpp_data->pool_num = retbuf[2] & 0xffff;
793
794 mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
795 mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
796 mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff;
797
798 mpp_data->pool_size = retbuf[4];
799 mpp_data->loan_request = retbuf[5];
800 mpp_data->backing_mem = retbuf[6];
801
802 return rc;
803}
804EXPORT_SYMBOL(h_get_mpp);
805
806int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
807{
808 int rc;
809 unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };
810
811 rc = plpar_hcall9(H_GET_MPP_X, retbuf);
812
813 mpp_x_data->coalesced_bytes = retbuf[0];
814 mpp_x_data->pool_coalesced_bytes = retbuf[1];
815 mpp_x_data->pool_purr_cycles = retbuf[2];
816 mpp_x_data->pool_spurr_cycles = retbuf[3];
817
818 return rc;
819}
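
The unpacking in both helpers follows one rule: (retbuf[i] >> n * 8) & mask extracts the field whose lowest byte sits at byte n of the 64-bit return register. A standalone illustration of the retbuf[3] split used by h_get_mpp() (the value is made up):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* pretend retbuf[3] came back from H_GET_MPP:
	 * byte 7 = mem_weight, byte 6 = unallocated_mem_weight,
	 * bytes 0-5 = unallocated_entitlement (48 bits) */
	uint64_t reg = 0xAB00123456789ABCULL;

	assert(((reg >> 7 * 8) & 0xff) == 0xAB);	/* mem_weight */
	assert(((reg >> 6 * 8) & 0xff) == 0x00);	/* unallocated_mem_weight */
	assert((reg & 0xffffffffffffULL) == 0x123456789ABCULL); /* entitlement */
	return 0;
}
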
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index d9801117124b..4bf21207d7d3 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -270,31 +270,4 @@ static inline long plpar_put_term_char(unsigned long termno, unsigned long len,
270 lbuf[1]); 270 lbuf[1]);
271} 271}
272 272
273static inline long plpar_eoi(unsigned long xirr)
274{
275 return plpar_hcall_norets(H_EOI, xirr);
276}
277
278static inline long plpar_cppr(unsigned long cppr)
279{
280 return plpar_hcall_norets(H_CPPR, cppr);
281}
282
283static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr)
284{
285 return plpar_hcall_norets(H_IPI, servernum, mfrr);
286}
287
288static inline long plpar_xirr(unsigned long *xirr_ret, unsigned char cppr)
289{
290 long rc;
291 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
292
293 rc = plpar_hcall(H_XIRR, retbuf, cppr);
294
295 *xirr_ret = retbuf[0];
296
297 return rc;
298}
299
300#endif /* _PSERIES_PLPAR_WRAPPERS_H */ 273#endif /* _PSERIES_PLPAR_WRAPPERS_H */
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index c55d7ad9c648..086d2ae4e06a 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -122,7 +122,7 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
122 122
123 status = rtas_call(ras_check_exception_token, 6, 1, NULL, 123 status = rtas_call(ras_check_exception_token, 6, 1, NULL,
124 RTAS_VECTOR_EXTERNAL_INTERRUPT, 124 RTAS_VECTOR_EXTERNAL_INTERRUPT,
125 irq_map[irq].hwirq, 125 virq_to_hw(irq),
126 RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS, 126 RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS,
127 critical, __pa(&ras_log_buf), 127 critical, __pa(&ras_log_buf),
128 rtas_get_error_log_max()); 128 rtas_get_error_log_max());
@@ -157,7 +157,7 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
157 157
158 status = rtas_call(ras_check_exception_token, 6, 1, NULL, 158 status = rtas_call(ras_check_exception_token, 6, 1, NULL,
159 RTAS_VECTOR_EXTERNAL_INTERRUPT, 159 RTAS_VECTOR_EXTERNAL_INTERRUPT,
160 irq_map[irq].hwirq, 160 virq_to_hw(irq),
161 RTAS_INTERNAL_ERROR, 1 /*Time Critical */, 161 RTAS_INTERNAL_ERROR, 1 /*Time Critical */,
162 __pa(&ras_log_buf), 162 __pa(&ras_log_buf),
163 rtas_get_error_log_max()); 163 rtas_get_error_log_max());
@@ -227,7 +227,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
227 struct rtas_error_log *h, *errhdr = NULL; 227 struct rtas_error_log *h, *errhdr = NULL;
228 228
229 if (!VALID_FWNMI_BUFFER(regs->gpr[3])) { 229 if (!VALID_FWNMI_BUFFER(regs->gpr[3])) {
230 printk(KERN_ERR "FWNMI: corrupt r3\n"); 230 printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]);
231 return NULL; 231 return NULL;
232 } 232 }
233 233
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 000724149089..593acceeff96 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -53,9 +53,9 @@
53#include <asm/irq.h> 53#include <asm/irq.h>
54#include <asm/time.h> 54#include <asm/time.h>
55#include <asm/nvram.h> 55#include <asm/nvram.h>
56#include "xics.h"
57#include <asm/pmc.h> 56#include <asm/pmc.h>
58#include <asm/mpic.h> 57#include <asm/mpic.h>
58#include <asm/xics.h>
59#include <asm/ppc-pci.h> 59#include <asm/ppc-pci.h>
60#include <asm/i8259.h> 60#include <asm/i8259.h>
61#include <asm/udbg.h> 61#include <asm/udbg.h>
@@ -205,6 +205,9 @@ static void __init pseries_mpic_init_IRQ(void)
205 mpic_assign_isu(mpic, n, isuaddr); 205 mpic_assign_isu(mpic, n, isuaddr);
206 } 206 }
207 207
208 /* Setup top-level get_irq */
209 ppc_md.get_irq = mpic_get_irq;
210
208 /* All ISUs are setup, complete initialization */ 211 /* All ISUs are setup, complete initialization */
209 mpic_init(mpic); 212 mpic_init(mpic);
210 213
@@ -214,7 +217,7 @@ static void __init pseries_mpic_init_IRQ(void)
214 217
215static void __init pseries_xics_init_IRQ(void) 218static void __init pseries_xics_init_IRQ(void)
216{ 219{
217 xics_init_IRQ(); 220 xics_init();
218 pseries_setup_i8259_cascade(); 221 pseries_setup_i8259_cascade();
219} 222}
220 223
@@ -238,7 +241,6 @@ static void __init pseries_discover_pic(void)
238 if (strstr(typep, "open-pic")) { 241 if (strstr(typep, "open-pic")) {
239 pSeries_mpic_node = of_node_get(np); 242 pSeries_mpic_node = of_node_get(np);
240 ppc_md.init_IRQ = pseries_mpic_init_IRQ; 243 ppc_md.init_IRQ = pseries_mpic_init_IRQ;
241 ppc_md.get_irq = mpic_get_irq;
242 setup_kexec_cpu_down_mpic(); 244 setup_kexec_cpu_down_mpic();
243 smp_init_pseries_mpic(); 245 smp_init_pseries_mpic();
244 return; 246 return;
@@ -276,6 +278,8 @@ static struct notifier_block pci_dn_reconfig_nb = {
276 .notifier_call = pci_dn_reconfig_notifier, 278 .notifier_call = pci_dn_reconfig_notifier,
277}; 279};
278 280
281struct kmem_cache *dtl_cache;
282
279#ifdef CONFIG_VIRT_CPU_ACCOUNTING 283#ifdef CONFIG_VIRT_CPU_ACCOUNTING
280/* 284/*
281 * Allocate space for the dispatch trace log for all possible cpus 285 * Allocate space for the dispatch trace log for all possible cpus
@@ -291,10 +295,12 @@ static int alloc_dispatch_logs(void)
291 if (!firmware_has_feature(FW_FEATURE_SPLPAR)) 295 if (!firmware_has_feature(FW_FEATURE_SPLPAR))
292 return 0; 296 return 0;
293 297
298 if (!dtl_cache)
299 return 0;
300
294 for_each_possible_cpu(cpu) { 301 for_each_possible_cpu(cpu) {
295 pp = &paca[cpu]; 302 pp = &paca[cpu];
296 dtl = kmalloc_node(DISPATCH_LOG_BYTES, GFP_KERNEL, 303 dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
297 cpu_to_node(cpu));
298 if (!dtl) { 304 if (!dtl) {
299 pr_warn("Failed to allocate dispatch trace log for cpu %d\n", 305 pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
300 cpu); 306 cpu);
@@ -324,10 +330,27 @@ static int alloc_dispatch_logs(void)
324 330
325 return 0; 331 return 0;
326} 332}
327 333#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
328early_initcall(alloc_dispatch_logs); 334static inline int alloc_dispatch_logs(void)
335{
336 return 0;
337}
329#endif /* CONFIG_VIRT_CPU_ACCOUNTING */ 338#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
330 339
340static int alloc_dispatch_log_kmem_cache(void)
341{
342 dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
343 DISPATCH_LOG_BYTES, 0, NULL);
344 if (!dtl_cache) {
345 pr_warn("Failed to create dispatch trace log buffer cache\n");
346 pr_warn("Stolen time statistics will be unreliable\n");
347 return 0;
348 }
349
350 return alloc_dispatch_logs();
351}
352early_initcall(alloc_dispatch_log_kmem_cache);
353
331static void __init pSeries_setup_arch(void) 354static void __init pSeries_setup_arch(void)
332{ 355{
333 /* Discover PIC type and setup ppc_md accordingly */ 356 /* Discover PIC type and setup ppc_md accordingly */
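
The cache above is the point of the dtl.c changes earlier in this diff: kmem_cache_create() is given an alignment equal to the object size, and an object aligned to its own size — assuming DISPATCH_LOG_BYTES is a power of two no larger than 4 KiB, which is an assumption here, not something shown in this hunk — can never straddle a 4 KiB boundary, exactly the firmware constraint noted in dtl.c. A standalone sketch of that invariant:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Assumption: DISPATCH_LOG_BYTES is a power of two no larger than 4096;
 * the value below is a placeholder, not taken from the kernel headers. */
#define DISPATCH_LOG_BYTES 4096

static int crosses_4k(uintptr_t addr, size_t len)
{
	return (addr / 4096) != ((addr + len - 1) / 4096);
}

int main(void)
{
	uintptr_t addr;

	/* every buffer aligned to its own (power-of-two <= 4k) size
	 * stays inside a single 4 KiB region, as firmware requires */
	for (addr = 0; addr < (1UL << 20); addr += DISPATCH_LOG_BYTES)
		assert(!crosses_4k(addr, DISPATCH_LOG_BYTES));
	return 0;
}
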
@@ -395,6 +418,16 @@ static int pseries_set_xdabr(unsigned long dabr)
395#define CMO_CHARACTERISTICS_TOKEN 44 418#define CMO_CHARACTERISTICS_TOKEN 44
396#define CMO_MAXLENGTH 1026 419#define CMO_MAXLENGTH 1026
397 420
421void pSeries_coalesce_init(void)
422{
423 struct hvcall_mpp_x_data mpp_x_data;
424
425 if (firmware_has_feature(FW_FEATURE_CMO) && !h_get_mpp_x(&mpp_x_data))
426 powerpc_firmware_features |= FW_FEATURE_XCMO;
427 else
428 powerpc_firmware_features &= ~FW_FEATURE_XCMO;
429}
430
398/** 431/**
399 * fw_cmo_feature_init - FW_FEATURE_CMO is not stored in ibm,hypertas-functions, 432 * fw_cmo_feature_init - FW_FEATURE_CMO is not stored in ibm,hypertas-functions,
400 * handle that here. (Stolen from parse_system_parameter_string) 433 * handle that here. (Stolen from parse_system_parameter_string)
@@ -464,6 +497,7 @@ void pSeries_cmo_feature_init(void)
464 pr_debug("CMO enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP, 497 pr_debug("CMO enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
465 CMO_SecPSP); 498 CMO_SecPSP);
466 powerpc_firmware_features |= FW_FEATURE_CMO; 499 powerpc_firmware_features |= FW_FEATURE_CMO;
500 pSeries_coalesce_init();
467 } else 501 } else
468 pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP, 502 pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
469 CMO_SecPSP); 503 CMO_SecPSP);
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index a509c5292a67..fbffd7e47ab8 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -44,10 +44,11 @@
44#include <asm/mpic.h> 44#include <asm/mpic.h>
45#include <asm/vdso_datapage.h> 45#include <asm/vdso_datapage.h>
46#include <asm/cputhreads.h> 46#include <asm/cputhreads.h>
47#include <asm/mpic.h>
48#include <asm/xics.h>
47 49
48#include "plpar_wrappers.h" 50#include "plpar_wrappers.h"
49#include "pseries.h" 51#include "pseries.h"
50#include "xics.h"
51#include "offline_states.h" 52#include "offline_states.h"
52 53
53 54
@@ -136,7 +137,6 @@ out:
136 return 1; 137 return 1;
137} 138}
138 139
139#ifdef CONFIG_XICS
140static void __devinit smp_xics_setup_cpu(int cpu) 140static void __devinit smp_xics_setup_cpu(int cpu)
141{ 141{
142 if (cpu != boot_cpuid) 142 if (cpu != boot_cpuid)
@@ -151,14 +151,13 @@ static void __devinit smp_xics_setup_cpu(int cpu)
151 set_default_offline_state(cpu); 151 set_default_offline_state(cpu);
152#endif 152#endif
153} 153}
154#endif /* CONFIG_XICS */
155 154
156static void __devinit smp_pSeries_kick_cpu(int nr) 155static int __devinit smp_pSeries_kick_cpu(int nr)
157{ 156{
158 BUG_ON(nr < 0 || nr >= NR_CPUS); 157 BUG_ON(nr < 0 || nr >= NR_CPUS);
159 158
160 if (!smp_startup_cpu(nr)) 159 if (!smp_startup_cpu(nr))
161 return; 160 return -ENOENT;
162 161
163 /* 162 /*
164 * The processor is currently spinning, waiting for the 163 * The processor is currently spinning, waiting for the
@@ -180,6 +179,8 @@ static void __devinit smp_pSeries_kick_cpu(int nr)
180 "Ret= %ld\n", nr, rc); 179 "Ret= %ld\n", nr, rc);
181 } 180 }
182#endif 181#endif
182
183 return 0;
183} 184}
184 185
185static int smp_pSeries_cpu_bootable(unsigned int nr) 186static int smp_pSeries_cpu_bootable(unsigned int nr)
@@ -197,23 +198,22 @@ static int smp_pSeries_cpu_bootable(unsigned int nr)
197 198
198 return 1; 199 return 1;
199} 200}
200#ifdef CONFIG_MPIC 201
201 202 static struct smp_ops_t pSeries_mpic_smp_ops = {
202 203 .message_pass = smp_mpic_message_pass,
203 204 .probe = smp_mpic_probe,
204 205 .kick_cpu = smp_pSeries_kick_cpu,
205 206 .setup_cpu = smp_mpic_setup_cpu,
206 207 };
207#endif 208
208#ifdef CONFIG_XICS
209 209 static struct smp_ops_t pSeries_xics_smp_ops = {
210 .message_pass = smp_xics_message_pass, 210 .message_pass = smp_muxed_ipi_message_pass,
211 .probe = smp_xics_probe, 211 .cause_ipi = NULL, /* Filled at runtime by xics_smp_probe() */
212 .probe = xics_smp_probe,
212 213 .kick_cpu = smp_pSeries_kick_cpu,
213 214 .setup_cpu = smp_xics_setup_cpu,
214 215 .cpu_bootable = smp_pSeries_cpu_bootable,
215 216 };
216#endif
217 217
218 218 /* This is called very early */
219 219 static void __init smp_init_pseries(void)
@@ -245,14 +245,12 @@ static void __init smp_init_pseries(void)
245 pr_debug(" <- smp_init_pSeries()\n"); 245 pr_debug(" <- smp_init_pSeries()\n");
246} 246}
247 247
248#ifdef CONFIG_MPIC
249 248 void __init smp_init_pseries_mpic(void)
250 249 {
251 250 smp_ops = &pSeries_mpic_smp_ops;
252 251
253 252 smp_init_pseries();
254 253 }
255#endif
256 254
257 255 void __init smp_init_pseries_xics(void)
258 256 {
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
deleted file mode 100644
index d6901334d66e..000000000000
--- a/arch/powerpc/platforms/pseries/xics.c
+++ /dev/null
@@ -1,949 +0,0 @@
1/*
2 * arch/powerpc/platforms/pseries/xics.c
3 *
4 * Copyright 2000 IBM Corporation.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/types.h>
13#include <linux/threads.h>
14#include <linux/kernel.h>
15#include <linux/irq.h>
16#include <linux/smp.h>
17#include <linux/interrupt.h>
18#include <linux/init.h>
19#include <linux/radix-tree.h>
20#include <linux/cpu.h>
21#include <linux/msi.h>
22#include <linux/of.h>
23#include <linux/percpu.h>
24
25#include <asm/firmware.h>
26#include <asm/io.h>
27#include <asm/pgtable.h>
28#include <asm/smp.h>
29#include <asm/rtas.h>
30#include <asm/hvcall.h>
31#include <asm/machdep.h>
32
33#include "xics.h"
34#include "plpar_wrappers.h"
35
36static struct irq_host *xics_host;
37
38#define XICS_IPI 2
39#define XICS_IRQ_SPURIOUS 0
40
41/* Want a priority other than 0. Various HW issues require this. */
42#define DEFAULT_PRIORITY 5
43
44/*
45 * Mark IPIs as higher priority so we can take them inside interrupts that
46 * arent marked IRQF_DISABLED
47 */
48#define IPI_PRIORITY 4
49
50/* The least favored priority */
51#define LOWEST_PRIORITY 0xFF
52
53/* The number of priorities defined above */
54#define MAX_NUM_PRIORITIES 3
55
56static unsigned int default_server = 0xFF;
57static unsigned int default_distrib_server = 0;
58static unsigned int interrupt_server_size = 8;
59
60/* RTAS service tokens */
61static int ibm_get_xive;
62static int ibm_set_xive;
63static int ibm_int_on;
64static int ibm_int_off;
65
66struct xics_cppr {
67 unsigned char stack[MAX_NUM_PRIORITIES];
68 int index;
69};
70
71static DEFINE_PER_CPU(struct xics_cppr, xics_cppr);
72
73/* Direct hardware low level accessors */
74
75/* The part of the interrupt presentation layer that we care about */
76struct xics_ipl {
77 union {
78 u32 word;
79 u8 bytes[4];
80 } xirr_poll;
81 union {
82 u32 word;
83 u8 bytes[4];
84 } xirr;
85 u32 dummy;
86 union {
87 u32 word;
88 u8 bytes[4];
89 } qirr;
90};
91
92static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS];
93
94static inline unsigned int direct_xirr_info_get(void)
95{
96 int cpu = smp_processor_id();
97
98 return in_be32(&xics_per_cpu[cpu]->xirr.word);
99}
100
101static inline void direct_xirr_info_set(unsigned int value)
102{
103 int cpu = smp_processor_id();
104
105 out_be32(&xics_per_cpu[cpu]->xirr.word, value);
106}
107
108static inline void direct_cppr_info(u8 value)
109{
110 int cpu = smp_processor_id();
111
112 out_8(&xics_per_cpu[cpu]->xirr.bytes[0], value);
113}
114
115static inline void direct_qirr_info(int n_cpu, u8 value)
116{
117 out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value);
118}
119
120
121/* LPAR low level accessors */
122
123static inline unsigned int lpar_xirr_info_get(unsigned char cppr)
124{
125 unsigned long lpar_rc;
126 unsigned long return_value;
127
128 lpar_rc = plpar_xirr(&return_value, cppr);
129 if (lpar_rc != H_SUCCESS)
130 panic(" bad return code xirr - rc = %lx\n", lpar_rc);
131 return (unsigned int)return_value;
132}
133
134static inline void lpar_xirr_info_set(unsigned int value)
135{
136 unsigned long lpar_rc;
137
138 lpar_rc = plpar_eoi(value);
139 if (lpar_rc != H_SUCCESS)
140 panic("bad return code EOI - rc = %ld, value=%x\n", lpar_rc,
141 value);
142}
143
144static inline void lpar_cppr_info(u8 value)
145{
146 unsigned long lpar_rc;
147
148 lpar_rc = plpar_cppr(value);
149 if (lpar_rc != H_SUCCESS)
150 panic("bad return code cppr - rc = %lx\n", lpar_rc);
151}
152
 153static inline void lpar_qirr_info(int n_cpu, u8 value)
154{
155 unsigned long lpar_rc;
156
157 lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
158 if (lpar_rc != H_SUCCESS)
159 panic("bad return code qirr - rc = %lx\n", lpar_rc);
160}
161
162
163/* Interface to generic irq subsystem */
164
165#ifdef CONFIG_SMP
166/*
167 * For the moment we only implement delivery to all cpus or one cpu.
168 *
169 * If the requested affinity is cpu_all_mask, we set global affinity.
170 * If not we set it to the first cpu in the mask, even if multiple cpus
 171 * are set. This is so things like irqbalance (which sets core- and
 172 * package-wide affinities) do the right thing.
173 */
174static int get_irq_server(unsigned int virq, const struct cpumask *cpumask,
175 unsigned int strict_check)
176{
177
178 if (!distribute_irqs)
179 return default_server;
180
181 if (!cpumask_subset(cpu_possible_mask, cpumask)) {
182 int server = cpumask_first_and(cpu_online_mask, cpumask);
183
184 if (server < nr_cpu_ids)
185 return get_hard_smp_processor_id(server);
186
187 if (strict_check)
188 return -1;
189 }
190
191 /*
192 * Workaround issue with some versions of JS20 firmware that
193 * deliver interrupts to cpus which haven't been started. This
194 * happens when using the maxcpus= boot option.
195 */
196 if (cpumask_equal(cpu_online_mask, cpu_present_mask))
197 return default_distrib_server;
198
199 return default_server;
200}
201#else
202#define get_irq_server(virq, cpumask, strict_check) (default_server)
203#endif
204
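A worked example of the selection logic above, with hypothetical CPU numbers:

	/* distribute_irqs = 1, cpumask = {2,3}, possible = online = {0,1,2,3}:
	 *   cpumask_subset(possible, {2,3}) is false, so the restricted path runs;
	 *   cpumask_first_and(online, {2,3}) = 2
	 *   -> return get_hard_smp_processor_id(2)
	 * A package-wide mask from irqbalance thus collapses to one server. */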
205static void xics_unmask_irq(struct irq_data *d)
206{
207 unsigned int hwirq;
208 int call_status;
209 int server;
210
211 pr_devel("xics: unmask virq %d\n", d->irq);
212
213 hwirq = (unsigned int)irq_map[d->irq].hwirq;
214 pr_devel(" -> map to hwirq 0x%x\n", hwirq);
215 if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS)
216 return;
217
218 server = get_irq_server(d->irq, d->affinity, 0);
219
220 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq, server,
221 DEFAULT_PRIORITY);
222 if (call_status != 0) {
223 printk(KERN_ERR
224 "%s: ibm_set_xive irq %u server %x returned %d\n",
225 __func__, hwirq, server, call_status);
226 return;
227 }
228
229 /* Now unmask the interrupt (often a no-op) */
230 call_status = rtas_call(ibm_int_on, 1, 1, NULL, hwirq);
231 if (call_status != 0) {
232 printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n",
233 __func__, hwirq, call_status);
234 return;
235 }
236}
237
238static unsigned int xics_startup(struct irq_data *d)
239{
240 /*
241 * The generic MSI code returns with the interrupt disabled on the
242 * card, using the MSI mask bits. Firmware doesn't appear to unmask
243 * at that level, so we do it here by hand.
244 */
245 if (d->msi_desc)
246 unmask_msi_irq(d);
247
248 /* unmask it */
249 xics_unmask_irq(d);
250 return 0;
251}
252
253static void xics_mask_real_irq(unsigned int hwirq)
254{
255 int call_status;
256
257 if (hwirq == XICS_IPI)
258 return;
259
260 call_status = rtas_call(ibm_int_off, 1, 1, NULL, hwirq);
261 if (call_status != 0) {
262 printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n",
263 __func__, hwirq, call_status);
264 return;
265 }
266
267 /* Have to set XIVE to 0xff to be able to remove a slot */
268 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq,
269 default_server, 0xff);
270 if (call_status != 0) {
271 printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n",
272 __func__, hwirq, call_status);
273 return;
274 }
275}
276
277static void xics_mask_irq(struct irq_data *d)
278{
279 unsigned int hwirq;
280
281 pr_devel("xics: mask virq %d\n", d->irq);
282
283 hwirq = (unsigned int)irq_map[d->irq].hwirq;
284 if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS)
285 return;
286 xics_mask_real_irq(hwirq);
287}
288
289static void xics_mask_unknown_vec(unsigned int vec)
290{
291 printk(KERN_ERR "Interrupt %u (real) is invalid, disabling it.\n", vec);
292 xics_mask_real_irq(vec);
293}
294
295static inline unsigned int xics_xirr_vector(unsigned int xirr)
296{
297 /*
298 * The top byte is the old cppr, to be restored on EOI.
299 * The remaining 24 bits are the vector.
300 */
301 return xirr & 0x00ffffff;
302}
303
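The same packing is reversed at EOI time, where xics_eoi_direct() and xics_eoi_lpar() below write (pop_cppr() << 24) | hwirq. A worked sketch of the layout, with a hypothetical vector number:

	/* Hypothetical XIRR value: old cppr 0x05, vector 0x001234 */
	u32 xirr = (0x05 << 24) | 0x001234;	/* = 0x05001234 */
	u32 vec  = xirr & 0x00ffffff;		/* 0x001234, as in xics_xirr_vector() */
	u8  cppr = xirr >> 24;			/* 0x05, restored on EOI */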
304static void push_cppr(unsigned int vec)
305{
306 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
307
308 if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1))
309 return;
310
311 if (vec == XICS_IPI)
312 os_cppr->stack[++os_cppr->index] = IPI_PRIORITY;
313 else
314 os_cppr->stack[++os_cppr->index] = DEFAULT_PRIORITY;
315}
316
317static unsigned int xics_get_irq_direct(void)
318{
319 unsigned int xirr = direct_xirr_info_get();
320 unsigned int vec = xics_xirr_vector(xirr);
321 unsigned int irq;
322
323 if (vec == XICS_IRQ_SPURIOUS)
324 return NO_IRQ;
325
326 irq = irq_radix_revmap_lookup(xics_host, vec);
327 if (likely(irq != NO_IRQ)) {
328 push_cppr(vec);
329 return irq;
330 }
331
332 /* We don't have a linux mapping, so have rtas mask it. */
333 xics_mask_unknown_vec(vec);
334
335 /* We might learn about it later, so EOI it */
336 direct_xirr_info_set(xirr);
337 return NO_IRQ;
338}
339
340static unsigned int xics_get_irq_lpar(void)
341{
342 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
343 unsigned int xirr = lpar_xirr_info_get(os_cppr->stack[os_cppr->index]);
344 unsigned int vec = xics_xirr_vector(xirr);
345 unsigned int irq;
346
347 if (vec == XICS_IRQ_SPURIOUS)
348 return NO_IRQ;
349
350 irq = irq_radix_revmap_lookup(xics_host, vec);
351 if (likely(irq != NO_IRQ)) {
352 push_cppr(vec);
353 return irq;
354 }
355
356 /* We don't have a linux mapping, so have RTAS mask it. */
357 xics_mask_unknown_vec(vec);
358
359 /* We might learn about it later, so EOI it */
360 lpar_xirr_info_set(xirr);
361 return NO_IRQ;
362}
363
364static unsigned char pop_cppr(void)
365{
366 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
367
368 if (WARN_ON(os_cppr->index < 1))
369 return LOWEST_PRIORITY;
370
371 return os_cppr->stack[--os_cppr->index];
372}
373
374static void xics_eoi_direct(struct irq_data *d)
375{
376 unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq;
377
378 iosync();
379 direct_xirr_info_set((pop_cppr() << 24) | hwirq);
380}
381
382static void xics_eoi_lpar(struct irq_data *d)
383{
384 unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq;
385
386 iosync();
387 lpar_xirr_info_set((pop_cppr() << 24) | hwirq);
388}
389
390static int
391xics_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force)
392{
393 unsigned int hwirq;
394 int status;
395 int xics_status[2];
396 int irq_server;
397
398 hwirq = (unsigned int)irq_map[d->irq].hwirq;
399 if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS)
400 return -1;
401
402 status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq);
403
404 if (status) {
405 printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
406 __func__, hwirq, status);
407 return -1;
408 }
409
410 irq_server = get_irq_server(d->irq, cpumask, 1);
411 if (irq_server == -1) {
412 char cpulist[128];
413 cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
414 printk(KERN_WARNING
415 "%s: No online cpus in the mask %s for irq %d\n",
416 __func__, cpulist, d->irq);
417 return -1;
418 }
419
420 status = rtas_call(ibm_set_xive, 3, 1, NULL,
421 hwirq, irq_server, xics_status[1]);
422
423 if (status) {
424 printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n",
425 __func__, hwirq, status);
426 return -1;
427 }
428
429 return 0;
430}
431
432static struct irq_chip xics_pic_direct = {
433 .name = "XICS",
434 .irq_startup = xics_startup,
435 .irq_mask = xics_mask_irq,
436 .irq_unmask = xics_unmask_irq,
437 .irq_eoi = xics_eoi_direct,
438 .irq_set_affinity = xics_set_affinity
439};
440
441static struct irq_chip xics_pic_lpar = {
442 .name = "XICS",
443 .irq_startup = xics_startup,
444 .irq_mask = xics_mask_irq,
445 .irq_unmask = xics_unmask_irq,
446 .irq_eoi = xics_eoi_lpar,
447 .irq_set_affinity = xics_set_affinity
448};
449
450
451/* Interface to arch irq controller subsystem layer */
452
453/* Points to the irq_chip we're actually using */
454static struct irq_chip *xics_irq_chip;
455
456static int xics_host_match(struct irq_host *h, struct device_node *node)
457{
458 /* IBM machines have interrupt parents of various funky types for things
 459 * like vdevices, events, etc. The trick we use here is to match
 460 * everything except the legacy 8259, which is compatible with "chrp,iic"
461 */
462 return !of_device_is_compatible(node, "chrp,iic");
463}
464
465static int xics_host_map(struct irq_host *h, unsigned int virq,
466 irq_hw_number_t hw)
467{
468 pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
469
470 /* Insert the interrupt mapping into the radix tree for fast lookup */
471 irq_radix_revmap_insert(xics_host, virq, hw);
472
473 irq_set_status_flags(virq, IRQ_LEVEL);
474 irq_set_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq);
475 return 0;
476}
477
478static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
479 const u32 *intspec, unsigned int intsize,
480 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
481
482{
483 /* Current xics implementation translates everything
484 * to level. It is not technically right for MSIs but this
485 * is irrelevant at this point. We might get smarter in the future
486 */
487 *out_hwirq = intspec[0];
488 *out_flags = IRQ_TYPE_LEVEL_LOW;
489
490 return 0;
491}
492
493static struct irq_host_ops xics_host_ops = {
494 .match = xics_host_match,
495 .map = xics_host_map,
496 .xlate = xics_host_xlate,
497};
498
499static void __init xics_init_host(void)
500{
501 if (firmware_has_feature(FW_FEATURE_LPAR))
502 xics_irq_chip = &xics_pic_lpar;
503 else
504 xics_irq_chip = &xics_pic_direct;
505
506 xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops,
507 XICS_IRQ_SPURIOUS);
508 BUG_ON(xics_host == NULL);
509 irq_set_default_host(xics_host);
510}
511
512
513/* Inter-processor interrupt support */
514
515#ifdef CONFIG_SMP
516/*
517 * XICS only has a single IPI, so encode the messages per CPU
518 */
519static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message);
520
521static inline void smp_xics_do_message(int cpu, int msg)
522{
523 unsigned long *tgt = &per_cpu(xics_ipi_message, cpu);
524
525 set_bit(msg, tgt);
526 mb();
527 if (firmware_has_feature(FW_FEATURE_LPAR))
528 lpar_qirr_info(cpu, IPI_PRIORITY);
529 else
530 direct_qirr_info(cpu, IPI_PRIORITY);
531}
532
533void smp_xics_message_pass(int target, int msg)
534{
535 unsigned int i;
536
537 if (target < NR_CPUS) {
538 smp_xics_do_message(target, msg);
539 } else {
540 for_each_online_cpu(i) {
541 if (target == MSG_ALL_BUT_SELF
542 && i == smp_processor_id())
543 continue;
544 smp_xics_do_message(i, msg);
545 }
546 }
547}
548
549static irqreturn_t xics_ipi_dispatch(int cpu)
550{
551 unsigned long *tgt = &per_cpu(xics_ipi_message, cpu);
552
553 mb(); /* order mmio clearing qirr */
554 while (*tgt) {
555 if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, tgt)) {
556 smp_message_recv(PPC_MSG_CALL_FUNCTION);
557 }
558 if (test_and_clear_bit(PPC_MSG_RESCHEDULE, tgt)) {
559 smp_message_recv(PPC_MSG_RESCHEDULE);
560 }
561 if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, tgt)) {
562 smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE);
563 }
564#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
565 if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, tgt)) {
566 smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
567 }
568#endif
569 }
570 return IRQ_HANDLED;
571}
572
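Since the hardware offers a single IPI vector, several logical messages may be queued by the time the interrupt is taken; the loop above drains them all in one pass. A hypothetical sequence, assuming two senders race ahead of the receiver:

	/* CPU A: */ smp_xics_do_message(2, PPC_MSG_RESCHEDULE);
	/* CPU B: */ smp_xics_do_message(2, PPC_MSG_CALL_FUNCTION);
	/* CPU 2 then takes a single IPI; xics_ipi_dispatch(2) test-and-clears
	 * both bits and delivers both messages before returning IRQ_HANDLED. */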
573static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id)
574{
575 int cpu = smp_processor_id();
576
577 direct_qirr_info(cpu, 0xff);
578
579 return xics_ipi_dispatch(cpu);
580}
581
582static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id)
583{
584 int cpu = smp_processor_id();
585
586 lpar_qirr_info(cpu, 0xff);
587
588 return xics_ipi_dispatch(cpu);
589}
590
591static void xics_request_ipi(void)
592{
593 unsigned int ipi;
594 int rc;
595
596 ipi = irq_create_mapping(xics_host, XICS_IPI);
597 BUG_ON(ipi == NO_IRQ);
598
599 /*
600 * IPIs are marked IRQF_DISABLED as they must run with irqs
601 * disabled
602 */
603 irq_set_handler(ipi, handle_percpu_irq);
604 if (firmware_has_feature(FW_FEATURE_LPAR))
605 rc = request_irq(ipi, xics_ipi_action_lpar,
606 IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL);
607 else
608 rc = request_irq(ipi, xics_ipi_action_direct,
609 IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL);
610 BUG_ON(rc);
611}
612
613int __init smp_xics_probe(void)
614{
615 xics_request_ipi();
616
617 return cpumask_weight(cpu_possible_mask);
618}
619
620#endif /* CONFIG_SMP */
621
622
623/* Initialization */
624
625static void xics_update_irq_servers(void)
626{
627 int i, j;
628 struct device_node *np;
629 u32 ilen;
630 const u32 *ireg;
631 u32 hcpuid;
632
633 /* Find the server numbers for the boot cpu. */
634 np = of_get_cpu_node(boot_cpuid, NULL);
635 BUG_ON(!np);
636
637 ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
638 if (!ireg) {
639 of_node_put(np);
640 return;
641 }
642
643 i = ilen / sizeof(int);
644 hcpuid = get_hard_smp_processor_id(boot_cpuid);
645
646 /* Global interrupt distribution server is specified in the last
647 * entry of "ibm,ppc-interrupt-gserver#s" property. Get the last
 648 * entry from this property for the current boot cpu id and use it as
649 * default distribution server
650 */
651 for (j = 0; j < i; j += 2) {
652 if (ireg[j] == hcpuid) {
653 default_server = hcpuid;
654 default_distrib_server = ireg[j+1];
655 }
656 }
657
658 of_node_put(np);
659}
660
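The property is scanned as (hard-cpu-id, gserver) pairs. A hypothetical layout for a boot CPU with hard id 0x10:

	/* Hypothetical "ibm,ppc-interrupt-gserver#s" cells: <0x10 0xff>
	 * The loop matches ireg[j] == 0x10 and records:
	 *   default_server         = 0x10
	 *   default_distrib_server = 0xff */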
661static void __init xics_map_one_cpu(int hw_id, unsigned long addr,
662 unsigned long size)
663{
664 int i;
665
666 /* This may look gross but it's good enough for now, we don't quite
 667 * have a hard -> linux processor id mapping.
668 */
669 for_each_possible_cpu(i) {
670 if (!cpu_present(i))
671 continue;
672 if (hw_id == get_hard_smp_processor_id(i)) {
673 xics_per_cpu[i] = ioremap(addr, size);
674 return;
675 }
676 }
677}
678
679static void __init xics_init_one_node(struct device_node *np,
680 unsigned int *indx)
681{
682 unsigned int ilen;
683 const u32 *ireg;
684
 685 /* This code makes the theoretically broken assumption that the interrupt
686 * server numbers are the same as the hard CPU numbers.
687 * This happens to be the case so far but we are playing with fire...
688 * should be fixed one of these days. -BenH.
689 */
690 ireg = of_get_property(np, "ibm,interrupt-server-ranges", NULL);
691
 692 /* Does that ever happen? We'll know soon enough... but even good old
 693 * f80 does have that property.
 694 */
695 WARN_ON(ireg == NULL);
696 if (ireg) {
697 /*
698 * set node starting index for this node
699 */
700 *indx = *ireg;
701 }
702 ireg = of_get_property(np, "reg", &ilen);
703 if (!ireg)
704 panic("xics_init_IRQ: can't find interrupt reg property");
705
706 while (ilen >= (4 * sizeof(u32))) {
707 unsigned long addr, size;
708
709 /* XXX Use proper OF parsing code here !!! */
710 addr = (unsigned long)*ireg++ << 32;
711 ilen -= sizeof(u32);
712 addr |= *ireg++;
713 ilen -= sizeof(u32);
714 size = (unsigned long)*ireg++ << 32;
715 ilen -= sizeof(u32);
716 size |= *ireg++;
717 ilen -= sizeof(u32);
718 xics_map_one_cpu(*indx, addr, size);
719 (*indx)++;
720 }
721}
722
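The open-coded cell assembly is what the XXX comment flags. A hedged sketch of the same parse using the generic of_read_number() helper, assuming the usual two-cell address and size encoding (and glossing over the __be32 annotations on the property pointer):

	/* Equivalent loop using of_read_number() instead of manual shifts */
	while (ilen >= 4 * sizeof(u32)) {
		unsigned long addr = of_read_number(ireg, 2);
		unsigned long size = of_read_number(ireg + 2, 2);

		ireg += 4;
		ilen -= 4 * sizeof(u32);
		xics_map_one_cpu(*indx, addr, size);
		(*indx)++;
	}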
723void __init xics_init_IRQ(void)
724{
725 struct device_node *np;
726 u32 indx = 0;
727 int found = 0;
728 const u32 *isize;
729
730 ppc64_boot_msg(0x20, "XICS Init");
731
732 ibm_get_xive = rtas_token("ibm,get-xive");
733 ibm_set_xive = rtas_token("ibm,set-xive");
734 ibm_int_on = rtas_token("ibm,int-on");
735 ibm_int_off = rtas_token("ibm,int-off");
736
737 for_each_node_by_type(np, "PowerPC-External-Interrupt-Presentation") {
738 found = 1;
739 if (firmware_has_feature(FW_FEATURE_LPAR)) {
740 of_node_put(np);
741 break;
742 }
743 xics_init_one_node(np, &indx);
744 }
745 if (found == 0)
746 return;
747
748 /* get the bit size of server numbers */
749 found = 0;
750
751 for_each_compatible_node(np, NULL, "ibm,ppc-xics") {
752 isize = of_get_property(np, "ibm,interrupt-server#-size", NULL);
753
754 if (!isize)
755 continue;
756
757 if (!found) {
758 interrupt_server_size = *isize;
759 found = 1;
760 } else if (*isize != interrupt_server_size) {
761 printk(KERN_WARNING "XICS: "
762 "mismatched ibm,interrupt-server#-size\n");
763 interrupt_server_size = max(*isize,
764 interrupt_server_size);
765 }
766 }
767
768 xics_update_irq_servers();
769 xics_init_host();
770
771 if (firmware_has_feature(FW_FEATURE_LPAR))
772 ppc_md.get_irq = xics_get_irq_lpar;
773 else
774 ppc_md.get_irq = xics_get_irq_direct;
775
776 xics_setup_cpu();
777
778 ppc64_boot_msg(0x21, "XICS Done");
779}
780
781/* Cpu startup, shutdown, and hotplug */
782
783static void xics_set_cpu_priority(unsigned char cppr)
784{
785 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
786
787 /*
788 * we only really want to set the priority when there's
789 * just one cppr value on the stack
790 */
791 WARN_ON(os_cppr->index != 0);
792
793 os_cppr->stack[0] = cppr;
794
795 if (firmware_has_feature(FW_FEATURE_LPAR))
796 lpar_cppr_info(cppr);
797 else
798 direct_cppr_info(cppr);
799 iosync();
800}
801
802/* Have the calling processor join or leave the specified global queue */
803static void xics_set_cpu_giq(unsigned int gserver, unsigned int join)
804{
805 int index;
806 int status;
807
808 if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL))
809 return;
810
811 index = (1UL << interrupt_server_size) - 1 - gserver;
812
813 status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join);
814
815 WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n",
816 GLOBAL_INTERRUPT_QUEUE, index, join, status);
817}
818
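The indicator index reflects the gserver number into the top of the server-number space. A worked example with the default 8-bit server numbers:

	/* interrupt_server_size = 8, gserver = 0:
	 *   index = (1UL << 8) - 1 - 0 = 255
	 * so joining global server 0 drives indicator slot 255. */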
819void xics_setup_cpu(void)
820{
821 xics_set_cpu_priority(LOWEST_PRIORITY);
822
823 xics_set_cpu_giq(default_distrib_server, 1);
824}
825
826void xics_teardown_cpu(void)
827{
828 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
829 int cpu = smp_processor_id();
830
831 /*
832 * we have to reset the cppr index to 0 because we're
833 * not going to return from the IPI
834 */
835 os_cppr->index = 0;
836 xics_set_cpu_priority(0);
837
838 /* Clear any pending IPI request */
839 if (firmware_has_feature(FW_FEATURE_LPAR))
840 lpar_qirr_info(cpu, 0xff);
841 else
842 direct_qirr_info(cpu, 0xff);
843}
844
845void xics_kexec_teardown_cpu(int secondary)
846{
847 xics_teardown_cpu();
848
849 /*
 850 * we take the IPI irq and never return, so we
 851 * need to EOI the IPI, but want to leave our priority 0
852 *
853 * should we check all the other interrupts too?
854 * should we be flagging idle loop instead?
855 * or creating some task to be scheduled?
856 */
857
858 if (firmware_has_feature(FW_FEATURE_LPAR))
859 lpar_xirr_info_set((0x00 << 24) | XICS_IPI);
860 else
861 direct_xirr_info_set((0x00 << 24) | XICS_IPI);
862
863 /*
864 * Some machines need to have at least one cpu in the GIQ,
865 * so leave the master cpu in the group.
866 */
867 if (secondary)
868 xics_set_cpu_giq(default_distrib_server, 0);
869}
870
871#ifdef CONFIG_HOTPLUG_CPU
872
873/* Interrupts are disabled. */
874void xics_migrate_irqs_away(void)
875{
876 int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
877 int virq;
878
879 /* If we used to be the default server, move to the new "boot_cpuid" */
880 if (hw_cpu == default_server)
881 xics_update_irq_servers();
882
883 /* Reject any interrupt that was queued to us... */
884 xics_set_cpu_priority(0);
885
886 /* Remove ourselves from the global interrupt queue */
887 xics_set_cpu_giq(default_distrib_server, 0);
888
889 /* Allow IPIs again... */
890 xics_set_cpu_priority(DEFAULT_PRIORITY);
891
892 for_each_irq(virq) {
893 struct irq_desc *desc;
894 struct irq_chip *chip;
895 unsigned int hwirq;
896 int xics_status[2];
897 int status;
898 unsigned long flags;
899
900 /* We can't set affinity on ISA interrupts */
901 if (virq < NUM_ISA_INTERRUPTS)
902 continue;
903 if (irq_map[virq].host != xics_host)
904 continue;
905 hwirq = (unsigned int)irq_map[virq].hwirq;
906 /* We need to get IPIs still. */
907 if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS)
908 continue;
909
910 desc = irq_to_desc(virq);
911
912 /* We only need to migrate enabled IRQS */
913 if (desc == NULL || desc->action == NULL)
914 continue;
915
916 chip = irq_desc_get_chip(desc);
917 if (chip == NULL || chip->irq_set_affinity == NULL)
918 continue;
919
920 raw_spin_lock_irqsave(&desc->lock, flags);
921
922 status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq);
923 if (status) {
924 printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
925 __func__, hwirq, status);
926 goto unlock;
927 }
928
929 /*
930 * We only support delivery to all cpus or to one cpu.
931 * The irq has to be migrated only in the single cpu
932 * case.
933 */
934 if (xics_status[0] != hw_cpu)
935 goto unlock;
936
937 /* This is expected during cpu offline. */
938 if (cpu_online(cpu))
939 printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
940 virq, cpu);
941
942 /* Reset affinity to all cpus */
943 cpumask_setall(desc->irq_data.affinity);
944 chip->irq_set_affinity(&desc->irq_data, cpu_all_mask, true);
945unlock:
946 raw_spin_unlock_irqrestore(&desc->lock, flags);
947 }
948}
949#endif
diff --git a/arch/powerpc/platforms/pseries/xics.h b/arch/powerpc/platforms/pseries/xics.h
deleted file mode 100644
index d1d5a83039ae..000000000000
--- a/arch/powerpc/platforms/pseries/xics.h
+++ /dev/null
@@ -1,23 +0,0 @@
1/*
2 * arch/powerpc/platforms/pseries/xics.h
3 *
4 * Copyright 2000 IBM Corporation.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef _POWERPC_KERNEL_XICS_H
13#define _POWERPC_KERNEL_XICS_H
14
15extern void xics_init_IRQ(void);
16extern void xics_setup_cpu(void);
17extern void xics_teardown_cpu(void);
18extern void xics_kexec_teardown_cpu(int secondary);
19extern void xics_migrate_irqs_away(void);
20extern int smp_xics_probe(void);
21extern void smp_xics_message_pass(int target, int msg);
22
23#endif /* _POWERPC_KERNEL_XICS_H */
diff --git a/arch/powerpc/platforms/wsp/Kconfig b/arch/powerpc/platforms/wsp/Kconfig
new file mode 100644
index 000000000000..c3c48eb62cc1
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/Kconfig
@@ -0,0 +1,28 @@
1config PPC_WSP
2 bool
3 default n
4
5menu "WSP platform selection"
6 depends on PPC_BOOK3E_64
7
8config PPC_PSR2
9 bool "PSR-2 platform"
10 select PPC_A2
11 select GENERIC_TBSYNC
12 select PPC_SCOM
13 select EPAPR_BOOT
14 select PPC_WSP
15 select PPC_XICS
16 select PPC_ICP_NATIVE
17 default y
18
19endmenu
20
21config PPC_A2_DD2
22 bool "Support for DD2 based A2/WSP systems"
23 depends on PPC_A2
24
25config WORKAROUND_ERRATUM_463
26 depends on PPC_A2_DD2
27 bool "Workaround erratum 463"
28 default y
diff --git a/arch/powerpc/platforms/wsp/Makefile b/arch/powerpc/platforms/wsp/Makefile
new file mode 100644
index 000000000000..095be73d6cd4
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/Makefile
@@ -0,0 +1,6 @@
1ccflags-y += -mno-minimal-toc
2
3obj-y += setup.o ics.o
4obj-$(CONFIG_PPC_PSR2) += psr2.o opb_pic.o
5obj-$(CONFIG_PPC_WSP) += scom_wsp.o
6obj-$(CONFIG_SMP) += smp.o scom_smp.o
diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c
new file mode 100644
index 000000000000..e53bd9e7b125
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/ics.c
@@ -0,0 +1,712 @@
1/*
2 * Copyright 2008-2011 IBM Corporation.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/cpu.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/irq.h>
14#include <linux/kernel.h>
15#include <linux/msi.h>
16#include <linux/of.h>
17#include <linux/slab.h>
18#include <linux/smp.h>
19#include <linux/spinlock.h>
20#include <linux/types.h>
21
22#include <asm/io.h>
23#include <asm/irq.h>
24#include <asm/xics.h>
25
26#include "wsp.h"
27#include "ics.h"
28
29
30/* WSP ICS */
31
32struct wsp_ics {
33 struct ics ics;
34 struct device_node *dn;
35 void __iomem *regs;
36 spinlock_t lock;
37 unsigned long *bitmap;
38 u32 chip_id;
39 u32 lsi_base;
40 u32 lsi_count;
41 u64 hwirq_start;
42 u64 count;
43#ifdef CONFIG_SMP
44 int *hwirq_cpu_map;
45#endif
46};
47
48#define to_wsp_ics(ics) container_of(ics, struct wsp_ics, ics)
49
50#define INT_SRC_LAYER_BUID_REG(base) ((base) + 0x00)
51#define IODA_TBL_ADDR_REG(base) ((base) + 0x18)
52#define IODA_TBL_DATA_REG(base) ((base) + 0x20)
53#define XIVE_UPDATE_REG(base) ((base) + 0x28)
54#define ICS_INT_CAPS_REG(base) ((base) + 0x30)
55
56#define TBL_AUTO_INCREMENT ((1UL << 63) | (1UL << 15))
57#define TBL_SELECT_XIST (1UL << 48)
58#define TBL_SELECT_XIVT (1UL << 49)
59
60#define IODA_IRQ(irq) ((irq) & (0x7FFULL)) /* HRM 5.1.3.4 */
61
62#define XIST_REQUIRED 0x8
63#define XIST_REJECTED 0x4
64#define XIST_PRESENTED 0x2
65#define XIST_PENDING 0x1
66
67#define XIVE_SERVER_SHIFT 42
68#define XIVE_SERVER_MASK 0xFFFFULL
69#define XIVE_PRIORITY_MASK 0xFFULL
70#define XIVE_PRIORITY_SHIFT 32
71#define XIVE_WRITE_ENABLE (1ULL << 63)
72
73/*
74 * The docs refer to a 6 bit field called ChipID, which consists of a
75 * 3 bit NodeID and a 3 bit ChipID. On WSP the ChipID is always zero
 76 * so we ignore it, and everywhere we use "chip id" in this code we
77 * mean the NodeID.
78 */
79#define WSP_ICS_CHIP_SHIFT 17
80
81
82static struct wsp_ics *ics_list;
83static int num_ics;
84
85/* ICS Source controller accessors */
86
87static u64 wsp_ics_get_xive(struct wsp_ics *ics, unsigned int irq)
88{
89 unsigned long flags;
90 u64 xive;
91
92 spin_lock_irqsave(&ics->lock, flags);
93 out_be64(IODA_TBL_ADDR_REG(ics->regs), TBL_SELECT_XIVT | IODA_IRQ(irq));
94 xive = in_be64(IODA_TBL_DATA_REG(ics->regs));
95 spin_unlock_irqrestore(&ics->lock, flags);
96
97 return xive;
98}
99
100static void wsp_ics_set_xive(struct wsp_ics *ics, unsigned int irq, u64 xive)
101{
102 xive &= ~XIVE_ADDR_MASK;
103 xive |= (irq & XIVE_ADDR_MASK);
104 xive |= XIVE_WRITE_ENABLE;
105
106 out_be64(XIVE_UPDATE_REG(ics->regs), xive);
107}
108
109static u64 xive_set_server(u64 xive, unsigned int server)
110{
111 u64 mask = ~(XIVE_SERVER_MASK << XIVE_SERVER_SHIFT);
112
113 xive &= mask;
114 xive |= (server & XIVE_SERVER_MASK) << XIVE_SERVER_SHIFT;
115
116 return xive;
117}
118
119static u64 xive_set_priority(u64 xive, unsigned int priority)
120{
121 u64 mask = ~(XIVE_PRIORITY_MASK << XIVE_PRIORITY_SHIFT);
122
123 xive &= mask;
124 xive |= (priority & XIVE_PRIORITY_MASK) << XIVE_PRIORITY_SHIFT;
125
126 return xive;
127}
128
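The two setters are always applied back to back by the callers below (see wsp_chip_unmask_irq() and wsp_mask_real_irq()); a hypothetical convenience wrapper would make that pairing explicit:

	/* Hypothetical helper: route a XIVE entry to (server, priority) */
	static u64 xive_build(u64 xive, unsigned int server, unsigned int priority)
	{
		return xive_set_priority(xive_set_server(xive, server), priority);
	}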
129
130#ifdef CONFIG_SMP
131/* Find logical CPUs within mask on a given chip and store result in ret */
132void cpus_on_chip(int chip_id, cpumask_t *mask, cpumask_t *ret)
133{
134 int cpu, chip;
135 struct device_node *cpu_dn, *dn;
136 const u32 *prop;
137
138 cpumask_clear(ret);
139 for_each_cpu(cpu, mask) {
140 cpu_dn = of_get_cpu_node(cpu, NULL);
141 if (!cpu_dn)
142 continue;
143
144 prop = of_get_property(cpu_dn, "at-node", NULL);
145 if (!prop) {
146 of_node_put(cpu_dn);
147 continue;
148 }
149
150 dn = of_find_node_by_phandle(*prop);
151 of_node_put(cpu_dn);
152
153 chip = wsp_get_chip_id(dn);
154 if (chip == chip_id)
155 cpumask_set_cpu(cpu, ret);
156
157 of_node_put(dn);
158 }
159}
160
161/* Store a suitable CPU to handle a hwirq in the ics->hwirq_cpu_map cache */
162static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
163 const cpumask_t *affinity)
164{
165 cpumask_var_t avail, newmask;
166 int ret = -ENOMEM, cpu, cpu_rover = 0, target;
167 int index = hwirq - ics->hwirq_start;
168 unsigned int nodeid;
169
170 BUG_ON(index < 0 || index >= ics->count);
171
172 if (!ics->hwirq_cpu_map)
173 return -ENOMEM;
174
175 if (!distribute_irqs) {
176 ics->hwirq_cpu_map[hwirq - ics->hwirq_start] = xics_default_server;
177 return 0;
178 }
179
180 /* Allocate needed CPU masks */
181 if (!alloc_cpumask_var(&avail, GFP_KERNEL))
182 goto ret;
183 if (!alloc_cpumask_var(&newmask, GFP_KERNEL))
184 goto freeavail;
185
186 /* Find PBus attached to the source of this IRQ */
187 nodeid = (hwirq >> WSP_ICS_CHIP_SHIFT) & 0x3; /* 12:14 */
188
189 /* Find CPUs that could handle this IRQ */
190 if (affinity)
191 cpumask_and(avail, cpu_online_mask, affinity);
192 else
193 cpumask_copy(avail, cpu_online_mask);
194
195 /* Narrow selection down to logical CPUs on the same chip */
196 cpus_on_chip(nodeid, avail, newmask);
197
198 /* Ensure we haven't narrowed it down to 0 */
199 if (unlikely(cpumask_empty(newmask))) {
200 if (unlikely(cpumask_empty(avail))) {
201 ret = -1;
202 goto out;
203 }
204 cpumask_copy(newmask, avail);
205 }
206
207 /* Choose a CPU out of those we narrowed it down to in round robin */
208 target = hwirq % cpumask_weight(newmask);
209 for_each_cpu(cpu, newmask) {
210 if (cpu_rover++ >= target) {
211 ics->hwirq_cpu_map[index] = get_hard_smp_processor_id(cpu);
212 ret = 0;
213 goto out;
214 }
215 }
216
217 /* Shouldn't happen */
218 WARN_ON(1);
219
220out:
221 free_cpumask_var(newmask);
222freeavail:
223 free_cpumask_var(avail);
224ret:
225 if (ret < 0) {
226 ics->hwirq_cpu_map[index] = cpumask_first(cpu_online_mask);
227 pr_warning("Error, falling hwirq 0x%x routing back to CPU %i\n",
228 hwirq, ics->hwirq_cpu_map[index]);
229 }
230 return ret;
231}
232
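A worked example of the round-robin step above, with hypothetical numbers:

	/* hwirq = 0x123, newmask = {0,2,4,6} (weight 4):
	 *   target = 0x123 % 4 = 3
	 * cpu_rover passes CPUs 0, 2 and 4, then settles on CPU 6
	 * and caches get_hard_smp_processor_id(6). */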
233static void alloc_irq_map(struct wsp_ics *ics)
234{
235 int i;
236
237 ics->hwirq_cpu_map = kmalloc(sizeof(int) * ics->count, GFP_KERNEL);
238 if (!ics->hwirq_cpu_map) {
239 pr_warning("Allocate hwirq_cpu_map failed, "
240 "IRQ balancing disabled\n");
241 return;
242 }
243
 244 for (i = 0; i < ics->count; i++)
245 ics->hwirq_cpu_map[i] = xics_default_server;
246}
247
248static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)
249{
250 int index = hwirq - ics->hwirq_start;
251
252 BUG_ON(index < 0 || index >= ics->count);
253
254 if (!ics->hwirq_cpu_map)
255 return xics_default_server;
256
257 return ics->hwirq_cpu_map[index];
258}
259#else /* !CONFIG_SMP */
260static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
261 const cpumask_t *affinity)
262{
263 return 0;
264}
265
266static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)
267{
268 return xics_default_server;
269}
270
271static void alloc_irq_map(struct wsp_ics *ics) { }
272#endif
273
274static void wsp_chip_unmask_irq(struct irq_data *d)
275{
276 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
277 struct wsp_ics *ics;
278 int server;
279 u64 xive;
280
281 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
282 return;
283
284 ics = d->chip_data;
285 if (WARN_ON(!ics))
286 return;
287
288 server = get_irq_server(ics, hw_irq);
289
290 xive = wsp_ics_get_xive(ics, hw_irq);
291 xive = xive_set_server(xive, server);
292 xive = xive_set_priority(xive, DEFAULT_PRIORITY);
293 wsp_ics_set_xive(ics, hw_irq, xive);
294}
295
296static unsigned int wsp_chip_startup(struct irq_data *d)
297{
298 /* unmask it */
299 wsp_chip_unmask_irq(d);
300 return 0;
301}
302
303static void wsp_mask_real_irq(unsigned int hw_irq, struct wsp_ics *ics)
304{
305 u64 xive;
306
307 if (hw_irq == XICS_IPI)
308 return;
309
310 if (WARN_ON(!ics))
311 return;
312 xive = wsp_ics_get_xive(ics, hw_irq);
313 xive = xive_set_server(xive, xics_default_server);
314 xive = xive_set_priority(xive, LOWEST_PRIORITY);
315 wsp_ics_set_xive(ics, hw_irq, xive);
316}
317
318static void wsp_chip_mask_irq(struct irq_data *d)
319{
320 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
321 struct wsp_ics *ics = d->chip_data;
322
323 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
324 return;
325
326 wsp_mask_real_irq(hw_irq, ics);
327}
328
329static int wsp_chip_set_affinity(struct irq_data *d,
330 const struct cpumask *cpumask, bool force)
331{
332 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
333 struct wsp_ics *ics;
334 int ret;
335 u64 xive;
336
337 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
338 return -1;
339
340 ics = d->chip_data;
341 if (WARN_ON(!ics))
342 return -1;
343 xive = wsp_ics_get_xive(ics, hw_irq);
344
345 /*
346 * For the moment only implement delivery to all cpus or one cpu.
347 * Get current irq_server for the given irq
348 */
349 ret = cache_hwirq_map(ics, d->irq, cpumask);
350 if (ret == -1) {
351 char cpulist[128];
352 cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
353 pr_warning("%s: No online cpus in the mask %s for irq %d\n",
354 __func__, cpulist, d->irq);
355 return -1;
356 } else if (ret == -ENOMEM) {
357 pr_warning("%s: Out of memory\n", __func__);
358 return -1;
359 }
360
361 xive = xive_set_server(xive, get_irq_server(ics, hw_irq));
362 wsp_ics_set_xive(ics, hw_irq, xive);
363
364 return 0;
365}
366
367static struct irq_chip wsp_irq_chip = {
368 .name = "WSP ICS",
369 .irq_startup = wsp_chip_startup,
370 .irq_mask = wsp_chip_mask_irq,
371 .irq_unmask = wsp_chip_unmask_irq,
372 .irq_set_affinity = wsp_chip_set_affinity
373};
374
375static int wsp_ics_host_match(struct ics *ics, struct device_node *dn)
376{
377 /* All ICSs in the system implement a global irq number space,
378 * so match against them all. */
379 return of_device_is_compatible(dn, "ibm,ppc-xics");
380}
381
382static int wsp_ics_match_hwirq(struct wsp_ics *wsp_ics, unsigned int hwirq)
383{
384 if (hwirq >= wsp_ics->hwirq_start &&
385 hwirq < wsp_ics->hwirq_start + wsp_ics->count)
386 return 1;
387
388 return 0;
389}
390
391static int wsp_ics_map(struct ics *ics, unsigned int virq)
392{
393 struct wsp_ics *wsp_ics = to_wsp_ics(ics);
394 unsigned int hw_irq = virq_to_hw(virq);
395 unsigned long flags;
396
397 if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
398 return -ENOENT;
399
400 irq_set_chip_and_handler(virq, &wsp_irq_chip, handle_fasteoi_irq);
401
402 irq_set_chip_data(virq, wsp_ics);
403
404 spin_lock_irqsave(&wsp_ics->lock, flags);
405 bitmap_allocate_region(wsp_ics->bitmap, hw_irq - wsp_ics->hwirq_start, 0);
406 spin_unlock_irqrestore(&wsp_ics->lock, flags);
407
408 return 0;
409}
410
411static void wsp_ics_mask_unknown(struct ics *ics, unsigned long hw_irq)
412{
413 struct wsp_ics *wsp_ics = to_wsp_ics(ics);
414
415 if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
416 return;
417
418 pr_err("%s: IRQ %lu (real) is invalid, disabling it.\n", __func__, hw_irq);
419 wsp_mask_real_irq(hw_irq, wsp_ics);
420}
421
422static long wsp_ics_get_server(struct ics *ics, unsigned long hw_irq)
423{
424 struct wsp_ics *wsp_ics = to_wsp_ics(ics);
425
426 if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
427 return -ENOENT;
428
429 return get_irq_server(wsp_ics, hw_irq);
430}
431
432/* HW Number allocation API */
433
434static struct wsp_ics *wsp_ics_find_dn_ics(struct device_node *dn)
435{
436 struct device_node *iparent;
437 int i;
438
439 iparent = of_irq_find_parent(dn);
440 if (!iparent) {
441 pr_err("wsp_ics: Failed to find interrupt parent!\n");
442 return NULL;
443 }
444
 445 for (i = 0; i < num_ics; i++) {
 446 if (ics_list[i].dn == iparent)
 447 break;
 448 }
449
450 if (i >= num_ics) {
451 pr_err("wsp_ics: Unable to find parent bitmap!\n");
452 return NULL;
453 }
454
455 return &ics_list[i];
456}
457
458int wsp_ics_alloc_irq(struct device_node *dn, int num)
459{
460 struct wsp_ics *ics;
461 int order, offset;
462
463 ics = wsp_ics_find_dn_ics(dn);
464 if (!ics)
465 return -ENODEV;
466
467 /* Fast, but overly strict if num isn't a power of two */
468 order = get_count_order(num);
469
470 spin_lock_irq(&ics->lock);
471 offset = bitmap_find_free_region(ics->bitmap, ics->count, order);
472 spin_unlock_irq(&ics->lock);
473
474 if (offset < 0)
475 return offset;
476
477 return offset + ics->hwirq_start;
478}
479
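As the comment warns, rounding the allocation to a power of two over-reserves when num is not one. A worked example:

	/* Hypothetical request: num = 5
	 *   order = get_count_order(5) = 3
	 * bitmap_find_free_region() carves out 2^3 = 8 slots, 3 unused. */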
480void wsp_ics_free_irq(struct device_node *dn, unsigned int irq)
481{
482 struct wsp_ics *ics;
483
484 ics = wsp_ics_find_dn_ics(dn);
485 if (WARN_ON(!ics))
486 return;
487
488 spin_lock_irq(&ics->lock);
489 bitmap_release_region(ics->bitmap, irq, 0);
490 spin_unlock_irq(&ics->lock);
491}
492
493/* Initialisation */
494
495static int __init wsp_ics_bitmap_setup(struct wsp_ics *ics,
496 struct device_node *dn)
497{
498 int len, i, j, size;
499 u32 start, count;
500 const u32 *p;
501
502 size = BITS_TO_LONGS(ics->count) * sizeof(long);
503 ics->bitmap = kzalloc(size, GFP_KERNEL);
504 if (!ics->bitmap) {
505 pr_err("wsp_ics: ENOMEM allocating IRQ bitmap!\n");
506 return -ENOMEM;
507 }
508
509 spin_lock_init(&ics->lock);
510
511 p = of_get_property(dn, "available-ranges", &len);
512 if (!p || !len) {
513 /* FIXME this should be a WARN() once mambo is updated */
514 pr_err("wsp_ics: No available-ranges defined for %s\n",
515 dn->full_name);
516 return 0;
517 }
518
519 if (len % (2 * sizeof(u32)) != 0) {
520 /* FIXME this should be a WARN() once mambo is updated */
521 pr_err("wsp_ics: Invalid available-ranges for %s\n",
522 dn->full_name);
523 return 0;
524 }
525
526 bitmap_fill(ics->bitmap, ics->count);
527
528 for (i = 0; i < len / sizeof(u32); i += 2) {
529 start = of_read_number(p + i, 1);
530 count = of_read_number(p + i + 1, 1);
531
532 pr_devel("%s: start: %d count: %d\n", __func__, start, count);
533
534 if ((start + count) > (ics->hwirq_start + ics->count) ||
535 start < ics->hwirq_start) {
536 pr_err("wsp_ics: Invalid range! -> %d to %d\n",
537 start, start + count);
538 break;
539 }
540
541 for (j = 0; j < count; j++)
542 bitmap_release_region(ics->bitmap,
543 (start + j) - ics->hwirq_start, 0);
544 }
545
546 /* Ensure LSIs are not available for allocation */
547 bitmap_allocate_region(ics->bitmap, ics->lsi_base,
548 get_count_order(ics->lsi_count));
549
550 return 0;
551}
552
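The property is consumed as (start, count) pairs of global hwirq numbers. A hypothetical available-ranges for an ICS whose interrupt-ranges begins at 0x20000:

	/* Hypothetical "available-ranges" cells: <0x20010 0x40>
	 * start = 0x20010, count = 0x40: the loop releases bitmap bits
	 * 0x10..0x4f (offsets from hwirq_start), making them allocatable;
	 * everything else stays reserved by the earlier bitmap_fill(). */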
553static int __init wsp_ics_setup(struct wsp_ics *ics, struct device_node *dn)
554{
555 u32 lsi_buid, msi_buid, msi_base, msi_count;
556 void __iomem *regs;
557 const u32 *p;
558 int rc, len, i;
559 u64 caps, buid;
560
561 p = of_get_property(dn, "interrupt-ranges", &len);
562 if (!p || len < (2 * sizeof(u32))) {
563 pr_err("wsp_ics: No/bad interrupt-ranges found on %s\n",
564 dn->full_name);
565 return -ENOENT;
566 }
567
568 if (len > (2 * sizeof(u32))) {
569 pr_err("wsp_ics: Multiple ics ranges not supported.\n");
570 return -EINVAL;
571 }
572
573 regs = of_iomap(dn, 0);
574 if (!regs) {
575 pr_err("wsp_ics: of_iomap(%s) failed\n", dn->full_name);
576 return -ENXIO;
577 }
578
579 ics->hwirq_start = of_read_number(p, 1);
580 ics->count = of_read_number(p + 1, 1);
581 ics->regs = regs;
582
583 ics->chip_id = wsp_get_chip_id(dn);
584 if (WARN_ON(ics->chip_id < 0))
585 ics->chip_id = 0;
586
 587 /* Get some information about the critter */
588 caps = in_be64(ICS_INT_CAPS_REG(ics->regs));
589 buid = in_be64(INT_SRC_LAYER_BUID_REG(ics->regs));
590 ics->lsi_count = caps >> 56;
591 msi_count = (caps >> 44) & 0x7ff;
592
593 /* Note: LSI BUID is 9 bits, but really only 3 are BUID and the
594 * rest is mixed in the interrupt number. We store the whole
595 * thing though
596 */
597 lsi_buid = (buid >> 48) & 0x1ff;
598 ics->lsi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | lsi_buid << 5;
599 msi_buid = (buid >> 37) & 0x7;
600 msi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | msi_buid << 11;
601
602 pr_info("wsp_ics: Found %s\n", dn->full_name);
603 pr_info("wsp_ics: irq range : 0x%06llx..0x%06llx\n",
604 ics->hwirq_start, ics->hwirq_start + ics->count - 1);
605 pr_info("wsp_ics: %4d LSIs : 0x%06x..0x%06x\n",
606 ics->lsi_count, ics->lsi_base,
607 ics->lsi_base + ics->lsi_count - 1);
608 pr_info("wsp_ics: %4d MSIs : 0x%06x..0x%06x\n",
609 msi_count, msi_base,
610 msi_base + msi_count - 1);
611
612 /* Let's check the HW config is sane */
613 if (ics->lsi_base < ics->hwirq_start ||
614 (ics->lsi_base + ics->lsi_count) > (ics->hwirq_start + ics->count))
615 pr_warning("wsp_ics: WARNING ! LSIs out of interrupt-ranges !\n");
616 if (msi_base < ics->hwirq_start ||
617 (msi_base + msi_count) > (ics->hwirq_start + ics->count))
618 pr_warning("wsp_ics: WARNING ! MSIs out of interrupt-ranges !\n");
619
620 /* We don't check for overlap between LSI and MSI, which will happen
621 * if we use the same BUID, I'm not sure yet how legit that is.
622 */
623
624 rc = wsp_ics_bitmap_setup(ics, dn);
625 if (rc) {
626 iounmap(regs);
627 return rc;
628 }
629
630 ics->dn = of_node_get(dn);
631 alloc_irq_map(ics);
632
 633 for (i = 0; i < ics->count; i++)
634 wsp_mask_real_irq(ics->hwirq_start + i, ics);
635
636 ics->ics.map = wsp_ics_map;
637 ics->ics.mask_unknown = wsp_ics_mask_unknown;
638 ics->ics.get_server = wsp_ics_get_server;
639 ics->ics.host_match = wsp_ics_host_match;
640
641 xics_register_ics(&ics->ics);
642
643 return 0;
644}
645
646static void __init wsp_ics_set_default_server(void)
647{
648 struct device_node *np;
649 u32 hwid;
650
651 /* Find the server number for the boot cpu. */
652 np = of_get_cpu_node(boot_cpuid, NULL);
653 BUG_ON(!np);
654
655 hwid = get_hard_smp_processor_id(boot_cpuid);
656
657 pr_info("wsp_ics: default server is %#x, CPU %s\n", hwid, np->full_name);
658 xics_default_server = hwid;
659
660 of_node_put(np);
661}
662
663static int __init wsp_ics_init(void)
664{
665 struct device_node *dn;
666 struct wsp_ics *ics;
667 int rc, found;
668
669 wsp_ics_set_default_server();
670
671 found = 0;
672 for_each_compatible_node(dn, NULL, "ibm,ppc-xics")
673 found++;
674
675 if (found == 0) {
676 pr_err("wsp_ics: No ICS's found!\n");
677 return -ENODEV;
678 }
679
680 ics_list = kmalloc(sizeof(*ics) * found, GFP_KERNEL);
681 if (!ics_list) {
682 pr_err("wsp_ics: No memory for structs.\n");
683 return -ENOMEM;
684 }
685
686 num_ics = 0;
687 ics = ics_list;
688 for_each_compatible_node(dn, NULL, "ibm,wsp-xics") {
689 rc = wsp_ics_setup(ics, dn);
690 if (rc == 0) {
691 ics++;
692 num_ics++;
693 }
694 }
695
696 if (found != num_ics) {
697 pr_err("wsp_ics: Failed setting up %d ICS's\n",
698 found - num_ics);
699 return -1;
700 }
701
702 return 0;
703}
704
705void __init wsp_init_irq(void)
706{
707 wsp_ics_init();
708 xics_init();
709
710 /* We need to patch our irq chip's EOI to point to the right ICP */
711 wsp_irq_chip.irq_eoi = icp_ops->eoi;
712}
diff --git a/arch/powerpc/platforms/wsp/ics.h b/arch/powerpc/platforms/wsp/ics.h
new file mode 100644
index 000000000000..e34d53102640
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/ics.h
@@ -0,0 +1,20 @@
1/*
2 * Copyright 2009 IBM Corporation.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#ifndef __ICS_H
11#define __ICS_H
12
13#define XIVE_ADDR_MASK 0x7FFULL
14
15extern void wsp_init_irq(void);
16
17extern int wsp_ics_alloc_irq(struct device_node *dn, int num);
18extern void wsp_ics_free_irq(struct device_node *dn, unsigned int irq);
19
20#endif /* __ICS_H */
diff --git a/arch/powerpc/platforms/wsp/opb_pic.c b/arch/powerpc/platforms/wsp/opb_pic.c
new file mode 100644
index 000000000000..be05631a3c1c
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/opb_pic.c
@@ -0,0 +1,332 @@
1/*
2 * IBM Onboard Peripheral Bus Interrupt Controller
3 *
4 * Copyright 2010 Jack Miller, IBM Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/interrupt.h>
13#include <linux/io.h>
14#include <linux/irq.h>
15#include <linux/of.h>
16#include <linux/slab.h>
17#include <linux/time.h>
18
19#include <asm/reg_a2.h>
20#include <asm/irq.h>
21
22#define OPB_NR_IRQS 32
23
24#define OPB_MLSASIER 0x04 /* MLS Accumulated Status IER */
25#define OPB_MLSIR 0x50 /* MLS Interrupt Register */
26#define OPB_MLSIER 0x54 /* MLS Interrupt Enable Register */
27#define OPB_MLSIPR 0x58 /* MLS Interrupt Polarity Register */
28#define OPB_MLSIIR 0x5c /* MLS Interrupt Inputs Register */
29
30static int opb_index = 0;
31
32struct opb_pic {
33 struct irq_host *host;
34 void *regs;
35 int index;
36 spinlock_t lock;
37};
38
39static u32 opb_in(struct opb_pic *opb, int offset)
40{
41 return in_be32(opb->regs + offset);
42}
43
44static void opb_out(struct opb_pic *opb, int offset, u32 val)
45{
46 out_be32(opb->regs + offset, val);
47}
48
49static void opb_unmask_irq(struct irq_data *d)
50{
51 struct opb_pic *opb;
52 unsigned long flags;
53 u32 ier, bitset;
54
55 opb = d->chip_data;
56 bitset = (1 << (31 - irqd_to_hwirq(d)));
57
58 spin_lock_irqsave(&opb->lock, flags);
59
60 ier = opb_in(opb, OPB_MLSIER);
61 opb_out(opb, OPB_MLSIER, ier | bitset);
62 ier = opb_in(opb, OPB_MLSIER);
63
64 spin_unlock_irqrestore(&opb->lock, flags);
65}
66
67static void opb_mask_irq(struct irq_data *d)
68{
69 struct opb_pic *opb;
70 unsigned long flags;
71 u32 ier, mask;
72
73 opb = d->chip_data;
74 mask = ~(1 << (31 - irqd_to_hwirq(d)));
75
76 spin_lock_irqsave(&opb->lock, flags);
77
78 ier = opb_in(opb, OPB_MLSIER);
79 opb_out(opb, OPB_MLSIER, ier & mask);
80 ier = opb_in(opb, OPB_MLSIER); // Flush posted writes
81
82 spin_unlock_irqrestore(&opb->lock, flags);
83}
84
85static void opb_ack_irq(struct irq_data *d)
86{
87 struct opb_pic *opb;
88 unsigned long flags;
89 u32 bitset;
90
91 opb = d->chip_data;
92 bitset = (1 << (31 - irqd_to_hwirq(d)));
93
94 spin_lock_irqsave(&opb->lock, flags);
95
96 opb_out(opb, OPB_MLSIR, bitset);
97 opb_in(opb, OPB_MLSIR); // Flush posted writes
98
99 spin_unlock_irqrestore(&opb->lock, flags);
100}
101
102static void opb_mask_ack_irq(struct irq_data *d)
103{
104 struct opb_pic *opb;
105 unsigned long flags;
106 u32 bitset;
107 u32 ier, ir;
108
109 opb = d->chip_data;
110 bitset = (1 << (31 - irqd_to_hwirq(d)));
111
112 spin_lock_irqsave(&opb->lock, flags);
113
114 ier = opb_in(opb, OPB_MLSIER);
115 opb_out(opb, OPB_MLSIER, ier & ~bitset);
116 ier = opb_in(opb, OPB_MLSIER); // Flush posted writes
117
118 opb_out(opb, OPB_MLSIR, bitset);
119 ir = opb_in(opb, OPB_MLSIR); // Flush posted writes
120
121 spin_unlock_irqrestore(&opb->lock, flags);
122}
123
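Each enable/ack write above is chased by a dummy read of the same register: writes on this bus are posted, and the read-back forces them to complete before the lock is released. The idiom in isolation, as a sketch using the raw accessors (register pointer and value names are hypothetical):

	out_be32(regs + OPB_MLSIER, ier);	/* posted write... */
	(void)in_be32(regs + OPB_MLSIER);	/* ...read back to flush it */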
124static int opb_set_irq_type(struct irq_data *d, unsigned int flow)
125{
126 struct opb_pic *opb;
127 unsigned long flags;
128 int invert, ipr, mask, bit;
129
130 opb = d->chip_data;
131
 132 /* The only information we're interested in from the type is whether it's
 133 * a high or low trigger. For high-triggered interrupts, the polarity
 134 * set for it in the MLS Interrupt Polarity Register is 0, for low
 135 * interrupts it's 1, so that the proper input in the MLS Interrupt Input
 136 * Register is interpreted as asserting the interrupt. */
137
138 switch (flow) {
139 case IRQ_TYPE_NONE:
140 opb_mask_irq(d);
141 return 0;
142
143 case IRQ_TYPE_LEVEL_HIGH:
144 invert = 0;
145 break;
146
147 case IRQ_TYPE_LEVEL_LOW:
148 invert = 1;
149 break;
150
151 default:
152 return -EINVAL;
153 }
154
155 bit = (1 << (31 - irqd_to_hwirq(d)));
156 mask = ~bit;
157
158 spin_lock_irqsave(&opb->lock, flags);
159
160 ipr = opb_in(opb, OPB_MLSIPR);
161 ipr = (ipr & mask) | (invert ? bit : 0);
162 opb_out(opb, OPB_MLSIPR, ipr);
163 ipr = opb_in(opb, OPB_MLSIPR); // Flush posted writes
164
165 spin_unlock_irqrestore(&opb->lock, flags);
166
167 /* Record the type in the interrupt descriptor */
168 irqd_set_trigger_type(d, flow);
169
170 return 0;
171}
172
173static struct irq_chip opb_irq_chip = {
174 .name = "OPB",
175 .irq_mask = opb_mask_irq,
176 .irq_unmask = opb_unmask_irq,
177 .irq_mask_ack = opb_mask_ack_irq,
178 .irq_ack = opb_ack_irq,
179 .irq_set_type = opb_set_irq_type
180};
181
182static int opb_host_map(struct irq_host *host, unsigned int virq,
183 irq_hw_number_t hwirq)
184{
185 struct opb_pic *opb;
186
187 opb = host->host_data;
188
189 /* Most of the important stuff is handled by the generic host code, like
190 * the lookup, so just attach some info to the virtual irq */
191
192 irq_set_chip_data(virq, opb);
193 irq_set_chip_and_handler(virq, &opb_irq_chip, handle_level_irq);
194 irq_set_irq_type(virq, IRQ_TYPE_NONE);
195
196 return 0;
197}
198
199static int opb_host_xlate(struct irq_host *host, struct device_node *dn,
200 const u32 *intspec, unsigned int intsize,
201 irq_hw_number_t *out_hwirq, unsigned int *out_type)
202{
203 /* Interrupt size must == 2 */
204 BUG_ON(intsize != 2);
205 *out_hwirq = intspec[0];
206 *out_type = intspec[1];
207 return 0;
208}
209
210static struct irq_host_ops opb_host_ops = {
211 .map = opb_host_map,
212 .xlate = opb_host_xlate,
213};
214
215irqreturn_t opb_irq_handler(int irq, void *private)
216{
217 struct opb_pic *opb;
218 u32 ir, src, subvirq;
219
220 opb = (struct opb_pic *) private;
221
222 /* Read the OPB MLS Interrupt Register for
223 * asserted interrupts */
224 ir = opb_in(opb, OPB_MLSIR);
225 if (!ir)
226 return IRQ_NONE;
227
228 do {
 229 /* Convert ffs()'s 1 - 32 bit position into the 0 - 31 source number */
230 src = 32 - ffs(ir);
231
232 /* Translate from the OPB's conception of interrupt number to
233 * Linux's virtual IRQ */
234
235 subvirq = irq_linear_revmap(opb->host, src);
236
237 generic_handle_irq(subvirq);
238 } while ((ir = opb_in(opb, OPB_MLSIR)));
239
240 return IRQ_HANDLED;
241}
242
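The MLSIR numbers sources from the most-significant bit downwards, so the conversion inverts ffs()'s 1-based little-endian bit index. Worked examples:

	/* ir = 0x80000000 (bit 31 set): ffs(ir) = 32 -> src = 32 - 32 = 0
	 * ir = 0x00000001 (bit 0 set):  ffs(ir) = 1  -> src = 32 - 1  = 31
	 * With several bits set, ffs() picks the lowest bit, i.e. the
	 * highest-numbered pending source; the loop re-reads MLSIR until
	 * all sources are drained. */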
243struct opb_pic *opb_pic_init_one(struct device_node *dn)
244{
245 struct opb_pic *opb;
246 struct resource res;
247
248 if (of_address_to_resource(dn, 0, &res)) {
249 printk(KERN_ERR "opb: Couldn't translate resource\n");
250 return NULL;
251 }
252
253 opb = kzalloc(sizeof(struct opb_pic), GFP_KERNEL);
254 if (!opb) {
255 printk(KERN_ERR "opb: Failed to allocate opb struct!\n");
256 return NULL;
257 }
258
259 /* Get access to the OPB MMIO registers */
260 opb->regs = ioremap(res.start + 0x10000, 0x1000);
261 if (!opb->regs) {
262 printk(KERN_ERR "opb: Failed to allocate register space!\n");
263 goto free_opb;
264 }
265
266 /* Allocate an irq host so that Linux knows that despite only
267 * having one interrupt to issue, we're the controller for multiple
 268 * hardware IRQs, so later we can look up their virtual IRQs. */
269
270 opb->host = irq_alloc_host(dn, IRQ_HOST_MAP_LINEAR,
271 OPB_NR_IRQS, &opb_host_ops, -1);
272
273 if (!opb->host) {
274 printk(KERN_ERR "opb: Failed to allocate IRQ host!\n");
275 goto free_regs;
276 }
277
278 opb->index = opb_index++;
279 spin_lock_init(&opb->lock);
280 opb->host->host_data = opb;
281
282 /* Disable all interrupts by default */
283 opb_out(opb, OPB_MLSASIER, 0);
284 opb_out(opb, OPB_MLSIER, 0);
285
286 /* ACK any interrupts left by FW */
287 opb_out(opb, OPB_MLSIR, 0xFFFFFFFF);
288
289 return opb;
290
291free_regs:
292 iounmap(opb->regs);
293free_opb:
294 kfree(opb);
295 return NULL;
296}
297
298void __init opb_pic_init(void)
299{
300 struct device_node *dn;
301 struct opb_pic *opb;
302 int virq;
303 int rc;
304
305 /* Call init_one for each OPB device */
306 for_each_compatible_node(dn, NULL, "ibm,opb") {
307
308 /* Fill in an OPB struct */
309 opb = opb_pic_init_one(dn);
310 if (!opb) {
311 printk(KERN_WARNING "opb: Failed to init node, skipped!\n");
312 continue;
313 }
314
315 /* Map / get opb's hardware virtual irq */
316 virq = irq_of_parse_and_map(dn, 0);
317 if (virq <= 0) {
318 printk("opb: irq_op_parse_and_map failed!\n");
319 continue;
320 }
321
322 /* Attach opb interrupt handler to new virtual IRQ */
323 rc = request_irq(virq, opb_irq_handler, 0, "OPB LS Cascade", opb);
324 if (rc) {
325 printk("opb: request_irq failed: %d\n", rc);
326 continue;
327 }
328
329 printk("OPB%d init with %d IRQs at %p\n", opb->index,
330 OPB_NR_IRQS, opb->regs);
331 }
332}
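
A note on the cascade decode above: opb_irq_handler turns each bit set in the
MLS Interrupt Register into a source number with src = 32 - ffs(ir), so
sources count 0-31 starting from the most-significant bit. A minimal
userspace sketch of that mapping; the register value is made up for
illustration:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int ir = 0x80000001;	/* hypothetical MLSIR snapshot */

	while (ir) {
		int src = 32 - ffs(ir);		/* 1-32 bit index -> 0-31 source */
		printf("handle source %d\n", src);
		ir &= ~(1u << (31 - src));	/* drop it, as the real ACK would */
	}
	return 0;	/* prints sources 31 then 0 */
}
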
diff --git a/arch/powerpc/platforms/wsp/psr2.c b/arch/powerpc/platforms/wsp/psr2.c
new file mode 100644
index 000000000000..40f28916ff6c
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/psr2.c
@@ -0,0 +1,95 @@
1/*
2 * Copyright 2008-2011, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/delay.h>
11#include <linux/init.h>
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/mm.h>
15#include <linux/of.h>
16#include <linux/smp.h>
17
18#include <asm/machdep.h>
19#include <asm/system.h>
20#include <asm/time.h>
21#include <asm/udbg.h>
22
23#include "ics.h"
24#include "wsp.h"
25
26
27static void psr2_spin(void)
28{
29 hard_irq_disable();
30 for (;;) ;
31}
32
33static void psr2_restart(char *cmd)
34{
35 psr2_spin();
36}
37
38static int psr2_probe_devices(void)
39{
40 struct device_node *np;
41
42	/* Our RTC is a ds1500. It seems to be programmatically compatible
43	 * with the ds1511, for which we have a driver, so let's use that.
44	 */
45 np = of_find_compatible_node(NULL, NULL, "dallas,ds1500");
46 if (np != NULL) {
47 struct resource res;
48 if (of_address_to_resource(np, 0, &res) == 0)
49 platform_device_register_simple("ds1511", 0, &res, 1);
50 }
51 return 0;
52}
53machine_arch_initcall(psr2_md, psr2_probe_devices);
54
55static void __init psr2_setup_arch(void)
56{
57 /* init to some ~sane value until calibrate_delay() runs */
58 loops_per_jiffy = 50000000;
59
60 scom_init_wsp();
61
62 /* Setup SMP callback */
63#ifdef CONFIG_SMP
64 a2_setup_smp();
65#endif
66}
67
68static int __init psr2_probe(void)
69{
70 unsigned long root = of_get_flat_dt_root();
71
72 if (!of_flat_dt_is_compatible(root, "ibm,psr2"))
73 return 0;
74
75 return 1;
76}
77
78static void __init psr2_init_irq(void)
79{
80 wsp_init_irq();
81 opb_pic_init();
82}
83
84define_machine(psr2_md) {
85 .name = "PSR2 A2",
86 .probe = psr2_probe,
87 .setup_arch = psr2_setup_arch,
88 .restart = psr2_restart,
89 .power_off = psr2_spin,
90 .halt = psr2_spin,
91 .calibrate_decr = generic_calibrate_decr,
92 .init_IRQ = psr2_init_irq,
93 .progress = udbg_progress,
94 .power_save = book3e_idle,
95};
diff --git a/arch/powerpc/platforms/wsp/scom_smp.c b/arch/powerpc/platforms/wsp/scom_smp.c
new file mode 100644
index 000000000000..141e78032097
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/scom_smp.c
@@ -0,0 +1,427 @@
1/*
2 * SCOM support for A2 platforms
3 *
4 * Copyright 2007-2011 Benjamin Herrenschmidt, David Gibson,
5 * Michael Ellerman, IBM Corp.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/cpumask.h>
14#include <linux/io.h>
15#include <linux/of.h>
16#include <linux/spinlock.h>
17#include <linux/types.h>
18
19#include <asm/cputhreads.h>
20#include <asm/reg_a2.h>
21#include <asm/scom.h>
22#include <asm/udbg.h>
23
24#include "wsp.h"
25
26#define SCOM_RAMC 0x2a /* Ram Command */
27#define SCOM_RAMC_TGT1_EXT 0x80000000
28#define SCOM_RAMC_SRC1_EXT 0x40000000
29#define SCOM_RAMC_SRC2_EXT 0x20000000
30#define SCOM_RAMC_SRC3_EXT 0x10000000
31#define SCOM_RAMC_ENABLE 0x00080000
32#define SCOM_RAMC_THREADSEL 0x00060000
33#define SCOM_RAMC_EXECUTE 0x00010000
34#define SCOM_RAMC_MSR_OVERRIDE 0x00008000
35#define SCOM_RAMC_MSR_PR 0x00004000
36#define SCOM_RAMC_MSR_GS 0x00002000
37#define SCOM_RAMC_FORCE 0x00001000
38#define SCOM_RAMC_FLUSH 0x00000800
39#define SCOM_RAMC_INTERRUPT 0x00000004
40#define SCOM_RAMC_ERROR 0x00000002
41#define SCOM_RAMC_DONE 0x00000001
42#define SCOM_RAMI 0x29 /* Ram Instruction */
43#define SCOM_RAMIC 0x28 /* Ram Instruction and Command */
44#define SCOM_RAMIC_INSN 0xffffffff00000000
45#define SCOM_RAMD 0x2d /* Ram Data */
46#define SCOM_RAMDH 0x2e /* Ram Data High */
47#define SCOM_RAMDL 0x2f /* Ram Data Low */
48#define SCOM_PCCR0 0x33 /* PC Configuration Register 0 */
49#define SCOM_PCCR0_ENABLE_DEBUG 0x80000000
50#define SCOM_PCCR0_ENABLE_RAM 0x40000000
51#define SCOM_THRCTL 0x30 /* Thread Control and Status */
52#define SCOM_THRCTL_T0_STOP 0x80000000
53#define SCOM_THRCTL_T1_STOP 0x40000000
54#define SCOM_THRCTL_T2_STOP 0x20000000
55#define SCOM_THRCTL_T3_STOP 0x10000000
56#define SCOM_THRCTL_T0_STEP 0x08000000
57#define SCOM_THRCTL_T1_STEP 0x04000000
58#define SCOM_THRCTL_T2_STEP 0x02000000
59#define SCOM_THRCTL_T3_STEP 0x01000000
60#define SCOM_THRCTL_T0_RUN 0x00800000
61#define SCOM_THRCTL_T1_RUN 0x00400000
62#define SCOM_THRCTL_T2_RUN 0x00200000
63#define SCOM_THRCTL_T3_RUN 0x00100000
64#define SCOM_THRCTL_T0_PM 0x00080000
65#define SCOM_THRCTL_T1_PM 0x00040000
66#define SCOM_THRCTL_T2_PM 0x00020000
67#define SCOM_THRCTL_T3_PM 0x00010000
68#define SCOM_THRCTL_T0_UDE 0x00008000
69#define SCOM_THRCTL_T1_UDE 0x00004000
70#define SCOM_THRCTL_T2_UDE 0x00002000
71#define SCOM_THRCTL_T3_UDE 0x00001000
72#define SCOM_THRCTL_ASYNC_DIS 0x00000800
73#define SCOM_THRCTL_TB_DIS 0x00000400
74#define SCOM_THRCTL_DEC_DIS 0x00000200
75#define SCOM_THRCTL_AND	0x31	/* Thread Control and Status (AND update) */
76#define SCOM_THRCTL_OR	0x32	/* Thread Control and Status (OR update) */
77
78
79static DEFINE_PER_CPU(scom_map_t, scom_ptrs);
80
81static scom_map_t get_scom(int cpu, struct device_node *np, int *first_thread)
82{
83 scom_map_t scom = per_cpu(scom_ptrs, cpu);
84 int tcpu;
85
86 if (scom_map_ok(scom)) {
87 *first_thread = 0;
88 return scom;
89 }
90
91 *first_thread = 1;
92
93 scom = scom_map_device(np, 0);
94
95 for (tcpu = cpu_first_thread_sibling(cpu);
96 tcpu <= cpu_last_thread_sibling(cpu); tcpu++)
97 per_cpu(scom_ptrs, tcpu) = scom;
98
99	/* Hack: for the boot core, this will actually get called on
100	 * the second thread up, not the first, so the test above would
101	 * have set first_thread incorrectly. */
102 if (cpu_first_thread_sibling(cpu) == 0)
103 *first_thread = 0;
104
105 return scom;
106}
107
108static int a2_scom_ram(scom_map_t scom, int thread, u32 insn, int extmask)
109{
110 u64 cmd, mask, val;
111 int n = 0;
112
113 cmd = ((u64)insn << 32) | (((u64)extmask & 0xf) << 28)
114 | ((u64)thread << 17) | SCOM_RAMC_ENABLE | SCOM_RAMC_EXECUTE;
115 mask = SCOM_RAMC_DONE | SCOM_RAMC_INTERRUPT | SCOM_RAMC_ERROR;
116
117 scom_write(scom, SCOM_RAMIC, cmd);
118
119 while (!((val = scom_read(scom, SCOM_RAMC)) & mask)) {
120 pr_devel("Waiting on RAMC = 0x%llx\n", val);
121 if (++n == 3) {
122 pr_err("RAMC timeout on instruction 0x%08x, thread %d\n",
123 insn, thread);
124 return -1;
125 }
126 }
127
128 if (val & SCOM_RAMC_INTERRUPT) {
129 pr_err("RAMC interrupt on instruction 0x%08x, thread %d\n",
130 insn, thread);
131 return -SCOM_RAMC_INTERRUPT;
132 }
133
134 if (val & SCOM_RAMC_ERROR) {
135 pr_err("RAMC error on instruction 0x%08x, thread %d\n",
136 insn, thread);
137 return -SCOM_RAMC_ERROR;
138 }
139
140 return 0;
141}
142
143static int a2_scom_getgpr(scom_map_t scom, int thread, int gpr, int alt,
144 u64 *out_gpr)
145{
146 int rc;
147
148 /* or rN, rN, rN */
149 u32 insn = 0x7c000378 | (gpr << 21) | (gpr << 16) | (gpr << 11);
150 rc = a2_scom_ram(scom, thread, insn, alt ? 0xf : 0x0);
151 if (rc)
152 return rc;
153
154 *out_gpr = scom_read(scom, SCOM_RAMD);
155
156 return 0;
157}
158
159static int a2_scom_getspr(scom_map_t scom, int thread, int spr, u64 *out_spr)
160{
161 int rc, sprhi, sprlo;
162 u32 insn;
163
164 sprhi = spr >> 5;
165 sprlo = spr & 0x1f;
166 insn = 0x7c2002a6 | (sprlo << 16) | (sprhi << 11); /* mfspr r1,spr */
167
168 if (spr == 0x0ff0)
169 insn = 0x7c2000a6; /* mfmsr r1 */
170
171 rc = a2_scom_ram(scom, thread, insn, 0xf);
172 if (rc)
173 return rc;
174 return a2_scom_getgpr(scom, thread, 1, 1, out_spr);
175}
176
177static int a2_scom_setgpr(scom_map_t scom, int thread, int gpr,
178 int alt, u64 val)
179{
180 u32 lis = 0x3c000000 | (gpr << 21);
181 u32 li = 0x38000000 | (gpr << 21);
182 u32 oris = 0x64000000 | (gpr << 21) | (gpr << 16);
183 u32 ori = 0x60000000 | (gpr << 21) | (gpr << 16);
184 u32 rldicr32 = 0x780007c6 | (gpr << 21) | (gpr << 16);
185 u32 highest = val >> 48;
186 u32 higher = (val >> 32) & 0xffff;
187 u32 high = (val >> 16) & 0xffff;
188 u32 low = val & 0xffff;
189 int lext = alt ? 0x8 : 0x0;
190 int oext = alt ? 0xf : 0x0;
191 int rc = 0;
192
193 if (highest)
194 rc |= a2_scom_ram(scom, thread, lis | highest, lext);
195
196 if (higher) {
197 if (highest)
198 rc |= a2_scom_ram(scom, thread, oris | higher, oext);
199 else
200 rc |= a2_scom_ram(scom, thread, li | higher, lext);
201 }
202
203 if (highest || higher)
204 rc |= a2_scom_ram(scom, thread, rldicr32, oext);
205
206 if (high) {
207 if (highest || higher)
208 rc |= a2_scom_ram(scom, thread, oris | high, oext);
209 else
210 rc |= a2_scom_ram(scom, thread, lis | high, lext);
211 }
212
213 if (highest || higher || high)
214 rc |= a2_scom_ram(scom, thread, ori | low, oext);
215 else
216 rc |= a2_scom_ram(scom, thread, li | low, lext);
217
218 return rc;
219}
220
221static int a2_scom_setspr(scom_map_t scom, int thread, int spr, u64 val)
222{
223 int sprhi = spr >> 5;
224 int sprlo = spr & 0x1f;
225 /* mtspr spr, r1 */
226 u32 insn = 0x7c2003a6 | (sprlo << 16) | (sprhi << 11);
227
228 if (spr == 0x0ff0)
229 insn = 0x7c200124; /* mtmsr r1 */
230
231 if (a2_scom_setgpr(scom, thread, 1, 1, val))
232 return -1;
233
234 return a2_scom_ram(scom, thread, insn, 0xf);
235}
236
237static int a2_scom_initial_tlb(scom_map_t scom, int thread)
238{
239 extern u32 a2_tlbinit_code_start[], a2_tlbinit_code_end[];
240 extern u32 a2_tlbinit_after_iprot_flush[];
241 extern u32 a2_tlbinit_after_linear_map[];
242 u32 assoc, entries, i;
243 u64 epn, tlbcfg;
244 u32 *p;
245 int rc;
246
247 /* Invalidate all entries (including iprot) */
248
249 rc = a2_scom_getspr(scom, thread, SPRN_TLB0CFG, &tlbcfg);
250 if (rc)
251 goto scom_fail;
252 entries = tlbcfg & TLBnCFG_N_ENTRY;
253 assoc = (tlbcfg & TLBnCFG_ASSOC) >> 24;
254 epn = 0;
255
256 /* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */
257 a2_scom_setspr(scom, thread, SPRN_MMUCR2, 0x000a7531);
258	/* Set MMUCR3 to write all thread-ID (thdid) bits to the TLB */
259 a2_scom_setspr(scom, thread, SPRN_MMUCR3, 0x0000000f);
260
261 /* Set MAS1 for 1G page size, and MAS2 to our initial EPN */
262 a2_scom_setspr(scom, thread, SPRN_MAS1, MAS1_TSIZE(BOOK3E_PAGESZ_1GB));
263 a2_scom_setspr(scom, thread, SPRN_MAS2, epn);
264 for (i = 0; i < entries; i++) {
265
266 a2_scom_setspr(scom, thread, SPRN_MAS0, MAS0_ESEL(i % assoc));
267
268 /* tlbwe */
269 rc = a2_scom_ram(scom, thread, 0x7c0007a4, 0);
270 if (rc)
271 goto scom_fail;
272
273 /* Next entry is new address? */
274		if ((i + 1) % assoc == 0) {
275 epn += (1 << 30);
276 a2_scom_setspr(scom, thread, SPRN_MAS2, epn);
277 }
278 }
279
280 /* Setup args for linear mapping */
281 rc = a2_scom_setgpr(scom, thread, 3, 0, MAS0_TLBSEL(0));
282 if (rc)
283 goto scom_fail;
284
285 /* Linear mapping */
286 for (p = a2_tlbinit_code_start; p < a2_tlbinit_after_linear_map; p++) {
287 rc = a2_scom_ram(scom, thread, *p, 0);
288 if (rc)
289 goto scom_fail;
290 }
291
292	/*
293	 * For the boot thread, between the linear mapping and the debug
294	 * mappings there is a loop that flushes the iprot mappings. RAMming
295	 * can't execute branches, but the secondary threads don't need that
296	 * loop anyway (i.e. we don't need to worry about invalidating the
297	 * mapping we're standing on), so it is skipped here.
298	 */
299
300 /* Debug mappings. Expects r11 = MAS0 from linear map (set above) */
301 for (p = a2_tlbinit_after_iprot_flush; p < a2_tlbinit_code_end; p++) {
302 rc = a2_scom_ram(scom, thread, *p, 0);
303 if (rc)
304 goto scom_fail;
305 }
306
307scom_fail:
308 if (rc)
309 pr_err("Setting up initial TLB failed, err %d\n", rc);
310
311 if (rc == -SCOM_RAMC_INTERRUPT) {
312 /* Interrupt, dump some status */
313		int err[10];
314		u64 iar, srr0, srr1, esr, mas0, mas1, mas2, mas7_3, mas8, ccr2;
315		err[0] = a2_scom_getspr(scom, thread, SPRN_IAR, &iar);
316		err[1] = a2_scom_getspr(scom, thread, SPRN_SRR0, &srr0);
317		err[2] = a2_scom_getspr(scom, thread, SPRN_SRR1, &srr1);
318		err[3] = a2_scom_getspr(scom, thread, SPRN_ESR, &esr);
319		err[4] = a2_scom_getspr(scom, thread, SPRN_MAS0, &mas0);
320		err[5] = a2_scom_getspr(scom, thread, SPRN_MAS1, &mas1);
321		err[6] = a2_scom_getspr(scom, thread, SPRN_MAS2, &mas2);
322		err[7] = a2_scom_getspr(scom, thread, SPRN_MAS7_MAS3, &mas7_3);
323		err[8] = a2_scom_getspr(scom, thread, SPRN_MAS8, &mas8);
324		err[9] = a2_scom_getspr(scom, thread, SPRN_A2_CCR2, &ccr2);
325		pr_err(" -> retrieved IAR =0x%llx (err %d)\n", iar, err[0]);
326		pr_err("    retrieved SRR0=0x%llx (err %d)\n", srr0, err[1]);
327		pr_err("    retrieved SRR1=0x%llx (err %d)\n", srr1, err[2]);
328		pr_err("    retrieved ESR =0x%llx (err %d)\n", esr, err[3]);
329		pr_err("    retrieved MAS0=0x%llx (err %d)\n", mas0, err[4]);
330		pr_err("    retrieved MAS1=0x%llx (err %d)\n", mas1, err[5]);
331		pr_err("    retrieved MAS2=0x%llx (err %d)\n", mas2, err[6]);
332		pr_err("    retrieved MS73=0x%llx (err %d)\n", mas7_3, err[7]);
333		pr_err("    retrieved MAS8=0x%llx (err %d)\n", mas8, err[8]);
334		pr_err("    retrieved CCR2=0x%llx (err %d)\n", ccr2, err[9]);
335 }
336
337 return rc;
338}
339
340int __devinit a2_scom_startup_cpu(unsigned int lcpu, int thr_idx,
341 struct device_node *np)
342{
343 u64 init_iar, init_msr, init_ccr2;
344 unsigned long start_here;
345 int rc, core_setup;
346 scom_map_t scom;
347 u64 pccr0;
348
349 scom = get_scom(lcpu, np, &core_setup);
350 if (!scom) {
351 printk(KERN_ERR "Couldn't map SCOM for CPU%d\n", lcpu);
352 return -1;
353 }
354
355 pr_devel("Bringing up CPU%d using SCOM...\n", lcpu);
356
357 pccr0 = scom_read(scom, SCOM_PCCR0);
358 scom_write(scom, SCOM_PCCR0, pccr0 | SCOM_PCCR0_ENABLE_DEBUG |
359 SCOM_PCCR0_ENABLE_RAM);
360
361	/* Stop the thread with THRCTL. If we are setting up the TLB we stop all
362 * threads. We also disable asynchronous interrupts while RAMing.
363 */
364 if (core_setup)
365 scom_write(scom, SCOM_THRCTL_OR,
366 SCOM_THRCTL_T0_STOP |
367 SCOM_THRCTL_T1_STOP |
368 SCOM_THRCTL_T2_STOP |
369 SCOM_THRCTL_T3_STOP |
370 SCOM_THRCTL_ASYNC_DIS);
371 else
372 scom_write(scom, SCOM_THRCTL_OR, SCOM_THRCTL_T0_STOP >> thr_idx);
373
374 /* Flush its pipeline just in case */
375 scom_write(scom, SCOM_RAMC, ((u64)thr_idx << 17) |
376 SCOM_RAMC_FLUSH | SCOM_RAMC_ENABLE);
377
378 a2_scom_getspr(scom, thr_idx, SPRN_IAR, &init_iar);
379 a2_scom_getspr(scom, thr_idx, 0x0ff0, &init_msr);
380 a2_scom_getspr(scom, thr_idx, SPRN_A2_CCR2, &init_ccr2);
381
382	/* Set the MSR to MSR_CM (0x0ff0 is our magic SPR number for the MSR) */
383 rc = a2_scom_setspr(scom, thr_idx, 0x0ff0, MSR_CM);
384 if (rc) {
385		pr_err("Failed to set MSR! err %d\n", rc);
386 return rc;
387 }
388
389	/* RAM in a sync/isync pair for good measure */
390 a2_scom_ram(scom, thr_idx, 0x7c0004ac, 0);
391 a2_scom_ram(scom, thr_idx, 0x4c00012c, 0);
392
393 if (core_setup) {
394 pr_devel("CPU%d is first thread in core, initializing TLB...\n",
395 lcpu);
396 rc = a2_scom_initial_tlb(scom, thr_idx);
397 if (rc)
398 goto fail;
399 }
400
401 start_here = *(unsigned long *)(core_setup ? generic_secondary_smp_init
402 : generic_secondary_thread_init);
403 pr_devel("CPU%d entry point at 0x%lx...\n", lcpu, start_here);
404
405 rc |= a2_scom_setspr(scom, thr_idx, SPRN_IAR, start_here);
406 rc |= a2_scom_setgpr(scom, thr_idx, 3, 0,
407 get_hard_smp_processor_id(lcpu));
408 /*
409 * Tell book3e_secondary_core_init not to set up the TLB, we've
410 * already done that.
411 */
412 rc |= a2_scom_setgpr(scom, thr_idx, 4, 0, 1);
413
414 rc |= a2_scom_setspr(scom, thr_idx, SPRN_TENS, 0x1 << thr_idx);
415
416 scom_write(scom, SCOM_RAMC, 0);
417 scom_write(scom, SCOM_THRCTL_AND, ~(SCOM_THRCTL_T0_STOP >> thr_idx));
418 scom_write(scom, SCOM_PCCR0, pccr0);
419fail:
420 pr_devel(" SCOM initialization %s\n", rc ? "failed" : "succeeded");
421 if (rc) {
422 pr_err("Old IAR=0x%08llx MSR=0x%08llx CCR2=0x%08llx\n",
423 init_iar, init_msr, init_ccr2);
424 }
425
426 return rc;
427}
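
A note on a2_scom_setgpr above: it loads a 64-bit immediate by RAMming up to
five instructions (lis/li and oris/ori to build the upper 32 bits, an rldicr
to shift them up by 32, then oris/ori for the low half). A host-side sketch
of the same decomposition, with a made-up value, checking that the four
16-bit chunks reassemble the original:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val = 0x123456789abcdef0ull;	/* hypothetical immediate */
	uint32_t highest = val >> 48;
	uint32_t higher  = (val >> 32) & 0xffff;
	uint32_t high    = (val >> 16) & 0xffff;
	uint32_t low     = val & 0xffff;

	uint64_t r = ((uint64_t)highest << 16) | higher;	/* lis; oris */
	r <<= 32;						/* rldicr rN,rN,32,31 */
	r |= ((uint64_t)high << 16) | low;			/* oris; ori */

	assert(r == val);
	printf("reassembled 0x%016llx\n", (unsigned long long)r);
	return 0;
}

This mirrors the all-chunks-non-zero path; the kernel code skips the
instructions for any 16-bit chunk that is zero.
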
diff --git a/arch/powerpc/platforms/wsp/scom_wsp.c b/arch/powerpc/platforms/wsp/scom_wsp.c
new file mode 100644
index 000000000000..4052e2259f30
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/scom_wsp.c
@@ -0,0 +1,77 @@
1/*
2 * SCOM backend for WSP
3 *
4 * Copyright 2010 Benjamin Herrenschmidt, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/cpumask.h>
13#include <linux/io.h>
14#include <linux/of.h>
15#include <linux/spinlock.h>
16#include <linux/types.h>
17
18#include <asm/cputhreads.h>
19#include <asm/reg_a2.h>
20#include <asm/scom.h>
21#include <asm/udbg.h>
22
23#include "wsp.h"
24
25
26static scom_map_t wsp_scom_map(struct device_node *dev, u64 reg, u64 count)
27{
28 struct resource r;
29 u64 xscom_addr;
30
31 if (!of_get_property(dev, "scom-controller", NULL)) {
32 pr_err("%s: device %s is not a SCOM controller\n",
33 __func__, dev->full_name);
34 return SCOM_MAP_INVALID;
35 }
36
37 if (of_address_to_resource(dev, 0, &r)) {
38 pr_debug("Failed to find SCOM controller address\n");
39		return SCOM_MAP_INVALID;
40 }
41
42 /* Transform the SCOM address into an XSCOM offset */
43 xscom_addr = ((reg & 0x7f000000) >> 1) | ((reg & 0xfffff) << 3);
44
45 return (scom_map_t)ioremap(r.start + xscom_addr, count << 3);
46}
47
48static void wsp_scom_unmap(scom_map_t map)
49{
50	iounmap((void __iomem *)map);
51}
52
53static u64 wsp_scom_read(scom_map_t map, u32 reg)
54{
55 u64 __iomem *addr = (u64 __iomem *)map;
56
57 return in_be64(addr + reg);
58}
59
60static void wsp_scom_write(scom_map_t map, u32 reg, u64 value)
61{
62 u64 __iomem *addr = (u64 __iomem *)map;
63
64	out_be64(addr + reg, value);
65}
66
67static const struct scom_controller wsp_scom_controller = {
68 .map = wsp_scom_map,
69 .unmap = wsp_scom_unmap,
70 .read = wsp_scom_read,
71 .write = wsp_scom_write
72};
73
74void scom_init_wsp(void)
75{
76 scom_init(&wsp_scom_controller);
77}
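
For reference, the address mangling in wsp_scom_map turns a SCOM register
number into a byte offset into the controller's MMIO range: bits 24-30 move
down by one and the low 20 bits index 8-byte-wide registers. A standalone
sketch with a made-up register number:

#include <stdint.h>
#include <stdio.h>

/* Same transform as wsp_scom_map(); the input below is illustrative. */
static uint64_t xscom_offset(uint64_t reg)
{
	return ((reg & 0x7f000000) >> 1) | ((reg & 0xfffff) << 3);
}

int main(void)
{
	uint64_t reg = 0x0200002a;	/* hypothetical SCOM address */

	/* 0x02000000 >> 1 = 0x01000000, 0x2a << 3 = 0x150 -> 0x1000150 */
	printf("SCOM 0x%08llx -> XSCOM offset 0x%llx\n",
	       (unsigned long long)reg, (unsigned long long)xscom_offset(reg));
	return 0;
}
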
diff --git a/arch/powerpc/platforms/wsp/setup.c b/arch/powerpc/platforms/wsp/setup.c
new file mode 100644
index 000000000000..11ac2f05e01c
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/setup.c
@@ -0,0 +1,36 @@
1/*
2 * Copyright 2010 Michael Ellerman, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/kernel.h>
11#include <linux/of_platform.h>
12
13#include "wsp.h"
14
15/*
16 * Find the chip-id by walking up the device tree looking for the
17 * ibm,wsp-chip-id property. Won't work for nodes outside a wsp subtree.
18 */
19int wsp_get_chip_id(struct device_node *dn)
20{
21 const u32 *p;
22 int rc;
23
24 /* Start looking at the specified node, not its parent */
25 dn = of_node_get(dn);
26 while (dn && !(p = of_get_property(dn, "ibm,wsp-chip-id", NULL)))
27 dn = of_get_next_parent(dn);
28
29 if (!dn)
30 return -1;
31
32 rc = *p;
33 of_node_put(dn);
34
35 return rc;
36}
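
The walk in wsp_get_chip_id leans on of_get_next_parent(), which drops the
child's reference as it ascends. The same search pattern over a toy
parent-linked structure; the node layout and names here are made up, not
kernel API:

#include <stdio.h>

/* Toy stand-ins for struct device_node and of_get_property(). */
struct node {
	const int *chip_id;	/* non-NULL iff "ibm,wsp-chip-id" is present */
	struct node *parent;
};

static int get_chip_id(const struct node *dn)
{
	while (dn && !dn->chip_id)
		dn = dn->parent;	/* of_get_next_parent() in the kernel */
	return dn ? *dn->chip_id : -1;
}

int main(void)
{
	int id = 3;
	struct node chip = { &id, NULL };	/* the wsp node carries the id */
	struct node dev  = { NULL, &chip };	/* a descendant device node */

	printf("chip-id = %d\n", get_chip_id(&dev));	/* prints 3 */
	return 0;
}
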
diff --git a/arch/powerpc/platforms/wsp/smp.c b/arch/powerpc/platforms/wsp/smp.c
new file mode 100644
index 000000000000..9d20fa9d3710
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/smp.c
@@ -0,0 +1,88 @@
1/*
2 * SMP Support for A2 platforms
3 *
4 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <linux/cpumask.h>
14#include <linux/init.h>
15#include <linux/kernel.h>
16#include <linux/of.h>
17#include <linux/smp.h>
18
19#include <asm/dbell.h>
20#include <asm/machdep.h>
21#include <asm/xics.h>
22
23#include "ics.h"
24#include "wsp.h"
25
26static void __devinit smp_a2_setup_cpu(int cpu)
27{
28 doorbell_setup_this_cpu();
29
30 if (cpu != boot_cpuid)
31 xics_setup_cpu();
32}
33
34int __devinit smp_a2_kick_cpu(int nr)
35{
36 const char *enable_method;
37 struct device_node *np;
38 int thr_idx;
39
40 if (nr < 0 || nr >= NR_CPUS)
41 return -ENOENT;
42
43 np = of_get_cpu_node(nr, &thr_idx);
44 if (!np)
45 return -ENODEV;
46
47 enable_method = of_get_property(np, "enable-method", NULL);
48 pr_devel("CPU%d has enable-method: \"%s\"\n", nr, enable_method);
49
50 if (!enable_method) {
51 printk(KERN_ERR "CPU%d has no enable-method\n", nr);
52 return -ENOENT;
53 } else if (strcmp(enable_method, "ibm,a2-scom") == 0) {
54 if (a2_scom_startup_cpu(nr, thr_idx, np))
55 return -1;
56 } else {
57 printk(KERN_ERR "CPU%d: Don't understand enable-method \"%s\"\n",
58 nr, enable_method);
59 return -EINVAL;
60 }
61
62 /*
63 * The processor is currently spinning, waiting for the
64	 * cpu_start field to become non-zero. After we set cpu_start,
65	 * the processor will continue on to secondary_start.
66 */
67 paca[nr].cpu_start = 1;
68
69 return 0;
70}
71
72static int __init smp_a2_probe(void)
73{
74 return cpus_weight(cpu_possible_map);
75}
76
77static struct smp_ops_t a2_smp_ops = {
78 .message_pass = smp_muxed_ipi_message_pass,
79 .cause_ipi = doorbell_cause_ipi,
80 .probe = smp_a2_probe,
81 .kick_cpu = smp_a2_kick_cpu,
82 .setup_cpu = smp_a2_setup_cpu,
83};
84
85void __init a2_setup_smp(void)
86{
87 smp_ops = &a2_smp_ops;
88}
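
The final store to paca[nr].cpu_start in smp_a2_kick_cpu is the release half
of the secondary-start handshake: the kicked thread spins in
generic_secondary_smp_init() until the flag goes non-zero. A toy userspace
model of that handshake; the names and the sleep are illustrative, not
kernel API:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int cpu_start;	/* stands in for paca[nr].cpu_start */

static void *secondary(void *arg)
{
	(void)arg;
	while (!atomic_load(&cpu_start))	/* spin, as the kicked CPU does */
		;
	printf("secondary released\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, secondary, NULL);
	usleep(1000);			/* SCOM bring-up would happen here */
	atomic_store(&cpu_start, 1);	/* the kick: let the secondary run */
	pthread_join(t, NULL);
	return 0;
}
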
diff --git a/arch/powerpc/platforms/wsp/wsp.h b/arch/powerpc/platforms/wsp/wsp.h
new file mode 100644
index 000000000000..7c3e087fd2f2
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/wsp.h
@@ -0,0 +1,17 @@
1#ifndef __WSP_H
2#define __WSP_H
3
4#include <asm/wsp.h>
5
6extern void wsp_setup_pci(void);
7extern void scom_init_wsp(void);
8
9extern void a2_setup_smp(void);
10extern int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx,
11 struct device_node *np);
12int smp_a2_cpu_bootable(unsigned int nr);
13int __devinit smp_a2_kick_cpu(int nr);
14
15void opb_pic_init(void);
16
17#endif /* __WSP_H */