aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorDavid Gibson <david@gibson.dropbear.id.au>2011-04-14 18:32:06 -0400
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2011-05-05 23:32:35 -0400
commita1d0d98daf6ce580d017a43b09fe30a375cde3e8 (patch)
tree8aaeff82e2d4edbb8ceec68f1cf5ceecc4c08be1 /arch
parent82578e192bb837b984ed5d8389245ea1fee09dd5 (diff)
powerpc: Add WSP platform
Add a platform for the Wire Speed Processor, based on the PPC A2. This includes code for the ICS & OPB interrupt controllers, as well as a SCOM backend, and SCOM based cpu bringup. Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Signed-off-by: David Gibson <david@gibson.dropbear.id.au> Signed-off-by: Jack Miller <jack@codezen.org> Signed-off-by: Ian Munsie <imunsie@au1.ibm.com> Signed-off-by: Michael Ellerman <michael@ellerman.id.au> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch')
-rw-r--r--arch/powerpc/include/asm/wsp.h14
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S23
-rw-r--r--arch/powerpc/platforms/Kconfig1
-rw-r--r--arch/powerpc/platforms/Makefile1
-rw-r--r--arch/powerpc/platforms/wsp/Kconfig28
-rw-r--r--arch/powerpc/platforms/wsp/Makefile6
-rw-r--r--arch/powerpc/platforms/wsp/ics.c712
-rw-r--r--arch/powerpc/platforms/wsp/ics.h20
-rw-r--r--arch/powerpc/platforms/wsp/opb_pic.c332
-rw-r--r--arch/powerpc/platforms/wsp/psr2.c95
-rw-r--r--arch/powerpc/platforms/wsp/scom_smp.c427
-rw-r--r--arch/powerpc/platforms/wsp/scom_wsp.c77
-rw-r--r--arch/powerpc/platforms/wsp/setup.c36
-rw-r--r--arch/powerpc/platforms/wsp/smp.c87
-rw-r--r--arch/powerpc/platforms/wsp/wsp.h17
-rw-r--r--arch/powerpc/sysdev/xics/icp-native.c1
16 files changed, 1877 insertions, 0 deletions
diff --git a/arch/powerpc/include/asm/wsp.h b/arch/powerpc/include/asm/wsp.h
new file mode 100644
index 000000000000..c7dc83088a33
--- /dev/null
+++ b/arch/powerpc/include/asm/wsp.h
@@ -0,0 +1,14 @@
/*
 * Copyright 2011 Michael Ellerman, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __ASM_POWERPC_WSP_H
#define __ASM_POWERPC_WSP_H

/* Chip (node) id for the given device node; callers treat a negative
 * return as failure (they WARN on < 0). */
extern int wsp_get_chip_id(struct device_node *dn);

#endif /* __ASM_POWERPC_WSP_H */
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 23bd83b20be4..c98e9d260621 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -864,6 +864,20 @@ have_hes:
864 * that will have to be made dependent on whether we are running under 864 * that will have to be made dependent on whether we are running under
865 * a hypervisor I suppose. 865 * a hypervisor I suppose.
866 */ 866 */
867
868 /* BEWARE, MAGIC
869 * This code is called as an ordinary function on the boot CPU. But to
870 * avoid duplication, this code is also used in SCOM bringup of
871 * secondary CPUs. We read the code between the initial_tlb_code_start
872 * and initial_tlb_code_end labels one instruction at a time and RAM it
873 * into the new core via SCOM. That doesn't process branches, so there
874 * must be none between those two labels. It also means if this code
875 * ever takes any parameters, the SCOM code must also be updated to
876 * provide them.
877 */
878 .globl a2_tlbinit_code_start
879a2_tlbinit_code_start:
880
867 ori r11,r3,MAS0_WQ_ALLWAYS 881 ori r11,r3,MAS0_WQ_ALLWAYS
868 oris r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */ 882 oris r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */
869 mtspr SPRN_MAS0,r11 883 mtspr SPRN_MAS0,r11
@@ -880,6 +894,9 @@ have_hes:
880 /* Write the TLB entry */ 894 /* Write the TLB entry */
881 tlbwe 895 tlbwe
882 896
897 .globl a2_tlbinit_after_linear_map
898a2_tlbinit_after_linear_map:
899
883 /* Now we branch the new virtual address mapped by this entry */ 900 /* Now we branch the new virtual address mapped by this entry */
884 LOAD_REG_IMMEDIATE(r3,1f) 901 LOAD_REG_IMMEDIATE(r3,1f)
885 mtctr r3 902 mtctr r3
@@ -931,10 +948,16 @@ have_hes:
931 cmpw r3,r9 948 cmpw r3,r9
932 blt 2b 949 blt 2b
933 950
951 .globl a2_tlbinit_after_iprot_flush
952a2_tlbinit_after_iprot_flush:
953
934 PPC_TLBILX(0,0,0) 954 PPC_TLBILX(0,0,0)
935 sync 955 sync
936 isync 956 isync
937 957
958 .globl a2_tlbinit_code_end
959a2_tlbinit_code_end:
960
938 /* We translate LR and return */ 961 /* We translate LR and return */
939 mflr r3 962 mflr r3
940 tovirt(r3,r3) 963 tovirt(r3,r3)
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index f2352fc5cbbe..6059053e7158 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -20,6 +20,7 @@ source "arch/powerpc/platforms/embedded6xx/Kconfig"
20source "arch/powerpc/platforms/44x/Kconfig" 20source "arch/powerpc/platforms/44x/Kconfig"
21source "arch/powerpc/platforms/40x/Kconfig" 21source "arch/powerpc/platforms/40x/Kconfig"
22source "arch/powerpc/platforms/amigaone/Kconfig" 22source "arch/powerpc/platforms/amigaone/Kconfig"
23source "arch/powerpc/platforms/wsp/Kconfig"
23 24
24config KVM_GUEST 25config KVM_GUEST
25 bool "KVM Guest support" 26 bool "KVM Guest support"
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index fdb9f0b0d7a8..73e2116cfeed 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -22,3 +22,4 @@ obj-$(CONFIG_PPC_CELL) += cell/
22obj-$(CONFIG_PPC_PS3) += ps3/ 22obj-$(CONFIG_PPC_PS3) += ps3/
23obj-$(CONFIG_EMBEDDED6xx) += embedded6xx/ 23obj-$(CONFIG_EMBEDDED6xx) += embedded6xx/
24obj-$(CONFIG_AMIGAONE) += amigaone/ 24obj-$(CONFIG_AMIGAONE) += amigaone/
25obj-$(CONFIG_PPC_WSP) += wsp/
diff --git a/arch/powerpc/platforms/wsp/Kconfig b/arch/powerpc/platforms/wsp/Kconfig
new file mode 100644
index 000000000000..c3c48eb62cc1
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/Kconfig
@@ -0,0 +1,28 @@
# Hidden symbol selected by the individual WSP board options below.
config PPC_WSP
	bool
	default n

menu "WSP platform selection"
	depends on PPC_BOOK3E_64

config PPC_PSR2
	bool "PSR-2 platform"
	select PPC_A2
	select GENERIC_TBSYNC
	select PPC_SCOM
	select EPAPR_BOOT
	select PPC_WSP
	select PPC_XICS
	select PPC_ICP_NATIVE
	default y

endmenu

config PPC_A2_DD2
	bool "Support for DD2 based A2/WSP systems"
	depends on PPC_A2

config WORKAROUND_ERRATUM_463
	depends on PPC_A2_DD2
	bool "Workaround erratum 463"
	default y
diff --git a/arch/powerpc/platforms/wsp/Makefile b/arch/powerpc/platforms/wsp/Makefile
new file mode 100644
index 000000000000..095be73d6cd4
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/Makefile
@@ -0,0 +1,6 @@
ccflags-y			+= -mno-minimal-toc

obj-y				+= setup.o ics.o
obj-$(CONFIG_PPC_PSR2)		+= psr2.o opb_pic.o
obj-$(CONFIG_PPC_WSP)		+= scom_wsp.o
obj-$(CONFIG_SMP)		+= smp.o scom_smp.o
diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c
new file mode 100644
index 000000000000..e53bd9e7b125
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/ics.c
@@ -0,0 +1,712 @@
1/*
2 * Copyright 2008-2011 IBM Corporation.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/cpu.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/irq.h>
14#include <linux/kernel.h>
15#include <linux/msi.h>
16#include <linux/of.h>
17#include <linux/slab.h>
18#include <linux/smp.h>
19#include <linux/spinlock.h>
20#include <linux/types.h>
21
22#include <asm/io.h>
23#include <asm/irq.h>
24#include <asm/xics.h>
25
26#include "wsp.h"
27#include "ics.h"
28
29
30/* WSP ICS */
31
32struct wsp_ics {
33 struct ics ics;
34 struct device_node *dn;
35 void __iomem *regs;
36 spinlock_t lock;
37 unsigned long *bitmap;
38 u32 chip_id;
39 u32 lsi_base;
40 u32 lsi_count;
41 u64 hwirq_start;
42 u64 count;
43#ifdef CONFIG_SMP
44 int *hwirq_cpu_map;
45#endif
46};
47
48#define to_wsp_ics(ics) container_of(ics, struct wsp_ics, ics)
49
50#define INT_SRC_LAYER_BUID_REG(base) ((base) + 0x00)
51#define IODA_TBL_ADDR_REG(base) ((base) + 0x18)
52#define IODA_TBL_DATA_REG(base) ((base) + 0x20)
53#define XIVE_UPDATE_REG(base) ((base) + 0x28)
54#define ICS_INT_CAPS_REG(base) ((base) + 0x30)
55
56#define TBL_AUTO_INCREMENT ((1UL << 63) | (1UL << 15))
57#define TBL_SELECT_XIST (1UL << 48)
58#define TBL_SELECT_XIVT (1UL << 49)
59
60#define IODA_IRQ(irq) ((irq) & (0x7FFULL)) /* HRM 5.1.3.4 */
61
62#define XIST_REQUIRED 0x8
63#define XIST_REJECTED 0x4
64#define XIST_PRESENTED 0x2
65#define XIST_PENDING 0x1
66
67#define XIVE_SERVER_SHIFT 42
68#define XIVE_SERVER_MASK 0xFFFFULL
69#define XIVE_PRIORITY_MASK 0xFFULL
70#define XIVE_PRIORITY_SHIFT 32
71#define XIVE_WRITE_ENABLE (1ULL << 63)
72
73/*
74 * The docs refer to a 6 bit field called ChipID, which consists of a
75 * 3 bit NodeID and a 3 bit ChipID. On WSP the ChipID is always zero
76 * so we ignore it, and every where we use "chip id" in this code we
77 * mean the NodeID.
78 */
79#define WSP_ICS_CHIP_SHIFT 17
80
81
82static struct wsp_ics *ics_list;
83static int num_ics;
84
85/* ICS Source controller accessors */
86
87static u64 wsp_ics_get_xive(struct wsp_ics *ics, unsigned int irq)
88{
89 unsigned long flags;
90 u64 xive;
91
92 spin_lock_irqsave(&ics->lock, flags);
93 out_be64(IODA_TBL_ADDR_REG(ics->regs), TBL_SELECT_XIVT | IODA_IRQ(irq));
94 xive = in_be64(IODA_TBL_DATA_REG(ics->regs));
95 spin_unlock_irqrestore(&ics->lock, flags);
96
97 return xive;
98}
99
100static void wsp_ics_set_xive(struct wsp_ics *ics, unsigned int irq, u64 xive)
101{
102 xive &= ~XIVE_ADDR_MASK;
103 xive |= (irq & XIVE_ADDR_MASK);
104 xive |= XIVE_WRITE_ENABLE;
105
106 out_be64(XIVE_UPDATE_REG(ics->regs), xive);
107}
108
109static u64 xive_set_server(u64 xive, unsigned int server)
110{
111 u64 mask = ~(XIVE_SERVER_MASK << XIVE_SERVER_SHIFT);
112
113 xive &= mask;
114 xive |= (server & XIVE_SERVER_MASK) << XIVE_SERVER_SHIFT;
115
116 return xive;
117}
118
119static u64 xive_set_priority(u64 xive, unsigned int priority)
120{
121 u64 mask = ~(XIVE_PRIORITY_MASK << XIVE_PRIORITY_SHIFT);
122
123 xive &= mask;
124 xive |= (priority & XIVE_PRIORITY_MASK) << XIVE_PRIORITY_SHIFT;
125
126 return xive;
127}
128
129
130#ifdef CONFIG_SMP
131/* Find logical CPUs within mask on a given chip and store result in ret */
132void cpus_on_chip(int chip_id, cpumask_t *mask, cpumask_t *ret)
133{
134 int cpu, chip;
135 struct device_node *cpu_dn, *dn;
136 const u32 *prop;
137
138 cpumask_clear(ret);
139 for_each_cpu(cpu, mask) {
140 cpu_dn = of_get_cpu_node(cpu, NULL);
141 if (!cpu_dn)
142 continue;
143
144 prop = of_get_property(cpu_dn, "at-node", NULL);
145 if (!prop) {
146 of_node_put(cpu_dn);
147 continue;
148 }
149
150 dn = of_find_node_by_phandle(*prop);
151 of_node_put(cpu_dn);
152
153 chip = wsp_get_chip_id(dn);
154 if (chip == chip_id)
155 cpumask_set_cpu(cpu, ret);
156
157 of_node_put(dn);
158 }
159}
160
161/* Store a suitable CPU to handle a hwirq in the ics->hwirq_cpu_map cache */
162static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
163 const cpumask_t *affinity)
164{
165 cpumask_var_t avail, newmask;
166 int ret = -ENOMEM, cpu, cpu_rover = 0, target;
167 int index = hwirq - ics->hwirq_start;
168 unsigned int nodeid;
169
170 BUG_ON(index < 0 || index >= ics->count);
171
172 if (!ics->hwirq_cpu_map)
173 return -ENOMEM;
174
175 if (!distribute_irqs) {
176 ics->hwirq_cpu_map[hwirq - ics->hwirq_start] = xics_default_server;
177 return 0;
178 }
179
180 /* Allocate needed CPU masks */
181 if (!alloc_cpumask_var(&avail, GFP_KERNEL))
182 goto ret;
183 if (!alloc_cpumask_var(&newmask, GFP_KERNEL))
184 goto freeavail;
185
186 /* Find PBus attached to the source of this IRQ */
187 nodeid = (hwirq >> WSP_ICS_CHIP_SHIFT) & 0x3; /* 12:14 */
188
189 /* Find CPUs that could handle this IRQ */
190 if (affinity)
191 cpumask_and(avail, cpu_online_mask, affinity);
192 else
193 cpumask_copy(avail, cpu_online_mask);
194
195 /* Narrow selection down to logical CPUs on the same chip */
196 cpus_on_chip(nodeid, avail, newmask);
197
198 /* Ensure we haven't narrowed it down to 0 */
199 if (unlikely(cpumask_empty(newmask))) {
200 if (unlikely(cpumask_empty(avail))) {
201 ret = -1;
202 goto out;
203 }
204 cpumask_copy(newmask, avail);
205 }
206
207 /* Choose a CPU out of those we narrowed it down to in round robin */
208 target = hwirq % cpumask_weight(newmask);
209 for_each_cpu(cpu, newmask) {
210 if (cpu_rover++ >= target) {
211 ics->hwirq_cpu_map[index] = get_hard_smp_processor_id(cpu);
212 ret = 0;
213 goto out;
214 }
215 }
216
217 /* Shouldn't happen */
218 WARN_ON(1);
219
220out:
221 free_cpumask_var(newmask);
222freeavail:
223 free_cpumask_var(avail);
224ret:
225 if (ret < 0) {
226 ics->hwirq_cpu_map[index] = cpumask_first(cpu_online_mask);
227 pr_warning("Error, falling hwirq 0x%x routing back to CPU %i\n",
228 hwirq, ics->hwirq_cpu_map[index]);
229 }
230 return ret;
231}
232
233static void alloc_irq_map(struct wsp_ics *ics)
234{
235 int i;
236
237 ics->hwirq_cpu_map = kmalloc(sizeof(int) * ics->count, GFP_KERNEL);
238 if (!ics->hwirq_cpu_map) {
239 pr_warning("Allocate hwirq_cpu_map failed, "
240 "IRQ balancing disabled\n");
241 return;
242 }
243
244 for (i=0; i < ics->count; i++)
245 ics->hwirq_cpu_map[i] = xics_default_server;
246}
247
248static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)
249{
250 int index = hwirq - ics->hwirq_start;
251
252 BUG_ON(index < 0 || index >= ics->count);
253
254 if (!ics->hwirq_cpu_map)
255 return xics_default_server;
256
257 return ics->hwirq_cpu_map[index];
258}
259#else /* !CONFIG_SMP */
260static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
261 const cpumask_t *affinity)
262{
263 return 0;
264}
265
266static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)
267{
268 return xics_default_server;
269}
270
271static void alloc_irq_map(struct wsp_ics *ics) { }
272#endif
273
274static void wsp_chip_unmask_irq(struct irq_data *d)
275{
276 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
277 struct wsp_ics *ics;
278 int server;
279 u64 xive;
280
281 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
282 return;
283
284 ics = d->chip_data;
285 if (WARN_ON(!ics))
286 return;
287
288 server = get_irq_server(ics, hw_irq);
289
290 xive = wsp_ics_get_xive(ics, hw_irq);
291 xive = xive_set_server(xive, server);
292 xive = xive_set_priority(xive, DEFAULT_PRIORITY);
293 wsp_ics_set_xive(ics, hw_irq, xive);
294}
295
296static unsigned int wsp_chip_startup(struct irq_data *d)
297{
298 /* unmask it */
299 wsp_chip_unmask_irq(d);
300 return 0;
301}
302
303static void wsp_mask_real_irq(unsigned int hw_irq, struct wsp_ics *ics)
304{
305 u64 xive;
306
307 if (hw_irq == XICS_IPI)
308 return;
309
310 if (WARN_ON(!ics))
311 return;
312 xive = wsp_ics_get_xive(ics, hw_irq);
313 xive = xive_set_server(xive, xics_default_server);
314 xive = xive_set_priority(xive, LOWEST_PRIORITY);
315 wsp_ics_set_xive(ics, hw_irq, xive);
316}
317
318static void wsp_chip_mask_irq(struct irq_data *d)
319{
320 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
321 struct wsp_ics *ics = d->chip_data;
322
323 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
324 return;
325
326 wsp_mask_real_irq(hw_irq, ics);
327}
328
329static int wsp_chip_set_affinity(struct irq_data *d,
330 const struct cpumask *cpumask, bool force)
331{
332 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
333 struct wsp_ics *ics;
334 int ret;
335 u64 xive;
336
337 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
338 return -1;
339
340 ics = d->chip_data;
341 if (WARN_ON(!ics))
342 return -1;
343 xive = wsp_ics_get_xive(ics, hw_irq);
344
345 /*
346 * For the moment only implement delivery to all cpus or one cpu.
347 * Get current irq_server for the given irq
348 */
349 ret = cache_hwirq_map(ics, d->irq, cpumask);
350 if (ret == -1) {
351 char cpulist[128];
352 cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
353 pr_warning("%s: No online cpus in the mask %s for irq %d\n",
354 __func__, cpulist, d->irq);
355 return -1;
356 } else if (ret == -ENOMEM) {
357 pr_warning("%s: Out of memory\n", __func__);
358 return -1;
359 }
360
361 xive = xive_set_server(xive, get_irq_server(ics, hw_irq));
362 wsp_ics_set_xive(ics, hw_irq, xive);
363
364 return 0;
365}
366
367static struct irq_chip wsp_irq_chip = {
368 .name = "WSP ICS",
369 .irq_startup = wsp_chip_startup,
370 .irq_mask = wsp_chip_mask_irq,
371 .irq_unmask = wsp_chip_unmask_irq,
372 .irq_set_affinity = wsp_chip_set_affinity
373};
374
375static int wsp_ics_host_match(struct ics *ics, struct device_node *dn)
376{
377 /* All ICSs in the system implement a global irq number space,
378 * so match against them all. */
379 return of_device_is_compatible(dn, "ibm,ppc-xics");
380}
381
382static int wsp_ics_match_hwirq(struct wsp_ics *wsp_ics, unsigned int hwirq)
383{
384 if (hwirq >= wsp_ics->hwirq_start &&
385 hwirq < wsp_ics->hwirq_start + wsp_ics->count)
386 return 1;
387
388 return 0;
389}
390
391static int wsp_ics_map(struct ics *ics, unsigned int virq)
392{
393 struct wsp_ics *wsp_ics = to_wsp_ics(ics);
394 unsigned int hw_irq = virq_to_hw(virq);
395 unsigned long flags;
396
397 if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
398 return -ENOENT;
399
400 irq_set_chip_and_handler(virq, &wsp_irq_chip, handle_fasteoi_irq);
401
402 irq_set_chip_data(virq, wsp_ics);
403
404 spin_lock_irqsave(&wsp_ics->lock, flags);
405 bitmap_allocate_region(wsp_ics->bitmap, hw_irq - wsp_ics->hwirq_start, 0);
406 spin_unlock_irqrestore(&wsp_ics->lock, flags);
407
408 return 0;
409}
410
411static void wsp_ics_mask_unknown(struct ics *ics, unsigned long hw_irq)
412{
413 struct wsp_ics *wsp_ics = to_wsp_ics(ics);
414
415 if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
416 return;
417
418 pr_err("%s: IRQ %lu (real) is invalid, disabling it.\n", __func__, hw_irq);
419 wsp_mask_real_irq(hw_irq, wsp_ics);
420}
421
422static long wsp_ics_get_server(struct ics *ics, unsigned long hw_irq)
423{
424 struct wsp_ics *wsp_ics = to_wsp_ics(ics);
425
426 if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
427 return -ENOENT;
428
429 return get_irq_server(wsp_ics, hw_irq);
430}
431
432/* HW Number allocation API */
433
434static struct wsp_ics *wsp_ics_find_dn_ics(struct device_node *dn)
435{
436 struct device_node *iparent;
437 int i;
438
439 iparent = of_irq_find_parent(dn);
440 if (!iparent) {
441 pr_err("wsp_ics: Failed to find interrupt parent!\n");
442 return NULL;
443 }
444
445 for(i = 0; i < num_ics; i++) {
446 if(ics_list[i].dn == iparent)
447 break;
448 }
449
450 if (i >= num_ics) {
451 pr_err("wsp_ics: Unable to find parent bitmap!\n");
452 return NULL;
453 }
454
455 return &ics_list[i];
456}
457
458int wsp_ics_alloc_irq(struct device_node *dn, int num)
459{
460 struct wsp_ics *ics;
461 int order, offset;
462
463 ics = wsp_ics_find_dn_ics(dn);
464 if (!ics)
465 return -ENODEV;
466
467 /* Fast, but overly strict if num isn't a power of two */
468 order = get_count_order(num);
469
470 spin_lock_irq(&ics->lock);
471 offset = bitmap_find_free_region(ics->bitmap, ics->count, order);
472 spin_unlock_irq(&ics->lock);
473
474 if (offset < 0)
475 return offset;
476
477 return offset + ics->hwirq_start;
478}
479
480void wsp_ics_free_irq(struct device_node *dn, unsigned int irq)
481{
482 struct wsp_ics *ics;
483
484 ics = wsp_ics_find_dn_ics(dn);
485 if (WARN_ON(!ics))
486 return;
487
488 spin_lock_irq(&ics->lock);
489 bitmap_release_region(ics->bitmap, irq, 0);
490 spin_unlock_irq(&ics->lock);
491}
492
493/* Initialisation */
494
495static int __init wsp_ics_bitmap_setup(struct wsp_ics *ics,
496 struct device_node *dn)
497{
498 int len, i, j, size;
499 u32 start, count;
500 const u32 *p;
501
502 size = BITS_TO_LONGS(ics->count) * sizeof(long);
503 ics->bitmap = kzalloc(size, GFP_KERNEL);
504 if (!ics->bitmap) {
505 pr_err("wsp_ics: ENOMEM allocating IRQ bitmap!\n");
506 return -ENOMEM;
507 }
508
509 spin_lock_init(&ics->lock);
510
511 p = of_get_property(dn, "available-ranges", &len);
512 if (!p || !len) {
513 /* FIXME this should be a WARN() once mambo is updated */
514 pr_err("wsp_ics: No available-ranges defined for %s\n",
515 dn->full_name);
516 return 0;
517 }
518
519 if (len % (2 * sizeof(u32)) != 0) {
520 /* FIXME this should be a WARN() once mambo is updated */
521 pr_err("wsp_ics: Invalid available-ranges for %s\n",
522 dn->full_name);
523 return 0;
524 }
525
526 bitmap_fill(ics->bitmap, ics->count);
527
528 for (i = 0; i < len / sizeof(u32); i += 2) {
529 start = of_read_number(p + i, 1);
530 count = of_read_number(p + i + 1, 1);
531
532 pr_devel("%s: start: %d count: %d\n", __func__, start, count);
533
534 if ((start + count) > (ics->hwirq_start + ics->count) ||
535 start < ics->hwirq_start) {
536 pr_err("wsp_ics: Invalid range! -> %d to %d\n",
537 start, start + count);
538 break;
539 }
540
541 for (j = 0; j < count; j++)
542 bitmap_release_region(ics->bitmap,
543 (start + j) - ics->hwirq_start, 0);
544 }
545
546 /* Ensure LSIs are not available for allocation */
547 bitmap_allocate_region(ics->bitmap, ics->lsi_base,
548 get_count_order(ics->lsi_count));
549
550 return 0;
551}
552
553static int __init wsp_ics_setup(struct wsp_ics *ics, struct device_node *dn)
554{
555 u32 lsi_buid, msi_buid, msi_base, msi_count;
556 void __iomem *regs;
557 const u32 *p;
558 int rc, len, i;
559 u64 caps, buid;
560
561 p = of_get_property(dn, "interrupt-ranges", &len);
562 if (!p || len < (2 * sizeof(u32))) {
563 pr_err("wsp_ics: No/bad interrupt-ranges found on %s\n",
564 dn->full_name);
565 return -ENOENT;
566 }
567
568 if (len > (2 * sizeof(u32))) {
569 pr_err("wsp_ics: Multiple ics ranges not supported.\n");
570 return -EINVAL;
571 }
572
573 regs = of_iomap(dn, 0);
574 if (!regs) {
575 pr_err("wsp_ics: of_iomap(%s) failed\n", dn->full_name);
576 return -ENXIO;
577 }
578
579 ics->hwirq_start = of_read_number(p, 1);
580 ics->count = of_read_number(p + 1, 1);
581 ics->regs = regs;
582
583 ics->chip_id = wsp_get_chip_id(dn);
584 if (WARN_ON(ics->chip_id < 0))
585 ics->chip_id = 0;
586
587 /* Get some informations about the critter */
588 caps = in_be64(ICS_INT_CAPS_REG(ics->regs));
589 buid = in_be64(INT_SRC_LAYER_BUID_REG(ics->regs));
590 ics->lsi_count = caps >> 56;
591 msi_count = (caps >> 44) & 0x7ff;
592
593 /* Note: LSI BUID is 9 bits, but really only 3 are BUID and the
594 * rest is mixed in the interrupt number. We store the whole
595 * thing though
596 */
597 lsi_buid = (buid >> 48) & 0x1ff;
598 ics->lsi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | lsi_buid << 5;
599 msi_buid = (buid >> 37) & 0x7;
600 msi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | msi_buid << 11;
601
602 pr_info("wsp_ics: Found %s\n", dn->full_name);
603 pr_info("wsp_ics: irq range : 0x%06llx..0x%06llx\n",
604 ics->hwirq_start, ics->hwirq_start + ics->count - 1);
605 pr_info("wsp_ics: %4d LSIs : 0x%06x..0x%06x\n",
606 ics->lsi_count, ics->lsi_base,
607 ics->lsi_base + ics->lsi_count - 1);
608 pr_info("wsp_ics: %4d MSIs : 0x%06x..0x%06x\n",
609 msi_count, msi_base,
610 msi_base + msi_count - 1);
611
612 /* Let's check the HW config is sane */
613 if (ics->lsi_base < ics->hwirq_start ||
614 (ics->lsi_base + ics->lsi_count) > (ics->hwirq_start + ics->count))
615 pr_warning("wsp_ics: WARNING ! LSIs out of interrupt-ranges !\n");
616 if (msi_base < ics->hwirq_start ||
617 (msi_base + msi_count) > (ics->hwirq_start + ics->count))
618 pr_warning("wsp_ics: WARNING ! MSIs out of interrupt-ranges !\n");
619
620 /* We don't check for overlap between LSI and MSI, which will happen
621 * if we use the same BUID, I'm not sure yet how legit that is.
622 */
623
624 rc = wsp_ics_bitmap_setup(ics, dn);
625 if (rc) {
626 iounmap(regs);
627 return rc;
628 }
629
630 ics->dn = of_node_get(dn);
631 alloc_irq_map(ics);
632
633 for(i = 0; i < ics->count; i++)
634 wsp_mask_real_irq(ics->hwirq_start + i, ics);
635
636 ics->ics.map = wsp_ics_map;
637 ics->ics.mask_unknown = wsp_ics_mask_unknown;
638 ics->ics.get_server = wsp_ics_get_server;
639 ics->ics.host_match = wsp_ics_host_match;
640
641 xics_register_ics(&ics->ics);
642
643 return 0;
644}
645
646static void __init wsp_ics_set_default_server(void)
647{
648 struct device_node *np;
649 u32 hwid;
650
651 /* Find the server number for the boot cpu. */
652 np = of_get_cpu_node(boot_cpuid, NULL);
653 BUG_ON(!np);
654
655 hwid = get_hard_smp_processor_id(boot_cpuid);
656
657 pr_info("wsp_ics: default server is %#x, CPU %s\n", hwid, np->full_name);
658 xics_default_server = hwid;
659
660 of_node_put(np);
661}
662
663static int __init wsp_ics_init(void)
664{
665 struct device_node *dn;
666 struct wsp_ics *ics;
667 int rc, found;
668
669 wsp_ics_set_default_server();
670
671 found = 0;
672 for_each_compatible_node(dn, NULL, "ibm,ppc-xics")
673 found++;
674
675 if (found == 0) {
676 pr_err("wsp_ics: No ICS's found!\n");
677 return -ENODEV;
678 }
679
680 ics_list = kmalloc(sizeof(*ics) * found, GFP_KERNEL);
681 if (!ics_list) {
682 pr_err("wsp_ics: No memory for structs.\n");
683 return -ENOMEM;
684 }
685
686 num_ics = 0;
687 ics = ics_list;
688 for_each_compatible_node(dn, NULL, "ibm,wsp-xics") {
689 rc = wsp_ics_setup(ics, dn);
690 if (rc == 0) {
691 ics++;
692 num_ics++;
693 }
694 }
695
696 if (found != num_ics) {
697 pr_err("wsp_ics: Failed setting up %d ICS's\n",
698 found - num_ics);
699 return -1;
700 }
701
702 return 0;
703}
704
705void __init wsp_init_irq(void)
706{
707 wsp_ics_init();
708 xics_init();
709
710 /* We need to patch our irq chip's EOI to point to the right ICP */
711 wsp_irq_chip.irq_eoi = icp_ops->eoi;
712}
diff --git a/arch/powerpc/platforms/wsp/ics.h b/arch/powerpc/platforms/wsp/ics.h
new file mode 100644
index 000000000000..e34d53102640
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/ics.h
@@ -0,0 +1,20 @@
/*
 * Copyright 2009 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef __ICS_H
#define __ICS_H

/* Low bits of a XIVE value that hold the interrupt source number */
#define XIVE_ADDR_MASK		0x7FFULL

extern void wsp_init_irq(void);

/* Allocate @num hw irqs (rounded up to a power of two) from the ICS
 * parenting @dn; returns the first hw irq or a negative errno. */
extern int wsp_ics_alloc_irq(struct device_node *dn, int num);
extern void wsp_ics_free_irq(struct device_node *dn, unsigned int irq);

#endif /* __ICS_H */
diff --git a/arch/powerpc/platforms/wsp/opb_pic.c b/arch/powerpc/platforms/wsp/opb_pic.c
new file mode 100644
index 000000000000..be05631a3c1c
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/opb_pic.c
@@ -0,0 +1,332 @@
1/*
2 * IBM Onboard Peripheral Bus Interrupt Controller
3 *
4 * Copyright 2010 Jack Miller, IBM Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/interrupt.h>
13#include <linux/io.h>
14#include <linux/irq.h>
15#include <linux/of.h>
16#include <linux/slab.h>
17#include <linux/time.h>
18
19#include <asm/reg_a2.h>
20#include <asm/irq.h>
21
22#define OPB_NR_IRQS 32
23
24#define OPB_MLSASIER 0x04 /* MLS Accumulated Status IER */
25#define OPB_MLSIR 0x50 /* MLS Interrupt Register */
26#define OPB_MLSIER 0x54 /* MLS Interrupt Enable Register */
27#define OPB_MLSIPR 0x58 /* MLS Interrupt Polarity Register */
28#define OPB_MLSIIR 0x5c /* MLS Interrupt Inputs Register */
29
30static int opb_index = 0;
31
32struct opb_pic {
33 struct irq_host *host;
34 void *regs;
35 int index;
36 spinlock_t lock;
37};
38
39static u32 opb_in(struct opb_pic *opb, int offset)
40{
41 return in_be32(opb->regs + offset);
42}
43
44static void opb_out(struct opb_pic *opb, int offset, u32 val)
45{
46 out_be32(opb->regs + offset, val);
47}
48
49static void opb_unmask_irq(struct irq_data *d)
50{
51 struct opb_pic *opb;
52 unsigned long flags;
53 u32 ier, bitset;
54
55 opb = d->chip_data;
56 bitset = (1 << (31 - irqd_to_hwirq(d)));
57
58 spin_lock_irqsave(&opb->lock, flags);
59
60 ier = opb_in(opb, OPB_MLSIER);
61 opb_out(opb, OPB_MLSIER, ier | bitset);
62 ier = opb_in(opb, OPB_MLSIER);
63
64 spin_unlock_irqrestore(&opb->lock, flags);
65}
66
67static void opb_mask_irq(struct irq_data *d)
68{
69 struct opb_pic *opb;
70 unsigned long flags;
71 u32 ier, mask;
72
73 opb = d->chip_data;
74 mask = ~(1 << (31 - irqd_to_hwirq(d)));
75
76 spin_lock_irqsave(&opb->lock, flags);
77
78 ier = opb_in(opb, OPB_MLSIER);
79 opb_out(opb, OPB_MLSIER, ier & mask);
80 ier = opb_in(opb, OPB_MLSIER); // Flush posted writes
81
82 spin_unlock_irqrestore(&opb->lock, flags);
83}
84
85static void opb_ack_irq(struct irq_data *d)
86{
87 struct opb_pic *opb;
88 unsigned long flags;
89 u32 bitset;
90
91 opb = d->chip_data;
92 bitset = (1 << (31 - irqd_to_hwirq(d)));
93
94 spin_lock_irqsave(&opb->lock, flags);
95
96 opb_out(opb, OPB_MLSIR, bitset);
97 opb_in(opb, OPB_MLSIR); // Flush posted writes
98
99 spin_unlock_irqrestore(&opb->lock, flags);
100}
101
102static void opb_mask_ack_irq(struct irq_data *d)
103{
104 struct opb_pic *opb;
105 unsigned long flags;
106 u32 bitset;
107 u32 ier, ir;
108
109 opb = d->chip_data;
110 bitset = (1 << (31 - irqd_to_hwirq(d)));
111
112 spin_lock_irqsave(&opb->lock, flags);
113
114 ier = opb_in(opb, OPB_MLSIER);
115 opb_out(opb, OPB_MLSIER, ier & ~bitset);
116 ier = opb_in(opb, OPB_MLSIER); // Flush posted writes
117
118 opb_out(opb, OPB_MLSIR, bitset);
119 ir = opb_in(opb, OPB_MLSIR); // Flush posted writes
120
121 spin_unlock_irqrestore(&opb->lock, flags);
122}
123
124static int opb_set_irq_type(struct irq_data *d, unsigned int flow)
125{
126 struct opb_pic *opb;
127 unsigned long flags;
128 int invert, ipr, mask, bit;
129
130 opb = d->chip_data;
131
132 /* The only information we're interested in in the type is whether it's
133 * a high or low trigger. For high triggered interrupts, the polarity
134 * set for it in the MLS Interrupt Polarity Register is 0, for low
135 * interrupts it's 1 so that the proper input in the MLS Interrupt Input
136 * Register is interrupted as asserting the interrupt. */
137
138 switch (flow) {
139 case IRQ_TYPE_NONE:
140 opb_mask_irq(d);
141 return 0;
142
143 case IRQ_TYPE_LEVEL_HIGH:
144 invert = 0;
145 break;
146
147 case IRQ_TYPE_LEVEL_LOW:
148 invert = 1;
149 break;
150
151 default:
152 return -EINVAL;
153 }
154
155 bit = (1 << (31 - irqd_to_hwirq(d)));
156 mask = ~bit;
157
158 spin_lock_irqsave(&opb->lock, flags);
159
160 ipr = opb_in(opb, OPB_MLSIPR);
161 ipr = (ipr & mask) | (invert ? bit : 0);
162 opb_out(opb, OPB_MLSIPR, ipr);
163 ipr = opb_in(opb, OPB_MLSIPR); // Flush posted writes
164
165 spin_unlock_irqrestore(&opb->lock, flags);
166
167 /* Record the type in the interrupt descriptor */
168 irqd_set_trigger_type(d, flow);
169
170 return 0;
171}
172
173static struct irq_chip opb_irq_chip = {
174 .name = "OPB",
175 .irq_mask = opb_mask_irq,
176 .irq_unmask = opb_unmask_irq,
177 .irq_mask_ack = opb_mask_ack_irq,
178 .irq_ack = opb_ack_irq,
179 .irq_set_type = opb_set_irq_type
180};
181
182static int opb_host_map(struct irq_host *host, unsigned int virq,
183 irq_hw_number_t hwirq)
184{
185 struct opb_pic *opb;
186
187 opb = host->host_data;
188
189 /* Most of the important stuff is handled by the generic host code, like
190 * the lookup, so just attach some info to the virtual irq */
191
192 irq_set_chip_data(virq, opb);
193 irq_set_chip_and_handler(virq, &opb_irq_chip, handle_level_irq);
194 irq_set_irq_type(virq, IRQ_TYPE_NONE);
195
196 return 0;
197}
198
199static int opb_host_xlate(struct irq_host *host, struct device_node *dn,
200 const u32 *intspec, unsigned int intsize,
201 irq_hw_number_t *out_hwirq, unsigned int *out_type)
202{
203 /* Interrupt size must == 2 */
204 BUG_ON(intsize != 2);
205 *out_hwirq = intspec[0];
206 *out_type = intspec[1];
207 return 0;
208}
209
210static struct irq_host_ops opb_host_ops = {
211 .map = opb_host_map,
212 .xlate = opb_host_xlate,
213};
214
215irqreturn_t opb_irq_handler(int irq, void *private)
216{
217 struct opb_pic *opb;
218 u32 ir, src, subvirq;
219
220 opb = (struct opb_pic *) private;
221
222 /* Read the OPB MLS Interrupt Register for
223 * asserted interrupts */
224 ir = opb_in(opb, OPB_MLSIR);
225 if (!ir)
226 return IRQ_NONE;
227
228 do {
229 /* Get 1 - 32 source, *NOT* bit */
230 src = 32 - ffs(ir);
231
232 /* Translate from the OPB's conception of interrupt number to
233 * Linux's virtual IRQ */
234
235 subvirq = irq_linear_revmap(opb->host, src);
236
237 generic_handle_irq(subvirq);
238 } while ((ir = opb_in(opb, OPB_MLSIR)));
239
240 return IRQ_HANDLED;
241}
242
243struct opb_pic *opb_pic_init_one(struct device_node *dn)
244{
245 struct opb_pic *opb;
246 struct resource res;
247
248 if (of_address_to_resource(dn, 0, &res)) {
249 printk(KERN_ERR "opb: Couldn't translate resource\n");
250 return NULL;
251 }
252
253 opb = kzalloc(sizeof(struct opb_pic), GFP_KERNEL);
254 if (!opb) {
255 printk(KERN_ERR "opb: Failed to allocate opb struct!\n");
256 return NULL;
257 }
258
259 /* Get access to the OPB MMIO registers */
260 opb->regs = ioremap(res.start + 0x10000, 0x1000);
261 if (!opb->regs) {
262 printk(KERN_ERR "opb: Failed to allocate register space!\n");
263 goto free_opb;
264 }
265
266 /* Allocate an irq host so that Linux knows that despite only
267 * having one interrupt to issue, we're the controller for multiple
268 * hardware IRQs, so later we can lookup their virtual IRQs. */
269
270 opb->host = irq_alloc_host(dn, IRQ_HOST_MAP_LINEAR,
271 OPB_NR_IRQS, &opb_host_ops, -1);
272
273 if (!opb->host) {
274 printk(KERN_ERR "opb: Failed to allocate IRQ host!\n");
275 goto free_regs;
276 }
277
278 opb->index = opb_index++;
279 spin_lock_init(&opb->lock);
280 opb->host->host_data = opb;
281
282 /* Disable all interrupts by default */
283 opb_out(opb, OPB_MLSASIER, 0);
284 opb_out(opb, OPB_MLSIER, 0);
285
286 /* ACK any interrupts left by FW */
287 opb_out(opb, OPB_MLSIR, 0xFFFFFFFF);
288
289 return opb;
290
291free_regs:
292 iounmap(opb->regs);
293free_opb:
294 kfree(opb);
295 return NULL;
296}
297
298void __init opb_pic_init(void)
299{
300 struct device_node *dn;
301 struct opb_pic *opb;
302 int virq;
303 int rc;
304
305 /* Call init_one for each OPB device */
306 for_each_compatible_node(dn, NULL, "ibm,opb") {
307
308 /* Fill in an OPB struct */
309 opb = opb_pic_init_one(dn);
310 if (!opb) {
311 printk(KERN_WARNING "opb: Failed to init node, skipped!\n");
312 continue;
313 }
314
315 /* Map / get opb's hardware virtual irq */
316 virq = irq_of_parse_and_map(dn, 0);
317 if (virq <= 0) {
318 printk("opb: irq_op_parse_and_map failed!\n");
319 continue;
320 }
321
322 /* Attach opb interrupt handler to new virtual IRQ */
323 rc = request_irq(virq, opb_irq_handler, 0, "OPB LS Cascade", opb);
324 if (rc) {
325 printk("opb: request_irq failed: %d\n", rc);
326 continue;
327 }
328
329 printk("OPB%d init with %d IRQs at %p\n", opb->index,
330 OPB_NR_IRQS, opb->regs);
331 }
332}
diff --git a/arch/powerpc/platforms/wsp/psr2.c b/arch/powerpc/platforms/wsp/psr2.c
new file mode 100644
index 000000000000..40f28916ff6c
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/psr2.c
@@ -0,0 +1,95 @@
1/*
2 * Copyright 2008-2011, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/delay.h>
11#include <linux/init.h>
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/mm.h>
15#include <linux/of.h>
16#include <linux/smp.h>
17
18#include <asm/machdep.h>
19#include <asm/system.h>
20#include <asm/time.h>
21#include <asm/udbg.h>
22
23#include "ics.h"
24#include "wsp.h"
25
26
/* Park the calling CPU forever: mask interrupts and spin. */
static void psr2_spin(void)
{
	hard_irq_disable();
	while (1)
		;
}
32
/* Restart hook: no reboot mechanism is wired up, so just park the CPU. */
static void psr2_restart(char *cmd)
{
	psr2_spin();
}
37
38static int psr2_probe_devices(void)
39{
40 struct device_node *np;
41
42 /* Our RTC is a ds1500. It seems to be programatically compatible
43 * with the ds1511 for which we have a driver so let's use that
44 */
45 np = of_find_compatible_node(NULL, NULL, "dallas,ds1500");
46 if (np != NULL) {
47 struct resource res;
48 if (of_address_to_resource(np, 0, &res) == 0)
49 platform_device_register_simple("ds1511", 0, &res, 1);
50 }
51 return 0;
52}
53machine_arch_initcall(psr2_md, psr2_probe_devices);
54
55static void __init psr2_setup_arch(void)
56{
57 /* init to some ~sane value until calibrate_delay() runs */
58 loops_per_jiffy = 50000000;
59
60 scom_init_wsp();
61
62 /* Setup SMP callback */
63#ifdef CONFIG_SMP
64 a2_setup_smp();
65#endif
66}
67
68static int __init psr2_probe(void)
69{
70 unsigned long root = of_get_flat_dt_root();
71
72 if (!of_flat_dt_is_compatible(root, "ibm,psr2"))
73 return 0;
74
75 return 1;
76}
77
/* Interrupt-controller setup: bring up the WSP ICS/ICP first, then the
 * OPB cascaded PICs that hang off it (see opb_pic_init()). */
static void __init psr2_init_irq(void)
{
	wsp_init_irq();
	opb_pic_init();
}
83
/* Machine description for the PSR2 A2 platform.  There is no power-off
 * or restart control wired up, so those hooks just park the CPU. */
define_machine(psr2_md) {
	.name = "PSR2 A2",
	.probe = psr2_probe,
	.setup_arch = psr2_setup_arch,
	.restart = psr2_restart,
	.power_off = psr2_spin,
	.halt = psr2_spin,
	.calibrate_decr = generic_calibrate_decr,
	.init_IRQ = psr2_init_irq,
	.progress = udbg_progress,
	.power_save = book3e_idle,
};
diff --git a/arch/powerpc/platforms/wsp/scom_smp.c b/arch/powerpc/platforms/wsp/scom_smp.c
new file mode 100644
index 000000000000..141e78032097
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/scom_smp.c
@@ -0,0 +1,427 @@
1/*
2 * SCOM support for A2 platforms
3 *
4 * Copyright 2007-2011 Benjamin Herrenschmidt, David Gibson,
5 * Michael Ellerman, IBM Corp.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/cpumask.h>
14#include <linux/io.h>
15#include <linux/of.h>
16#include <linux/spinlock.h>
17#include <linux/types.h>
18
19#include <asm/cputhreads.h>
20#include <asm/reg_a2.h>
21#include <asm/scom.h>
22#include <asm/udbg.h>
23
24#include "wsp.h"
25
26#define SCOM_RAMC 0x2a /* Ram Command */
27#define SCOM_RAMC_TGT1_EXT 0x80000000
28#define SCOM_RAMC_SRC1_EXT 0x40000000
29#define SCOM_RAMC_SRC2_EXT 0x20000000
30#define SCOM_RAMC_SRC3_EXT 0x10000000
31#define SCOM_RAMC_ENABLE 0x00080000
32#define SCOM_RAMC_THREADSEL 0x00060000
33#define SCOM_RAMC_EXECUTE 0x00010000
34#define SCOM_RAMC_MSR_OVERRIDE 0x00008000
35#define SCOM_RAMC_MSR_PR 0x00004000
36#define SCOM_RAMC_MSR_GS 0x00002000
37#define SCOM_RAMC_FORCE 0x00001000
38#define SCOM_RAMC_FLUSH 0x00000800
39#define SCOM_RAMC_INTERRUPT 0x00000004
40#define SCOM_RAMC_ERROR 0x00000002
41#define SCOM_RAMC_DONE 0x00000001
42#define SCOM_RAMI 0x29 /* Ram Instruction */
43#define SCOM_RAMIC 0x28 /* Ram Instruction and Command */
44#define SCOM_RAMIC_INSN 0xffffffff00000000
45#define SCOM_RAMD 0x2d /* Ram Data */
46#define SCOM_RAMDH 0x2e /* Ram Data High */
47#define SCOM_RAMDL 0x2f /* Ram Data Low */
48#define SCOM_PCCR0 0x33 /* PC Configuration Register 0 */
49#define SCOM_PCCR0_ENABLE_DEBUG 0x80000000
50#define SCOM_PCCR0_ENABLE_RAM 0x40000000
51#define SCOM_THRCTL 0x30 /* Thread Control and Status */
52#define SCOM_THRCTL_T0_STOP 0x80000000
53#define SCOM_THRCTL_T1_STOP 0x40000000
54#define SCOM_THRCTL_T2_STOP 0x20000000
55#define SCOM_THRCTL_T3_STOP 0x10000000
56#define SCOM_THRCTL_T0_STEP 0x08000000
57#define SCOM_THRCTL_T1_STEP 0x04000000
58#define SCOM_THRCTL_T2_STEP 0x02000000
59#define SCOM_THRCTL_T3_STEP 0x01000000
60#define SCOM_THRCTL_T0_RUN 0x00800000
61#define SCOM_THRCTL_T1_RUN 0x00400000
62#define SCOM_THRCTL_T2_RUN 0x00200000
63#define SCOM_THRCTL_T3_RUN 0x00100000
64#define SCOM_THRCTL_T0_PM 0x00080000
65#define SCOM_THRCTL_T1_PM 0x00040000
66#define SCOM_THRCTL_T2_PM 0x00020000
67#define SCOM_THRCTL_T3_PM 0x00010000
68#define SCOM_THRCTL_T0_UDE 0x00008000
69#define SCOM_THRCTL_T1_UDE 0x00004000
70#define SCOM_THRCTL_T2_UDE 0x00002000
71#define SCOM_THRCTL_T3_UDE 0x00001000
72#define SCOM_THRCTL_ASYNC_DIS 0x00000800
73#define SCOM_THRCTL_TB_DIS 0x00000400
74#define SCOM_THRCTL_DEC_DIS 0x00000200
75#define SCOM_THRCTL_AND 0x31 /* Thread Control and Status */
76#define SCOM_THRCTL_OR 0x32 /* Thread Control and Status */
77
78
79static DEFINE_PER_CPU(scom_map_t, scom_ptrs);
80
/* Return the SCOM map for @cpu's core, mapping it on first use.
 *
 * One map is shared by all hardware threads of a core: when a new map
 * is created it is cached in the per-cpu slot of every sibling thread.
 * *@first_thread is set to 1 when this call created the mapping, i.e.
 * when @cpu is the first thread of its core to be brought up (the
 * caller uses this to decide whether to do whole-core setup).
 */
static scom_map_t get_scom(int cpu, struct device_node *np, int *first_thread)
{
	scom_map_t scom = per_cpu(scom_ptrs, cpu);
	int tcpu;

	if (scom_map_ok(scom)) {
		/* Core already mapped by a sibling thread */
		*first_thread = 0;
		return scom;
	}

	*first_thread = 1;

	scom = scom_map_device(np, 0);

	/* Share the new map with every thread of this core */
	for (tcpu = cpu_first_thread_sibling(cpu);
	     tcpu <= cpu_last_thread_sibling(cpu); tcpu++)
		per_cpu(scom_ptrs, tcpu) = scom;

	/* Hack: for the boot core, this will actually get called on
	 * the second thread up, not the first so our test above will
	 * set first_thread incorrectly. */
	if (cpu_first_thread_sibling(cpu) == 0)
		*first_thread = 0;

	return scom;
}
107
/* RAM (execute) one instruction @insn on hardware thread @thread via the
 * SCOM Ram facility, then poll for completion.
 *
 * @extmask selects the TGT1/SRC1-3 "extended" bits (RAMC bits 28-31);
 * the set/getgpr helpers use these to address the alternate register set.
 * Returns 0 on success, -1 on poll timeout, or the negated
 * SCOM_RAMC_INTERRUPT / SCOM_RAMC_ERROR status bit on failure.
 */
static int a2_scom_ram(scom_map_t scom, int thread, u32 insn, int extmask)
{
	u64 cmd, mask, val;
	int n = 0;

	/* Instruction in the high word, ext bits at 28, thread select at 17 */
	cmd = ((u64)insn << 32) | (((u64)extmask & 0xf) << 28)
		| ((u64)thread << 17) | SCOM_RAMC_ENABLE | SCOM_RAMC_EXECUTE;
	mask = SCOM_RAMC_DONE | SCOM_RAMC_INTERRUPT | SCOM_RAMC_ERROR;

	/* RAMIC takes instruction and command in one write */
	scom_write(scom, SCOM_RAMIC, cmd);

	/* Poll RAMC for a completion status; give up after 3 empty reads */
	while (!((val = scom_read(scom, SCOM_RAMC)) & mask)) {
		pr_devel("Waiting on RAMC = 0x%llx\n", val);
		if (++n == 3) {
			pr_err("RAMC timeout on instruction 0x%08x, thread %d\n",
			       insn, thread);
			return -1;
		}
	}

	if (val & SCOM_RAMC_INTERRUPT) {
		pr_err("RAMC interrupt on instruction 0x%08x, thread %d\n",
		       insn, thread);
		return -SCOM_RAMC_INTERRUPT;
	}

	if (val & SCOM_RAMC_ERROR) {
		pr_err("RAMC error on instruction 0x%08x, thread %d\n",
		       insn, thread);
		return -SCOM_RAMC_ERROR;
	}

	return 0;
}
142
/* Read GPR @gpr of @thread into *@out_gpr.
 *
 * Works by RAMing "or rN,rN,rN" (a no-op that routes rN through the
 * pipeline) and then reading the result from the Ram Data register.
 * @alt selects the alternate register set via the RAMC ext bits.
 */
static int a2_scom_getgpr(scom_map_t scom, int thread, int gpr, int alt,
			  u64 *out_gpr)
{
	int rc;

	/* or rN, rN, rN */
	u32 insn = 0x7c000378 | (gpr << 21) | (gpr << 16) | (gpr << 11);
	rc = a2_scom_ram(scom, thread, insn, alt ? 0xf : 0x0);
	if (rc)
		return rc;

	*out_gpr = scom_read(scom, SCOM_RAMD);

	return 0;
}
158
/* Read SPR @spr of @thread into *@out_spr.
 *
 * Moves the SPR into (alternate) r1 with a RAMed mfspr, then fetches r1
 * with a2_scom_getgpr().  The magic SPR number 0x0ff0 means the MSR:
 * an mfmsr is RAMed instead.
 */
static int a2_scom_getspr(scom_map_t scom, int thread, int spr, u64 *out_spr)
{
	int rc, sprhi, sprlo;
	u32 insn;

	/* mfspr encodes the SPR number as two swapped 5-bit fields */
	sprhi = spr >> 5;
	sprlo = spr & 0x1f;
	insn = 0x7c2002a6 | (sprlo << 16) | (sprhi << 11); /* mfspr r1,spr */

	if (spr == 0x0ff0)
		insn = 0x7c2000a6; /* mfmsr r1 */

	rc = a2_scom_ram(scom, thread, insn, 0xf);
	if (rc)
		return rc;
	return a2_scom_getgpr(scom, thread, 1, 1, out_spr);
}
176
/* Load the 64-bit immediate @val into GPR @gpr of @thread.
 *
 * Builds the value by RAMing a minimal lis/li/oris/ori/rldicr sequence:
 * only the pieces needed for the non-zero 16-bit chunks of @val are
 * emitted.  @alt selects the alternate register set.  Returns 0 on
 * success; on failure the (negative) RAM error codes get OR-ed together.
 */
static int a2_scom_setgpr(scom_map_t scom, int thread, int gpr,
			  int alt, u64 val)
{
	/* Instruction templates with the target GPR fields pre-filled */
	u32 lis = 0x3c000000 | (gpr << 21);
	u32 li = 0x38000000 | (gpr << 21);
	u32 oris = 0x64000000 | (gpr << 21) | (gpr << 16);
	u32 ori = 0x60000000 | (gpr << 21) | (gpr << 16);
	u32 rldicr32 = 0x780007c6 | (gpr << 21) | (gpr << 16);
	/* The four 16-bit chunks of val, most significant first */
	u32 highest = val >> 48;
	u32 higher = (val >> 32) & 0xffff;
	u32 high = (val >> 16) & 0xffff;
	u32 low = val & 0xffff;
	int lext = alt ? 0x8 : 0x0;
	int oext = alt ? 0xf : 0x0;
	int rc = 0;

	if (highest)
		rc |= a2_scom_ram(scom, thread, lis | highest, lext);

	if (higher) {
		if (highest)
			rc |= a2_scom_ram(scom, thread, oris | higher, oext);
		else
			rc |= a2_scom_ram(scom, thread, li | higher, lext);
	}

	/* Shift the upper 32 bits into place before loading the lower half */
	if (highest || higher)
		rc |= a2_scom_ram(scom, thread, rldicr32, oext);

	if (high) {
		if (highest || higher)
			rc |= a2_scom_ram(scom, thread, oris | high, oext);
		else
			rc |= a2_scom_ram(scom, thread, lis | high, lext);
	}

	if (highest || higher || high)
		rc |= a2_scom_ram(scom, thread, ori | low, oext);
	else
		rc |= a2_scom_ram(scom, thread, li | low, lext);

	return rc;
}
220
/* Write @val into SPR @spr of @thread.
 *
 * Loads @val into (alternate) r1 with a2_scom_setgpr(), then RAMs an
 * mtspr from r1.  The magic SPR number 0x0ff0 means the MSR: an mtmsr
 * is RAMed instead.
 */
static int a2_scom_setspr(scom_map_t scom, int thread, int spr, u64 val)
{
	int sprhi = spr >> 5;
	int sprlo = spr & 0x1f;
	/* mtspr spr, r1 */
	u32 insn = 0x7c2003a6 | (sprlo << 16) | (sprhi << 11);

	if (spr == 0x0ff0)
		insn = 0x7c200124; /* mtmsr r1 */

	if (a2_scom_setgpr(scom, thread, 1, 1, val))
		return -1;

	return a2_scom_ram(scom, thread, insn, 0xf);
}
236
237static int a2_scom_initial_tlb(scom_map_t scom, int thread)
238{
239 extern u32 a2_tlbinit_code_start[], a2_tlbinit_code_end[];
240 extern u32 a2_tlbinit_after_iprot_flush[];
241 extern u32 a2_tlbinit_after_linear_map[];
242 u32 assoc, entries, i;
243 u64 epn, tlbcfg;
244 u32 *p;
245 int rc;
246
247 /* Invalidate all entries (including iprot) */
248
249 rc = a2_scom_getspr(scom, thread, SPRN_TLB0CFG, &tlbcfg);
250 if (rc)
251 goto scom_fail;
252 entries = tlbcfg & TLBnCFG_N_ENTRY;
253 assoc = (tlbcfg & TLBnCFG_ASSOC) >> 24;
254 epn = 0;
255
256 /* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */
257 a2_scom_setspr(scom, thread, SPRN_MMUCR2, 0x000a7531);
258 /* Set MMUCR3 to write all thids bit to the TLB */
259 a2_scom_setspr(scom, thread, SPRN_MMUCR3, 0x0000000f);
260
261 /* Set MAS1 for 1G page size, and MAS2 to our initial EPN */
262 a2_scom_setspr(scom, thread, SPRN_MAS1, MAS1_TSIZE(BOOK3E_PAGESZ_1GB));
263 a2_scom_setspr(scom, thread, SPRN_MAS2, epn);
264 for (i = 0; i < entries; i++) {
265
266 a2_scom_setspr(scom, thread, SPRN_MAS0, MAS0_ESEL(i % assoc));
267
268 /* tlbwe */
269 rc = a2_scom_ram(scom, thread, 0x7c0007a4, 0);
270 if (rc)
271 goto scom_fail;
272
273 /* Next entry is new address? */
274 if((i + 1) % assoc == 0) {
275 epn += (1 << 30);
276 a2_scom_setspr(scom, thread, SPRN_MAS2, epn);
277 }
278 }
279
280 /* Setup args for linear mapping */
281 rc = a2_scom_setgpr(scom, thread, 3, 0, MAS0_TLBSEL(0));
282 if (rc)
283 goto scom_fail;
284
285 /* Linear mapping */
286 for (p = a2_tlbinit_code_start; p < a2_tlbinit_after_linear_map; p++) {
287 rc = a2_scom_ram(scom, thread, *p, 0);
288 if (rc)
289 goto scom_fail;
290 }
291
292 /*
293 * For the boot thread, between the linear mapping and the debug
294 * mappings there is a loop to flush iprot mappings. Ramming doesn't do
295 * branches, but the secondary threads don't need to be nearly as smart
296 * (i.e. we don't need to worry about invalidating the mapping we're
297 * standing on).
298 */
299
300 /* Debug mappings. Expects r11 = MAS0 from linear map (set above) */
301 for (p = a2_tlbinit_after_iprot_flush; p < a2_tlbinit_code_end; p++) {
302 rc = a2_scom_ram(scom, thread, *p, 0);
303 if (rc)
304 goto scom_fail;
305 }
306
307scom_fail:
308 if (rc)
309 pr_err("Setting up initial TLB failed, err %d\n", rc);
310
311 if (rc == -SCOM_RAMC_INTERRUPT) {
312 /* Interrupt, dump some status */
313 int rc[10];
314 u64 iar, srr0, srr1, esr, mas0, mas1, mas2, mas7_3, mas8, ccr2;
315 rc[0] = a2_scom_getspr(scom, thread, SPRN_IAR, &iar);
316 rc[1] = a2_scom_getspr(scom, thread, SPRN_SRR0, &srr0);
317 rc[2] = a2_scom_getspr(scom, thread, SPRN_SRR1, &srr1);
318 rc[3] = a2_scom_getspr(scom, thread, SPRN_ESR, &esr);
319 rc[4] = a2_scom_getspr(scom, thread, SPRN_MAS0, &mas0);
320 rc[5] = a2_scom_getspr(scom, thread, SPRN_MAS1, &mas1);
321 rc[6] = a2_scom_getspr(scom, thread, SPRN_MAS2, &mas2);
322 rc[7] = a2_scom_getspr(scom, thread, SPRN_MAS7_MAS3, &mas7_3);
323 rc[8] = a2_scom_getspr(scom, thread, SPRN_MAS8, &mas8);
324 rc[9] = a2_scom_getspr(scom, thread, SPRN_A2_CCR2, &ccr2);
325 pr_err(" -> retreived IAR =0x%llx (err %d)\n", iar, rc[0]);
326 pr_err(" retreived SRR0=0x%llx (err %d)\n", srr0, rc[1]);
327 pr_err(" retreived SRR1=0x%llx (err %d)\n", srr1, rc[2]);
328 pr_err(" retreived ESR =0x%llx (err %d)\n", esr, rc[3]);
329 pr_err(" retreived MAS0=0x%llx (err %d)\n", mas0, rc[4]);
330 pr_err(" retreived MAS1=0x%llx (err %d)\n", mas1, rc[5]);
331 pr_err(" retreived MAS2=0x%llx (err %d)\n", mas2, rc[6]);
332 pr_err(" retreived MS73=0x%llx (err %d)\n", mas7_3, rc[7]);
333 pr_err(" retreived MAS8=0x%llx (err %d)\n", mas8, rc[8]);
334 pr_err(" retreived CCR2=0x%llx (err %d)\n", ccr2, rc[9]);
335 }
336
337 return rc;
338}
339
/* Bring up CPU @lcpu (hardware thread @thr_idx of the core described by
 * @np) through the core's SCOM debug/RAM facility: stop the thread, set
 * its MSR/IAR and entry arguments, then restart it at the generic
 * secondary entry point.  When this is the first thread of the core to
 * come up (core_setup), the whole core's TLB is initialized first.
 *
 * Returns 0 on success, non-zero on any SCOM/RAM failure.
 */
int __devinit a2_scom_startup_cpu(unsigned int lcpu, int thr_idx,
				  struct device_node *np)
{
	u64 init_iar, init_msr, init_ccr2;
	unsigned long start_here;
	int rc, core_setup;
	scom_map_t scom;
	u64 pccr0;

	scom = get_scom(lcpu, np, &core_setup);
	if (!scom) {
		printk(KERN_ERR "Couldn't map SCOM for CPU%d\n", lcpu);
		return -1;
	}

	pr_devel("Bringing up CPU%d using SCOM...\n", lcpu);

	/* Turn on the debug and RAM facilities; PCCR0 is restored on the
	 * success path below. */
	pccr0 = scom_read(scom, SCOM_PCCR0);
	scom_write(scom, SCOM_PCCR0, pccr0 | SCOM_PCCR0_ENABLE_DEBUG |
				     SCOM_PCCR0_ENABLE_RAM);

	/* Stop the thead with THRCTL. If we are setting up the TLB we stop all
	 * threads. We also disable asynchronous interrupts while RAMing.
	 */
	if (core_setup)
		scom_write(scom, SCOM_THRCTL_OR,
			   SCOM_THRCTL_T0_STOP |
			   SCOM_THRCTL_T1_STOP |
			   SCOM_THRCTL_T2_STOP |
			   SCOM_THRCTL_T3_STOP |
			   SCOM_THRCTL_ASYNC_DIS);
	else
		scom_write(scom, SCOM_THRCTL_OR, SCOM_THRCTL_T0_STOP >> thr_idx);

	/* Flush its pipeline just in case */
	scom_write(scom, SCOM_RAMC, ((u64)thr_idx << 17) |
		   SCOM_RAMC_FLUSH | SCOM_RAMC_ENABLE);

	/* Save the original state for the failure report at the end */
	a2_scom_getspr(scom, thr_idx, SPRN_IAR, &init_iar);
	a2_scom_getspr(scom, thr_idx, 0x0ff0, &init_msr);
	a2_scom_getspr(scom, thr_idx, SPRN_A2_CCR2, &init_ccr2);

	/* Set MSR to MSR_CM (0x0ff0 is magic value for MSR_CM) */
	rc = a2_scom_setspr(scom, thr_idx, 0x0ff0, MSR_CM);
	if (rc) {
		pr_err("Failed to set MSR ! err %d\n", rc);
		return rc;
	}

	/* RAM in an sync/isync for the sake of it */
	a2_scom_ram(scom, thr_idx, 0x7c0004ac, 0);
	a2_scom_ram(scom, thr_idx, 0x4c00012c, 0);

	if (core_setup) {
		pr_devel("CPU%d is first thread in core, initializing TLB...\n",
			 lcpu);
		rc = a2_scom_initial_tlb(scom, thr_idx);
		if (rc)
			goto fail;
	}

	/* Dereference to get the entry address out of the symbol —
	 * presumably the first doubleword of the ppc64 function
	 * descriptor; confirm against the entry code. */
	start_here = *(unsigned long *)(core_setup ? generic_secondary_smp_init
					: generic_secondary_thread_init);
	pr_devel("CPU%d entry point at 0x%lx...\n", lcpu, start_here);

	/* Entry point in IAR, hard CPU id in r3 for the generic entry code */
	rc |= a2_scom_setspr(scom, thr_idx, SPRN_IAR, start_here);
	rc |= a2_scom_setgpr(scom, thr_idx, 3, 0,
			     get_hard_smp_processor_id(lcpu));
	/*
	 * Tell book3e_secondary_core_init not to set up the TLB, we've
	 * already done that.
	 */
	rc |= a2_scom_setgpr(scom, thr_idx, 4, 0, 1);

	/* Enable the target thread */
	rc |= a2_scom_setspr(scom, thr_idx, SPRN_TENS, 0x1 << thr_idx);

	/* Release the thread and restore PCCR0.
	 * NOTE(review): the early-return and goto fail paths above leave
	 * the thread(s) stopped and PCCR0 modified — confirm intentional. */
	scom_write(scom, SCOM_RAMC, 0);
	scom_write(scom, SCOM_THRCTL_AND, ~(SCOM_THRCTL_T0_STOP >> thr_idx));
	scom_write(scom, SCOM_PCCR0, pccr0);
fail:
	pr_devel(" SCOM initialization %s\n", rc ? "failed" : "succeeded");
	if (rc) {
		pr_err("Old IAR=0x%08llx MSR=0x%08llx CCR2=0x%08llx\n",
		       init_iar, init_msr, init_ccr2);
	}

	return rc;
}
diff --git a/arch/powerpc/platforms/wsp/scom_wsp.c b/arch/powerpc/platforms/wsp/scom_wsp.c
new file mode 100644
index 000000000000..4052e2259f30
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/scom_wsp.c
@@ -0,0 +1,77 @@
1/*
2 * SCOM backend for WSP
3 *
4 * Copyright 2010 Benjamin Herrenschmidt, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/cpumask.h>
13#include <linux/io.h>
14#include <linux/of.h>
15#include <linux/spinlock.h>
16#include <linux/types.h>
17
18#include <asm/cputhreads.h>
19#include <asm/reg_a2.h>
20#include <asm/scom.h>
21#include <asm/udbg.h>
22
23#include "wsp.h"
24
25
26static scom_map_t wsp_scom_map(struct device_node *dev, u64 reg, u64 count)
27{
28 struct resource r;
29 u64 xscom_addr;
30
31 if (!of_get_property(dev, "scom-controller", NULL)) {
32 pr_err("%s: device %s is not a SCOM controller\n",
33 __func__, dev->full_name);
34 return SCOM_MAP_INVALID;
35 }
36
37 if (of_address_to_resource(dev, 0, &r)) {
38 pr_debug("Failed to find SCOM controller address\n");
39 return 0;
40 }
41
42 /* Transform the SCOM address into an XSCOM offset */
43 xscom_addr = ((reg & 0x7f000000) >> 1) | ((reg & 0xfffff) << 3);
44
45 return (scom_map_t)ioremap(r.start + xscom_addr, count << 3);
46}
47
48static void wsp_scom_unmap(scom_map_t map)
49{
50 iounmap((void *)map);
51}
52
53static u64 wsp_scom_read(scom_map_t map, u32 reg)
54{
55 u64 __iomem *addr = (u64 __iomem *)map;
56
57 return in_be64(addr + reg);
58}
59
60static void wsp_scom_write(scom_map_t map, u32 reg, u64 value)
61{
62 u64 __iomem *addr = (u64 __iomem *)map;
63
64 return out_be64(addr + reg, value);
65}
66
/* SCOM backend ops for WSP: SCOM accesses become MMIO through the
 * XSCOM window mapped by wsp_scom_map(). */
static const struct scom_controller wsp_scom_controller = {
	.map = wsp_scom_map,
	.unmap = wsp_scom_unmap,
	.read = wsp_scom_read,
	.write = wsp_scom_write
};
73
/* Register the WSP XSCOM-based backend as the system SCOM controller.
 * Called early from the platform's setup_arch(). */
void scom_init_wsp(void)
{
	scom_init(&wsp_scom_controller);
}
diff --git a/arch/powerpc/platforms/wsp/setup.c b/arch/powerpc/platforms/wsp/setup.c
new file mode 100644
index 000000000000..11ac2f05e01c
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/setup.c
@@ -0,0 +1,36 @@
1/*
2 * Copyright 2010 Michael Ellerman, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/kernel.h>
11#include <linux/of_platform.h>
12
13#include "wsp.h"
14
15/*
16 * Find chip-id by walking up device tree looking for ibm,wsp-chip-id property.
17 * Won't work for nodes that are not a descendant of a wsp node.
18 */
19int wsp_get_chip_id(struct device_node *dn)
20{
21 const u32 *p;
22 int rc;
23
24 /* Start looking at the specified node, not its parent */
25 dn = of_node_get(dn);
26 while (dn && !(p = of_get_property(dn, "ibm,wsp-chip-id", NULL)))
27 dn = of_get_next_parent(dn);
28
29 if (!dn)
30 return -1;
31
32 rc = *p;
33 of_node_put(dn);
34
35 return rc;
36}
diff --git a/arch/powerpc/platforms/wsp/smp.c b/arch/powerpc/platforms/wsp/smp.c
new file mode 100644
index 000000000000..c7b8db9ed9b3
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/smp.c
@@ -0,0 +1,87 @@
1/*
2 * SMP Support for A2 platforms
3 *
4 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <linux/cpumask.h>
14#include <linux/init.h>
15#include <linux/kernel.h>
16#include <linux/of.h>
17#include <linux/smp.h>
18
19#include <asm/dbell.h>
20#include <asm/machdep.h>
21#include <asm/xics.h>
22
23#include "ics.h"
24#include "wsp.h"
25
26static void __devinit smp_a2_setup_cpu(int cpu)
27{
28 doorbell_setup_this_cpu();
29
30 if (cpu != boot_cpuid)
31 xics_setup_cpu();
32}
33
34int __devinit smp_a2_kick_cpu(int nr)
35{
36 const char *enable_method;
37 struct device_node *np;
38 int thr_idx;
39
40 if (nr < 0 || nr >= NR_CPUS)
41 return -ENOENT;
42
43 np = of_get_cpu_node(nr, &thr_idx);
44 if (!np)
45 return -ENODEV;
46
47 enable_method = of_get_property(np, "enable-method", NULL);
48 pr_devel("CPU%d has enable-method: \"%s\"\n", nr, enable_method);
49
50 if (!enable_method) {
51 printk(KERN_ERR "CPU%d has no enable-method\n", nr);
52 return -ENOENT;
53 } else if (strcmp(enable_method, "ibm,a2-scom") == 0) {
54 if (a2_scom_startup_cpu(nr, thr_idx, np))
55 return -1;
56 } else {
57 printk(KERN_ERR "CPU%d: Don't understand enable-method \"%s\"\n",
58 nr, enable_method);
59 return -EINVAL;
60 }
61
62 /*
63 * The processor is currently spinning, waiting for the
64 * cpu_start field to become non-zero After we set cpu_start,
65 * the processor will continue on to secondary_start
66 */
67 paca[nr].cpu_start = 1;
68
69 return 0;
70}
71
72static int __init smp_a2_probe(void)
73{
74 return cpus_weight(cpu_possible_map);
75}
76
/* SMP ops for A2: doorbell IPIs, SCOM-RAM based secondary bring-up */
static struct smp_ops_t a2_smp_ops = {
	.message_pass = doorbell_message_pass,
	.probe = smp_a2_probe,
	.kick_cpu = smp_a2_kick_cpu,
	.setup_cpu = smp_a2_setup_cpu,
};
83
/* Install the A2 SMP ops; called from the platform's setup_arch(). */
void __init a2_setup_smp(void)
{
	smp_ops = &a2_smp_ops;
}
diff --git a/arch/powerpc/platforms/wsp/wsp.h b/arch/powerpc/platforms/wsp/wsp.h
new file mode 100644
index 000000000000..7c3e087fd2f2
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/wsp.h
@@ -0,0 +1,17 @@
#ifndef __WSP_H
#define __WSP_H

#include <asm/wsp.h>

/* Shared prototypes for the WSP platform code (PCI, SCOM, SMP, PIC) */

extern void wsp_setup_pci(void);
extern void scom_init_wsp(void);

/* SMP bring-up (smp.c / scom_smp.c) */
extern void a2_setup_smp(void);
extern int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx,
			       struct device_node *np);
int smp_a2_cpu_bootable(unsigned int nr);
int __devinit smp_a2_kick_cpu(int nr);

/* OPB cascaded interrupt controllers (opb_pic.c) */
void opb_pic_init(void);

#endif /* __WSP_H */
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index d9e0515592c4..3508321c4501 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -7,6 +7,7 @@
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 * 8 *
9 */ 9 */
10
10#include <linux/types.h> 11#include <linux/types.h>
11#include <linux/kernel.h> 12#include <linux/kernel.h>
12#include <linux/irq.h> 13#include <linux/irq.h>