aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/platforms/cell/interrupt.c
diff options
context:
space:
mode:
authorBenjamin Herrenschmidt <benh@kernel.crashing.org>2006-07-03 05:32:51 -0400
committerPaul Mackerras <paulus@samba.org>2006-07-03 05:55:12 -0400
commitb9e5b4e6a991a5a6d521f2e20a65835404b4169f (patch)
treea0ac972faae4bf9133f576d842667bb134190341 /arch/powerpc/platforms/cell/interrupt.c
parent5a43a066b11ac2fe84cf67307f20b83bea390f83 (diff)
[POWERPC] Use the genirq framework
This adapts the generic powerpc interrupt handling code, and all of the platforms except for the embedded 6xx machines, to use the new genirq framework. Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/platforms/cell/interrupt.c')
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c111
1 files changed, 56 insertions, 55 deletions
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 22da1335445a..97936f547f19 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -37,64 +37,51 @@
37struct iic { 37struct iic {
38 struct cbe_iic_thread_regs __iomem *regs; 38 struct cbe_iic_thread_regs __iomem *regs;
39 u8 target_id; 39 u8 target_id;
40 u8 eoi_stack[16];
41 int eoi_ptr;
40}; 42};
41 43
42static DEFINE_PER_CPU(struct iic, iic); 44static DEFINE_PER_CPU(struct iic, iic);
43 45
44void iic_local_enable(void) 46static void iic_mask(unsigned int irq)
45{ 47{
46 struct iic *iic = &__get_cpu_var(iic);
47 u64 tmp;
48
49 /*
50 * There seems to be a bug that is present in DD2.x CPUs
51 * and still only partially fixed in DD3.1.
52 * This bug causes a value written to the priority register
53 * not to make it there, resulting in a system hang unless we
54 * write it again.
55 * Masking with 0xf0 is done because the Cell BE does not
56 * implement the lower four bits of the interrupt priority,
57 * they always read back as zeroes, although future CPUs
58 * might implement different bits.
59 */
60 do {
61 out_be64(&iic->regs->prio, 0xff);
62 tmp = in_be64(&iic->regs->prio);
63 } while ((tmp & 0xf0) != 0xf0);
64}
65
66void iic_local_disable(void)
67{
68 out_be64(&__get_cpu_var(iic).regs->prio, 0x0);
69}
70
71static unsigned int iic_startup(unsigned int irq)
72{
73 return 0;
74}
75
76static void iic_enable(unsigned int irq)
77{
78 iic_local_enable();
79} 48}
80 49
81static void iic_disable(unsigned int irq) 50static void iic_unmask(unsigned int irq)
82{ 51{
83} 52}
84 53
85static void iic_end(unsigned int irq) 54static void iic_eoi(unsigned int irq)
86{ 55{
87 iic_local_enable(); 56 struct iic *iic = &__get_cpu_var(iic);
57 out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
58 BUG_ON(iic->eoi_ptr < 0);
88} 59}
89 60
90static struct hw_interrupt_type iic_pic = { 61static struct irq_chip iic_chip = {
91 .typename = " CELL-IIC ", 62 .typename = " CELL-IIC ",
92 .startup = iic_startup, 63 .mask = iic_mask,
93 .enable = iic_enable, 64 .unmask = iic_unmask,
94 .disable = iic_disable, 65 .eoi = iic_eoi,
95 .end = iic_end,
96}; 66};
97 67
68/* XXX All of this has to be reworked completely. We need to assign real
69 * interrupt numbers to the external interrupts and remove all the hard coded
70 * interrupt maps (rely on the device-tree whenever possible).
71 *
72 * Basically, my scheme is to define the "pendings" bits to be the HW interrupt
73 * number (ignoring the data and flags here). That means we can sort-of split
74 * external sources based on priority, and we can use request_irq() on pretty
75 * much anything.
76 *
77 * For spider or axon, they have their own interrupt space. spider will just have
78 * local "hardware" interrupts 0...xx * node stride. The node stride is not
79 * necessary (separate interrupt chips will have separate HW number space), but
80 * will allow compatibility with existing device-trees.
81 *
82 * All of this little world will get a standard remapping scheme to map those HW
83 * numbers into the linux flat irq number space.
84*/
98static int iic_external_get_irq(struct cbe_iic_pending_bits pending) 85static int iic_external_get_irq(struct cbe_iic_pending_bits pending)
99{ 86{
100 int irq; 87 int irq;
@@ -118,9 +105,10 @@ static int iic_external_get_irq(struct cbe_iic_pending_bits pending)
118 */ 105 */
119 if (pending.class != 2) 106 if (pending.class != 2)
120 break; 107 break;
121 irq = IIC_EXT_OFFSET 108 /* TODO: We might want to silently ignore cascade interrupts
122 + spider_get_irq(node) 109 * when no cascade handler exists yet
123 + node * IIC_NODE_STRIDE; 110 */
111 irq = IIC_EXT_CASCADE + node * IIC_NODE_STRIDE;
124 break; 112 break;
125 case 0x01 ... 0x04: 113 case 0x01 ... 0x04:
126 case 0x07 ... 0x0a: 114 case 0x07 ... 0x0a:
@@ -152,6 +140,8 @@ int iic_get_irq(struct pt_regs *regs)
152 iic = &__get_cpu_var(iic); 140 iic = &__get_cpu_var(iic);
153 *(unsigned long *) &pending = 141 *(unsigned long *) &pending =
154 in_be64((unsigned long __iomem *) &iic->regs->pending_destr); 142 in_be64((unsigned long __iomem *) &iic->regs->pending_destr);
143 iic->eoi_stack[++iic->eoi_ptr] = pending.prio;
144 BUG_ON(iic->eoi_ptr > 15);
155 145
156 irq = -1; 146 irq = -1;
157 if (pending.flags & CBE_IIC_IRQ_VALID) { 147 if (pending.flags & CBE_IIC_IRQ_VALID) {
@@ -172,7 +162,7 @@ int iic_get_irq(struct pt_regs *regs)
172 162
173/* hardcoded part to be compatible with older firmware */ 163/* hardcoded part to be compatible with older firmware */
174 164
175static int setup_iic_hardcoded(void) 165static int __init setup_iic_hardcoded(void)
176{ 166{
177 struct device_node *np; 167 struct device_node *np;
178 int nodeid, cpu; 168 int nodeid, cpu;
@@ -207,12 +197,13 @@ static int setup_iic_hardcoded(void)
207 printk(KERN_INFO "IIC for CPU %d at %lx\n", cpu, regs); 197 printk(KERN_INFO "IIC for CPU %d at %lx\n", cpu, regs);
208 iic->regs = ioremap(regs, sizeof(struct cbe_iic_thread_regs)); 198 iic->regs = ioremap(regs, sizeof(struct cbe_iic_thread_regs));
209 iic->target_id = (nodeid << 4) + ((cpu & 1) ? 0xf : 0xe); 199 iic->target_id = (nodeid << 4) + ((cpu & 1) ? 0xf : 0xe);
200 iic->eoi_stack[0] = 0xff;
210 } 201 }
211 202
212 return 0; 203 return 0;
213} 204}
214 205
215static int setup_iic(void) 206static int __init setup_iic(void)
216{ 207{
217 struct device_node *dn; 208 struct device_node *dn;
218 unsigned long *regs; 209 unsigned long *regs;
@@ -248,11 +239,14 @@ static int setup_iic(void)
248 iic = &per_cpu(iic, np[0]); 239 iic = &per_cpu(iic, np[0]);
249 iic->regs = ioremap(regs[0], sizeof(struct cbe_iic_thread_regs)); 240 iic->regs = ioremap(regs[0], sizeof(struct cbe_iic_thread_regs));
250 iic->target_id = ((np[0] & 2) << 3) + ((np[0] & 1) ? 0xf : 0xe); 241 iic->target_id = ((np[0] & 2) << 3) + ((np[0] & 1) ? 0xf : 0xe);
242 iic->eoi_stack[0] = 0xff;
251 printk("IIC for CPU %d at %lx mapped to %p\n", np[0], regs[0], iic->regs); 243 printk("IIC for CPU %d at %lx mapped to %p\n", np[0], regs[0], iic->regs);
252 244
253 iic = &per_cpu(iic, np[1]); 245 iic = &per_cpu(iic, np[1]);
254 iic->regs = ioremap(regs[2], sizeof(struct cbe_iic_thread_regs)); 246 iic->regs = ioremap(regs[2], sizeof(struct cbe_iic_thread_regs));
255 iic->target_id = ((np[1] & 2) << 3) + ((np[1] & 1) ? 0xf : 0xe); 247 iic->target_id = ((np[1] & 2) << 3) + ((np[1] & 1) ? 0xf : 0xe);
248 iic->eoi_stack[0] = 0xff;
249
256 printk("IIC for CPU %d at %lx mapped to %p\n", np[1], regs[2], iic->regs); 250 printk("IIC for CPU %d at %lx mapped to %p\n", np[1], regs[2], iic->regs);
257 251
258 found++; 252 found++;
@@ -304,10 +298,10 @@ static void iic_request_ipi(int ipi, const char *name)
304 int irq; 298 int irq;
305 299
306 irq = iic_ipi_to_irq(ipi); 300 irq = iic_ipi_to_irq(ipi);
301
307 /* IPIs are marked IRQF_DISABLED as they must run with irqs 302 /* IPIs are marked IRQF_DISABLED as they must run with irqs
308 * disabled */ 303 * disabled */
309 get_irq_desc(irq)->chip = &iic_pic; 304 set_irq_chip_and_handler(irq, &iic_chip, handle_percpu_irq);
310 get_irq_desc(irq)->status |= IRQ_PER_CPU;
311 request_irq(irq, iic_ipi_action, IRQF_DISABLED, name, NULL); 305 request_irq(irq, iic_ipi_action, IRQF_DISABLED, name, NULL);
312} 306}
313 307
@@ -321,20 +315,26 @@ void iic_request_IPIs(void)
321} 315}
322#endif /* CONFIG_SMP */ 316#endif /* CONFIG_SMP */
323 317
324static void iic_setup_spe_handlers(void) 318static void __init iic_setup_builtin_handlers(void)
325{ 319{
326 int be, isrc; 320 int be, isrc;
327 321
328 /* Assume two threads per BE are present */ 322 /* XXX FIXME: Assume two threads per BE are present */
329 for (be=0; be < num_present_cpus() / 2; be++) { 323 for (be=0; be < num_present_cpus() / 2; be++) {
324 int irq;
325
326 /* setup SPE chip and handlers */
330 for (isrc = 0; isrc < IIC_CLASS_STRIDE * 3; isrc++) { 327 for (isrc = 0; isrc < IIC_CLASS_STRIDE * 3; isrc++) {
331 int irq = IIC_NODE_STRIDE * be + IIC_SPE_OFFSET + isrc; 328 irq = IIC_NODE_STRIDE * be + IIC_SPE_OFFSET + isrc;
332 get_irq_desc(irq)->chip = &iic_pic; 329 set_irq_chip_and_handler(irq, &iic_chip, handle_fasteoi_irq);
333 } 330 }
331 /* setup cascade chip */
332 irq = IIC_EXT_CASCADE + be * IIC_NODE_STRIDE;
333 set_irq_chip_and_handler(irq, &iic_chip, handle_fasteoi_irq);
334 } 334 }
335} 335}
336 336
337void iic_init_IRQ(void) 337void __init iic_init_IRQ(void)
338{ 338{
339 int cpu, irq_offset; 339 int cpu, irq_offset;
340 struct iic *iic; 340 struct iic *iic;
@@ -348,5 +348,6 @@ void iic_init_IRQ(void)
348 if (iic->regs) 348 if (iic->regs)
349 out_be64(&iic->regs->prio, 0xff); 349 out_be64(&iic->regs->prio, 0xff);
350 } 350 }
351 iic_setup_spe_handlers(); 351 iic_setup_builtin_handlers();
352
352} 353}