author		John Crispin <blogic@openwrt.org>	2012-08-16 07:39:57 -0400
committer	John Crispin <blogic@openwrt.org>	2012-08-22 18:08:17 -0400
commit		61fa969f27ec58296544bf94d058f3aa704cb8d9
tree		b4c8597b9b6fc758be9a42f3940cec7b2893f888
parent		fea7a08acb13524b47711625eebea40a0ede69a0
MIPS: lantiq: split up IRQ IM ranges
Up to now all our SoCs had the 5 IM ranges in consecutive order. To accommodate the SVIP we need to support IM ranges that are scattered inside the register range.

Signed-off-by: John Crispin <blogic@openwrt.org>
Patchwork: http://patchwork.linux-mips.org/patch/4237/
Diffstat (limited to 'arch/mips/lantiq')
-rw-r--r--	arch/mips/lantiq/irq.c	60
1 file changed, 32 insertions(+), 28 deletions(-)
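The idea behind the change, as a minimal illustrative sketch (the demo_* names, the example DEMO_ICU_STRIDE value and the writel() calls are stand-ins, not code from this patch; only the shape of the per-IM base-array lookup mirrors it): instead of computing a register address as the IM0 base plus im * stride, which only works when the five IM blocks sit back to back, each IM block gets its own mapped base and im indexes that array, so the blocks may live anywhere in the register space.

#include <linux/io.h>
#include <linux/types.h>

#define DEMO_MAX_IM	5	/* number of IM blocks, like MAX_IM */
#define DEMO_ICU_STRIDE	0x28	/* hypothetical block stride, stands in for LTQ_ICU_OFFSET */

/* Before: one mapping; IM blocks must be consecutive in the register file. */
static void __iomem *demo_icu_base;

static void demo_write_old(int im, u32 val, u32 reg)
{
        writel(val, demo_icu_base + reg + im * DEMO_ICU_STRIDE);
}

/* After: one mapping per IM block; blocks may be scattered (as on the SVIP). */
static void __iomem *demo_icu_base_im[DEMO_MAX_IM];

static void demo_write_new(int im, u32 val, u32 reg)
{
        writel(val, demo_icu_base_im[im] + reg);
}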
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 57c1a4e51408..a2699a70322b 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -55,8 +55,8 @@
  */
 #define LTQ_ICU_EBU_IRQ 22
 
-#define ltq_icu_w32(x, y) ltq_w32((x), ltq_icu_membase + (y))
-#define ltq_icu_r32(x) ltq_r32(ltq_icu_membase + (x))
+#define ltq_icu_w32(m, x, y) ltq_w32((x), ltq_icu_membase[m] + (y))
+#define ltq_icu_r32(m, x) ltq_r32(ltq_icu_membase[m] + (x))
 
 #define ltq_eiu_w32(x, y) ltq_w32((x), ltq_eiu_membase + (y))
 #define ltq_eiu_r32(x) ltq_r32(ltq_eiu_membase + (x))
@@ -82,17 +82,17 @@ static unsigned short ltq_eiu_irq[MAX_EIU] = {
 };
 
 static int exin_avail;
-static void __iomem *ltq_icu_membase;
+static void __iomem *ltq_icu_membase[MAX_IM];
 static void __iomem *ltq_eiu_membase;
 
 void ltq_disable_irq(struct irq_data *d)
 {
        u32 ier = LTQ_ICU_IM0_IER;
        int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+       int im = offset / INT_NUM_IM_OFFSET;
 
-       ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
        offset %= INT_NUM_IM_OFFSET;
-       ltq_icu_w32(ltq_icu_r32(ier) & ~BIT(offset), ier);
+       ltq_icu_w32(im, ltq_icu_r32(im, ier) & ~BIT(offset), ier);
 }
 
 void ltq_mask_and_ack_irq(struct irq_data *d)
@@ -100,32 +100,31 @@ void ltq_mask_and_ack_irq(struct irq_data *d)
        u32 ier = LTQ_ICU_IM0_IER;
        u32 isr = LTQ_ICU_IM0_ISR;
        int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+       int im = offset / INT_NUM_IM_OFFSET;
 
-       ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
-       isr += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
        offset %= INT_NUM_IM_OFFSET;
-       ltq_icu_w32(ltq_icu_r32(ier) & ~BIT(offset), ier);
-       ltq_icu_w32(BIT(offset), isr);
+       ltq_icu_w32(im, ltq_icu_r32(im, ier) & ~BIT(offset), ier);
+       ltq_icu_w32(im, BIT(offset), isr);
 }
 
 static void ltq_ack_irq(struct irq_data *d)
 {
        u32 isr = LTQ_ICU_IM0_ISR;
        int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+       int im = offset / INT_NUM_IM_OFFSET;
 
-       isr += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
        offset %= INT_NUM_IM_OFFSET;
-       ltq_icu_w32(BIT(offset), isr);
+       ltq_icu_w32(im, BIT(offset), isr);
 }
 
 void ltq_enable_irq(struct irq_data *d)
 {
        u32 ier = LTQ_ICU_IM0_IER;
        int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+       int im = offset / INT_NUM_IM_OFFSET;
 
-       ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
        offset %= INT_NUM_IM_OFFSET;
-       ltq_icu_w32(ltq_icu_r32(ier) | BIT(offset), ier);
+       ltq_icu_w32(im, ltq_icu_r32(im, ier) | BIT(offset), ier);
 }
 
 static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
@@ -192,7 +191,7 @@ static void ltq_hw_irqdispatch(int module)
 {
        u32 irq;
 
-       irq = ltq_icu_r32(LTQ_ICU_IM0_IOSR + (module * LTQ_ICU_OFFSET));
+       irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
        if (irq == 0)
                return;
 
@@ -275,7 +274,7 @@ asmlinkage void plat_irq_dispatch(void)
                do_IRQ(MIPS_CPU_TIMER_IRQ);
                goto out;
        } else {
-               for (i = 0; i < 5; i++) {
+               for (i = 0; i < MAX_IM; i++) {
                        if (pending & (CAUSEF_IP2 << i)) {
                                ltq_hw_irqdispatch(i);
                                goto out;
@@ -318,15 +317,19 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
        struct resource res;
        int i;
 
-       if (of_address_to_resource(node, 0, &res))
-               panic("Failed to get icu memory range");
+       for (i = 0; i < MAX_IM; i++) {
+               if (of_address_to_resource(node, i, &res))
+                       panic("Failed to get icu memory range");
 
-       if (request_mem_region(res.start, resource_size(&res), res.name) < 0)
-               pr_err("Failed to request icu memory");
+               if (request_mem_region(res.start, resource_size(&res),
+                               res.name) < 0)
+                       pr_err("Failed to request icu memory");
 
-       ltq_icu_membase = ioremap_nocache(res.start, resource_size(&res));
-       if (!ltq_icu_membase)
-               panic("Failed to remap icu memory");
+               ltq_icu_membase[i] = ioremap_nocache(res.start,
+                               resource_size(&res));
+               if (!ltq_icu_membase[i])
+                       panic("Failed to remap icu memory");
+       }
 
        /* the external interrupts are optional and xway only */
        eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu");
@@ -351,17 +354,17 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
        }
 
        /* turn off all irqs by default */
-       for (i = 0; i < 5; i++) {
+       for (i = 0; i < MAX_IM; i++) {
                /* make sure all irqs are turned off by default */
-               ltq_icu_w32(0, LTQ_ICU_IM0_IER + (i * LTQ_ICU_OFFSET));
+               ltq_icu_w32(i, 0, LTQ_ICU_IM0_IER);
                /* clear all possibly pending interrupts */
-               ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET));
+               ltq_icu_w32(i, ~0, LTQ_ICU_IM0_ISR);
        }
 
        mips_cpu_irq_init();
 
-       for (i = 2; i <= 6; i++)
-               setup_irq(i, &cascade);
+       for (i = 0; i < MAX_IM; i++)
+               setup_irq(i + 2, &cascade);
 
        if (cpu_has_vint) {
                pr_info("Setting up vectored interrupts\n");
@@ -373,7 +376,8 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
                set_vi_handler(7, ltq_hw5_irqdispatch);
        }
 
-       irq_domain_add_linear(node, 6 * INT_NUM_IM_OFFSET,
+       irq_domain_add_linear(node,
+                       (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
                        &irq_domain_ops, 0);
 
 #if defined(CONFIG_MIPS_MT_SMP)