diff options
author | John Crispin <blogic@openwrt.org> | 2012-04-17 04:18:32 -0400 |
---|---|---|
committer | Ralf Baechle <ralf@linux-mips.org> | 2012-05-21 09:31:50 -0400 |
commit | 3645da0276ae9f6938ff29b13904b803ecb68424 (patch) | |
tree | 3a4a3a6314c5613a613d794f3b4ea12e4881ecbc /arch/mips | |
parent | a0392222d9a374588803454c4d2211108c64d4e4 (diff) |
OF: MIPS: lantiq: implement irq_domain support
Add support for irq_domain on Lantiq SoCs. The conversion is straightforward
as the ICU found inside the SoCs allows the usage of irq_domain_add_linear.
Hardware IRQs 0->7 are the generic MIPS IRQs. 8->199 are the Lantiq IRQ Modules.
Our irq_chip callbacks need to subtract 8 (MIPS_CPU_IRQ_CASCADE) from d->hwirq
to find the correct offset into the Interrupt Modules register range.
Signed-off-by: John Crispin <blogic@openwrt.org>
Cc: devicetree-discuss@lists.ozlabs.org
Cc: Grant Likely <grant.likely@secretlab.ca>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/3802/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips')
-rw-r--r-- | arch/mips/lantiq/irq.c | 178 |
1 files changed, 102 insertions, 76 deletions
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index d227be1c3c4d..57c1a4e51408 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c | |||
@@ -9,6 +9,11 @@ | |||
9 | 9 | ||
10 | #include <linux/interrupt.h> | 10 | #include <linux/interrupt.h> |
11 | #include <linux/ioport.h> | 11 | #include <linux/ioport.h> |
12 | #include <linux/sched.h> | ||
13 | #include <linux/irqdomain.h> | ||
14 | #include <linux/of_platform.h> | ||
15 | #include <linux/of_address.h> | ||
16 | #include <linux/of_irq.h> | ||
12 | 17 | ||
13 | #include <asm/bootinfo.h> | 18 | #include <asm/bootinfo.h> |
14 | #include <asm/irq_cpu.h> | 19 | #include <asm/irq_cpu.h> |
@@ -16,7 +21,7 @@ | |||
16 | #include <lantiq_soc.h> | 21 | #include <lantiq_soc.h> |
17 | #include <irq.h> | 22 | #include <irq.h> |
18 | 23 | ||
19 | /* register definitions */ | 24 | /* register definitions - internal irqs */ |
20 | #define LTQ_ICU_IM0_ISR 0x0000 | 25 | #define LTQ_ICU_IM0_ISR 0x0000 |
21 | #define LTQ_ICU_IM0_IER 0x0008 | 26 | #define LTQ_ICU_IM0_IER 0x0008 |
22 | #define LTQ_ICU_IM0_IOSR 0x0010 | 27 | #define LTQ_ICU_IM0_IOSR 0x0010 |
@@ -25,6 +30,7 @@ | |||
25 | #define LTQ_ICU_IM1_ISR 0x0028 | 30 | #define LTQ_ICU_IM1_ISR 0x0028 |
26 | #define LTQ_ICU_OFFSET (LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR) | 31 | #define LTQ_ICU_OFFSET (LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR) |
27 | 32 | ||
33 | /* register definitions - external irqs */ | ||
28 | #define LTQ_EIU_EXIN_C 0x0000 | 34 | #define LTQ_EIU_EXIN_C 0x0000 |
29 | #define LTQ_EIU_EXIN_INIC 0x0004 | 35 | #define LTQ_EIU_EXIN_INIC 0x0004 |
30 | #define LTQ_EIU_EXIN_INEN 0x000C | 36 | #define LTQ_EIU_EXIN_INEN 0x000C |
@@ -37,13 +43,14 @@ | |||
37 | #define LTQ_EIU_IR4 (INT_NUM_IM1_IRL0 + 1) | 43 | #define LTQ_EIU_IR4 (INT_NUM_IM1_IRL0 + 1) |
38 | #define LTQ_EIU_IR5 (INT_NUM_IM1_IRL0 + 2) | 44 | #define LTQ_EIU_IR5 (INT_NUM_IM1_IRL0 + 2) |
39 | #define LTQ_EIU_IR6 (INT_NUM_IM2_IRL0 + 30) | 45 | #define LTQ_EIU_IR6 (INT_NUM_IM2_IRL0 + 30) |
40 | 46 | #define XWAY_EXIN_COUNT 3 | |
41 | #define MAX_EIU 6 | 47 | #define MAX_EIU 6 |
42 | 48 | ||
43 | /* the performance counter */ | 49 | /* the performance counter */ |
44 | #define LTQ_PERF_IRQ (INT_NUM_IM4_IRL0 + 31) | 50 | #define LTQ_PERF_IRQ (INT_NUM_IM4_IRL0 + 31) |
45 | 51 | ||
46 | /* irqs generated by device attached to the EBU need to be acked in | 52 | /* |
53 | * irqs generated by devices attached to the EBU need to be acked in | ||
47 | * a special manner | 54 | * a special manner |
48 | */ | 55 | */ |
49 | #define LTQ_ICU_EBU_IRQ 22 | 56 | #define LTQ_ICU_EBU_IRQ 22 |
@@ -58,6 +65,9 @@ | |||
58 | #define MIPS_CPU_IPI_RESCHED_IRQ 0 | 65 | #define MIPS_CPU_IPI_RESCHED_IRQ 0 |
59 | #define MIPS_CPU_IPI_CALL_IRQ 1 | 66 | #define MIPS_CPU_IPI_CALL_IRQ 1 |
60 | 67 | ||
68 | /* we have a cascade of 8 irqs */ | ||
69 | #define MIPS_CPU_IRQ_CASCADE 8 | ||
70 | |||
61 | #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) | 71 | #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) |
62 | int gic_present; | 72 | int gic_present; |
63 | #endif | 73 | #endif |
@@ -71,64 +81,51 @@ static unsigned short ltq_eiu_irq[MAX_EIU] = { | |||
71 | LTQ_EIU_IR5, | 81 | LTQ_EIU_IR5, |
72 | }; | 82 | }; |
73 | 83 | ||
74 | static struct resource ltq_icu_resource = { | 84 | static int exin_avail; |
75 | .name = "icu", | ||
76 | .start = LTQ_ICU_BASE_ADDR, | ||
77 | .end = LTQ_ICU_BASE_ADDR + LTQ_ICU_SIZE - 1, | ||
78 | .flags = IORESOURCE_MEM, | ||
79 | }; | ||
80 | |||
81 | static struct resource ltq_eiu_resource = { | ||
82 | .name = "eiu", | ||
83 | .start = LTQ_EIU_BASE_ADDR, | ||
84 | .end = LTQ_EIU_BASE_ADDR + LTQ_ICU_SIZE - 1, | ||
85 | .flags = IORESOURCE_MEM, | ||
86 | }; | ||
87 | |||
88 | static void __iomem *ltq_icu_membase; | 85 | static void __iomem *ltq_icu_membase; |
89 | static void __iomem *ltq_eiu_membase; | 86 | static void __iomem *ltq_eiu_membase; |
90 | 87 | ||
91 | void ltq_disable_irq(struct irq_data *d) | 88 | void ltq_disable_irq(struct irq_data *d) |
92 | { | 89 | { |
93 | u32 ier = LTQ_ICU_IM0_IER; | 90 | u32 ier = LTQ_ICU_IM0_IER; |
94 | int irq_nr = d->irq - INT_NUM_IRQ0; | 91 | int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE; |
95 | 92 | ||
96 | ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); | 93 | ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET); |
97 | irq_nr %= INT_NUM_IM_OFFSET; | 94 | offset %= INT_NUM_IM_OFFSET; |
98 | ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier); | 95 | ltq_icu_w32(ltq_icu_r32(ier) & ~BIT(offset), ier); |
99 | } | 96 | } |
100 | 97 | ||
101 | void ltq_mask_and_ack_irq(struct irq_data *d) | 98 | void ltq_mask_and_ack_irq(struct irq_data *d) |
102 | { | 99 | { |
103 | u32 ier = LTQ_ICU_IM0_IER; | 100 | u32 ier = LTQ_ICU_IM0_IER; |
104 | u32 isr = LTQ_ICU_IM0_ISR; | 101 | u32 isr = LTQ_ICU_IM0_ISR; |
105 | int irq_nr = d->irq - INT_NUM_IRQ0; | 102 | int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE; |
106 | 103 | ||
107 | ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); | 104 | ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET); |
108 | isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); | 105 | isr += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET); |
109 | irq_nr %= INT_NUM_IM_OFFSET; | 106 | offset %= INT_NUM_IM_OFFSET; |
110 | ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier); | 107 | ltq_icu_w32(ltq_icu_r32(ier) & ~BIT(offset), ier); |
111 | ltq_icu_w32((1 << irq_nr), isr); | 108 | ltq_icu_w32(BIT(offset), isr); |
112 | } | 109 | } |
113 | 110 | ||
114 | static void ltq_ack_irq(struct irq_data *d) | 111 | static void ltq_ack_irq(struct irq_data *d) |
115 | { | 112 | { |
116 | u32 isr = LTQ_ICU_IM0_ISR; | 113 | u32 isr = LTQ_ICU_IM0_ISR; |
117 | int irq_nr = d->irq - INT_NUM_IRQ0; | 114 | int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE; |
118 | 115 | ||
119 | isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); | 116 | isr += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET); |
120 | irq_nr %= INT_NUM_IM_OFFSET; | 117 | offset %= INT_NUM_IM_OFFSET; |
121 | ltq_icu_w32((1 << irq_nr), isr); | 118 | ltq_icu_w32(BIT(offset), isr); |
122 | } | 119 | } |
123 | 120 | ||
124 | void ltq_enable_irq(struct irq_data *d) | 121 | void ltq_enable_irq(struct irq_data *d) |
125 | { | 122 | { |
126 | u32 ier = LTQ_ICU_IM0_IER; | 123 | u32 ier = LTQ_ICU_IM0_IER; |
127 | int irq_nr = d->irq - INT_NUM_IRQ0; | 124 | int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE; |
128 | 125 | ||
129 | ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); | 126 | ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET); |
130 | irq_nr %= INT_NUM_IM_OFFSET; | 127 | offset %= INT_NUM_IM_OFFSET; |
131 | ltq_icu_w32(ltq_icu_r32(ier) | (1 << irq_nr), ier); | 128 | ltq_icu_w32(ltq_icu_r32(ier) | BIT(offset), ier); |
132 | } | 129 | } |
133 | 130 | ||
134 | static unsigned int ltq_startup_eiu_irq(struct irq_data *d) | 131 | static unsigned int ltq_startup_eiu_irq(struct irq_data *d) |
@@ -137,15 +134,15 @@ static unsigned int ltq_startup_eiu_irq(struct irq_data *d) | |||
137 | 134 | ||
138 | ltq_enable_irq(d); | 135 | ltq_enable_irq(d); |
139 | for (i = 0; i < MAX_EIU; i++) { | 136 | for (i = 0; i < MAX_EIU; i++) { |
140 | if (d->irq == ltq_eiu_irq[i]) { | 137 | if (d->hwirq == ltq_eiu_irq[i]) { |
141 | /* low level - we should really handle set_type */ | 138 | /* low level - we should really handle set_type */ |
142 | ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) | | 139 | ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) | |
143 | (0x6 << (i * 4)), LTQ_EIU_EXIN_C); | 140 | (0x6 << (i * 4)), LTQ_EIU_EXIN_C); |
144 | /* clear all pending */ | 141 | /* clear all pending */ |
145 | ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~(1 << i), | 142 | ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~BIT(i), |
146 | LTQ_EIU_EXIN_INIC); | 143 | LTQ_EIU_EXIN_INIC); |
147 | /* enable */ | 144 | /* enable */ |
148 | ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | (1 << i), | 145 | ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i), |
149 | LTQ_EIU_EXIN_INEN); | 146 | LTQ_EIU_EXIN_INEN); |
150 | break; | 147 | break; |
151 | } | 148 | } |
@@ -160,9 +157,9 @@ static void ltq_shutdown_eiu_irq(struct irq_data *d) | |||
160 | 157 | ||
161 | ltq_disable_irq(d); | 158 | ltq_disable_irq(d); |
162 | for (i = 0; i < MAX_EIU; i++) { | 159 | for (i = 0; i < MAX_EIU; i++) { |
163 | if (d->irq == ltq_eiu_irq[i]) { | 160 | if (d->hwirq == ltq_eiu_irq[i]) { |
164 | /* disable */ | 161 | /* disable */ |
165 | ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i), | 162 | ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i), |
166 | LTQ_EIU_EXIN_INEN); | 163 | LTQ_EIU_EXIN_INEN); |
167 | break; | 164 | break; |
168 | } | 165 | } |
@@ -199,14 +196,15 @@ static void ltq_hw_irqdispatch(int module) | |||
199 | if (irq == 0) | 196 | if (irq == 0) |
200 | return; | 197 | return; |
201 | 198 | ||
202 | /* silicon bug causes only the msb set to 1 to be valid. all | 199 | /* |
200 | * silicon bug causes only the msb set to 1 to be valid. all | ||
203 | * other bits might be bogus | 201 | * other bits might be bogus |
204 | */ | 202 | */ |
205 | irq = __fls(irq); | 203 | irq = __fls(irq); |
206 | do_IRQ((int)irq + INT_NUM_IM0_IRL0 + (INT_NUM_IM_OFFSET * module)); | 204 | do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module)); |
207 | 205 | ||
208 | /* if this is a EBU irq, we need to ack it or get a deadlock */ | 206 | /* if this is a EBU irq, we need to ack it or get a deadlock */ |
209 | if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0)) | 207 | if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT) |
210 | ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10, | 208 | ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10, |
211 | LTQ_EBU_PCC_ISTAT); | 209 | LTQ_EBU_PCC_ISTAT); |
212 | } | 210 | } |
@@ -290,38 +288,67 @@ out: | |||
290 | return; | 288 | return; |
291 | } | 289 | } |
292 | 290 | ||
291 | static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) | ||
292 | { | ||
293 | struct irq_chip *chip = <q_irq_type; | ||
294 | int i; | ||
295 | |||
296 | for (i = 0; i < exin_avail; i++) | ||
297 | if (hw == ltq_eiu_irq[i]) | ||
298 | chip = <q_eiu_type; | ||
299 | |||
300 | irq_set_chip_and_handler(hw, chip, handle_level_irq); | ||
301 | |||
302 | return 0; | ||
303 | } | ||
304 | |||
305 | static const struct irq_domain_ops irq_domain_ops = { | ||
306 | .xlate = irq_domain_xlate_onetwocell, | ||
307 | .map = icu_map, | ||
308 | }; | ||
309 | |||
293 | static struct irqaction cascade = { | 310 | static struct irqaction cascade = { |
294 | .handler = no_action, | 311 | .handler = no_action, |
295 | .name = "cascade", | 312 | .name = "cascade", |
296 | }; | 313 | }; |
297 | 314 | ||
298 | void __init arch_init_irq(void) | 315 | int __init icu_of_init(struct device_node *node, struct device_node *parent) |
299 | { | 316 | { |
317 | struct device_node *eiu_node; | ||
318 | struct resource res; | ||
300 | int i; | 319 | int i; |
301 | 320 | ||
302 | if (insert_resource(&iomem_resource, <q_icu_resource) < 0) | 321 | if (of_address_to_resource(node, 0, &res)) |
303 | panic("Failed to insert icu memory"); | 322 | panic("Failed to get icu memory range"); |
304 | 323 | ||
305 | if (request_mem_region(ltq_icu_resource.start, | 324 | if (request_mem_region(res.start, resource_size(&res), res.name) < 0) |
306 | resource_size(<q_icu_resource), "icu") < 0) | 325 | pr_err("Failed to request icu memory"); |
307 | panic("Failed to request icu memory"); | ||
308 | 326 | ||
309 | ltq_icu_membase = ioremap_nocache(ltq_icu_resource.start, | 327 | ltq_icu_membase = ioremap_nocache(res.start, resource_size(&res)); |
310 | resource_size(<q_icu_resource)); | ||
311 | if (!ltq_icu_membase) | 328 | if (!ltq_icu_membase) |
312 | panic("Failed to remap icu memory"); | 329 | panic("Failed to remap icu memory"); |
313 | 330 | ||
314 | if (insert_resource(&iomem_resource, <q_eiu_resource) < 0) | 331 | /* the external interrupts are optional and xway only */ |
315 | panic("Failed to insert eiu memory"); | 332 | eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu"); |
316 | 333 | if (eiu_node && of_address_to_resource(eiu_node, 0, &res)) { | |
317 | if (request_mem_region(ltq_eiu_resource.start, | 334 | /* find out how many external irq sources we have */ |
318 | resource_size(<q_eiu_resource), "eiu") < 0) | 335 | const __be32 *count = of_get_property(node, |
319 | panic("Failed to request eiu memory"); | 336 | "lantiq,count", NULL); |
320 | 337 | ||
321 | ltq_eiu_membase = ioremap_nocache(ltq_eiu_resource.start, | 338 | if (count) |
322 | resource_size(<q_eiu_resource)); | 339 | exin_avail = *count; |
323 | if (!ltq_eiu_membase) | 340 | if (exin_avail > MAX_EIU) |
324 | panic("Failed to remap eiu memory"); | 341 | exin_avail = MAX_EIU; |
342 | |||
343 | if (request_mem_region(res.start, resource_size(&res), | ||
344 | res.name) < 0) | ||
345 | pr_err("Failed to request eiu memory"); | ||
346 | |||
347 | ltq_eiu_membase = ioremap_nocache(res.start, | ||
348 | resource_size(&res)); | ||
349 | if (!ltq_eiu_membase) | ||
350 | panic("Failed to remap eiu memory"); | ||
351 | } | ||
325 | 352 | ||
326 | /* turn off all irqs by default */ | 353 | /* turn off all irqs by default */ |
327 | for (i = 0; i < 5; i++) { | 354 | for (i = 0; i < 5; i++) { |
@@ -346,20 +373,8 @@ void __init arch_init_irq(void) | |||
346 | set_vi_handler(7, ltq_hw5_irqdispatch); | 373 | set_vi_handler(7, ltq_hw5_irqdispatch); |
347 | } | 374 | } |
348 | 375 | ||
349 | for (i = INT_NUM_IRQ0; | 376 | irq_domain_add_linear(node, 6 * INT_NUM_IM_OFFSET, |
350 | i <= (INT_NUM_IRQ0 + (5 * INT_NUM_IM_OFFSET)); i++) | 377 | &irq_domain_ops, 0); |
351 | if ((i == LTQ_EIU_IR0) || (i == LTQ_EIU_IR1) || | ||
352 | (i == LTQ_EIU_IR2)) | ||
353 | irq_set_chip_and_handler(i, <q_eiu_type, | ||
354 | handle_level_irq); | ||
355 | /* EIU3-5 only exist on ar9 and vr9 */ | ||
356 | else if (((i == LTQ_EIU_IR3) || (i == LTQ_EIU_IR4) || | ||
357 | (i == LTQ_EIU_IR5)) && (ltq_is_ar9() || ltq_is_vr9())) | ||
358 | irq_set_chip_and_handler(i, <q_eiu_type, | ||
359 | handle_level_irq); | ||
360 | else | ||
361 | irq_set_chip_and_handler(i, <q_irq_type, | ||
362 | handle_level_irq); | ||
363 | 378 | ||
364 | #if defined(CONFIG_MIPS_MT_SMP) | 379 | #if defined(CONFIG_MIPS_MT_SMP) |
365 | if (cpu_has_vint) { | 380 | if (cpu_has_vint) { |
@@ -382,9 +397,20 @@ void __init arch_init_irq(void) | |||
382 | 397 | ||
383 | /* tell oprofile which irq to use */ | 398 | /* tell oprofile which irq to use */ |
384 | cp0_perfcount_irq = LTQ_PERF_IRQ; | 399 | cp0_perfcount_irq = LTQ_PERF_IRQ; |
400 | return 0; | ||
385 | } | 401 | } |
386 | 402 | ||
387 | unsigned int __cpuinit get_c0_compare_int(void) | 403 | unsigned int __cpuinit get_c0_compare_int(void) |
388 | { | 404 | { |
389 | return CP0_LEGACY_COMPARE_IRQ; | 405 | return CP0_LEGACY_COMPARE_IRQ; |
390 | } | 406 | } |
407 | |||
408 | static struct of_device_id __initdata of_irq_ids[] = { | ||
409 | { .compatible = "lantiq,icu", .data = icu_of_init }, | ||
410 | {}, | ||
411 | }; | ||
412 | |||
413 | void __init arch_init_irq(void) | ||
414 | { | ||
415 | of_irq_init(of_irq_ids); | ||
416 | } | ||