Diffstat (limited to 'arch/mn10300/kernel/irq.c')
-rw-r--r--  arch/mn10300/kernel/irq.c | 266
1 file changed, 241 insertions, 25 deletions
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index b5b970d2954a..80f15725ecad 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -12,11 +12,34 @@
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
+#include <linux/cpumask.h>
 #include <asm/setup.h>
+#include <asm/serial-regs.h>
 
-unsigned long __mn10300_irq_enabled_epsw = EPSW_IE | EPSW_IM_7;
+#ifdef CONFIG_SMP
+#undef GxICR
+#define GxICR(X) CROSS_GxICR(X, irq_affinity_online[X])
+
+#undef GxICR_u8
+#define GxICR_u8(X) CROSS_GxICR_u8(X, irq_affinity_online[X])
+#endif /* CONFIG_SMP */
+
+unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = {
+	[0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7
+};
 EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);
 
+#ifdef CONFIG_SMP
+static char irq_affinity_online[NR_IRQS] = {
+	[0 ... NR_IRQS - 1] = 0
+};
+
+#define NR_IRQ_WORDS ((NR_IRQS + 31) / 32)
+static unsigned long irq_affinity_request[NR_IRQ_WORDS] = {
+	[0 ... NR_IRQ_WORDS - 1] = 0
+};
+#endif /* CONFIG_SMP */
+
 atomic_t irq_err_count;
 
 /*
@@ -24,30 +47,65 @@ atomic_t irq_err_count;
  */
 static void mn10300_cpupic_ack(unsigned int irq)
 {
+	unsigned long flags;
+	u16 tmp;
+
+	flags = arch_local_cli_save();
+	GxICR_u8(irq) = GxICR_DETECT;
+	tmp = GxICR(irq);
+	arch_local_irq_restore(flags);
+}
+
+static void __mask_and_set_icr(unsigned int irq,
+			       unsigned int mask, unsigned int set)
+{
+	unsigned long flags;
 	u16 tmp;
-	*(volatile u8 *) &GxICR(irq) = GxICR_DETECT;
+
+	flags = arch_local_cli_save();
 	tmp = GxICR(irq);
+	GxICR(irq) = (tmp & mask) | set;
+	tmp = GxICR(irq);
+	arch_local_irq_restore(flags);
 }
 
 static void mn10300_cpupic_mask(unsigned int irq)
 {
-	u16 tmp = GxICR(irq);
-	GxICR(irq) = (tmp & GxICR_LEVEL);
-	tmp = GxICR(irq);
+	__mask_and_set_icr(irq, GxICR_LEVEL, 0);
 }
 
 static void mn10300_cpupic_mask_ack(unsigned int irq)
 {
-	u16 tmp = GxICR(irq);
-	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
-	tmp = GxICR(irq);
+#ifdef CONFIG_SMP
+	unsigned long flags;
+	u16 tmp;
+
+	flags = arch_local_cli_save();
+
+	if (!test_and_clear_bit(irq, irq_affinity_request)) {
+		tmp = GxICR(irq);
+		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
+		tmp = GxICR(irq);
+	} else {
+		u16 tmp2;
+		tmp = GxICR(irq);
+		GxICR(irq) = (tmp & GxICR_LEVEL);
+		tmp2 = GxICR(irq);
+
+		irq_affinity_online[irq] = any_online_cpu(*irq_desc[irq].affinity);
+		GxICR(irq) = (tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
+		tmp = GxICR(irq);
+	}
+
+	arch_local_irq_restore(flags);
+#else /* CONFIG_SMP */
+	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT);
+#endif /* CONFIG_SMP */
 }
 
 static void mn10300_cpupic_unmask(unsigned int irq)
 {
-	u16 tmp = GxICR(irq);
-	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
-	tmp = GxICR(irq);
+	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE);
 }
 
 static void mn10300_cpupic_unmask_clear(unsigned int irq)
@@ -56,11 +114,89 @@ static void mn10300_cpupic_unmask_clear(unsigned int irq)
 	 * device has ceased to assert its interrupt line and the interrupt
 	 * channel has been disabled in the PIC, so for level-triggered
 	 * interrupts we need to clear the request bit when we re-enable */
-	u16 tmp = GxICR(irq);
-	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
-	tmp = GxICR(irq);
+#ifdef CONFIG_SMP
+	unsigned long flags;
+	u16 tmp;
+
+	flags = arch_local_cli_save();
+
+	if (!test_and_clear_bit(irq, irq_affinity_request)) {
+		tmp = GxICR(irq);
+		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
+		tmp = GxICR(irq);
+	} else {
+		tmp = GxICR(irq);
+
+		irq_affinity_online[irq] = any_online_cpu(*irq_desc[irq].affinity);
+		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
+		tmp = GxICR(irq);
+	}
+
+	arch_local_irq_restore(flags);
+#else /* CONFIG_SMP */
+	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT);
+#endif /* CONFIG_SMP */
 }
 
+#ifdef CONFIG_SMP
+static int
+mn10300_cpupic_setaffinity(unsigned int irq, const struct cpumask *mask)
+{
+	unsigned long flags;
+	int err;
+
+	flags = arch_local_cli_save();
+
+	/* check irq no */
+	switch (irq) {
+	case TMJCIRQ:
+	case RESCHEDULE_IPI:
+	case CALL_FUNC_SINGLE_IPI:
+	case LOCAL_TIMER_IPI:
+	case FLUSH_CACHE_IPI:
+	case CALL_FUNCTION_NMI_IPI:
+	case GDB_NMI_IPI:
+#ifdef CONFIG_MN10300_TTYSM0
+	case SC0RXIRQ:
+	case SC0TXIRQ:
+#ifdef CONFIG_MN10300_TTYSM0_TIMER8
+	case TM8IRQ:
+#elif CONFIG_MN10300_TTYSM0_TIMER2
+	case TM2IRQ:
+#endif /* CONFIG_MN10300_TTYSM0_TIMER8 */
+#endif /* CONFIG_MN10300_TTYSM0 */
+
+#ifdef CONFIG_MN10300_TTYSM1
+	case SC1RXIRQ:
+	case SC1TXIRQ:
+#ifdef CONFIG_MN10300_TTYSM1_TIMER12
+	case TM12IRQ:
+#elif CONFIG_MN10300_TTYSM1_TIMER9
+	case TM9IRQ:
+#elif CONFIG_MN10300_TTYSM1_TIMER3
+	case TM3IRQ:
+#endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
+#endif /* CONFIG_MN10300_TTYSM1 */
+
+#ifdef CONFIG_MN10300_TTYSM2
+	case SC2RXIRQ:
+	case SC2TXIRQ:
+	case TM10IRQ:
+#endif /* CONFIG_MN10300_TTYSM2 */
+		err = -1;
+		break;
+
+	default:
+		set_bit(irq, irq_affinity_request);
+		err = 0;
+		break;
+	}
+
+	arch_local_irq_restore(flags);
+	return err;
+}
+#endif /* CONFIG_SMP */
+
 /*
  * MN10300 PIC level-triggered IRQ handling.
  *
@@ -79,6 +215,9 @@ static struct irq_chip mn10300_cpu_pic_level = {
 	.mask = mn10300_cpupic_mask,
 	.mask_ack = mn10300_cpupic_mask,
 	.unmask = mn10300_cpupic_unmask_clear,
+#ifdef CONFIG_SMP
+	.set_affinity = mn10300_cpupic_setaffinity,
+#endif /* CONFIG_SMP */
 };
 
 /*
@@ -94,6 +233,9 @@ static struct irq_chip mn10300_cpu_pic_edge = {
 	.mask = mn10300_cpupic_mask,
 	.mask_ack = mn10300_cpupic_mask_ack,
 	.unmask = mn10300_cpupic_unmask,
+#ifdef CONFIG_SMP
+	.set_affinity = mn10300_cpupic_setaffinity,
+#endif /* CONFIG_SMP */
 };
 
 /*
@@ -111,14 +253,34 @@ void ack_bad_irq(int irq)
  */
 void set_intr_level(int irq, u16 level)
 {
-	u16 tmp;
+	BUG_ON(in_interrupt());
 
-	if (in_interrupt())
-		BUG();
+	__mask_and_set_icr(irq, GxICR_ENABLE, level);
+}
 
-	tmp = GxICR(irq);
-	GxICR(irq) = (tmp & GxICR_ENABLE) | level;
-	tmp = GxICR(irq);
+void mn10300_intc_set_level(unsigned int irq, unsigned int level)
+{
+	set_intr_level(irq, NUM2GxICR_LEVEL(level) & GxICR_LEVEL);
+}
+
+void mn10300_intc_clear(unsigned int irq)
+{
+	__mask_and_set_icr(irq, GxICR_LEVEL | GxICR_ENABLE, GxICR_DETECT);
+}
+
+void mn10300_intc_set(unsigned int irq)
+{
+	__mask_and_set_icr(irq, 0, GxICR_REQUEST | GxICR_DETECT);
+}
+
+void mn10300_intc_enable(unsigned int irq)
+{
+	mn10300_cpupic_unmask(irq);
+}
+
+void mn10300_intc_disable(unsigned int irq)
+{
+	mn10300_cpupic_mask(irq);
 }
 
 /*
@@ -126,7 +288,7 @@ void set_intr_level(int irq, u16 level)
  * than before
  * - see Documentation/mn10300/features.txt
  */
-void set_intr_postackable(int irq)
+void mn10300_set_lateack_irq_type(int irq)
 {
 	set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level,
 				 handle_level_irq);
@@ -147,6 +309,7 @@ void __init init_IRQ(void)
 			 * interrupts */
 			set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge,
 						 handle_level_irq);
+
 	unit_init_IRQ();
 }
 
@@ -156,6 +319,7 @@ void __init init_IRQ(void)
 asmlinkage void do_IRQ(void)
 {
 	unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
+	unsigned int cpu_id = smp_processor_id();
 	int irq;
 
 	sp = current_stack_pointer();
@@ -163,12 +327,14 @@ asmlinkage void do_IRQ(void)
 
 	/* make sure local_irq_enable() doesn't muck up the interrupt priority
 	 * setting in EPSW */
-	old_irq_enabled_epsw = __mn10300_irq_enabled_epsw;
+	old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id];
 	local_save_flags(epsw);
-	__mn10300_irq_enabled_epsw = EPSW_IE | (EPSW_IM & epsw);
+	__mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw);
 	irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;
 
-	__IRQ_STAT(smp_processor_id(), __irq_count)++;
+#ifdef CONFIG_MN10300_WD_TIMER
+	__IRQ_STAT(cpu_id, __irq_count)++;
+#endif
 
 	irq_enter();
 
@@ -188,7 +354,7 @@ asmlinkage void do_IRQ(void)
 		local_irq_restore(epsw);
 	}
 
-	__mn10300_irq_enabled_epsw = old_irq_enabled_epsw;
+	__mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw;
 
 	irq_exit();
 }
@@ -239,11 +405,13 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	/* polish off with NMI and error counters */
 	case NR_IRQS:
+#ifdef CONFIG_MN10300_WD_TIMER
 		seq_printf(p, "NMI: ");
 		for (j = 0; j < NR_CPUS; j++)
 			if (cpu_online(j))
 				seq_printf(p, "%10u ", nmi_count(j));
 		seq_putc(p, '\n');
+#endif
 
 		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
 		break;
@@ -251,3 +419,51 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	return 0;
 }
+
+#ifdef CONFIG_HOTPLUG_CPU
+void migrate_irqs(void)
+{
+	irq_desc_t *desc;
+	int irq;
+	unsigned int self, new;
+	unsigned long flags;
+
+	self = smp_processor_id();
+	for (irq = 0; irq < NR_IRQS; irq++) {
+		desc = irq_desc + irq;
+
+		if (desc->status == IRQ_PER_CPU)
+			continue;
+
+		if (cpu_isset(self, irq_desc[irq].affinity) &&
+		    !cpus_intersects(irq_affinity[irq], cpu_online_map)) {
+			int cpu_id;
+			cpu_id = first_cpu(cpu_online_map);
+			cpu_set(cpu_id, irq_desc[irq].affinity);
+		}
+		/* We need to operate irq_affinity_online atomically. */
+		arch_local_cli_save(flags);
+		if (irq_affinity_online[irq] == self) {
+			u16 x, tmp;
+
+			x = CROSS_GxICR(irq, self);
+			CROSS_GxICR(irq, self) = x & GxICR_LEVEL;
+			tmp = CROSS_GxICR(irq, self);
+
+			new = any_online_cpu(irq_desc[irq].affinity);
+			irq_affinity_online[irq] = new;
+
+			CROSS_GxICR(irq, new) =
+				(x & GxICR_LEVEL) | GxICR_DETECT;
+			tmp = CROSS_GxICR(irq, new);
+
+			x &= GxICR_LEVEL | GxICR_ENABLE;
+			if (CROSS_GxICR(irq, self) & GxICR_REQUEST)
+				x |= GxICR_REQUEST | GxICR_DETECT;
+			CROSS_GxICR(irq, new) = x;
+			tmp = CROSS_GxICR(irq, new);
+		}
+		arch_local_irq_restore(flags);
+	}
+}
+#endif /* CONFIG_HOTPLUG_CPU */
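
The central trick in this patch is the SMP redefinition of GxICR()/GxICR_u8() at the top of the file: every ICR access made by the irq_chip callbacks is routed through CROSS_GxICR() to the register bank of the CPU recorded in irq_affinity_online[irq]. mn10300_cpupic_setaffinity() never touches the hardware itself; it only sets a bit in irq_affinity_request, and the actual retargeting happens at the next mask_ack/unmask_clear, once the interrupt has been quiesced. The sketch below is a stand-alone illustration of that deferred-retarget pattern under simplified assumptions (no locking, new owner passed in directly); all names in it (icr_regs, owner, request_move, mask_ack, the ICR_* constants) are invented for the example and are not part of the kernel code above.

/*
 * Stand-alone sketch of the deferred IRQ re-targeting pattern used in the
 * patch above. Writes to "the" ICR go through a per-IRQ owner table, an
 * affinity change merely records a request, and the register state is
 * migrated to the new owner at the next acknowledge.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_CPUS 2
#define NUM_IRQS 8

#define ICR_ENABLE  0x0100
#define ICR_REQUEST 0x0040
#define ICR_DETECT  0x0020
#define ICR_LEVEL   0x7000

static uint16_t icr_regs[NUM_CPUS][NUM_IRQS];  /* per-CPU ICR banks */
static int owner[NUM_IRQS];                    /* analogue of irq_affinity_online[] */
static bool move_requested[NUM_IRQS];          /* analogue of irq_affinity_request[] */

/* Like the SMP GxICR() redefinition: always touch the current owner's bank. */
static uint16_t icr_read(int irq)          { return icr_regs[owner[irq]][irq]; }
static void icr_write(int irq, uint16_t v) { icr_regs[owner[irq]][irq] = v; }

/* set_affinity analogue: only record the request; the hardware is untouched. */
static void request_move(int irq) { move_requested[irq] = true; }

/* mask_ack analogue: if a move is pending, quiesce the IRQ on the old owner's
 * bank, switch the owner, then re-create the (level, detect) state there. */
static void mask_ack(int irq, int new_cpu)
{
	uint16_t tmp = icr_read(irq);

	if (!move_requested[irq]) {
		icr_write(irq, (tmp & ICR_LEVEL) | ICR_DETECT);
		return;
	}
	move_requested[irq] = false;
	icr_write(irq, tmp & ICR_LEVEL);   /* disable on the current owner */
	owner[irq] = new_cpu;              /* retarget: icr_write() now hits the new bank */
	icr_write(irq, (tmp & (ICR_LEVEL | ICR_ENABLE)) | ICR_DETECT);
}

int main(void)
{
	icr_write(3, ICR_ENABLE | ICR_LEVEL); /* IRQ 3 starts enabled on CPU 0 */
	request_move(3);                      /* userspace changes the affinity */
	mask_ack(3, 1);                       /* next ack performs the move */
	printf("irq 3 now owned by cpu %d, icr=%04x\n", owner[3], icr_read(3));
	return 0;
}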