diff options
Diffstat (limited to 'arch/sh/kernel/irq.c')
-rw-r--r-- | arch/sh/kernel/irq.c | 98 |
1 file changed, 23 insertions, 75 deletions
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 257de1f0692b..a3ee91971129 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/seq_file.h> | 13 | #include <linux/seq_file.h> |
14 | #include <linux/ftrace.h> | 14 | #include <linux/ftrace.h> |
15 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
16 | #include <linux/ratelimit.h> | ||
16 | #include <asm/processor.h> | 17 | #include <asm/processor.h> |
17 | #include <asm/machvec.h> | 18 | #include <asm/machvec.h> |
18 | #include <asm/uaccess.h> | 19 | #include <asm/uaccess.h> |
@@ -34,9 +35,9 @@ void ack_bad_irq(unsigned int irq) | |||
34 | 35 | ||
35 | #if defined(CONFIG_PROC_FS) | 36 | #if defined(CONFIG_PROC_FS) |
36 | /* | 37 | /* |
37 | * /proc/interrupts printing: | 38 | * /proc/interrupts printing for arch specific interrupts |
38 | */ | 39 | */ |
39 | static int show_other_interrupts(struct seq_file *p, int prec) | 40 | int arch_show_interrupts(struct seq_file *p, int prec) |
40 | { | 41 | { |
41 | int j; | 42 | int j; |
42 | 43 | ||
@@ -49,58 +50,6 @@ static int show_other_interrupts(struct seq_file *p, int prec) | |||
49 | 50 | ||
50 | return 0; | 51 | return 0; |
51 | } | 52 | } |
52 | |||
53 | int show_interrupts(struct seq_file *p, void *v) | ||
54 | { | ||
55 | unsigned long flags, any_count = 0; | ||
56 | int i = *(loff_t *)v, j, prec; | ||
57 | struct irqaction *action; | ||
58 | struct irq_desc *desc; | ||
59 | |||
60 | if (i > nr_irqs) | ||
61 | return 0; | ||
62 | |||
63 | for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec) | ||
64 | j *= 10; | ||
65 | |||
66 | if (i == nr_irqs) | ||
67 | return show_other_interrupts(p, prec); | ||
68 | |||
69 | if (i == 0) { | ||
70 | seq_printf(p, "%*s", prec + 8, ""); | ||
71 | for_each_online_cpu(j) | ||
72 | seq_printf(p, "CPU%-8d", j); | ||
73 | seq_putc(p, '\n'); | ||
74 | } | ||
75 | |||
76 | desc = irq_to_desc(i); | ||
77 | if (!desc) | ||
78 | return 0; | ||
79 | |||
80 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
81 | for_each_online_cpu(j) | ||
82 | any_count |= kstat_irqs_cpu(i, j); | ||
83 | action = desc->action; | ||
84 | if (!action && !any_count) | ||
85 | goto out; | ||
86 | |||
87 | seq_printf(p, "%*d: ", prec, i); | ||
88 | for_each_online_cpu(j) | ||
89 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | ||
90 | seq_printf(p, " %14s", desc->chip->name); | ||
91 | seq_printf(p, "-%-8s", desc->name); | ||
92 | |||
93 | if (action) { | ||
94 | seq_printf(p, " %s", action->name); | ||
95 | while ((action = action->next) != NULL) | ||
96 | seq_printf(p, ", %s", action->name); | ||
97 | } | ||
98 | |||
99 | seq_putc(p, '\n'); | ||
100 | out: | ||
101 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
102 | return 0; | ||
103 | } | ||
104 | #endif | 53 | #endif |
105 | 54 | ||
106 | #ifdef CONFIG_IRQSTACKS | 55 | #ifdef CONFIG_IRQSTACKS |
@@ -235,7 +184,7 @@ asmlinkage void do_softirq(void) | |||
235 | ); | 184 | ); |
236 | 185 | ||
237 | /* | 186 | /* |
238 | * Shouldnt happen, we returned above if in_interrupt(): | 187 | * Shouldn't happen, we returned above if in_interrupt(): |
239 | */ | 188 | */ |
240 | WARN_ON_ONCE(softirq_count()); | 189 | WARN_ON_ONCE(softirq_count()); |
241 | } | 190 | } |
@@ -273,16 +222,12 @@ void __init init_IRQ(void) | |||
273 | { | 222 | { |
274 | plat_irq_setup(); | 223 | plat_irq_setup(); |
275 | 224 | ||
276 | /* | ||
277 | * Pin any of the legacy IRQ vectors that haven't already been | ||
278 | * grabbed by the platform | ||
279 | */ | ||
280 | reserve_irq_legacy(); | ||
281 | |||
282 | /* Perform the machine specific initialisation */ | 225 | /* Perform the machine specific initialisation */ |
283 | if (sh_mv.mv_init_irq) | 226 | if (sh_mv.mv_init_irq) |
284 | sh_mv.mv_init_irq(); | 227 | sh_mv.mv_init_irq(); |
285 | 228 | ||
229 | intc_finalize(); | ||
230 | |||
286 | irq_ctx_init(smp_processor_id()); | 231 | irq_ctx_init(smp_processor_id()); |
287 | } | 232 | } |
288 | 233 | ||
@@ -290,18 +235,21 @@ void __init init_IRQ(void) | |||
290 | int __init arch_probe_nr_irqs(void) | 235 | int __init arch_probe_nr_irqs(void) |
291 | { | 236 | { |
292 | nr_irqs = sh_mv.mv_nr_irqs; | 237 | nr_irqs = sh_mv.mv_nr_irqs; |
293 | return 0; | 238 | return NR_IRQS_LEGACY; |
294 | } | 239 | } |
295 | #endif | 240 | #endif |
296 | 241 | ||
297 | #ifdef CONFIG_HOTPLUG_CPU | 242 | #ifdef CONFIG_HOTPLUG_CPU |
298 | static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu) | 243 | static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu) |
299 | { | 244 | { |
245 | struct irq_desc *desc = irq_to_desc(irq); | ||
246 | struct irq_chip *chip = irq_data_get_irq_chip(data); | ||
247 | |||
300 | printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n", | 248 | printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n", |
301 | irq, desc->node, cpu); | 249 | irq, data->node, cpu); |
302 | 250 | ||
303 | raw_spin_lock_irq(&desc->lock); | 251 | raw_spin_lock_irq(&desc->lock); |
304 | desc->chip->set_affinity(irq, cpumask_of(cpu)); | 252 | chip->irq_set_affinity(data, cpumask_of(cpu), false); |
305 | raw_spin_unlock_irq(&desc->lock); | 253 | raw_spin_unlock_irq(&desc->lock); |
306 | } | 254 | } |
307 | 255 | ||
@@ -312,24 +260,24 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu) | |||
312 | */ | 260 | */ |
313 | void migrate_irqs(void) | 261 | void migrate_irqs(void) |
314 | { | 262 | { |
315 | struct irq_desc *desc; | ||
316 | unsigned int irq, cpu = smp_processor_id(); | 263 | unsigned int irq, cpu = smp_processor_id(); |
317 | 264 | ||
318 | for_each_irq_desc(irq, desc) { | 265 | for_each_active_irq(irq) { |
319 | if (desc->node == cpu) { | 266 | struct irq_data *data = irq_get_irq_data(irq); |
320 | unsigned int newcpu = cpumask_any_and(desc->affinity, | 267 | |
268 | if (data->node == cpu) { | ||
269 | unsigned int newcpu = cpumask_any_and(data->affinity, | ||
321 | cpu_online_mask); | 270 | cpu_online_mask); |
322 | if (newcpu >= nr_cpu_ids) { | 271 | if (newcpu >= nr_cpu_ids) { |
323 | if (printk_ratelimit()) | 272 | pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n", |
324 | printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n", | 273 | irq, cpu); |
325 | irq, cpu); | ||
326 | 274 | ||
327 | cpumask_setall(desc->affinity); | 275 | cpumask_setall(data->affinity); |
328 | newcpu = cpumask_any_and(desc->affinity, | 276 | newcpu = cpumask_any_and(data->affinity, |
329 | cpu_online_mask); | 277 | cpu_online_mask); |
330 | } | 278 | } |
331 | 279 | ||
332 | route_irq(desc, irq, newcpu); | 280 | route_irq(data, irq, newcpu); |
333 | } | 281 | } |
334 | } | 282 | } |
335 | } | 283 | } |