diff options
Diffstat (limited to 'arch/sh/kernel/irq.c')
-rw-r--r-- | arch/sh/kernel/irq.c | 37 |
1 file changed, 20 insertions, 17 deletions
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 9dc447db8a44..68ecbe6c881a 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c | |||
@@ -56,6 +56,8 @@ int show_interrupts(struct seq_file *p, void *v) | |||
56 | int i = *(loff_t *)v, j, prec; | 56 | int i = *(loff_t *)v, j, prec; |
57 | struct irqaction *action; | 57 | struct irqaction *action; |
58 | struct irq_desc *desc; | 58 | struct irq_desc *desc; |
59 | struct irq_data *data; | ||
60 | struct irq_chip *chip; | ||
59 | 61 | ||
60 | if (i > nr_irqs) | 62 | if (i > nr_irqs) |
61 | return 0; | 63 | return 0; |
@@ -77,6 +79,9 @@ int show_interrupts(struct seq_file *p, void *v) | |||
77 | if (!desc) | 79 | if (!desc) |
78 | return 0; | 80 | return 0; |
79 | 81 | ||
82 | data = irq_get_irq_data(i); | ||
83 | chip = irq_data_get_irq_chip(data); | ||
84 | |||
80 | raw_spin_lock_irqsave(&desc->lock, flags); | 85 | raw_spin_lock_irqsave(&desc->lock, flags); |
81 | for_each_online_cpu(j) | 86 | for_each_online_cpu(j) |
82 | any_count |= kstat_irqs_cpu(i, j); | 87 | any_count |= kstat_irqs_cpu(i, j); |
@@ -87,7 +92,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
87 | seq_printf(p, "%*d: ", prec, i); | 92 | seq_printf(p, "%*d: ", prec, i); |
88 | for_each_online_cpu(j) | 93 | for_each_online_cpu(j) |
89 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | 94 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); |
90 | seq_printf(p, " %14s", desc->chip->name); | 95 | seq_printf(p, " %14s", chip->name); |
91 | seq_printf(p, "-%-8s", desc->name); | 96 | seq_printf(p, "-%-8s", desc->name); |
92 | 97 | ||
93 | if (action) { | 98 | if (action) { |
@@ -273,12 +278,6 @@ void __init init_IRQ(void) | |||
273 | { | 278 | { |
274 | plat_irq_setup(); | 279 | plat_irq_setup(); |
275 | 280 | ||
276 | /* | ||
277 | * Pin any of the legacy IRQ vectors that haven't already been | ||
278 | * grabbed by the platform | ||
279 | */ | ||
280 | reserve_irq_legacy(); | ||
281 | |||
282 | /* Perform the machine specific initialisation */ | 281 | /* Perform the machine specific initialisation */ |
283 | if (sh_mv.mv_init_irq) | 282 | if (sh_mv.mv_init_irq) |
284 | sh_mv.mv_init_irq(); | 283 | sh_mv.mv_init_irq(); |
@@ -297,13 +296,16 @@ int __init arch_probe_nr_irqs(void) | |||
297 | #endif | 296 | #endif |
298 | 297 | ||
299 | #ifdef CONFIG_HOTPLUG_CPU | 298 | #ifdef CONFIG_HOTPLUG_CPU |
300 | static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu) | 299 | static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu) |
301 | { | 300 | { |
301 | struct irq_desc *desc = irq_to_desc(irq); | ||
302 | struct irq_chip *chip = irq_data_get_irq_chip(data); | ||
303 | |||
302 | printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n", | 304 | printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n", |
303 | irq, desc->node, cpu); | 305 | irq, data->node, cpu); |
304 | 306 | ||
305 | raw_spin_lock_irq(&desc->lock); | 307 | raw_spin_lock_irq(&desc->lock); |
306 | desc->chip->set_affinity(irq, cpumask_of(cpu)); | 308 | chip->irq_set_affinity(data, cpumask_of(cpu), false); |
307 | raw_spin_unlock_irq(&desc->lock); | 309 | raw_spin_unlock_irq(&desc->lock); |
308 | } | 310 | } |
309 | 311 | ||
@@ -314,24 +316,25 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu) | |||
314 | */ | 316 | */ |
315 | void migrate_irqs(void) | 317 | void migrate_irqs(void) |
316 | { | 318 | { |
317 | struct irq_desc *desc; | ||
318 | unsigned int irq, cpu = smp_processor_id(); | 319 | unsigned int irq, cpu = smp_processor_id(); |
319 | 320 | ||
320 | for_each_irq_desc(irq, desc) { | 321 | for_each_active_irq(irq) { |
321 | if (desc->node == cpu) { | 322 | struct irq_data *data = irq_get_irq_data(irq); |
322 | unsigned int newcpu = cpumask_any_and(desc->affinity, | 323 | |
324 | if (data->node == cpu) { | ||
325 | unsigned int newcpu = cpumask_any_and(data->affinity, | ||
323 | cpu_online_mask); | 326 | cpu_online_mask); |
324 | if (newcpu >= nr_cpu_ids) { | 327 | if (newcpu >= nr_cpu_ids) { |
325 | if (printk_ratelimit()) | 328 | if (printk_ratelimit()) |
326 | printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n", | 329 | printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n", |
327 | irq, cpu); | 330 | irq, cpu); |
328 | 331 | ||
329 | cpumask_setall(desc->affinity); | 332 | cpumask_setall(data->affinity); |
330 | newcpu = cpumask_any_and(desc->affinity, | 333 | newcpu = cpumask_any_and(data->affinity, |
331 | cpu_online_mask); | 334 | cpu_online_mask); |
332 | } | 335 | } |
333 | 336 | ||
334 | route_irq(desc, irq, newcpu); | 337 | route_irq(data, irq, newcpu); |
335 | } | 338 | } |
336 | } | 339 | } |
337 | } | 340 | } |