Diffstat (limited to 'arch/mn10300/kernel/irq.c'):
 arch/mn10300/kernel/irq.c | 85 ++++++------------------------
 1 file changed, 18 insertions(+), 67 deletions(-)
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index 5f7fc3eb45e6..86af0d7d0771 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -263,7 +263,7 @@ void set_intr_level(int irq, u16 level)
  */
 void mn10300_set_lateack_irq_type(int irq)
 {
-	set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level,
+	irq_set_chip_and_handler(irq, &mn10300_cpu_pic_level,
 				 handle_level_irq);
 }
 
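
This hunk and the next are part of the tree-wide genirq namespace cleanup in which the set_irq_* setters became irq_set_*. As a rough sketch of the new-style registration, with everything below (chip name, stub callbacks, IRQ range) invented for illustration rather than taken from this file:

#include <linux/init.h>
#include <linux/irq.h>

/* Hypothetical no-op callbacks; a real chip would poke controller registers. */
static void example_mask(struct irq_data *d) { }
static void example_unmask(struct irq_data *d) { }

static struct irq_chip example_pic = {
	.name		= "example-pic",
	.irq_mask	= example_mask,
	.irq_unmask	= example_unmask,
};

static void __init example_init_irq(void)
{
	unsigned int irq;

	/* irq_set_chip_and_handler() is the renamed set_irq_chip_and_handler() */
	for (irq = 0; irq < 8; irq++)
		irq_set_chip_and_handler(irq, &example_pic, handle_level_irq);
}
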
@@ -275,12 +275,12 @@ void __init init_IRQ(void)
 	int irq;
 
 	for (irq = 0; irq < NR_IRQS; irq++)
-		if (get_irq_chip(irq) == &no_irq_chip)
+		if (irq_get_chip(irq) == &no_irq_chip)
 			/* due to the PIC latching interrupt requests, even
 			 * when the IRQ is disabled, IRQ_PENDING is superfluous
 			 * and we can use handle_level_irq() for edge-triggered
 			 * interrupts */
-			set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge,
+			irq_set_chip_and_handler(irq, &mn10300_cpu_pic_edge,
 						 handle_level_irq);
 
 	unit_init_IRQ();
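
irq_get_chip() likewise replaces get_irq_chip(); descriptors still bound to the generic no_irq_chip are the ones no controller has claimed, which is what this loop keys on before installing the edge chip. The same probe as a standalone helper, a minimal sketch (the helper name is made up):

#include <linux/irq.h>

/* Hypothetical helper: true if no irq_chip has claimed this IRQ yet. */
static bool example_irq_unclaimed(unsigned int irq)
{
	return irq_get_chip(irq) == &no_irq_chip;
}
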
@@ -335,91 +335,42 @@ asmlinkage void do_IRQ(void)
 /*
  * Display interrupt management information through /proc/interrupts
  */
-int show_interrupts(struct seq_file *p, void *v)
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
-	int i = *(loff_t *) v, j, cpu;
-	struct irqaction *action;
-	unsigned long flags;
-
-	switch (i) {
-	/* display column title bar naming CPUs */
-	case 0:
-		seq_printf(p, " ");
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d ", j);
-		seq_putc(p, '\n');
-		break;
-
-	/* display information rows, one per active CPU */
-	case 1 ... NR_IRQS - 1:
-		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
-
-		action = irq_desc[i].action;
-		if (action) {
-			seq_printf(p, "%3d: ", i);
-			for_each_present_cpu(cpu)
-				seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
-
-			if (i < NR_CPU_IRQS)
-				seq_printf(p, " %14s.%u",
-					   irq_desc[i].irq_data.chip->name,
-					   (GxICR(i) & GxICR_LEVEL) >>
-					   GxICR_LEVEL_SHIFT);
-			else
-				seq_printf(p, " %14s",
-					   irq_desc[i].irq_data.chip->name);
-
-			seq_printf(p, " %s", action->name);
-
-			for (action = action->next;
-			     action;
-			     action = action->next)
-				seq_printf(p, ", %s", action->name);
-
-			seq_putc(p, '\n');
-		}
-
-		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-		break;
-
-	/* polish off with NMI and error counters */
-	case NR_IRQS:
 #ifdef CONFIG_MN10300_WD_TIMER
-		seq_printf(p, "NMI: ");
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", nmi_count(j));
-		seq_putc(p, '\n');
-#endif
+	int j;
 
-		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-		break;
-	}
+	seq_printf(p, "%*s: ", prec, "NMI");
+	for (j = 0; j < NR_CPUS; j++)
+		if (cpu_online(j))
+			seq_printf(p, "%10u ", nmi_count(j));
+	seq_putc(p, '\n');
+#endif
 
+	seq_printf(p, "%*s: ", prec, "ERR");
+	seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
 	return 0;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
 void migrate_irqs(void)
 {
-	irq_desc_t *desc;
 	int irq;
 	unsigned int self, new;
 	unsigned long flags;
 
 	self = smp_processor_id();
 	for (irq = 0; irq < NR_IRQS; irq++) {
-		desc = irq_desc + irq;
+		struct irq_data *data = irq_get_irq_data(irq);
 
-		if (desc->status == IRQ_PER_CPU)
+		if (irqd_is_per_cpu(data))
 			continue;
 
-		if (cpu_isset(self, irq_desc[irq].affinity) &&
+		if (cpu_isset(self, data->affinity) &&
 		    !cpus_intersects(irq_affinity[irq], cpu_online_map)) {
 			int cpu_id;
 			cpu_id = first_cpu(cpu_online_map);
-			cpu_set(cpu_id, irq_desc[irq].affinity);
+			cpu_set(cpu_id, data->affinity);
 		}
 		/* We need to operate irq_affinity_online atomically. */
 		arch_local_cli_save(flags);
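
With this change, show_interrupts() itself comes from the generic implementation in kernel/irq/proc.c (GENERIC_IRQ_SHOW): the core prints the per-IRQ rows, and arch_show_interrupts() is only a hook for extra architecture counters, with prec giving the width of the IRQ-number column so the added rows stay aligned. A minimal sketch of such a hook, with the "SPU" row and its counter invented for illustration:

#include <linux/interrupt.h>
#include <linux/seq_file.h>

static unsigned int example_spurious_count;	/* illustrative counter */

int arch_show_interrupts(struct seq_file *p, int prec)
{
	/* "%*s" pads the label to the generic IRQ-number column width */
	seq_printf(p, "%*s: %10u\n", prec, "SPU", example_spurious_count);
	return 0;
}
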
@@ -430,7 +381,7 @@ void migrate_irqs(void)
 			GxICR(irq) = x & GxICR_LEVEL;
 			tmp = GxICR(irq);
 
-			new = any_online_cpu(irq_desc[irq].affinity);
+			new = any_online_cpu(data->affinity);
 			irq_affinity_online[irq] = new;
 
 			CROSS_GxICR(irq, new) =
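
The migrate_irqs() hunks swap direct irq_desc[] access for the irq_data accessors: irq_get_irq_data() fetches the per-IRQ data, and irqd_is_per_cpu() replaces the open-coded exact-equality test against desc->status (a flag-bit comparison the accessor makes robust). A sketch of that access pattern in isolation; the walking helper below is invented, not part of the patch:

#include <linux/irq.h>

/* Hypothetical walk: count IRQs that could be migrated off a dying CPU. */
static unsigned int example_count_migratable(void)
{
	unsigned int irq, n = 0;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_data *data = irq_get_irq_data(irq);

		/* per-CPU IRQs must stay put; anything else may move */
		if (data && !irqd_is_per_cpu(data))
			n++;
	}
	return n;
}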