author		Thomas Gleixner <tglx@linutronix.de>	2011-03-24 04:03:45 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2011-03-29 08:48:14 -0400
commit		fcd8d4f498698265fc0fba1dd376303caea71de4 (patch)
tree		e1c8c941def2021d05ced3325c675553e51b90cf /arch/sparc/kernel/irq_64.c
parent		fcb8918fd242f39496090dbbd6789ab24098295b (diff)
sparc: Use the new genirq functionality
Make use of the new features in genirq:
1) Set the chip flag IRQCHIP_EOI_IF_HANDLED, which ensures in the
core code that irq_eoi() is only called when the interrupt was
handled. That removes the extra status check in the callback.
2) Use the preflow handler, which is called from the fasteoi core code
before the device handler. That avoids another status check and the
open coded handler redirection (a simplified sketch of the resulting flow follows below).
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: sparclinux@vger.kernel.org
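For readers who have not followed the genirq rework, the stand-alone C sketch below models the control flow these two features give the fasteoi core: the preflow handler runs before the device handler, and with IRQCHIP_EOI_IF_HANDLED the core skips irq_eoi() for interrupts that were not actually handled (disabled, or without an action). The types, helper names and flag value in the sketch (model_chip, model_desc, model_handle_fasteoi_irq) are made up for illustration and are not the kernel's definitions; only the decision logic is meant to match.

/*
 * Stand-alone model of the fasteoi flow the commit relies on. Struct and
 * function names and the flag value are invented for illustration; only
 * the control flow mirrors the genirq behaviour described above.
 */
#include <stdbool.h>
#include <stdio.h>

#define IRQCHIP_EOI_IF_HANDLED	0x1	/* illustrative value, not the kernel's */

struct model_chip {
	unsigned int flags;
	void (*irq_eoi)(unsigned int irq);
};

struct model_desc {
	unsigned int irq;
	bool disabled;
	struct model_chip *chip;
	void (*preflow_handler)(unsigned int irq);	/* optional, per-irq */
	void (*action)(unsigned int irq);		/* device handler */
};

/* Core flow: preflow handler -> device handler -> conditional EOI. */
static void model_handle_fasteoi_irq(struct model_desc *desc)
{
	if (desc->disabled || !desc->action) {
		/* Not handled: EOI only if the chip did not opt out. */
		if (!(desc->chip->flags & IRQCHIP_EOI_IF_HANDLED))
			desc->chip->irq_eoi(desc->irq);
		return;
	}

	if (desc->preflow_handler)
		desc->preflow_handler(desc->irq);	/* e.g. sparc's pre_handler */

	desc->action(desc->irq);
	desc->chip->irq_eoi(desc->irq);			/* handled: always EOI */
}

static void demo_eoi(unsigned int irq)     { printf("eoi(%u)\n", irq); }
static void demo_action(unsigned int irq)  { printf("handle(%u)\n", irq); }
static void demo_preflow(unsigned int irq) { printf("preflow(%u)\n", irq); }

int main(void)
{
	struct model_chip chip = {
		.flags   = IRQCHIP_EOI_IF_HANDLED,
		.irq_eoi = demo_eoi,
	};
	struct model_desc desc = {
		.irq             = 7,
		.chip            = &chip,
		.preflow_handler = demo_preflow,
		.action          = demo_action,
	};

	model_handle_fasteoi_irq(&desc);	/* prints preflow, handle, eoi */

	desc.disabled = true;
	model_handle_fasteoi_irq(&desc);	/* prints nothing: no EOI when unhandled */
	return 0;
}

In the diff below the flag simply goes into each irq_chip's .flags field and the sparc pre_handler is hooked up through __irq_set_preflow_handler(), which is what lets the open coded handle_fasteoi_irq() call and the duplicated IRQ_DISABLED/IRQ_INPROGRESS checks go away.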
Diffstat (limited to 'arch/sparc/kernel/irq_64.c')
-rw-r--r--	arch/sparc/kernel/irq_64.c	32
1 file changed, 8 insertions(+), 24 deletions(-)
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index eb16e3b8a2dd..3c8b2666c325 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -344,10 +344,6 @@ static void sun4u_irq_disable(struct irq_data *data)
 static void sun4u_irq_eoi(struct irq_data *data)
 {
 	struct irq_handler_data *handler_data = data->handler_data;
-	struct irq_desc *desc = irq_desc + data->irq;
-
-	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		return;
 
 	if (likely(handler_data))
 		upa_writeq(ICLR_IDLE, handler_data->iclr);
@@ -402,12 +398,8 @@ static void sun4v_irq_disable(struct irq_data *data)
 static void sun4v_irq_eoi(struct irq_data *data)
 {
 	unsigned int ino = irq_table[data->irq].dev_ino;
-	struct irq_desc *desc = irq_desc + data->irq;
 	int err;
 
-	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		return;
-
 	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
 	if (err != HV_EOK)
 		printk(KERN_ERR "sun4v_intr_setstate(%x): "
@@ -481,13 +473,9 @@ static void sun4v_virq_disable(struct irq_data *data)
 
 static void sun4v_virq_eoi(struct irq_data *data)
 {
-	struct irq_desc *desc = irq_desc + data->irq;
 	unsigned long dev_handle, dev_ino;
 	int err;
 
-	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		return;
-
 	dev_handle = irq_table[data->irq].dev_handle;
 	dev_ino = irq_table[data->irq].dev_ino;
 
@@ -505,6 +493,7 @@ static struct irq_chip sun4u_irq = {
 	.irq_disable		= sun4u_irq_disable,
 	.irq_eoi		= sun4u_irq_eoi,
 	.irq_set_affinity	= sun4u_set_affinity,
+	.flags			= IRQCHIP_EOI_IF_HANDLED,
 };
 
 static struct irq_chip sun4v_irq = {
@@ -513,6 +502,7 @@ static struct irq_chip sun4v_irq = {
 	.irq_disable		= sun4v_irq_disable,
 	.irq_eoi		= sun4v_irq_eoi,
 	.irq_set_affinity	= sun4v_set_affinity,
+	.flags			= IRQCHIP_EOI_IF_HANDLED,
 };
 
 static struct irq_chip sun4v_virq = {
@@ -521,16 +511,15 @@ static struct irq_chip sun4v_virq = {
 	.irq_disable		= sun4v_virq_disable,
 	.irq_eoi		= sun4v_virq_eoi,
 	.irq_set_affinity	= sun4v_virt_set_affinity,
+	.flags			= IRQCHIP_EOI_IF_HANDLED,
 };
 
-static void pre_flow_handler(unsigned int irq, struct irq_desc *desc)
+static void pre_flow_handler(struct irq_data *d)
 {
-	struct irq_handler_data *handler_data = get_irq_data(irq);
-	unsigned int ino = irq_table[irq].dev_ino;
+	struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d);
+	unsigned int ino = irq_table[d->irq].dev_ino;
 
 	handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);
-
-	handle_fasteoi_irq(irq, desc);
 }
 
 void irq_install_pre_handler(int irq,
@@ -538,13 +527,12 @@ void irq_install_pre_handler(int irq,
 			     void *arg1, void *arg2)
 {
 	struct irq_handler_data *handler_data = get_irq_data(irq);
-	struct irq_desc *desc = irq_desc + irq;
 
 	handler_data->pre_handler = func;
 	handler_data->arg1 = arg1;
 	handler_data->arg2 = arg2;
 
-	desc->handle_irq = pre_flow_handler;
+	__irq_set_preflow_handler(irq, pre_flow_handler);
 }
 
 unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
@@ -734,7 +722,6 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
 	orig_sp = set_hardirq_stack();
 
 	while (bucket_pa) {
-		struct irq_desc *desc;
 		unsigned long next_pa;
 		unsigned int irq;
 
@@ -742,10 +729,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
 		irq = bucket_get_irq(bucket_pa);
 		bucket_clear_chain_pa(bucket_pa);
 
-		desc = irq_desc + irq;
-
-		if (!(desc->status & IRQ_DISABLED))
-			desc->handle_irq(irq, desc);
+		generic_handle_irq(irq);
 
 		bucket_pa = next_pa;
 	}