author     Rasesh Mody <rmody@brocade.com>        2010-10-05 11:46:04 -0400
committer  David S. Miller <davem@davemloft.net>  2010-10-05 23:39:38 -0400
commit     e2fa6f2ef6e48666b78d4b0f00914b06bb19d298 (patch)
tree       84edb88bf9edc0b017abe28e457858099dba30f7 /drivers/net/bna
parent     ebc0ffae5dfb4447e0a431ffe7fe1d467c48bbb9 (diff)
bna: fix interrupt handling
This fix handles the case when the IRQ handler is called (for shared
IRQs) even before the driver is ready to handle interrupts.
Signed-off-by: Debashis Dutt <ddutt@brocade.com>
Signed-off-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
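
Why the ordering matters: with IRQF_SHARED, request_irq() may invoke the
handler immediately (another device on the same line can already be raising
interrupts), before the driver has finished initializing. Below is a minimal
sketch of the guard-flag pattern the patch applies; all identifiers here
(my_dev, my_isr, MY_IRQ_DISABLED) are hypothetical stand-ins, not bna code.

/*
 * Minimal sketch of the guard-flag pattern (hypothetical identifiers,
 * not the bna driver's actual code).
 */
#include <linux/interrupt.h>
#include <linux/bitops.h>

#define MY_IRQ_DISABLED	0	/* bit index in run_flags */

struct my_dev {
	unsigned long run_flags;
};

static irqreturn_t my_isr(int irq, void *data)
{
	struct my_dev *dev = data;

	/* A shared line can fire before this driver is initialized;
	 * decline the interrupt until the flag is cleared. */
	if (unlikely(test_bit(MY_IRQ_DISABLED, &dev->run_flags)))
		return IRQ_NONE;

	/* ... normal interrupt processing ... */
	return IRQ_HANDLED;
}

static int my_irq_setup(struct my_dev *dev, int irq)
{
	/* Set the flag *before* request_irq(): with IRQF_SHARED the
	 * handler may be called from here on, even during probe. */
	set_bit(MY_IRQ_DISABLED, &dev->run_flags);

	return request_irq(irq, my_isr, IRQF_SHARED, "my_dev", dev);
}

static void my_irq_ready(struct my_dev *dev)
{
	/* Initialization complete: allow the handler to run. */
	clear_bit(MY_IRQ_DISABLED, &dev->run_flags);
}

This is the same ordering the patch enforces in bnad_mbox_irq_alloc(): set
BNAD_RF_MBOX_IRQ_DISABLED first, then call request_irq(), and only clear the
flag later via bnad_enable_mbox_irq() once the driver can service the mailbox.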
Diffstat (limited to 'drivers/net/bna')
-rw-r--r--  drivers/net/bna/bnad.c | 48
1 file changed, 27 insertions(+), 21 deletions(-)
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index e380c0e88f4f..7210c34d2d5b 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -564,9 +564,11 @@ bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
 static void
 bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
 {
-	spin_lock_irq(&bnad->bna_lock); /* Because of polling context */
+	unsigned long flags;
+
+	spin_lock_irqsave(&bnad->bna_lock, flags); /* Because of polling context */
 	bnad_enable_rx_irq_unsafe(ccb);
-	spin_unlock_irq(&bnad->bna_lock);
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
 static void
@@ -599,7 +601,7 @@ static irqreturn_t
 bnad_msix_mbox_handler(int irq, void *data)
 {
 	u32 intr_status;
-	unsigned long  flags;
+	unsigned long flags;
 	struct net_device *netdev = data;
 	struct bnad *bnad;
 
@@ -630,13 +632,15 @@ bnad_isr(int irq, void *data)
 	struct bnad_rx_info *rx_info;
 	struct bnad_rx_ctrl *rx_ctrl;
 
-	spin_lock_irqsave(&bnad->bna_lock, flags);
+	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
+		return IRQ_NONE;
 
 	bna_intr_status_get(&bnad->bna, intr_status);
-	if (!intr_status) {
-		spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+	if (unlikely(!intr_status))
 		return IRQ_NONE;
-	}
+
+	spin_lock_irqsave(&bnad->bna_lock, flags);
 
 	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
 		bna_mbox_handler(&bnad->bna, intr_status);
@@ -672,11 +676,10 @@ bnad_enable_mbox_irq(struct bnad *bnad)
 {
 	int irq = BNAD_GET_MBOX_IRQ(bnad);
 
-	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
-		return;
-
 	if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
-		enable_irq(irq);
+		if (bnad->cfg_flags & BNAD_CF_MSIX)
+			enable_irq(irq);
+
 	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
 }
 
@@ -689,11 +692,11 @@ bnad_disable_mbox_irq(struct bnad *bnad)
 {
 	int irq = BNAD_GET_MBOX_IRQ(bnad);
 
-	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
-		return;
 
 	if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
-		disable_irq_nosync(irq);
+		if (bnad->cfg_flags & BNAD_CF_MSIX)
+			disable_irq_nosync(irq);
+
 	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
 }
 
@@ -1045,14 +1048,12 @@ bnad_mbox_irq_free(struct bnad *bnad,
 		return;
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-
 	bnad_disable_mbox_irq(bnad);
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
 	irq = BNAD_GET_MBOX_IRQ(bnad);
 	free_irq(irq, bnad->netdev);
 
-	spin_unlock_irqrestore(&bnad->bna_lock, flags);
-
 	kfree(intr_info->idl);
 }
 
@@ -1094,8 +1095,15 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
 
 	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
 
+	/*
+	 * Set the Mbox IRQ disable flag, so that the IRQ handler
+	 * called from request_irq() for SHARED IRQs do not execute
+	 */
+	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
+
 	err = request_irq(irq, irq_handler, flags,
 			  bnad->mbox_irq_name, bnad->netdev);
+
 	if (err) {
 		kfree(intr_info->idl);
 		intr_info->idl = NULL;
@@ -1103,7 +1111,8 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
 	}
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	bnad_disable_mbox_irq(bnad);
+	if (bnad->cfg_flags & BNAD_CF_MSIX)
+		disable_irq_nosync(irq);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 	return 0;
 }
@@ -1485,7 +1494,6 @@ bnad_stats_timer_start(struct bnad *bnad)
 			jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
 	}
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
-
 }
 
 /*
@@ -2170,7 +2178,6 @@ bnad_device_disable(struct bnad *bnad)
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
 	wait_for_completion(&bnad->bnad_completions.ioc_comp);
-
 }
 
 static int
@@ -3108,7 +3115,6 @@ bnad_pci_probe(struct pci_dev *pdev,
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
-
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
 	bnad->stats.bna_stats = &bna->stats;
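
A note on the first hunk: spin_unlock_irq() re-enables local interrupts
unconditionally, which is only correct if the function is guaranteed to be
entered with interrupts enabled. Since bnad_enable_rx_irq() can be reached
from a polling context where that guarantee does not hold (as its comment
says), the patch switches to the save/restore variants, which put the
caller's interrupt state back exactly as it was found. A minimal sketch of
the difference, with hypothetical names (my_lock, my_op_*):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);

/* Correct only when entered with interrupts enabled:
 * spin_unlock_irq() turns them back on unconditionally. */
static void my_op_irqs_known_on(void)
{
	spin_lock_irq(&my_lock);
	/* ... critical section ... */
	spin_unlock_irq(&my_lock);
}

/* Safe from any context: the previous interrupt state is
 * saved in 'flags' and restored exactly as it was. */
static void my_op_any_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&my_lock, flags);
}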