author		Ron Mercer <ron.mercer@qlogic.com>	2008-10-20 13:30:26 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-10-22 07:00:49 -0400
commit		bb0d215c8f970345746129d4c110159b099e032f (patch)
tree		4cbfa3aacb56012411b1a790fb889d5a1bc793f2
parent		b891a9023bc4fc639b31c234a705e7e51104cf22 (diff)
qlge: Fix MSI/legacy single interrupt bug.
The chip can issue spurious interrupts for single interrupt modes. We use
disable to clear the condition and allow processing to continue. The
legacy-specific code is also removed, since the same handling is now needed
for MSI single-interrupt mode.

Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
-rw-r--r--	drivers/net/qlge/qlge.h      |  5
-rw-r--r--	drivers/net/qlge/qlge_main.c | 89
2 files changed, 47 insertions(+), 47 deletions(-)
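
For orientation before the diff itself: the fix replaces the legacy-only legacy_lock/legacy_check machinery with a reference count (irq_cnt) shared by MSI single-interrupt and legacy modes. Each disable bumps the count, and ql_enable_completion_interrupt() only rewrites INTR_EN once the count drops back to zero, with qdev->hw_lock serializing the register access. The toy user-space model below is only a sketch of that counting behaviour; names such as model_disable() and intr_enabled are illustrative and are not part of the driver.

#include <stdatomic.h>
#include <stdio.h>

/* Toy model of the irq_cnt gating introduced by this patch: the interrupt
 * stays masked while any disable is outstanding and is only re-armed when
 * the count returns to zero.  All names here are hypothetical. */
static atomic_int irq_cnt;     /* outstanding disables */
static int intr_enabled = 1;   /* stands in for the INTR_EN register */

static void model_disable(void)
{
	/* Mirrors ql_disable_completion_interrupt(): only the first
	 * disable actually masks, then the count is incremented. */
	if (atomic_load(&irq_cnt) == 0)
		intr_enabled = 0;
	atomic_fetch_add(&irq_cnt, 1);
}

static void model_enable(void)
{
	/* Mirrors the atomic_dec_and_test() in
	 * ql_enable_completion_interrupt(): only the last enable re-arms. */
	if (atomic_fetch_sub(&irq_cnt, 1) == 1)
		intr_enabled = 1;
}

int main(void)
{
	model_disable();   /* ISR entry */
	model_disable();   /* worker/NAPI scheduled from the ISR */
	model_enable();    /* ISR exit: one user left, stays masked */
	printf("after ISR exit: enabled=%d\n", intr_enabled);  /* prints 0 */
	model_enable();    /* worker finishes: count hits zero */
	printf("after worker:   enabled=%d\n", intr_enabled);  /* prints 1 */
	return 0;
}
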
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 38116f9d4163..ba2e1c5b6bcf 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -1375,7 +1375,6 @@ struct ql_adapter {
 	spinlock_t adapter_lock;
 	spinlock_t hw_lock;
 	spinlock_t stats_lock;
-	spinlock_t legacy_lock;	/* used for maintaining legacy intr sync */
 
 	/* PCI Bus Relative Register Addresses */
 	void __iomem *reg_base;
@@ -1399,8 +1398,6 @@ struct ql_adapter {
 	struct msix_entry *msi_x_entry;
 	struct intr_context intr_context[MAX_RX_RINGS];
 
-	int (*legacy_check) (struct ql_adapter *);
-
 	int tx_ring_count;	/* One per online CPU. */
 	u32 rss_ring_first_cq_id;/* index of first inbound (rss) rx_ring */
 	u32 rss_ring_count;	/* One per online CPU. */
@@ -1502,7 +1499,7 @@ void ql_mpi_work(struct work_struct *work);
 void ql_mpi_reset_work(struct work_struct *work);
 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
 void ql_queue_asic_error(struct ql_adapter *qdev);
-void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
+u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
 void ql_set_ethtool_ops(struct net_device *ndev);
 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
 
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 4b2caa6b7ac5..b83a9c9b6a97 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -577,41 +577,53 @@ static void ql_disable_interrupts(struct ql_adapter *qdev)
  * incremented everytime we queue a worker and decremented everytime
  * a worker finishes. Once it hits zero we enable the interrupt.
  */
-void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 {
-	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags)))
-		ql_write32(qdev, INTR_EN,
-			   qdev->intr_context[intr].intr_en_mask);
-	else {
-		if (qdev->legacy_check)
-			spin_lock(&qdev->legacy_lock);
-		if (atomic_dec_and_test(&qdev->intr_context[intr].irq_cnt)) {
-			QPRINTK(qdev, INTR, ERR, "Enabling interrupt %d.\n",
-				intr);
-			ql_write32(qdev, INTR_EN,
-				   qdev->intr_context[intr].intr_en_mask);
-		} else {
-			QPRINTK(qdev, INTR, ERR,
-				"Skip enable, other queue(s) are active.\n");
-		}
-		if (qdev->legacy_check)
-			spin_unlock(&qdev->legacy_lock);
+	u32 var = 0;
+	unsigned long hw_flags = 0;
+	struct intr_context *ctx = qdev->intr_context + intr;
+
+	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
+		/* Always enable if we're MSIX multi interrupts and
+		 * it's not the default (zeroeth) interrupt.
+		 */
+		ql_write32(qdev, INTR_EN,
+			   ctx->intr_en_mask);
+		var = ql_read32(qdev, STS);
+		return var;
 	}
+
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+	if (atomic_dec_and_test(&ctx->irq_cnt)) {
+		ql_write32(qdev, INTR_EN,
+			   ctx->intr_en_mask);
+		var = ql_read32(qdev, STS);
+	}
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+	return var;
 }
 
 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 {
 	u32 var = 0;
+	unsigned long hw_flags;
+	struct intr_context *ctx;
 
-	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags)))
-		goto exit;
-	else if (!atomic_read(&qdev->intr_context[intr].irq_cnt)) {
+	/* HW disables for us if we're MSIX multi interrupts and
+	 * it's not the default (zeroeth) interrupt.
+	 */
+	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
+		return 0;
+
+	ctx = qdev->intr_context + intr;
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+	if (!atomic_read(&ctx->irq_cnt)) {
 		ql_write32(qdev, INTR_EN,
-			   qdev->intr_context[intr].intr_dis_mask);
+			   ctx->intr_dis_mask);
 		var = ql_read32(qdev, STS);
 	}
-	atomic_inc(&qdev->intr_context[intr].irq_cnt);
-exit:
+	atomic_inc(&ctx->irq_cnt);
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 	return var;
 }
 
@@ -623,7 +635,9 @@ static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
 		 * and enables only if the result is zero.
 		 * So we precharge it here.
 		 */
-		atomic_set(&qdev->intr_context[i].irq_cnt, 1);
+		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
+			i == 0))
+			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
 		ql_enable_completion_interrupt(qdev, i);
 	}
 
@@ -1725,19 +1739,6 @@ static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-/* We check here to see if we're already handling a legacy
- * interrupt. If we are, then it must belong to another
- * chip with which we're sharing the interrupt line.
- */
-int ql_legacy_check(struct ql_adapter *qdev)
-{
-	int err;
-	spin_lock(&qdev->legacy_lock);
-	err = atomic_read(&qdev->intr_context[0].irq_cnt);
-	spin_unlock(&qdev->legacy_lock);
-	return err;
-}
-
 /* This handles a fatal error, MPI activity, and the default
  * rx_ring in an MSI-X multiple vector environment.
  * In MSI/Legacy environment it also process the rest of
@@ -1752,12 +1753,15 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
 	int i;
 	int work_done = 0;
 
-	if (qdev->legacy_check && qdev->legacy_check(qdev)) {
-		QPRINTK(qdev, INTR, INFO, "Already busy, not our interrupt.\n");
-		return IRQ_NONE;	/* Not our interrupt */
+	spin_lock(&qdev->hw_lock);
+	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
+		QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
+		spin_unlock(&qdev->hw_lock);
+		return IRQ_NONE;
 	}
+	spin_unlock(&qdev->hw_lock);
 
-	var = ql_read32(qdev, STS);
+	var = ql_disable_completion_interrupt(qdev, intr_context->intr);
 
 	/*
 	 * Check for fatal error.
@@ -1823,6 +1827,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
 			}
 		}
 	}
+	ql_enable_completion_interrupt(qdev, intr_context->intr);
 	return work_done ? IRQ_HANDLED : IRQ_NONE;
 }
 
@@ -2701,8 +2706,6 @@ msi:
 		}
 	}
 	irq_type = LEG_IRQ;
-	spin_lock_init(&qdev->legacy_lock);
-	qdev->legacy_check = ql_legacy_check;
 	QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
 }
 
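
A closing note on the precharge hunk in ql_enable_all_completion_interrupts(): because the enable path only unmasks when its dec-and-test reaches zero, single-interrupt modes (and the default MSI-X vector) must start ifup with irq_cnt set to 1 so that the very first enable call actually writes INTR_EN; the other MSI-X vectors bypass the counting entirely and are enabled unconditionally. A minimal, hypothetical check of that arithmetic, in the same toy terms as the sketch above and not driver code:

#include <assert.h>
#include <stdatomic.h>

int main(void)
{
	atomic_int irq_cnt;

	/* Precharge to 1 at ifup, as the patched
	 * ql_enable_all_completion_interrupts() does for single-interrupt
	 * modes; the first enable's dec-and-test must then reach zero,
	 * i.e. it really unmasks the interrupt. */
	atomic_init(&irq_cnt, 1);
	assert(atomic_fetch_sub(&irq_cnt, 1) == 1);
	return 0;
}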