path: root/drivers/net/qla3xxx.c
author		Ron Mercer <ron.mercer@qlogic.com>	2007-03-26 16:42:59 -0400
committer	Jeff Garzik <jeff@garzik.org>	2007-03-28 02:18:50 -0400
commit		f67cac0190623a3cde4d783c7c7205691aa02cc2 (patch)
tree		b750742935b406de1505bfccdbd72a0cc291f6d6 /drivers/net/qla3xxx.c
parent		b6967eb9cbf38643fc1b5432c36f610a9c565579 (diff)
qla3xxx: bugfix: Dropping interrupt under heavy network load.
Update the rx queue pointer when exiting NAPI poll rather than at the end
of each iteration.  Remove unnecessary PCI flushes that occurred after
every write.  Now write all regs and flush once.

Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
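The pattern the patch moves to, as a rough illustration: accumulate producer-index
updates in memory, issue one wmb(), then post a single writel() at the end, instead
of writing and flushing the register on every iteration.  The sketch below is a
minimal, hypothetical example of that shape; the names (example_adapter,
example_regs, producer_index, release_cnt) are made up for illustration and are not
the driver's own, which appear in the diff that follows.

/*
 * Sketch only -- not the qla3xxx code.  The structures and fields are
 * hypothetical; writel() and wmb() are the usual kernel MMIO and barrier
 * helpers pulled in via <linux/io.h> and the arch headers it includes.
 */
#include <linux/types.h>
#include <linux/io.h>

struct example_regs {
	u32 producer_index;
};

struct example_adapter {
	struct example_regs __iomem *regs;	/* mapped device registers */
	u32 producer_index;			/* software copy of the index */
	u32 release_cnt;			/* buffers returned since last update */
	u32 num_entries;			/* ring size, for wrap-around */
};

static void example_update_prod_index(struct example_adapter *a)
{
	/* Advance only the in-memory copy inside the loop; no MMIO here. */
	while (a->release_cnt > 0) {
		if (++a->producer_index == a->num_entries)
			a->producer_index = 0;
		a->release_cnt--;
	}

	/*
	 * One barrier so the queue updates are visible before the device
	 * sees the new index, then a single register write.  Any posted-
	 * write flush (read-back) is deferred and done once, later,
	 * rather than after every write.
	 */
	wmb();
	writel(a->producer_index, &a->regs->producer_index);
}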
Diffstat (limited to 'drivers/net/qla3xxx.c')
-rwxr-xr-x	drivers/net/qla3xxx.c	71
1 file changed, 30 insertions(+), 41 deletions(-)
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 9952e3931e34..6612936306d7 100755
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -1691,6 +1691,27 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
 /*
  * Caller holds hw_lock.
  */
+static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
+{
+	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	if (qdev->small_buf_release_cnt >= 16) {
+		while (qdev->small_buf_release_cnt >= 16) {
+			qdev->small_buf_q_producer_index++;
+
+			if (qdev->small_buf_q_producer_index ==
+			    NUM_SBUFQ_ENTRIES)
+				qdev->small_buf_q_producer_index = 0;
+			qdev->small_buf_release_cnt -= 8;
+		}
+		wmb();
+		writel(qdev->small_buf_q_producer_index,
+			&port_regs->CommonRegs.rxSmallQProducerIndex);
+	}
+}
+
+/*
+ * Caller holds hw_lock.
+ */
 static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
 {
 	struct bufq_addr_element *lrg_buf_q_ele;
@@ -1732,13 +1753,10 @@ static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
 			lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
 			}
 		}
-
+		wmb();
 		qdev->lrg_buf_next_free = lrg_buf_q_ele;
-
-		ql_write_common_reg(qdev,
-				    &port_regs->CommonRegs.
-				    rxLargeQProducerIndex,
-				    qdev->lrg_buf_q_producer_index);
+		writel(qdev->lrg_buf_q_producer_index,
+			&port_regs->CommonRegs.rxLargeQProducerIndex);
 	}
 }
 
@@ -1944,16 +1962,12 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
 static int ql_tx_rx_clean(struct ql3_adapter *qdev,
 			int *tx_cleaned, int *rx_cleaned, int work_to_do)
 {
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
 	struct net_rsp_iocb *net_rsp;
 	struct net_device *ndev = qdev->ndev;
-	unsigned long hw_flags;
 	int work_done = 0;
 
-	u32 rsp_producer_index = le32_to_cpu(*(qdev->prsp_producer_index));
-
 	/* While there are entries in the completion queue. */
-	while ((rsp_producer_index !=
+	while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
 		qdev->rsp_consumer_index) && (work_done < work_to_do)) {
 
 		net_rsp = qdev->rsp_current;
@@ -2009,33 +2023,7 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
 		work_done = *tx_cleaned + *rx_cleaned;
 	}
 
-	if(work_done) {
-		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
-
-		ql_update_lrg_bufq_prod_index(qdev);
-
-		if (qdev->small_buf_release_cnt >= 16) {
-			while (qdev->small_buf_release_cnt >= 16) {
-				qdev->small_buf_q_producer_index++;
-
-				if (qdev->small_buf_q_producer_index ==
-				    NUM_SBUFQ_ENTRIES)
-					qdev->small_buf_q_producer_index = 0;
-				qdev->small_buf_release_cnt -= 8;
-			}
-
-			wmb();
-			ql_write_common_reg(qdev,
-					    &port_regs->CommonRegs.
-					    rxSmallQProducerIndex,
-					    qdev->small_buf_q_producer_index);
-
-		}
-
-		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
-	}
-
-	return *tx_cleaned + *rx_cleaned;
+	return work_done;
 }
 
 static int ql_poll(struct net_device *ndev, int *budget)
@@ -2059,9 +2047,10 @@ quit_polling:
 	netif_rx_complete(ndev);
 
 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
-	ql_write_common_reg(qdev,
-			    &port_regs->CommonRegs.rspQConsumerIndex,
-			    qdev->rsp_consumer_index);
+	ql_update_small_bufq_prod_index(qdev);
+	ql_update_lrg_bufq_prod_index(qdev);
+	writel(qdev->rsp_consumer_index,
+		&port_regs->CommonRegs.rspQConsumerIndex);
 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 
 	ql_enable_interrupts(qdev);