author		Ron Mercer <ron.mercer@qlogic.com>	2007-02-26 14:06:41 -0500
committer	Jeff Garzik <jeff@garzik.org>		2007-02-27 04:21:44 -0500
commit		63b66d12de57d8455615d9f619e18824137ed547 (patch)
tree		2230d079a80369c6e3587f63042250399ee70852 /drivers
parent		0f8ab89e825f8c9f1c84c558ad7e2e4006aee0d3 (diff)
qla3xxx: bugfix tx reset after stress conditions.
To Reproduce the Problem:
To reproduce this failure consistently, run an intensive network
application such as 'netperf' and then switch to a different console.
After a couple of seconds a tx reset will have occurred.
Reason:
Interrupts were being enabled even while the poll routine was still active.
Solution:
Enable interrupts only after we are ready to give up the poll routine.
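
For context, this is the ordering contract the fix enforces under the
old-style NAPI API this driver uses: finish (or give up) polling, take the
device off the poll list, and only then unmask interrupts. A minimal sketch,
with my_poll(), my_clean() and my_enable_irqs() as hypothetical stand-ins
rather than driver code:

	/* Sketch of the old-NAPI polling contract. Interrupts must stay
	 * masked for as long as the device sits on the poll list; they are
	 * unmasked only after netif_rx_complete() has taken it off. */
	static int my_poll(struct net_device *ndev, int *budget)
	{
		int work_to_do = min(*budget, ndev->quota);
		int work_done = my_clean(ndev, work_to_do); /* drain completions */

		*budget -= work_done;
		ndev->quota -= work_done;

		if (work_done < work_to_do) {
			netif_rx_complete(ndev); /* leave the poll list first... */
			my_enable_irqs(ndev);    /* ...then unmask interrupts */
			return 0;                /* polling finished */
		}
		return 1;                        /* budget exhausted, poll again */
	}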
Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers')
-rwxr-xr-x	drivers/net/qla3xxx.c | 69 +++++++++++++++++++++++++++++++++++++++++----------------------------
1 file changed, 41 insertions(+), 28 deletions(-)
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 7a2f01a22cbf..5bf446f7be15 100755
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -1921,10 +1921,11 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
 	struct net_rsp_iocb *net_rsp;
 	struct net_device *ndev = qdev->ndev;
 	unsigned long hw_flags;
+	int work_done = 0;
 
 	/* While there are entries in the completion queue. */
 	while ((cpu_to_le32(*(qdev->prsp_producer_index)) !=
-		qdev->rsp_consumer_index) && (*rx_cleaned < work_to_do)) {
+		qdev->rsp_consumer_index) && (work_done < work_to_do)) {
 
 		net_rsp = qdev->rsp_current;
 		switch (net_rsp->opcode) {
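
This first hunk fixes the budget accounting. The loop used to compare only
*rx_cleaned against work_to_do, so a tx-heavy workload such as netperf could
keep the loop draining tx completions well past its budget. With work_done
accumulating both counters, the loop is bounded by total work. Schematically
(illustrative names, not driver code):

	/* Bound the drain loop by total work, not rx work alone. */
	int work_done = 0;
	while (queue_has_entries() && work_done < work_to_do) {
		handle_one_completion(&tx_cleaned, &rx_cleaned); /* hypothetical */
		work_done = tx_cleaned + rx_cleaned;             /* tx counts too */
	}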
@@ -1975,37 +1976,41 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
 		} else {
 			qdev->rsp_current++;
 		}
+
+		work_done = *tx_cleaned + *rx_cleaned;
 	}
 
-	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+	if(work_done) {
+		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 
-	ql_update_lrg_bufq_prod_index(qdev);
+		ql_update_lrg_bufq_prod_index(qdev);
 
-	if (qdev->small_buf_release_cnt >= 16) {
-		while (qdev->small_buf_release_cnt >= 16) {
-			qdev->small_buf_q_producer_index++;
+		if (qdev->small_buf_release_cnt >= 16) {
+			while (qdev->small_buf_release_cnt >= 16) {
+				qdev->small_buf_q_producer_index++;
 
-			if (qdev->small_buf_q_producer_index ==
-			    NUM_SBUFQ_ENTRIES)
-				qdev->small_buf_q_producer_index = 0;
-			qdev->small_buf_release_cnt -= 8;
-		}
+				if (qdev->small_buf_q_producer_index ==
+				    NUM_SBUFQ_ENTRIES)
+					qdev->small_buf_q_producer_index = 0;
+				qdev->small_buf_release_cnt -= 8;
+			}
 
-		ql_write_common_reg(qdev,
-				    &port_regs->CommonRegs.
-				    rxSmallQProducerIndex,
-				    qdev->small_buf_q_producer_index);
-	}
+			wmb();
+			ql_write_common_reg(qdev,
+					    &port_regs->CommonRegs.
+					    rxSmallQProducerIndex,
+					    qdev->small_buf_q_producer_index);
 
-	ql_write_common_reg(qdev,
-			    &port_regs->CommonRegs.rspQConsumerIndex,
-			    qdev->rsp_consumer_index);
-	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+		}
 
-	if (unlikely(netif_queue_stopped(qdev->ndev))) {
-		if (netif_queue_stopped(qdev->ndev) &&
-		    (atomic_read(&qdev->tx_count) > (NUM_REQ_Q_ENTRIES / 4)))
-			netif_wake_queue(qdev->ndev);
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+		if (unlikely(netif_queue_stopped(qdev->ndev))) {
+			if (netif_queue_stopped(qdev->ndev) &&
+			    (atomic_read(&qdev->tx_count) >
+			    (NUM_REQ_Q_ENTRIES / 4)))
+				netif_wake_queue(qdev->ndev);
+		}
 	}
 
 	return *tx_cleaned + *rx_cleaned;
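
Two things change in this hunk. First, the hardware register updates (and
the hw_lock round trip) are skipped entirely when the pass processed
nothing. Second, a wmb() is added so the CPU's buffer-queue updates reach
memory before the producer-index write tells the chip to look at them. The
generic pattern looks like this (a sketch; desc[] and PROD_INDEX_REG are
made-up names for illustration):

	/* Publish descriptor contents before ringing the doorbell. */
	desc[prod].addr = cpu_to_le64(mapping);	/* fill the queue entry */
	desc[prod].len  = cpu_to_le32(len);
	wmb();					/* order entry writes vs. index write */
	writel(prod, ioaddr + PROD_INDEX_REG);	/* hypothetical doorbell register */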
@@ -2016,6 +2021,8 @@ static int ql_poll(struct net_device *ndev, int *budget)
 	struct ql3_adapter *qdev = netdev_priv(ndev);
 	int work_to_do = min(*budget, ndev->quota);
 	int rx_cleaned = 0, tx_cleaned = 0;
+	unsigned long hw_flags;
+	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
 
 	if (!netif_carrier_ok(ndev))
 		goto quit_polling;
@@ -2027,6 +2034,13 @@ static int ql_poll(struct net_device *ndev, int *budget)
 	if ((!tx_cleaned && !rx_cleaned) || !netif_running(ndev)) {
 quit_polling:
 		netif_rx_complete(ndev);
+
+		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+		ql_write_common_reg(qdev,
+				    &port_regs->CommonRegs.rspQConsumerIndex,
+				    qdev->rsp_consumer_index);
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
 		ql_enable_interrupts(qdev);
 		return 0;
 	}
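
The rspQConsumerIndex write, formerly issued on every ql_tx_rx_clean() pass,
now happens once on the poll-exit path: the final consumer index is
published under hw_lock after netif_rx_complete() and immediately before
interrupts are unmasked. Condensed from the hunks above, the exit sequence
is:

	netif_rx_complete(ndev);	/* leave the poll list */
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	ql_write_common_reg(qdev,	/* publish the final consumer index */
			    &port_regs->CommonRegs.rspQConsumerIndex,
			    qdev->rsp_consumer_index);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	ql_enable_interrupts(qdev);	/* last step, per the commit message */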
@@ -2079,11 +2093,10 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
 		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
 		spin_unlock(&qdev->adapter_lock);
 	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
-		ql_disable_interrupts(qdev);
-		if (likely(netif_rx_schedule_prep(ndev)))
+		if (likely(netif_rx_schedule_prep(ndev))) {
+			ql_disable_interrupts(qdev);
 			__netif_rx_schedule(ndev);
-		else
-			ql_enable_interrupts(qdev);
+		}
 	} else {
 		return IRQ_NONE;
 	}
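
The ISR change closes the race itself. netif_rx_schedule_prep() fails when
the device is not running or a poll is already scheduled, and in the old
code either case re-enabled interrupts, undoing the masking an in-flight
ql_poll() depended on. Now only the context that wins the prep call touches
the interrupt mask (condensed from the hunk above):

	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
		if (likely(netif_rx_schedule_prep(ndev))) {
			ql_disable_interrupts(qdev);	/* we own the poll transition */
			__netif_rx_schedule(ndev);	/* ql_poll() will unmask later */
		}
		/* lost the race: a poll is already pending; leave the mask alone */
	}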