author     Shahed Shaikh <shahed.shaikh@qlogic.com>   2014-01-03 01:34:28 -0500
committer  David S. Miller <davem@davemloft.net>      2014-01-03 20:44:11 -0500
commit     a02bdd423d844f5beb3196922f07c85c2f7691b8 (patch)
tree       0384664864512f1e3ef204c7edf8e7e1a5566ef9 /drivers/net
parent     0d68fc4f1210f8caea2bdd68f99dc6da35ee3740 (diff)
qlcnic: Fix bug in Tx completion path
o The driver uses a single tx_clean_lock shared by all Tx queues. This
patch adds a per-queue tx_clean_lock (see the sketch after the sign-off
lines below).
o The driver does not update sw_consumer while processing Tx completions
when the interface is going down. This patch fixes that.
Signed-off-by: Shahed Shaikh <shahed.shaikh@qlogic.com>
Signed-off-by: Manish Chopra <manish.chopra@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
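For illustration, here is a minimal sketch of the per-ring locking pattern and the unconditional sw_consumer update described above. The struct and function names (demo_tx_ring, demo_init_tx_ring, demo_process_tx_completions, new_consumer) are invented for the example; this is not the driver's actual code, which is shown in the diff hunks below.

/*
 * Illustrative sketch only: each ring carries its own lock, so cleaning
 * one Tx queue no longer serializes cleanup of the others.
 */
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_tx_ring {
	u32 sw_consumer;		/* software consumer index */
	spinlock_t tx_clean_lock;	/* protects Tx descriptor cleanup for this ring */
};

static void demo_init_tx_ring(struct demo_tx_ring *ring)
{
	ring->sw_consumer = 0;
	spin_lock_init(&ring->tx_clean_lock);	/* per-ring, set up at allocation time */
}

static int demo_process_tx_completions(struct demo_tx_ring *ring, u32 new_consumer)
{
	/* Take only this ring's lock; bail out if another context is cleaning. */
	if (!spin_trylock(&ring->tx_clean_lock))
		return 1;

	/*
	 * Publish the consumer index unconditionally, even while the
	 * interface is going down, so a later cleanup pass sees a
	 * consistent sw_consumer.
	 */
	ring->sw_consumer = new_consumer;

	spin_unlock(&ring->tx_clean_lock);
	return 0;
}

In the driver itself, the per-ring lock is initialized in qlcnic_alloc_tx_rings() and taken in qlcnic_release_tx_buffers() and qlcnic_process_cmd_ring(), as the hunks below show.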
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h      | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c   | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 4
4 files changed, 12 insertions, 7 deletions
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 4dfef81e3c45..ff80cd8f6d2b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -579,6 +579,8 @@ struct qlcnic_host_tx_ring {
 	dma_addr_t phys_addr;
 	dma_addr_t hw_cons_phys_addr;
 	struct netdev_queue *txq;
+	/* Lock to protect Tx descriptors cleanup */
+	spinlock_t tx_clean_lock;
 } ____cacheline_internodealigned_in_smp;
 
 /*
@@ -1095,7 +1097,6 @@ struct qlcnic_adapter {
 	struct qlcnic_filter_hash rx_fhash;
 	struct list_head vf_mc_list;
 
-	spinlock_t tx_clean_lock;
 	spinlock_t mac_learn_lock;
 	/* spinlock for catching rcv filters for eswitch traffic */
 	spinlock_t rx_mac_learn_lock;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index e9c21e5d0ca9..c4262c23ed7c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -134,6 +134,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
 	struct qlcnic_skb_frag *buffrag;
 	int i, j;
 
+	spin_lock(&tx_ring->tx_clean_lock);
+
 	cmd_buf = tx_ring->cmd_buf_arr;
 	for (i = 0; i < tx_ring->num_desc; i++) {
 		buffrag = cmd_buf->frag_array;
@@ -157,6 +159,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
 		}
 		cmd_buf++;
 	}
+
+	spin_unlock(&tx_ring->tx_clean_lock);
 }
 
 void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 136297631999..ad1531ae3aa8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -782,7 +782,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
 	struct net_device *netdev = adapter->netdev;
 	struct qlcnic_skb_frag *frag;
 
-	if (!spin_trylock(&adapter->tx_clean_lock))
+	if (!spin_trylock(&tx_ring->tx_clean_lock))
 		return 1;
 
 	sw_consumer = tx_ring->sw_consumer;
@@ -811,8 +811,9 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
 		break;
 	}
 
+	tx_ring->sw_consumer = sw_consumer;
+
 	if (count && netif_running(netdev)) {
-		tx_ring->sw_consumer = sw_consumer;
 		smp_mb();
 		if (netif_tx_queue_stopped(tx_ring->txq) &&
 		    netif_carrier_ok(netdev)) {
@@ -838,7 +839,8 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
 	 */
 	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
 	done = (sw_consumer == hw_consumer);
-	spin_unlock(&adapter->tx_clean_lock);
+
+	spin_unlock(&tx_ring->tx_clean_lock);
 
 	return done;
 }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 2c8cac0c6a55..b8a245a79de3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1756,7 +1756,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
 	if (qlcnic_sriov_vf_check(adapter))
 		qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
 	smp_mb();
-	spin_lock(&adapter->tx_clean_lock);
 	netif_carrier_off(netdev);
 	adapter->ahw->linkup = 0;
 	netif_tx_disable(netdev);
@@ -1777,7 +1776,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
 
 	for (ring = 0; ring < adapter->drv_tx_rings; ring++)
 		qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
-	spin_unlock(&adapter->tx_clean_lock);
 }
 
 /* Usage: During suspend and firmware recovery module */
@@ -2172,6 +2170,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
 		}
 		memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
 		tx_ring->cmd_buf_arr = cmd_buf_arr;
+		spin_lock_init(&tx_ring->tx_clean_lock);
 	}
 
 	if (qlcnic_83xx_check(adapter) ||
@@ -2299,7 +2298,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	rwlock_init(&adapter->ahw->crb_lock);
 	mutex_init(&adapter->ahw->mem_lock);
 
-	spin_lock_init(&adapter->tx_clean_lock);
 	INIT_LIST_HEAD(&adapter->mac_list);
 
 	qlcnic_register_dcb(adapter);