about | summary | refs | log | tree | commit | diff | stats
path: root/drivers
diff options
context:
space:
mode:
authorRajesh K Borundia <rajesh.borundia@qlogic.com>2010-06-16 22:56:41 -0400
committerDavid S. Miller <davem@davemloft.net>2010-06-17 11:57:56 -0400
commitef71ff833acfd3795c3af1bb800ac186561508ef (patch)
treefa285765677574d4aafb42c9fbbc32443a8e4597 /drivers
parent8f891387aa73b85d2ea8d953e04dac224f687e52 (diff)
qlcnic: fix race in tx stop queue
There is a race between netif_stop_queue and the netif_stopped_queue check. So check once again whether buffers are available, to avoid the race. With the above logic we can also get rid of the tx lock in process_cmd_ring.

Signed-off-by: Rajesh K Borundia <rajesh.borundia@qlogic.com>
Signed-off-by: Amit Kumar Salecha <amit.salecha@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/net/qlcnic/qlcnic.h8
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c12
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c2
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c23
4 files changed, 26 insertions, 19 deletions
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 9970cff598d1..99ccdd8ac419 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -113,8 +113,10 @@
 #define TX_UDPV6_PKT	0x0c
 
 /* Tx defines */
-#define MAX_BUFFERS_PER_CMD	32
-#define TX_STOP_THRESH		((MAX_SKB_FRAGS >> 2) + 4)
+#define MAX_TSO_HEADER_DESC	2
+#define MGMT_CMD_DESC_RESV	4
+#define TX_STOP_THRESH		((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+							+ MGMT_CMD_DESC_RESV)
 #define QLCNIC_MAX_TX_TIMEOUTS	2
 
 /*
@@ -369,7 +371,7 @@ struct qlcnic_recv_crb {
  */
 struct qlcnic_cmd_buffer {
 	struct sk_buff *skb;
-	struct qlcnic_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
+	struct qlcnic_skb_frag frag_array[MAX_SKB_FRAGS + 1];
 	u32 frag_count;
 };
 
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index f776956d2d6c..d9becb96d403 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -338,9 +338,15 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
 
 	if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
 		netif_tx_stop_queue(tx_ring->txq);
-		__netif_tx_unlock_bh(tx_ring->txq);
-		adapter->stats.xmit_off++;
-		return -EBUSY;
+		smp_mb();
+		if (qlcnic_tx_avail(tx_ring) > nr_desc) {
+			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
+				netif_tx_wake_queue(tx_ring->txq);
+		} else {
+			adapter->stats.xmit_off++;
+			__netif_tx_unlock_bh(tx_ring->txq);
+			return -EBUSY;
+		}
 	}
 
 	do {
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 2bd00d54dd3f..058ce61501c3 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -181,7 +181,9 @@ skip_rds:
 
 	tx_ring = adapter->tx_ring;
 	vfree(tx_ring->cmd_buf_arr);
+	tx_ring->cmd_buf_arr = NULL;
 	kfree(adapter->tx_ring);
+	adapter->tx_ring = NULL;
 }
 
 int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 06d2dfd646fe..655bccd7f8f4 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -132,12 +132,6 @@ qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
 		struct qlcnic_host_tx_ring *tx_ring)
 {
 	writel(tx_ring->producer, tx_ring->crb_cmd_producer);
-
-	if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
-		netif_stop_queue(adapter->netdev);
-		smp_mb();
-		adapter->stats.xmit_off++;
-	}
 }
 
 static const u32 msi_tgt_status[8] = {
@@ -1137,7 +1131,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
 	adapter->max_mc_count = 38;
 
 	netdev->netdev_ops = &qlcnic_netdev_ops;
-	netdev->watchdog_timeo = 2*HZ;
+	netdev->watchdog_timeo = 5*HZ;
 
 	qlcnic_change_mtu(netdev, netdev->mtu);
 
@@ -1709,10 +1703,15 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	/* 4 fragments per cmd des */
 	no_of_desc = (frag_count + 3) >> 2;
 
-	if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) {
+	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
 		netif_stop_queue(netdev);
-		adapter->stats.xmit_off++;
-		return NETDEV_TX_BUSY;
+		smp_mb();
+		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
+			netif_start_queue(netdev);
+		else {
+			adapter->stats.xmit_off++;
+			return NETDEV_TX_BUSY;
+		}
 	}
 
 	producer = tx_ring->producer;
@@ -2018,14 +2017,12 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
 		smp_mb();
 
 		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
-			__netif_tx_lock(tx_ring->txq, smp_processor_id());
 			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
 				netif_wake_queue(netdev);
-				adapter->tx_timeo_cnt = 0;
 				adapter->stats.xmit_on++;
 			}
-			__netif_tx_unlock(tx_ring->txq);
 		}
+		adapter->tx_timeo_cnt = 0;
 	}
 	/*
 	 * If everything is freed up to consumer then check if the ring is full