author     Dhananjay Phadke <dhananjay@netxen.com>    2009-06-17 13:27:25 -0400
committer  David S. Miller <davem@davemloft.net>      2009-06-18 03:46:14 -0400
commit     cb2107be43d2fc5eadec58b92b54bf32c00bfff3 (patch)
tree       dd04fda143a4a7d71b32b79b508a0566450c22c6  /drivers/net/netxen/netxen_nic_init.c
parent     d173346040af497af22818dfc48cff369e67cf50 (diff)
netxen: fix tx ring accounting
This forces every update of the tx ring producer to check for availability of space for the next full TSO command. Earlier, firmware control commands didn't bother to pause the tx queue.

Stop the tx queue if there isn't enough space left on the tx ring to transmit one full LSO command after the current transmit. This avoids returning NETDEV_TX_BUSY after checking the distance between producer and consumer on every CPU.

Restart the tx queue only once enough tx descriptors have been cleaned up.

Signed-off-by: Dhananjay Phadke <dhananjay@netxen.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
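To make the accounting rule concrete, below is a minimal, self-contained C sketch of the stop/wake policy described above. It is an illustration under assumptions, not the driver's code: RING_SIZE, MAX_TSO_DESC, tx_avail(), post_command() and complete_commands() are hypothetical stand-ins for the netxen ring size, TX_STOP_THRESH, netxen_tx_avail() and the driver's transmit/cleanup paths.

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE    256  /* illustrative ring size, power of two (not the driver's value) */
#define MAX_TSO_DESC 63   /* assumed worst-case descriptor count for one full LSO command */

struct tx_ring {
	unsigned int producer;     /* next slot the transmit path will fill */
	unsigned int sw_consumer;  /* last slot the cleanup path has reclaimed */
	bool         queue_stopped;
};

/* free descriptors between producer and consumer, modulo ring size */
static unsigned int tx_avail(const struct tx_ring *r)
{
	return RING_SIZE - ((r->producer - r->sw_consumer) & (RING_SIZE - 1));
}

/*
 * Transmit side: after posting a command, stop the queue while there is
 * still room for one more full LSO command, so the next transmit never
 * has to bail out with NETDEV_TX_BUSY.
 */
static void post_command(struct tx_ring *r, unsigned int used)
{
	r->producer = (r->producer + used) & (RING_SIZE - 1);
	if (tx_avail(r) <= MAX_TSO_DESC)
		r->queue_stopped = true;
}

/*
 * Cleanup side: restart the queue only once enough descriptors have been
 * reclaimed to hold a full LSO command again.
 */
static void complete_commands(struct tx_ring *r, unsigned int done)
{
	r->sw_consumer = (r->sw_consumer + done) & (RING_SIZE - 1);
	if (r->queue_stopped && tx_avail(r) > MAX_TSO_DESC)
		r->queue_stopped = false;
}

int main(void)
{
	struct tx_ring r = { 0, 0, false };

	post_command(&r, 200);      /* available space drops below the threshold */
	printf("avail=%u stopped=%d\n", tx_avail(&r), r.queue_stopped);

	complete_commands(&r, 150); /* cleanup frees enough descriptors to wake */
	printf("avail=%u stopped=%d\n", tx_avail(&r), r.queue_stopped);
	return 0;
}

In the driver, the wake side of this policy is visible at the end of the second hunk below, where the cleanup path wakes the queue only when netxen_tx_avail(tx_ring) > TX_STOP_THRESH.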
Diffstat (limited to 'drivers/net/netxen/netxen_nic_init.c')
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 6f77ad58e3b3..bdb143d2b5c7 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -1292,7 +1292,6 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
 		return 1;
 
 	sw_consumer = tx_ring->sw_consumer;
-	barrier(); /* hw_consumer can change underneath */
 	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
 
 	while (sw_consumer != hw_consumer) {
@@ -1319,14 +1318,15 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
 			break;
 	}
 
-	tx_ring->sw_consumer = sw_consumer;
-
 	if (count && netif_running(netdev)) {
+		tx_ring->sw_consumer = sw_consumer;
+
 		smp_mb();
+
 		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
 			netif_tx_lock(netdev);
-			netif_wake_queue(netdev);
-			smp_mb();
+			if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
+				netif_wake_queue(netdev);
 			netif_tx_unlock(netdev);
 		}
 	}
@@ -1343,7 +1343,6 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
 	 * There is still a possible race condition and the host could miss an
 	 * interrupt. The card has to take care of this.
 	 */
-	barrier(); /* hw_consumer can change underneath */
 	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
 	done = (sw_consumer == hw_consumer);
 	spin_unlock(&adapter->tx_clean_lock);