author		Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>	2012-07-10 10:57:32 -0400
committer	David S. Miller <davem@davemloft.net>	2012-07-11 02:28:33 -0400
commit		d0de73096e6da4f71cc83623930edcc389a44ca6
tree		b22891ffd7d457b5508dbd578dff698a71c55a05
parent		41812db8e2111abbebff4fccffecab1fc1eb090c
qlge: Cleanup atomic queue threshold check.
Signed-off-by: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/qlogic/qlge/qlge.h		1 -
-rw-r--r--	drivers/net/ethernet/qlogic/qlge/qlge_main.c	5 +----
2 files changed, 1 insertion(+), 5 deletions(-)
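This commit removes the driver-private tx_ring->queue_stopped atomic: the networking core already tracks whether a subqueue is stopped, so __netif_subqueue_stopped() answers the same question, and the wake-up decision only needs the free-descriptor count. As a minimal sketch of the completion-side pattern the driver is left with (not part of the patch; example_wake_tx_queue() is a hypothetical name, while the netif_* calls are the real subqueue API from <linux/netdevice.h>), assuming tx_count holds the number of free descriptors and wq_len the ring size:

	/* Illustrative sketch only -- example_wake_tx_queue() does not
	 * exist in the driver.  After this patch, the stack's own
	 * subqueue state replaces the private queue_stopped atomic.
	 */
	static void example_wake_tx_queue(struct ql_adapter *qdev,
					  struct tx_ring *tx_ring)
	{
		/* Act only if the stack marked this subqueue stopped... */
		if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
		    /* ...and at least a quarter of the ring is free again. */
		    atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}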
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 5a639df33f18..6e7050c7127d 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -1397,7 +1397,6 @@ struct tx_ring {
 	struct tx_ring_desc *q;	/* descriptor list for the queue */
 	spinlock_t lock;
 	atomic_t tx_count;	/* counts down for every outstanding IO */
-	atomic_t queue_stopped;	/* Turns queue off when full. */
 	struct delayed_work tx_work;
 	struct ql_adapter *qdev;
 	u64 tx_packets;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index fb86f06e8f1e..5cfba6aa1b92 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2171,8 +2171,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
 	ql_write_cq_idx(rx_ring);
 	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
 	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
-		if (atomic_read(&tx_ring->queue_stopped) &&
-		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
+		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
 			/*
 			 * The queue got stopped because the tx_ring was full.
 			 * Wake it up, because it's now at least 25% empty.
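Note on the threshold: tx_count is initialized to wq_len and counts down for every outstanding IO, so it is effectively the number of free descriptors. The check tx_count > wq_len / 4 therefore fires once the ring is at least 25% empty; for example, on a 256-entry ring the queue is woken only after more than 64 descriptors are free again, which is exactly what the comment in the hunk above describes.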
@@ -2559,7 +2558,6 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
 			   "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
 			   __func__, tx_ring_idx);
 		netif_stop_subqueue(ndev, tx_ring->wq_id);
-		atomic_inc(&tx_ring->queue_stopped);
 		tx_ring->tx_errors++;
 		return NETDEV_TX_BUSY;
 	}
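On the send side, stopping the queue no longer involves any bookkeeping beyond netif_stop_subqueue(); the completion path rediscovers the stopped state through __netif_subqueue_stopped(). A minimal sketch of the resulting guard at the top of the xmit path (the exact low-water threshold here is an assumption for illustration, not taken from this hunk):

	/* Hypothetical xmit-path guard: with no descriptors to spare,
	 * mark the subqueue stopped and push back on the stack.  No
	 * private flag needs incrementing any more.
	 */
	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}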
@@ -2688,7 +2686,6 @@ static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
 		tx_ring_desc++;
 	}
 	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
-	atomic_set(&tx_ring->queue_stopped, 0);
 }
 
 static void ql_free_tx_resources(struct ql_adapter *qdev,
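At ring-init time, only tx_count needs resetting to the full ring size; there is no private stopped flag left to clear. The subqueue's stopped/running state is owned by the stack and is re-armed when the interface is brought up (e.g. via netif_tx_start_all_queues(), assuming the driver follows the usual open path).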