diff options
author | Krishna Kumar <krkumar2@in.ibm.com> | 2009-08-29 16:21:21 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2009-08-31 01:16:20 -0400 |
commit | 7b3d3e4fc685a7d7e0b4c207ce24dfbab5689eb0 (patch) | |
tree | 37b8181e8cd072f49d23f0e776b367e86b18d41e | |
parent | 6ca8b990e07914a87fd1f6dfc5507f5e1c4572e2 (diff) |
netdevice: Consolidate to use existing macros where available.
The patch compiled cleanly, and 32 simultaneous netperf test runs completed fine.
Signed-off-by: Krishna Kumar <krkumar2@in.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | include/linux/netdevice.h | 13 |
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 9192cdf5bd2c..60d3aac49ed4 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -1257,7 +1257,7 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) | |||
1257 | { | 1257 | { |
1258 | #ifdef CONFIG_NETPOLL_TRAP | 1258 | #ifdef CONFIG_NETPOLL_TRAP |
1259 | if (netpoll_trap()) { | 1259 | if (netpoll_trap()) { |
1260 | clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state); | 1260 | netif_tx_start_queue(dev_queue); |
1261 | return; | 1261 | return; |
1262 | } | 1262 | } |
1263 | #endif | 1263 | #endif |
@@ -1363,7 +1363,8 @@ static inline int netif_running(const struct net_device *dev) | |||
1363 | static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) | 1363 | static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) |
1364 | { | 1364 | { |
1365 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); | 1365 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
1366 | clear_bit(__QUEUE_STATE_XOFF, &txq->state); | 1366 | |
1367 | netif_tx_start_queue(txq); | ||
1367 | } | 1368 | } |
1368 | 1369 | ||
1369 | /** | 1370 | /** |
@@ -1380,7 +1381,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) | |||
1380 | if (netpoll_trap()) | 1381 | if (netpoll_trap()) |
1381 | return; | 1382 | return; |
1382 | #endif | 1383 | #endif |
1383 | set_bit(__QUEUE_STATE_XOFF, &txq->state); | 1384 | netif_tx_stop_queue(txq); |
1384 | } | 1385 | } |
1385 | 1386 | ||
1386 | /** | 1387 | /** |
@@ -1394,7 +1395,8 @@ static inline int __netif_subqueue_stopped(const struct net_device *dev, | |||
1394 | u16 queue_index) | 1395 | u16 queue_index) |
1395 | { | 1396 | { |
1396 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); | 1397 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
1397 | return test_bit(__QUEUE_STATE_XOFF, &txq->state); | 1398 | |
1399 | return netif_tx_queue_stopped(txq); | ||
1398 | } | 1400 | } |
1399 | 1401 | ||
1400 | static inline int netif_subqueue_stopped(const struct net_device *dev, | 1402 | static inline int netif_subqueue_stopped(const struct net_device *dev, |
@@ -1746,8 +1748,7 @@ static inline void netif_tx_unlock(struct net_device *dev) | |||
1746 | * force a schedule. | 1748 | * force a schedule. |
1747 | */ | 1749 | */ |
1748 | clear_bit(__QUEUE_STATE_FROZEN, &txq->state); | 1750 | clear_bit(__QUEUE_STATE_FROZEN, &txq->state); |
1749 | if (!test_bit(__QUEUE_STATE_XOFF, &txq->state)) | 1751 | netif_schedule_queue(txq); |
1750 | __netif_schedule(txq->qdisc); | ||
1751 | } | 1752 | } |
1752 | spin_unlock(&dev->tx_global_lock); | 1753 | spin_unlock(&dev->tx_global_lock); |
1753 | } | 1754 | } |