author	Eric Dumazet <eric.dumazet@gmail.com>	2010-11-23 05:42:02 -0500
committer	David S. Miller <davem@davemloft.net>	2010-11-28 13:47:18 -0500
commit	5a0d2268d259886f0c87131639d19eb4a67b4532 (patch)
tree	952e6225fcee027f3a4ae8e1c2b2c889eff3a8b2
parent	d3c15cab213becc49a6f2ad7f48a59513a5f17dd (diff)
net: add netif_tx_queue_frozen_or_stopped
Wherever we test the struct netdev_queue state against the FROZEN bit, we also
test the XOFF bit. We can test both bits at once and save some cycles.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
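For context, here is a minimal user-space sketch of the idea, not the kernel code itself (the queue struct, bit names, and helper names below are illustrative only): testing two flag bits separately costs two masked reads and, depending on the compiler, two branches, while OR-ing the bit masks into one constant folds the check into a single AND.

/*
 * Illustrative sketch (not kernel code): compare testing two state bits
 * individually versus testing them with one combined mask.
 */
#include <stdio.h>

enum queue_state_bit {
	QUEUE_STATE_XOFF_BIT,
	QUEUE_STATE_FROZEN_BIT,
};

/* combined mask covering both bits, mirroring the patch's approach */
#define QUEUE_STATE_XOFF_OR_FROZEN \
	((1UL << QUEUE_STATE_XOFF_BIT) | (1UL << QUEUE_STATE_FROZEN_BIT))

struct queue {
	unsigned long state;
};

/* two separate bit tests: two masks, potentially two branches */
static int frozen_or_stopped_two_tests(const struct queue *q)
{
	return (q->state & (1UL << QUEUE_STATE_XOFF_BIT)) ||
	       (q->state & (1UL << QUEUE_STATE_FROZEN_BIT));
}

/* single test against the combined mask: one AND, one compare */
static int frozen_or_stopped_one_test(const struct queue *q)
{
	return q->state & QUEUE_STATE_XOFF_OR_FROZEN;
}

int main(void)
{
	struct queue q = { .state = 1UL << QUEUE_STATE_FROZEN_BIT };

	printf("two tests: %d, one test: %d\n",
	       frozen_or_stopped_two_tests(&q),
	       !!frozen_or_stopped_one_test(&q));
	return 0;
}

Both helpers report the same truth value; the one-test variant simply does less work per call, which is what the patch exploits in the hot transmit paths below.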
-rw-r--r--	include/linux/netdevice.h	6
-rw-r--r--	net/core/netpoll.c	3
-rw-r--r--	net/core/pktgen.c	2
-rw-r--r--	net/sched/sch_generic.c	8
-rw-r--r--	net/sched/sch_teql.c	3
5 files changed, 10 insertions, 12 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index badf9285fe0d..7c6ae2f4b9ab 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -493,6 +493,8 @@ static inline void napi_synchronize(const struct napi_struct *n)
 enum netdev_queue_state_t {
 	__QUEUE_STATE_XOFF,
 	__QUEUE_STATE_FROZEN,
+#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF) | \
+				     (1 << __QUEUE_STATE_FROZEN))
 };
 
 struct netdev_queue {
@@ -1629,9 +1631,9 @@ static inline int netif_queue_stopped(const struct net_device *dev)
 	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }
 
-static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
+static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
 {
-	return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
+	return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
 }
 
 /**
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 4e98ffac3af0..ee38acb6d463 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -76,8 +76,7 @@ static void queue_process(struct work_struct *work)
 
 		local_irq_save(flags);
 		__netif_tx_lock(txq, smp_processor_id());
-		if (netif_tx_queue_stopped(txq) ||
-		    netif_tx_queue_frozen(txq) ||
+		if (netif_tx_queue_frozen_or_stopped(txq) ||
 		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
 			__netif_tx_unlock(txq);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2e57830cbeb2..2953b2abc971 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3527,7 +3527,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
 	__netif_tx_lock_bh(txq);
 
-	if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq))) {
+	if (unlikely(netif_tx_queue_frozen_or_stopped(txq))) {
 		ret = NETDEV_TX_BUSY;
 		pkt_dev->last_ok = 0;
 		goto unlock;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5dbb3cd96e59..7f0bd8952646 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -60,8 +60,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 
 		/* check the reason of requeuing without tx lock first */
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-		if (!netif_tx_queue_stopped(txq) &&
-		    !netif_tx_queue_frozen(txq)) {
+		if (!netif_tx_queue_frozen_or_stopped(txq)) {
 			q->gso_skb = NULL;
 			q->q.qlen--;
 		} else
@@ -122,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 	spin_unlock(root_lock);
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
-	if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
+	if (!netif_tx_queue_frozen_or_stopped(txq))
 		ret = dev_hard_start_xmit(skb, dev, txq);
 
 	HARD_TX_UNLOCK(dev, txq);
@@ -144,8 +143,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 		ret = dev_requeue_skb(skb, q);
 	}
 
-	if (ret && (netif_tx_queue_stopped(txq) ||
-		    netif_tx_queue_frozen(txq)))
+	if (ret && netif_tx_queue_frozen_or_stopped(txq))
 		ret = 0;
 
 	return ret;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 401af9596709..106479a7c94a 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -309,8 +309,7 @@ restart:
 		if (__netif_tx_trylock(slave_txq)) {
 			unsigned int length = qdisc_pkt_len(skb);
 
-			if (!netif_tx_queue_stopped(slave_txq) &&
-			    !netif_tx_queue_frozen(slave_txq) &&
+			if (!netif_tx_queue_frozen_or_stopped(slave_txq) &&
 			    slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
 				txq_trans_update(slave_txq);
 				__netif_tx_unlock(slave_txq);