author     Jarek Poplawski <jarkao2@gmail.com>      2009-11-15 02:20:12 -0500
committer  David S. Miller <davem@davemloft.net>    2009-11-16 01:08:33 -0500
commit     9a1654ba0b50402a6bd03c7b0fe9b0200a5ea7b1 (patch)
tree       3defd37672da2069e6c0ffd86b0b99c694324985 /net/sched/sch_generic.c
parent     cb43e23435a66d5ed90f804af9efe9096503979f (diff)
net: Optimize hard_start_xmit() return checking
Recent changes in TX error propagation require additional checking and
masking of the values returned from hard_start_xmit(), mainly to
separate the cases where the skb was consumed. This can be simplified by
changing the order of the NETDEV_TX and NET_XMIT codes, because the
latter are treated similarly to negative (ERRNO) values.
After this change, the much simpler dev_xmit_complete() is also used in
sch_direct_xmit(), so it is moved to netdevice.h.
Additionally, the NET_RX definitions in netdevice.h are moved up from
between the TX codes to avoid confusion while reading the TX comment.
Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
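The diffstat below is limited to net/sched/sch_generic.c, so the
dev_xmit_complete() helper and the renumbered return codes, which land in
include/linux/netdevice.h, are not visible on this page. As a rough
illustrative sketch only, assuming the NET_XMIT codes are kept below a
NET_XMIT_MASK that in turn sits below the NETDEV_TX_BUSY/NETDEV_TX_LOCKED
codes (the exact constant values in netdevice.h may differ), the
"was the skb consumed?" check reduces to a single comparison:

/*
 * Illustrative sketch only -- the real definitions live in
 * include/linux/netdevice.h; the constant values here are assumptions
 * chosen to show why a single comparison is enough.
 */
#include <stdbool.h>

#define NET_XMIT_SUCCESS  0x00  /* qdisc accepted the skb                 */
#define NET_XMIT_DROP     0x01  /* skb dropped, but consumed nevertheless */
#define NET_XMIT_CN       0x02  /* congestion notification, skb consumed  */
#define NET_XMIT_MASK     0x0f  /* upper bound of all NET_XMIT codes      */

#define NETDEV_TX_OK      0x00  /* driver took care of the packet         */
#define NETDEV_TX_BUSY    0x10  /* driver tx path busy, skb NOT taken     */
#define NETDEV_TX_LOCKED  0x20  /* driver tx lock was already taken       */

/*
 * Negative (ERRNO) values, NETDEV_TX_OK and every NET_XMIT_* code all mean
 * "the skb was consumed"; only the codes at or above NET_XMIT_MASK (BUSY,
 * LOCKED) leave the skb with the caller, so one comparison replaces the
 * old "if (ret < 0) ... ret &= ~NET_XMIT_MASK" sequence.
 */
static inline bool dev_xmit_complete(int rc)
{
	return rc < NET_XMIT_MASK;
}

This is exactly the property sch_direct_xmit() relies on in the hunk below:
a single dev_xmit_complete(ret) branch replaces the old error translation,
the masking and the switch on NETDEV_TX_OK.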
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--  net/sched/sch_generic.c | 23
1 file changed, 5 insertions, 18 deletions
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index b13821ad2fb6..5173c1e1b19c 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -119,39 +119,26 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 	spin_unlock(root_lock);
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
-	if (!netif_tx_queue_stopped(txq) &&
-	    !netif_tx_queue_frozen(txq)) {
+	if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
 		ret = dev_hard_start_xmit(skb, dev, txq);
 
-		/* an error implies that the skb was consumed */
-		if (ret < 0)
-			ret = NETDEV_TX_OK;
-		/* all NET_XMIT codes map to NETDEV_TX_OK */
-		ret &= ~NET_XMIT_MASK;
-	}
 	HARD_TX_UNLOCK(dev, txq);
 
 	spin_lock(root_lock);
 
-	switch (ret) {
-	case NETDEV_TX_OK:
-		/* Driver sent out skb successfully */
+	if (dev_xmit_complete(ret)) {
+		/* Driver sent out skb successfully or skb was consumed */
 		ret = qdisc_qlen(q);
-		break;
-
-	case NETDEV_TX_LOCKED:
+	} else if (ret == NETDEV_TX_LOCKED) {
 		/* Driver try lock failed */
 		ret = handle_dev_cpu_collision(skb, txq, q);
-		break;
-
-	default:
+	} else {
 		/* Driver returned NETDEV_TX_BUSY - requeue skb */
 		if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
 			printk(KERN_WARNING "BUG %s code %d qlen %d\n",
 			       dev->name, ret, q->q.qlen);
 
 		ret = dev_requeue_skb(skb, q);
-		break;
 	}
 
 	if (ret && (netif_tx_queue_stopped(txq) ||