author     Jarek Poplawski <jarkao2@gmail.com>    2008-10-06 13:41:50 -0400
committer  David S. Miller <davem@davemloft.net>  2008-10-06 13:41:50 -0400
commit     6252352d16f7b45a0fd42224f7e70e0288dc4480
tree       5740210fbe67fb754c47a13cf6d18d4e09a10015 /net/sched/sch_generic.c
parent     554794de7949d1a6279336404c066f974d4c2bde
pkt_sched: Simplify dev_requeue_skb and dequeue_skb
qdisc->requeue was planned to universally replace all requeuing code, but at the top level we never requeue more than one skb, so qdisc->gso_skb is enough for this. qdisc->requeue would be used on the lower levels only for one-level-deep requeuing (like in sch_hfsc) after finishing all the changes.

Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
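To make the "at the top level we never requeue more than one skb" point concrete, here is a small user-space sketch of the single-slot stash that q->gso_skb provides after this change. It is not kernel code; toy_qdisc, toy_dequeue and toy_requeue are made-up names standing in for the qdisc, dequeue_skb() and dev_requeue_skb():

/*
 * User-space sketch only, not kernel code.  Because the caller only
 * ever holds one dequeued packet at a time, a single stash slot (the
 * role played by q->gso_skb) is enough to park it when the "driver"
 * is busy.
 */
#include <stdio.h>

struct pkt { int id; };

struct toy_qdisc {
        struct pkt *stash;              /* plays the role of qdisc->gso_skb */
        struct pkt *backlog[8];         /* pretend queue of pending packets */
        int head, tail;
};

static struct pkt *toy_dequeue(struct toy_qdisc *q)
{
        if (q->stash) {                 /* a stashed (requeued) packet goes first */
                struct pkt *p = q->stash;
                q->stash = NULL;
                return p;
        }
        if (q->head == q->tail)
                return NULL;
        return q->backlog[q->head++ & 7];
}

static void toy_requeue(struct toy_qdisc *q, struct pkt *p)
{
        q->stash = p;                   /* one slot suffices: at most one packet is in flight */
}

int main(void)
{
        struct toy_qdisc q = { 0 };
        struct pkt a = { 1 }, b = { 2 };
        struct pkt *p;

        q.backlog[q.tail++ & 7] = &a;
        q.backlog[q.tail++ & 7] = &b;

        p = toy_dequeue(&q);            /* "driver" reports busy ... */
        toy_requeue(&q, p);             /* ... so park the packet in the stash */

        while ((p = toy_dequeue(&q)) != NULL)
                printf("sent packet %d\n", p->id);      /* prints 1, then 2 */

        return 0;
}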
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--  net/sched/sch_generic.c  21
1 file changed, 5 insertions(+), 16 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 3db4cf1bd263..31f6b614b59b 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -44,12 +44,9 @@ static inline int qdisc_qlen(struct Qdisc *q)
 
 static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
-        if (unlikely(skb->next))
-                q->gso_skb = skb;
-        else
-                __skb_queue_head(&q->requeue, skb);
-
+        q->gso_skb = skb;
         __netif_schedule(q);
+
         return 0;
 }
 
@@ -57,24 +54,16 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 {
         struct sk_buff *skb = q->gso_skb;
 
-        if (!skb)
-                skb = skb_peek(&q->requeue);
-
         if (unlikely(skb)) {
                 struct net_device *dev = qdisc_dev(q);
                 struct netdev_queue *txq;
 
                 /* check the reason of requeuing without tx lock first */
                 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-                if (!netif_tx_queue_stopped(txq) &&
-                    !netif_tx_queue_frozen(txq)) {
-                        if (q->gso_skb)
-                                q->gso_skb = NULL;
-                        else
-                                __skb_unlink(skb, &q->requeue);
-                } else {
+                if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
+                        q->gso_skb = NULL;
+                else
                         skb = NULL;
-                }
         } else {
                 skb = q->dequeue(q);
         }
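
For readability, the two helpers as they read with this patch applied, reassembled from the hunks above (dequeue_skb is shown only as far as the second hunk reaches; the remainder of that function is unchanged by this commit):

static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
        q->gso_skb = skb;
        __netif_schedule(q);

        return 0;
}

static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
        struct sk_buff *skb = q->gso_skb;

        if (unlikely(skb)) {
                struct net_device *dev = qdisc_dev(q);
                struct netdev_queue *txq;

                /* check the reason of requeuing without tx lock first */
                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
                if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
                        q->gso_skb = NULL;
                else
                        skb = NULL;
        } else {
                skb = q->dequeue(q);
        }
        /* ... rest of dequeue_skb is outside the hunk and unchanged */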