diff options
author | Jarek Poplawski <jarkao2@gmail.com> | 2008-10-06 12:54:39 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2008-10-06 12:54:39 -0400 |
commit | 554794de7949d1a6279336404c066f974d4c2bde (patch) | |
tree | 90cb890a68bdf6c9947ddfc451f798bffb8f8990 /net/sched | |
parent | 13c1d18931ebb5cf407cb348ef2cd6284d68902d (diff) |
pkt_sched: Fix handling of gso skbs on requeuing
Jay Cliburn noticed and diagnosed a bug triggered in
dev_gso_skb_destructor() after the last change from qdisc->gso_skb
to the qdisc->requeue list. Since GSO-segmented skbs can't be queued
to another list, this patch brings back qdisc->gso_skb for them.
Reported-by: Jay Cliburn <jcliburn@gmail.com>
Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r-- | net/sched/sch_generic.c | 22 |
1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 5e7e0bd38fe8..3db4cf1bd263 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -44,7 +44,10 @@ static inline int qdisc_qlen(struct Qdisc *q) | |||
44 | 44 | ||
45 | static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) | 45 | static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) |
46 | { | 46 | { |
47 | __skb_queue_head(&q->requeue, skb); | 47 | if (unlikely(skb->next)) |
48 | q->gso_skb = skb; | ||
49 | else | ||
50 | __skb_queue_head(&q->requeue, skb); | ||
48 | 51 | ||
49 | __netif_schedule(q); | 52 | __netif_schedule(q); |
50 | return 0; | 53 | return 0; |
@@ -52,7 +55,10 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) | |||
52 | 55 | ||
53 | static inline struct sk_buff *dequeue_skb(struct Qdisc *q) | 56 | static inline struct sk_buff *dequeue_skb(struct Qdisc *q) |
54 | { | 57 | { |
55 | struct sk_buff *skb = skb_peek(&q->requeue); | 58 | struct sk_buff *skb = q->gso_skb; |
59 | |||
60 | if (!skb) | ||
61 | skb = skb_peek(&q->requeue); | ||
56 | 62 | ||
57 | if (unlikely(skb)) { | 63 | if (unlikely(skb)) { |
58 | struct net_device *dev = qdisc_dev(q); | 64 | struct net_device *dev = qdisc_dev(q); |
@@ -60,10 +66,15 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q) | |||
60 | 66 | ||
61 | /* check the reason of requeuing without tx lock first */ | 67 | /* check the reason of requeuing without tx lock first */ |
62 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | 68 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); |
63 | if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq)) | 69 | if (!netif_tx_queue_stopped(txq) && |
64 | __skb_unlink(skb, &q->requeue); | 70 | !netif_tx_queue_frozen(txq)) { |
65 | else | 71 | if (q->gso_skb) |
72 | q->gso_skb = NULL; | ||
73 | else | ||
74 | __skb_unlink(skb, &q->requeue); | ||
75 | } else { | ||
66 | skb = NULL; | 76 | skb = NULL; |
77 | } | ||
67 | } else { | 78 | } else { |
68 | skb = q->dequeue(q); | 79 | skb = q->dequeue(q); |
69 | } | 80 | } |
@@ -548,6 +559,7 @@ void qdisc_destroy(struct Qdisc *qdisc) | |||
548 | module_put(ops->owner); | 559 | module_put(ops->owner); |
549 | dev_put(qdisc_dev(qdisc)); | 560 | dev_put(qdisc_dev(qdisc)); |
550 | 561 | ||
562 | kfree_skb(qdisc->gso_skb); | ||
551 | __skb_queue_purge(&qdisc->requeue); | 563 | __skb_queue_purge(&qdisc->requeue); |
552 | 564 | ||
553 | kfree((char *) qdisc - qdisc->padded); | 565 | kfree((char *) qdisc - qdisc->padded); |