author		David S. Miller <davem@davemloft.net>	2008-07-15 23:14:35 -0400
committer	David S. Miller <davem@davemloft.net>	2008-07-17 22:21:18 -0400
commit		d3b753db7c4f1f37a98b51974d484fda5d86dab5 (patch)
tree		6596288854d9626f935ddf9c014471a4c38b5c74 /net/sched
parent		b4c21639ab0f6df07ab7624a8c2f974936708ae5 (diff)
pkt_sched: Move gso_skb into Qdisc.
We liberate any dangling gso_skb during qdisc destruction.

It really only matters for the root qdisc. But when qdiscs can be
shared by multiple netdev_queue objects, we can't have the gso_skb
in the netdev_queue any more.

Signed-off-by: David S. Miller <davem@davemloft.net>
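For context, this patch depends on a companion change outside net/sched
(and therefore outside the diffstat below) that moves the gso_skb pointer
from struct netdev_queue into struct Qdisc. A minimal sketch of that move,
with unrelated members elided and field layout assumed rather than copied
from the headers:

/* Sketch only: unrelated members elided, field order assumed.
 * The real definitions live in include/linux/netdevice.h and
 * include/net/sch_generic.h. */

/* Before: the deferred GSO segment hung off the TX queue. */
struct netdev_queue {
	spinlock_t	 lock;
	struct Qdisc	*qdisc;
	struct sk_buff	*gso_skb;	/* removed by this series */
	/* ... */
};

/* After: it hangs off the qdisc itself, so a qdisc shared by
 * several netdev_queue objects carries its own requeued segment. */
struct Qdisc {
	struct sk_buff	*(*dequeue)(struct Qdisc *dev);
	struct sk_buff	*gso_skb;	/* added by this series */
	/* ... */
};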
Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/sch_generic.c	19
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 2f575b9017d1..2bd75befa066 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -77,7 +77,7 @@ static inline int dev_requeue_skb(struct sk_buff *skb,
 			     struct Qdisc *q)
 {
 	if (unlikely(skb->next))
-		dev_queue->gso_skb = skb;
+		q->gso_skb = skb;
 	else
 		q->ops->requeue(skb, q);
 
@@ -85,13 +85,12 @@ static inline int dev_requeue_skb(struct sk_buff *skb,
 	return 0;
 }
 
-static inline struct sk_buff *dequeue_skb(struct netdev_queue *dev_queue,
-					  struct Qdisc *q)
+static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 {
 	struct sk_buff *skb;
 
-	if ((skb = dev_queue->gso_skb))
-		dev_queue->gso_skb = NULL;
+	if ((skb = q->gso_skb))
+		q->gso_skb = NULL;
 	else
 		skb = q->dequeue(q);
 
@@ -155,10 +154,9 @@ static inline int qdisc_restart(struct netdev_queue *txq)
 	struct sk_buff *skb;
 
 	/* Dequeue packet */
-	if (unlikely((skb = dequeue_skb(txq, q)) == NULL))
+	if (unlikely((skb = dequeue_skb(q)) == NULL))
 		return 0;
 
-
 	/* And release queue */
 	spin_unlock(&txq->lock);
 
@@ -643,8 +641,8 @@ static void dev_deactivate_queue(struct net_device *dev,
 				 void *_qdisc_default)
 {
 	struct Qdisc *qdisc_default = _qdisc_default;
+	struct sk_buff *skb = NULL;
 	struct Qdisc *qdisc;
-	struct sk_buff *skb;
 
 	spin_lock_bh(&dev_queue->lock);
 
@@ -652,9 +650,10 @@ static void dev_deactivate_queue(struct net_device *dev,
 	if (qdisc) {
 		dev_queue->qdisc = qdisc_default;
 		qdisc_reset(qdisc);
+
+		skb = qdisc->gso_skb;
+		qdisc->gso_skb = NULL;
 	}
-	skb = dev_queue->gso_skb;
-	dev_queue->gso_skb = NULL;
 
 	spin_unlock_bh(&dev_queue->lock);
 
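Putting the last two hunks together, dev_deactivate_queue now pulls any
dangling gso_skb out of the qdisc, rather than out of the netdev_queue,
while still holding the queue lock. A rough sketch of the resulting
function; the trailing kfree_skb() is assumed from the surrounding code,
which this hunk does not show:

static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{
	struct Qdisc *qdisc_default = _qdisc_default;
	struct sk_buff *skb = NULL;
	struct Qdisc *qdisc;

	spin_lock_bh(&dev_queue->lock);

	qdisc = dev_queue->qdisc;
	if (qdisc) {
		dev_queue->qdisc = qdisc_default;
		qdisc_reset(qdisc);

		/* Liberate the dangling GSO segment from the qdisc,
		 * under the queue lock. */
		skb = qdisc->gso_skb;
		qdisc->gso_skb = NULL;
	}

	spin_unlock_bh(&dev_queue->lock);

	/* Assumed from surrounding context: free outside the lock. */
	kfree_skb(skb);
}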