author		David S. Miller <davem@davemloft.net>	2008-07-15 23:14:35 -0400
committer	David S. Miller <davem@davemloft.net>	2008-07-17 22:21:18 -0400
commit		d3b753db7c4f1f37a98b51974d484fda5d86dab5 (patch)
tree		6596288854d9626f935ddf9c014471a4c38b5c74
parent		b4c21639ab0f6df07ab7624a8c2f974936708ae5 (diff)
pkt_sched: Move gso_skb into Qdisc.
We liberate any dangling gso_skb during qdisc destruction.

It really only matters for the root qdisc.  But when qdiscs can be
shared by multiple netdev_queue objects, we can't have the gso_skb
in the netdev_queue any more.

Signed-off-by: David S. Miller <davem@davemloft.net>
 include/linux/netdevice.h |  1 -
 include/net/sch_generic.h |  1 +
 net/sched/sch_generic.c   | 19 +++++++++----------
 3 files changed, 10 insertions(+), 11 deletions(-)
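
As a rough illustration (a simplified sketch, not the actual kernel definitions; only the fields touched by this patch are shown), the structural change is that the deferred GSO skb pointer now lives on the qdisc rather than on each TX queue, so it follows the qdisc even when several netdev_queue objects share it:

/* Simplified sketch of the two structures after this patch; the real
 * definitions live in include/linux/netdevice.h and
 * include/net/sch_generic.h and contain many more fields.
 */
struct netdev_queue {
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	unsigned long		state;
	/* gso_skb removed: no longer tracked per TX queue */
	spinlock_t		_xmit_lock;
};

struct Qdisc {
	u32			handle;
	u32			parent;
	atomic_t		refcnt;
	struct sk_buff		*gso_skb;	/* deferred GSO skb moved here */
	struct sk_buff_head	q;
};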
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 787fbfc5aebb..0883fcf2d16a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -449,7 +449,6 @@ struct netdev_queue {
 	struct net_device	*dev;
 	struct Qdisc		*qdisc;
 	unsigned long		state;
-	struct sk_buff		*gso_skb;
 	spinlock_t		_xmit_lock;
 	int			xmit_lock_owner;
 	struct Qdisc		*qdisc_sleeping;
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index b47f556c66f8..b96c3d9e10a8 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -36,6 +36,7 @@ struct Qdisc
 	u32			handle;
 	u32			parent;
 	atomic_t		refcnt;
+	struct sk_buff		*gso_skb;
 	struct sk_buff_head	q;
 	struct netdev_queue	*dev_queue;
 	struct list_head	list;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 2f575b9017d1..2bd75befa066 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -77,7 +77,7 @@ static inline int dev_requeue_skb(struct sk_buff *skb,
 				  struct Qdisc *q)
 {
 	if (unlikely(skb->next))
-		dev_queue->gso_skb = skb;
+		q->gso_skb = skb;
 	else
 		q->ops->requeue(skb, q);
 
@@ -85,13 +85,12 @@ static inline int dev_requeue_skb(struct sk_buff *skb,
 	return 0;
 }
 
-static inline struct sk_buff *dequeue_skb(struct netdev_queue *dev_queue,
-					  struct Qdisc *q)
+static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 {
 	struct sk_buff *skb;
 
-	if ((skb = dev_queue->gso_skb))
-		dev_queue->gso_skb = NULL;
+	if ((skb = q->gso_skb))
+		q->gso_skb = NULL;
 	else
 		skb = q->dequeue(q);
 
@@ -155,10 +154,9 @@ static inline int qdisc_restart(struct netdev_queue *txq)
 	struct sk_buff *skb;
 
 	/* Dequeue packet */
-	if (unlikely((skb = dequeue_skb(txq, q)) == NULL))
+	if (unlikely((skb = dequeue_skb(q)) == NULL))
 		return 0;
 
-
 	/* And release queue */
 	spin_unlock(&txq->lock);
 
@@ -643,8 +641,8 @@ static void dev_deactivate_queue(struct net_device *dev,
 				 void *_qdisc_default)
 {
 	struct Qdisc *qdisc_default = _qdisc_default;
+	struct sk_buff *skb = NULL;
 	struct Qdisc *qdisc;
-	struct sk_buff *skb;
 
 	spin_lock_bh(&dev_queue->lock);
 
@@ -652,9 +650,10 @@ static void dev_deactivate_queue(struct net_device *dev,
 	if (qdisc) {
 		dev_queue->qdisc = qdisc_default;
 		qdisc_reset(qdisc);
+
+		skb = qdisc->gso_skb;
+		qdisc->gso_skb = NULL;
 	}
-	skb = dev_queue->gso_skb;
-	dev_queue->gso_skb = NULL;
 
 	spin_unlock_bh(&dev_queue->lock);
 
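
As a usage note, here is a condensed view of the dequeue fast path after this patch, reassembled from the hunks above as a sketch (the `return skb;` tail is filled in from context not shown in the diff):

/* Condensed sketch: the deferred GSO skb is stashed on and pulled from
 * the qdisc itself, so dequeue no longer needs the netdev_queue at all.
 */
static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
	struct sk_buff *skb;

	if ((skb = q->gso_skb))
		q->gso_skb = NULL;	/* hand back the stashed GSO skb first */
	else
		skb = q->dequeue(q);	/* otherwise dequeue from the qdisc proper */

	return skb;
}

Correspondingly, the dev_deactivate_queue change in the last hunk pulls any dangling gso_skb off the qdisc while the queue lock is held, presumably so it can be freed once the lock is dropped (the free itself falls outside the context shown here); this is the "liberate any dangling gso_skb" behaviour the commit message describes.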