diff options
author | David S. Miller <davem@davemloft.net> | 2008-07-17 03:53:03 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2008-07-17 22:21:30 -0400 |
commit | 83874000929ed63aef30b44083a9f713135ff040 (patch) | |
tree | 7646fd185751cad8665eca19aa3f87d13c37eade /net | |
parent | c7e4f3bbb4ba4e48ab3b529d5016e454cee1ccd6 (diff) |
pkt_sched: Kill netdev_queue lock.
We can simply use the qdisc->q.lock for all of the
qdisc tree synchronization.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r-- | net/core/dev.c | 9 | ||||
-rw-r--r-- | net/mac80211/wme.c | 19 | ||||
-rw-r--r-- | net/sched/sch_generic.c | 32 | ||||
-rw-r--r-- | net/sched/sch_teql.c | 7 |
4 files changed, 37 insertions(+), 30 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c index 6741e344ac59..32a13772c1cb 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2080,10 +2080,12 @@ static int ing_filter(struct sk_buff *skb) | |||
2080 | 2080 | ||
2081 | rxq = &dev->rx_queue; | 2081 | rxq = &dev->rx_queue; |
2082 | 2082 | ||
2083 | spin_lock(&rxq->lock); | 2083 | q = rxq->qdisc; |
2084 | if ((q = rxq->qdisc) != NULL) | 2084 | if (q) { |
2085 | spin_lock(qdisc_lock(q)); | ||
2085 | result = q->enqueue(skb, q); | 2086 | result = q->enqueue(skb, q); |
2086 | spin_unlock(&rxq->lock); | 2087 | spin_unlock(qdisc_lock(q)); |
2088 | } | ||
2087 | 2089 | ||
2088 | return result; | 2090 | return result; |
2089 | } | 2091 | } |
@@ -4173,7 +4175,6 @@ static void netdev_init_one_queue(struct net_device *dev, | |||
4173 | struct netdev_queue *queue, | 4175 | struct netdev_queue *queue, |
4174 | void *_unused) | 4176 | void *_unused) |
4175 | { | 4177 | { |
4176 | spin_lock_init(&queue->lock); | ||
4177 | queue->dev = dev; | 4178 | queue->dev = dev; |
4178 | } | 4179 | } |
4179 | 4180 | ||
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index b21cfec4b6ce..6e8099e77043 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c | |||
@@ -237,12 +237,14 @@ void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local, | |||
237 | ieee80211_requeue(local, agg_queue); | 237 | ieee80211_requeue(local, agg_queue); |
238 | } else { | 238 | } else { |
239 | struct netdev_queue *txq; | 239 | struct netdev_queue *txq; |
240 | spinlock_t *root_lock; | ||
240 | 241 | ||
241 | txq = netdev_get_tx_queue(local->mdev, agg_queue); | 242 | txq = netdev_get_tx_queue(local->mdev, agg_queue); |
243 | root_lock = qdisc_root_lock(txq->qdisc); | ||
242 | 244 | ||
243 | spin_lock_bh(&txq->lock); | 245 | spin_lock_bh(root_lock); |
244 | qdisc_reset(txq->qdisc); | 246 | qdisc_reset(txq->qdisc); |
245 | spin_unlock_bh(&txq->lock); | 247 | spin_unlock_bh(root_lock); |
246 | } | 248 | } |
247 | } | 249 | } |
248 | 250 | ||
@@ -250,6 +252,7 @@ void ieee80211_requeue(struct ieee80211_local *local, int queue) | |||
250 | { | 252 | { |
251 | struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, queue); | 253 | struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, queue); |
252 | struct sk_buff_head list; | 254 | struct sk_buff_head list; |
255 | spinlock_t *root_lock; | ||
253 | struct Qdisc *qdisc; | 256 | struct Qdisc *qdisc; |
254 | u32 len; | 257 | u32 len; |
255 | 258 | ||
@@ -261,14 +264,15 @@ void ieee80211_requeue(struct ieee80211_local *local, int queue) | |||
261 | 264 | ||
262 | skb_queue_head_init(&list); | 265 | skb_queue_head_init(&list); |
263 | 266 | ||
264 | spin_lock(&txq->lock); | 267 | root_lock = qdisc_root_lock(qdisc); |
268 | spin_lock(root_lock); | ||
265 | for (len = qdisc->q.qlen; len > 0; len--) { | 269 | for (len = qdisc->q.qlen; len > 0; len--) { |
266 | struct sk_buff *skb = qdisc->dequeue(qdisc); | 270 | struct sk_buff *skb = qdisc->dequeue(qdisc); |
267 | 271 | ||
268 | if (skb) | 272 | if (skb) |
269 | __skb_queue_tail(&list, skb); | 273 | __skb_queue_tail(&list, skb); |
270 | } | 274 | } |
271 | spin_unlock(&txq->lock); | 275 | spin_unlock(root_lock); |
272 | 276 | ||
273 | for (len = list.qlen; len > 0; len--) { | 277 | for (len = list.qlen; len > 0; len--) { |
274 | struct sk_buff *skb = __skb_dequeue(&list); | 278 | struct sk_buff *skb = __skb_dequeue(&list); |
@@ -280,12 +284,13 @@ void ieee80211_requeue(struct ieee80211_local *local, int queue) | |||
280 | 284 | ||
281 | txq = netdev_get_tx_queue(local->mdev, new_queue); | 285 | txq = netdev_get_tx_queue(local->mdev, new_queue); |
282 | 286 | ||
283 | spin_lock(&txq->lock); | ||
284 | 287 | ||
285 | qdisc = rcu_dereference(txq->qdisc); | 288 | qdisc = rcu_dereference(txq->qdisc); |
286 | qdisc->enqueue(skb, qdisc); | 289 | root_lock = qdisc_root_lock(qdisc); |
287 | 290 | ||
288 | spin_unlock(&txq->lock); | 291 | spin_lock(root_lock); |
292 | qdisc->enqueue(skb, qdisc); | ||
293 | spin_unlock(root_lock); | ||
289 | } | 294 | } |
290 | 295 | ||
291 | out_unlock: | 296 | out_unlock: |
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 3d53e92ad9c8..8fc580b3e173 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -96,15 +96,15 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb, | |||
96 | } | 96 | } |
97 | 97 | ||
98 | /* | 98 | /* |
99 | * NOTE: Called under queue->lock with locally disabled BH. | 99 | * NOTE: Called under qdisc_lock(q) with locally disabled BH. |
100 | * | 100 | * |
101 | * __QDISC_STATE_RUNNING guarantees only one CPU can process | 101 | * __QDISC_STATE_RUNNING guarantees only one CPU can process |
102 | * this qdisc at a time. queue->lock serializes queue accesses for | 102 | * this qdisc at a time. qdisc_lock(q) serializes queue accesses for |
103 | * this queue AND txq->qdisc pointer itself. | 103 | * this queue. |
104 | * | 104 | * |
105 | * netif_tx_lock serializes accesses to device driver. | 105 | * netif_tx_lock serializes accesses to device driver. |
106 | * | 106 | * |
107 | * queue->lock and netif_tx_lock are mutually exclusive, | 107 | * qdisc_lock(q) and netif_tx_lock are mutually exclusive, |
108 | * if one is grabbed, another must be free. | 108 | * if one is grabbed, another must be free. |
109 | * | 109 | * |
110 | * Note, that this procedure can be called by a watchdog timer | 110 | * Note, that this procedure can be called by a watchdog timer |
@@ -317,7 +317,6 @@ struct Qdisc_ops noop_qdisc_ops __read_mostly = { | |||
317 | }; | 317 | }; |
318 | 318 | ||
319 | static struct netdev_queue noop_netdev_queue = { | 319 | static struct netdev_queue noop_netdev_queue = { |
320 | .lock = __SPIN_LOCK_UNLOCKED(noop_netdev_queue.lock), | ||
321 | .qdisc = &noop_qdisc, | 320 | .qdisc = &noop_qdisc, |
322 | }; | 321 | }; |
323 | 322 | ||
@@ -327,6 +326,7 @@ struct Qdisc noop_qdisc = { | |||
327 | .flags = TCQ_F_BUILTIN, | 326 | .flags = TCQ_F_BUILTIN, |
328 | .ops = &noop_qdisc_ops, | 327 | .ops = &noop_qdisc_ops, |
329 | .list = LIST_HEAD_INIT(noop_qdisc.list), | 328 | .list = LIST_HEAD_INIT(noop_qdisc.list), |
329 | .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), | ||
330 | .dev_queue = &noop_netdev_queue, | 330 | .dev_queue = &noop_netdev_queue, |
331 | }; | 331 | }; |
332 | EXPORT_SYMBOL(noop_qdisc); | 332 | EXPORT_SYMBOL(noop_qdisc); |
@@ -498,7 +498,7 @@ errout: | |||
498 | } | 498 | } |
499 | EXPORT_SYMBOL(qdisc_create_dflt); | 499 | EXPORT_SYMBOL(qdisc_create_dflt); |
500 | 500 | ||
501 | /* Under queue->lock and BH! */ | 501 | /* Under qdisc_root_lock(qdisc) and BH! */ |
502 | 502 | ||
503 | void qdisc_reset(struct Qdisc *qdisc) | 503 | void qdisc_reset(struct Qdisc *qdisc) |
504 | { | 504 | { |
@@ -526,10 +526,12 @@ static void __qdisc_destroy(struct rcu_head *head) | |||
526 | module_put(ops->owner); | 526 | module_put(ops->owner); |
527 | dev_put(qdisc_dev(qdisc)); | 527 | dev_put(qdisc_dev(qdisc)); |
528 | 528 | ||
529 | kfree_skb(qdisc->gso_skb); | ||
530 | |||
529 | kfree((char *) qdisc - qdisc->padded); | 531 | kfree((char *) qdisc - qdisc->padded); |
530 | } | 532 | } |
531 | 533 | ||
532 | /* Under queue->lock and BH! */ | 534 | /* Under qdisc_root_lock(qdisc) and BH! */ |
533 | 535 | ||
534 | void qdisc_destroy(struct Qdisc *qdisc) | 536 | void qdisc_destroy(struct Qdisc *qdisc) |
535 | { | 537 | { |
@@ -586,13 +588,12 @@ static void transition_one_qdisc(struct net_device *dev, | |||
586 | struct netdev_queue *dev_queue, | 588 | struct netdev_queue *dev_queue, |
587 | void *_need_watchdog) | 589 | void *_need_watchdog) |
588 | { | 590 | { |
591 | struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping; | ||
589 | int *need_watchdog_p = _need_watchdog; | 592 | int *need_watchdog_p = _need_watchdog; |
590 | 593 | ||
591 | spin_lock_bh(&dev_queue->lock); | 594 | rcu_assign_pointer(dev_queue->qdisc, new_qdisc); |
592 | rcu_assign_pointer(dev_queue->qdisc, dev_queue->qdisc_sleeping); | 595 | if (new_qdisc != &noqueue_qdisc) |
593 | if (dev_queue->qdisc != &noqueue_qdisc) | ||
594 | *need_watchdog_p = 1; | 596 | *need_watchdog_p = 1; |
595 | spin_unlock_bh(&dev_queue->lock); | ||
596 | } | 597 | } |
597 | 598 | ||
598 | void dev_activate(struct net_device *dev) | 599 | void dev_activate(struct net_device *dev) |
@@ -629,19 +630,16 @@ static void dev_deactivate_queue(struct net_device *dev, | |||
629 | struct sk_buff *skb = NULL; | 630 | struct sk_buff *skb = NULL; |
630 | struct Qdisc *qdisc; | 631 | struct Qdisc *qdisc; |
631 | 632 | ||
632 | spin_lock_bh(&dev_queue->lock); | ||
633 | |||
634 | qdisc = dev_queue->qdisc; | 633 | qdisc = dev_queue->qdisc; |
635 | if (qdisc) { | 634 | if (qdisc) { |
635 | spin_lock_bh(qdisc_lock(qdisc)); | ||
636 | |||
636 | dev_queue->qdisc = qdisc_default; | 637 | dev_queue->qdisc = qdisc_default; |
637 | qdisc_reset(qdisc); | 638 | qdisc_reset(qdisc); |
638 | 639 | ||
639 | skb = qdisc->gso_skb; | 640 | spin_unlock_bh(qdisc_lock(qdisc)); |
640 | qdisc->gso_skb = NULL; | ||
641 | } | 641 | } |
642 | 642 | ||
643 | spin_unlock_bh(&dev_queue->lock); | ||
644 | |||
645 | kfree_skb(skb); | 643 | kfree_skb(skb); |
646 | } | 644 | } |
647 | 645 | ||
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index ade3372221c7..8b0ff345f9da 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c | |||
@@ -156,12 +156,15 @@ teql_destroy(struct Qdisc* sch) | |||
156 | master->slaves = NEXT_SLAVE(q); | 156 | master->slaves = NEXT_SLAVE(q); |
157 | if (q == master->slaves) { | 157 | if (q == master->slaves) { |
158 | struct netdev_queue *txq; | 158 | struct netdev_queue *txq; |
159 | spinlock_t *root_lock; | ||
159 | 160 | ||
160 | txq = netdev_get_tx_queue(master->dev, 0); | 161 | txq = netdev_get_tx_queue(master->dev, 0); |
161 | master->slaves = NULL; | 162 | master->slaves = NULL; |
162 | spin_lock_bh(&txq->lock); | 163 | |
164 | root_lock = qdisc_root_lock(txq->qdisc); | ||
165 | spin_lock_bh(root_lock); | ||
163 | qdisc_reset(txq->qdisc); | 166 | qdisc_reset(txq->qdisc); |
164 | spin_unlock_bh(&txq->lock); | 167 | spin_unlock_bh(root_lock); |
165 | } | 168 | } |
166 | } | 169 | } |
167 | skb_queue_purge(&dat->q); | 170 | skb_queue_purge(&dat->q); |