|  |  |  |
| --- | --- | --- |
| author | David S. Miller <davem@davemloft.net> | 2008-07-16 06:00:19 -0400 |
| committer | David S. Miller <davem@davemloft.net> | 2008-07-17 22:21:27 -0400 |
| commit | 53049978df1d9ae55bf397c9879e6b33218352db (patch) |  |
| tree | 18369747279ef1c0b807fe19e80d5d6c96d099bb /net/sched |  |
| parent | ead81cc5fc6d996db6afb20f211241612610a07a (diff) |  |
pkt_sched: Make qdisc grafting locking more specific.
Lock the root of the qdisc being operated upon.
All explicit references to qdisc_tree_lock() are now gone.
The only remaining uses are via the sch_tree_{lock,unlock}()
and tcf_tree_{lock,unlock}() macros.
Signed-off-by: David S. Miller <davem@davemloft.net>
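At each grafting call site the conversion boils down to the pattern sketched below (a condensed illustration of the hunks that follow, not a literal excerpt; qdisc_root_lock() is the helper that hands back the lock of the root qdisc that the target qdisc sits under):

```c
/* Before: serialize on the device-wide tree-locking helpers. */
qdisc_lock_tree(dev);
qdisc_destroy(q);
qdisc_unlock_tree(dev);

/* After: take only the root qdisc's lock, with BHs disabled. */
spinlock_t *root_lock = qdisc_root_lock(q);

spin_lock_bh(root_lock);
qdisc_destroy(q);
spin_unlock_bh(root_lock);
```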
Diffstat (limited to 'net/sched')
-rw-r--r-- | net/sched/sch_api.c | 41 |
1 file changed, 29 insertions, 12 deletions
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 6958fe7c9a77..74924893ef7f 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -441,15 +441,29 @@ static struct Qdisc *
 dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
 {
         struct netdev_queue *dev_queue;
+        spinlock_t *root_lock;
         struct Qdisc *oqdisc;
+        int ingress;
 
         if (dev->flags & IFF_UP)
                 dev_deactivate(dev);
 
-        qdisc_lock_tree(dev);
-        if (qdisc && qdisc->flags&TCQ_F_INGRESS) {
+        ingress = 0;
+        if (qdisc && qdisc->flags&TCQ_F_INGRESS)
+                ingress = 1;
+
+        if (ingress) {
                 dev_queue = &dev->rx_queue;
                 oqdisc = dev_queue->qdisc;
+        } else {
+                dev_queue = netdev_get_tx_queue(dev, 0);
+                oqdisc = dev_queue->qdisc_sleeping;
+        }
+
+        root_lock = qdisc_root_lock(oqdisc);
+        spin_lock_bh(root_lock);
+
+        if (ingress) {
                 /* Prune old scheduler */
                 if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) {
                         /* delete */
@@ -460,9 +474,6 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
                 }
 
         } else {
-                dev_queue = netdev_get_tx_queue(dev, 0);
-                oqdisc = dev_queue->qdisc_sleeping;
-
                 /* Prune old scheduler */
                 if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
                         qdisc_reset(oqdisc);
@@ -474,7 +485,7 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
                 dev_queue->qdisc = &noop_qdisc;
         }
 
-        qdisc_unlock_tree(dev);
+        spin_unlock_bh(root_lock);
 
         if (dev->flags & IFF_UP)
                 dev_activate(dev);
@@ -765,10 +776,12 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                 if ((err = qdisc_graft(dev, p, clid, NULL, &q)) != 0)
                         return err;
                 if (q) {
+                        spinlock_t *root_lock = qdisc_root_lock(q);
+
                         qdisc_notify(skb, n, clid, q, NULL);
-                        qdisc_lock_tree(dev);
+                        spin_unlock_bh(root_lock);
                         qdisc_destroy(q);
-                        qdisc_unlock_tree(dev);
+                        spin_unlock_bh(root_lock);
                 }
         } else {
                 qdisc_notify(skb, n, clid, NULL, q);
@@ -911,20 +924,24 @@ create_n_graft:
 graft:
         if (1) {
                 struct Qdisc *old_q = NULL;
+                spinlock_t *root_lock;
+
                 err = qdisc_graft(dev, p, clid, q, &old_q);
                 if (err) {
                         if (q) {
-                                qdisc_lock_tree(dev);
+                                root_lock = qdisc_root_lock(q);
+                                spin_lock_bh(root_lock);
                                 qdisc_destroy(q);
-                                qdisc_unlock_tree(dev);
+                                spin_unlock_bh(root_lock);
                         }
                         return err;
                 }
                 qdisc_notify(skb, n, clid, old_q, q);
                 if (old_q) {
-                        qdisc_lock_tree(dev);
+                        root_lock = qdisc_root_lock(old_q);
+                        spin_lock_bh(root_lock);
                         qdisc_destroy(old_q);
-                        qdisc_unlock_tree(dev);
+                        spin_unlock_bh(root_lock);
                 }
         }
         return 0;
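The sch_tree_{lock,unlock}() and tcf_tree_{lock,unlock}() uses mentioned in the commit message live inside the individual qdisc and classifier implementations rather than in this grafting path. A typical call site has roughly the shape below (a minimal illustrative sketch, not part of this patch; the function name and body are hypothetical):

```c
/* Hypothetical example: schedulers keep wrapping their own tree
 * modifications in the sch_tree_lock()/sch_tree_unlock() macros
 * instead of touching any lock directly. */
static int example_change_class(struct Qdisc *sch)
{
        sch_tree_lock(sch);
        /* ... update this qdisc's per-class state here ... */
        sch_tree_unlock(sch);

        return 0;
}
```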