diff options
author | David S. Miller <davem@davemloft.net> | 2008-08-18 00:58:07 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2008-08-18 00:58:07 -0400 |
commit | 4335cd2da1e8986fa8aff21a91144d986cb0a5fc (patch) | |
tree | 7c39163199086fedc39adad6c733526658d4bc86 /net/sched | |
parent | def82a1db1fdc4f861c77009e2ee86870c3743b0 (diff) |
pkt_sched: Simplify dev_deactivate() polling loop.
The condition under which the previous qdisc has no more references
after we've attached &noop_qdisc is that both RUNNING and SCHED
are seen clear while holding the root lock.
So just make that specific check in the polling loop, instead
of this overly complex "check without, then check with lock held"
sequence.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r-- | net/sched/sch_generic.c | 31 |
1 file changed, 5 insertions, 26 deletions
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index ff1c4557e5f8..30b76aec723b 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -653,7 +653,7 @@ static void dev_deactivate_queue(struct net_device *dev, | |||
653 | } | 653 | } |
654 | } | 654 | } |
655 | 655 | ||
656 | static bool some_qdisc_is_busy(struct net_device *dev, int lock) | 656 | static bool some_qdisc_is_busy(struct net_device *dev) |
657 | { | 657 | { |
658 | unsigned int i; | 658 | unsigned int i; |
659 | 659 | ||
@@ -667,14 +667,12 @@ static bool some_qdisc_is_busy(struct net_device *dev, int lock) | |||
667 | q = dev_queue->qdisc_sleeping; | 667 | q = dev_queue->qdisc_sleeping; |
668 | root_lock = qdisc_lock(q); | 668 | root_lock = qdisc_lock(q); |
669 | 669 | ||
670 | if (lock) | 670 | spin_lock_bh(root_lock); |
671 | spin_lock_bh(root_lock); | ||
672 | 671 | ||
673 | val = (test_bit(__QDISC_STATE_RUNNING, &q->state) || | 672 | val = (test_bit(__QDISC_STATE_RUNNING, &q->state) || |
674 | test_bit(__QDISC_STATE_SCHED, &q->state)); | 673 | test_bit(__QDISC_STATE_SCHED, &q->state)); |
675 | 674 | ||
676 | if (lock) | 675 | spin_unlock_bh(root_lock); |
677 | spin_unlock_bh(root_lock); | ||
678 | 676 | ||
679 | if (val) | 677 | if (val) |
680 | return true; | 678 | return true; |
@@ -684,8 +682,6 @@ static bool some_qdisc_is_busy(struct net_device *dev, int lock) | |||
684 | 682 | ||
685 | void dev_deactivate(struct net_device *dev) | 683 | void dev_deactivate(struct net_device *dev) |
686 | { | 684 | { |
687 | bool running; | ||
688 | |||
689 | netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc); | 685 | netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc); |
690 | dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc); | 686 | dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc); |
691 | 687 | ||
@@ -695,25 +691,8 @@ void dev_deactivate(struct net_device *dev) | |||
695 | synchronize_rcu(); | 691 | synchronize_rcu(); |
696 | 692 | ||
697 | /* Wait for outstanding qdisc_run calls. */ | 693 | /* Wait for outstanding qdisc_run calls. */ |
698 | do { | 694 | while (some_qdisc_is_busy(dev)) |
699 | while (some_qdisc_is_busy(dev, 0)) | 695 | yield(); |
700 | yield(); | ||
701 | |||
702 | /* | ||
703 | * Double-check inside queue lock to ensure that all effects | ||
704 | * of the queue run are visible when we return. | ||
705 | */ | ||
706 | running = some_qdisc_is_busy(dev, 1); | ||
707 | |||
708 | /* | ||
709 | * The running flag should never be set at this point because | ||
710 | * we've already set dev->qdisc to noop_qdisc *inside* the same | ||
711 | * pair of spin locks. That is, if any qdisc_run starts after | ||
712 | * our initial test it should see the noop_qdisc and then | ||
713 | * clear the RUNNING bit before dropping the queue lock. So | ||
714 | * if it is set here then we've found a bug. | ||
715 | */ | ||
716 | } while (WARN_ON_ONCE(running)); | ||
717 | } | 696 | } |
718 | 697 | ||
719 | static void dev_init_scheduler_queue(struct net_device *dev, | 698 | static void dev_init_scheduler_queue(struct net_device *dev, |