diff options
Diffstat (limited to 'net/sched/sch_generic.c')
| -rw-r--r-- | net/sched/sch_generic.c | 26 |
1 file changed, 23 insertions, 3 deletions
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index e01d57692c9a..fa1a6f45dc41 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
| @@ -556,6 +556,7 @@ void dev_deactivate(struct net_device *dev) | |||
| 556 | { | 556 | { |
| 557 | struct Qdisc *qdisc; | 557 | struct Qdisc *qdisc; |
| 558 | struct sk_buff *skb; | 558 | struct sk_buff *skb; |
| 559 | int running; | ||
| 559 | 560 | ||
| 560 | spin_lock_bh(&dev->queue_lock); | 561 | spin_lock_bh(&dev->queue_lock); |
| 561 | qdisc = dev->qdisc; | 562 | qdisc = dev->qdisc; |
| @@ -571,12 +572,31 @@ void dev_deactivate(struct net_device *dev) | |||
| 571 | 572 | ||
| 572 | dev_watchdog_down(dev); | 573 | dev_watchdog_down(dev); |
| 573 | 574 | ||
| 574 | /* Wait for outstanding dev_queue_xmit calls. */ | 575 | /* Wait for outstanding qdisc-less dev_queue_xmit calls. */ |
| 575 | synchronize_rcu(); | 576 | synchronize_rcu(); |
| 576 | 577 | ||
| 577 | /* Wait for outstanding qdisc_run calls. */ | 578 | /* Wait for outstanding qdisc_run calls. */ |
| 578 | while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state)) | 579 | do { |
| 579 | yield(); | 580 | while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state)) |
| 581 | yield(); | ||
| 582 | |||
| 583 | /* | ||
| 584 | * Double-check inside queue lock to ensure that all effects | ||
| 585 | * of the queue run are visible when we return. | ||
| 586 | */ | ||
| 587 | spin_lock_bh(&dev->queue_lock); | ||
| 588 | running = test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state); | ||
| 589 | spin_unlock_bh(&dev->queue_lock); | ||
| 590 | |||
| 591 | /* | ||
| 592 | * The running flag should never be set at this point because | ||
| 593 | * we've already set dev->qdisc to noop_qdisc *inside* the same | ||
| 594 | * pair of spin locks. That is, if any qdisc_run starts after | ||
| 595 | * our initial test it should see the noop_qdisc and then | ||
| 596 | * clear the RUNNING bit before dropping the queue lock. So | ||
| 597 | * if it is set here then we've found a bug. | ||
| 598 | */ | ||
| 599 | } while (WARN_ON_ONCE(running)); | ||
| 580 | } | 600 | } |
| 581 | 601 | ||
| 582 | void dev_init_scheduler(struct net_device *dev) | 602 | void dev_init_scheduler(struct net_device *dev) |
