Diffstat (limited to 'net/sched/sch_generic.c')
 -rw-r--r--  net/sched/sch_generic.c | 74
 1 file changed, 21 insertions(+), 53 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 468574682caa..ec0a0839ce51 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -215,10 +215,9 @@ static void dev_watchdog(unsigned long arg)
 		    time_after(jiffies, (dev->trans_start +
 					 dev->watchdog_timeo))) {
 			char drivername[64];
-			printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
+			WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
 			       dev->name, netdev_drivername(dev, drivername, 64));
 			dev->tx_timeout(dev);
-			WARN_ON_ONCE(1);
 		}
 		if (!mod_timer(&dev->watchdog_timer,
 			       round_jiffies(jiffies +
@@ -518,15 +517,17 @@ void qdisc_reset(struct Qdisc *qdisc)
 }
 EXPORT_SYMBOL(qdisc_reset);
 
-/* this is the rcu callback function to clean up a qdisc when there
- * are no further references to it */
-
-static void __qdisc_destroy(struct rcu_head *head)
+void qdisc_destroy(struct Qdisc *qdisc)
 {
-	struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
 	const struct Qdisc_ops *ops = qdisc->ops;
 
+	if (qdisc->flags & TCQ_F_BUILTIN ||
+	    !atomic_dec_and_test(&qdisc->refcnt))
+		return;
+
 #ifdef CONFIG_NET_SCHED
+	qdisc_list_del(qdisc);
+
 	qdisc_put_stab(qdisc->stab);
 #endif
 	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
@@ -542,20 +543,6 @@ static void __qdisc_destroy(struct rcu_head *head)
 
 	kfree((char *) qdisc - qdisc->padded);
 }
-
-/* Under qdisc_lock(qdisc) and BH! */
-
-void qdisc_destroy(struct Qdisc *qdisc)
-{
-	if (qdisc->flags & TCQ_F_BUILTIN ||
-	    !atomic_dec_and_test(&qdisc->refcnt))
-		return;
-
-	if (qdisc->parent)
-		list_del(&qdisc->list);
-
-	call_rcu(&qdisc->q_rcu, __qdisc_destroy);
-}
 EXPORT_SYMBOL(qdisc_destroy);
 
 static bool dev_all_qdisc_sleeping_noop(struct net_device *dev)
@@ -597,6 +584,9 @@ static void transition_one_qdisc(struct net_device *dev,
 	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
 	int *need_watchdog_p = _need_watchdog;
 
+	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
+		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
+
 	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
 	if (need_watchdog_p && new_qdisc != &noqueue_qdisc)
 		*need_watchdog_p = 1;
@@ -640,14 +630,17 @@ static void dev_deactivate_queue(struct net_device *dev,
 	if (qdisc) {
 		spin_lock_bh(qdisc_lock(qdisc));
 
-		dev_queue->qdisc = qdisc_default;
+		if (!(qdisc->flags & TCQ_F_BUILTIN))
+			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
+
+		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
 		qdisc_reset(qdisc);
 
 		spin_unlock_bh(qdisc_lock(qdisc));
 	}
 }
 
-static bool some_qdisc_is_busy(struct net_device *dev, int lock)
+static bool some_qdisc_is_busy(struct net_device *dev)
 {
 	unsigned int i;
 
@@ -661,14 +654,12 @@ static bool some_qdisc_is_busy(struct net_device *dev, int lock)
 		q = dev_queue->qdisc_sleeping;
 		root_lock = qdisc_lock(q);
 
-		if (lock)
-			spin_lock_bh(root_lock);
+		spin_lock_bh(root_lock);
 
 		val = (test_bit(__QDISC_STATE_RUNNING, &q->state) ||
 		       test_bit(__QDISC_STATE_SCHED, &q->state));
 
-		if (lock)
-			spin_unlock_bh(root_lock);
+		spin_unlock_bh(root_lock);
 
 		if (val)
 			return true;
@@ -678,8 +669,6 @@ static bool some_qdisc_is_busy(struct net_device *dev, int lock)
 
 void dev_deactivate(struct net_device *dev)
 {
-	bool running;
-
 	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
 	dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);
 
@@ -689,25 +678,8 @@ void dev_deactivate(struct net_device *dev)
 	synchronize_rcu();
 
 	/* Wait for outstanding qdisc_run calls. */
-	do {
-		while (some_qdisc_is_busy(dev, 0))
-			yield();
-
-		/*
-		 * Double-check inside queue lock to ensure that all effects
-		 * of the queue run are visible when we return.
-		 */
-		running = some_qdisc_is_busy(dev, 1);
-
-		/*
-		 * The running flag should never be set at this point because
-		 * we've already set dev->qdisc to noop_qdisc *inside* the same
-		 * pair of spin locks. That is, if any qdisc_run starts after
-		 * our initial test it should see the noop_qdisc and then
-		 * clear the RUNNING bit before dropping the queue lock. So
-		 * if it is set here then we've found a bug.
-		 */
-	} while (WARN_ON_ONCE(running));
+	while (some_qdisc_is_busy(dev))
+		yield();
 }
 
 static void dev_init_scheduler_queue(struct net_device *dev,
@@ -736,14 +708,10 @@ static void shutdown_scheduler_queue(struct net_device *dev,
 	struct Qdisc *qdisc_default = _qdisc_default;
 
 	if (qdisc) {
-		spinlock_t *root_lock = qdisc_lock(qdisc);
-
-		dev_queue->qdisc = qdisc_default;
+		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
 		dev_queue->qdisc_sleeping = qdisc_default;
 
-		spin_lock_bh(root_lock);
 		qdisc_destroy(qdisc);
-		spin_unlock_bh(root_lock);
 	}
 }
 