author     David S. Miller <davem@davemloft.net>	2008-07-08 20:42:10 -0400
committer  David S. Miller <davem@davemloft.net>	2008-07-08 20:42:10 -0400
commit     b0e1e6462df3c5944010b3328a546d8fe5d932cd (patch)
tree       37e3f86d09d8b37deb06cf1c142baeb8246bbf97 /net/sched/sch_generic.c
parent     555353cfa1aee293de445bfa6de43276138ddd82 (diff)
netdev: Move rest of qdisc state into struct netdev_queue
Now qdisc, qdisc_sleeping, and qdisc_list also live there.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--	net/sched/sch_generic.c	90
1 file changed, 57 insertions(+), 33 deletions(-)
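For orientation, the sketch below shows the per-queue fields this patch relies on, inferred from the hunks that follow. It is not the authoritative definition: struct netdev_queue lives in include/linux/netdevice.h and carries further members introduced by earlier patches in this series.

#include <linux/spinlock.h>
#include <linux/list.h>

struct Qdisc;

/*
 * Rough shape of struct netdev_queue after this patch, reconstructed
 * from the fields used in the diff below; other members are elided.
 */
struct netdev_queue {
	spinlock_t		lock;		/* serializes queue and qdisc pointer accesses */
	struct Qdisc		*qdisc;		/* active qdisc, published via rcu_assign_pointer() */
	struct Qdisc		*qdisc_sleeping;	/* qdisc to (re)install on dev_activate() */
	struct list_head	qdisc_list;	/* qdiscs attached to this queue */
};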
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 804d44b00348..3223e5ba76aa 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -122,7 +122,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
  *
  * __LINK_STATE_QDISC_RUNNING guarantees only one CPU can process this
  * device at a time. queue->lock serializes queue accesses for
- * this device AND dev->qdisc pointer itself.
+ * this device AND txq->qdisc pointer itself.
  *
  * netif_tx_lock serializes accesses to device driver.
  *
@@ -138,7 +138,8 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
  */
 static inline int qdisc_restart(struct net_device *dev)
 {
-	struct Qdisc *q = dev->qdisc;
+	struct netdev_queue *txq = &dev->tx_queue;
+	struct Qdisc *q = txq->qdisc;
 	struct sk_buff *skb;
 	int ret = NETDEV_TX_BUSY;
 
@@ -148,15 +149,15 @@ static inline int qdisc_restart(struct net_device *dev)
 
 
 	/* And release queue */
-	spin_unlock(&q->dev_queue->lock);
+	spin_unlock(&txq->lock);
 
 	HARD_TX_LOCK(dev, smp_processor_id());
 	if (!netif_subqueue_stopped(dev, skb))
 		ret = dev_hard_start_xmit(skb, dev);
 	HARD_TX_UNLOCK(dev);
 
-	spin_lock(&q->dev_queue->lock);
-	q = dev->qdisc;
+	spin_lock(&txq->lock);
+	q = txq->qdisc;
 
 	switch (ret) {
 	case NETDEV_TX_OK:
@@ -207,9 +208,10 @@ void __qdisc_run(struct net_device *dev)
 static void dev_watchdog(unsigned long arg)
 {
 	struct net_device *dev = (struct net_device *)arg;
+	struct netdev_queue *txq = &dev->tx_queue;
 
 	netif_tx_lock(dev);
-	if (dev->qdisc != &noop_qdisc) {
+	if (txq->qdisc != &noop_qdisc) {
 		if (netif_device_present(dev) &&
 		    netif_running(dev) &&
 		    netif_carrier_ok(dev)) {
@@ -539,53 +541,63 @@ EXPORT_SYMBOL(qdisc_destroy);
 
 void dev_activate(struct net_device *dev)
 {
+	struct netdev_queue *txq = &dev->tx_queue;
+
 	/* No queueing discipline is attached to device;
 	   create default one i.e. pfifo_fast for devices,
 	   which need queueing and noqueue_qdisc for
 	   virtual interfaces
 	 */
 
-	if (dev->qdisc_sleeping == &noop_qdisc) {
+	if (txq->qdisc_sleeping == &noop_qdisc) {
 		struct Qdisc *qdisc;
 		if (dev->tx_queue_len) {
-			qdisc = qdisc_create_dflt(dev, &dev->tx_queue,
+			qdisc = qdisc_create_dflt(dev, txq,
 						  &pfifo_fast_ops,
 						  TC_H_ROOT);
 			if (qdisc == NULL) {
 				printk(KERN_INFO "%s: activation failed\n", dev->name);
 				return;
 			}
-			list_add_tail(&qdisc->list, &dev->qdisc_list);
+			list_add_tail(&qdisc->list, &txq->qdisc_list);
 		} else {
 			qdisc = &noqueue_qdisc;
 		}
-		dev->qdisc_sleeping = qdisc;
+		txq->qdisc_sleeping = qdisc;
 	}
 
 	if (!netif_carrier_ok(dev))
 		/* Delay activation until next carrier-on event */
 		return;
 
-	spin_lock_bh(&dev->tx_queue.lock);
-	rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
-	if (dev->qdisc != &noqueue_qdisc) {
+	spin_lock_bh(&txq->lock);
+	rcu_assign_pointer(txq->qdisc, txq->qdisc_sleeping);
+	if (txq->qdisc != &noqueue_qdisc) {
 		dev->trans_start = jiffies;
 		dev_watchdog_up(dev);
 	}
-	spin_unlock_bh(&dev->tx_queue.lock);
+	spin_unlock_bh(&txq->lock);
+}
+
+static void dev_deactivate_queue(struct net_device *dev,
+				 struct netdev_queue *dev_queue,
+				 struct Qdisc *qdisc_default)
+{
+	struct Qdisc *qdisc = dev_queue->qdisc;
+
+	if (qdisc) {
+		dev_queue->qdisc = qdisc_default;
+		qdisc_reset(qdisc);
+	}
 }
 
 void dev_deactivate(struct net_device *dev)
 {
-	struct Qdisc *qdisc;
 	struct sk_buff *skb;
 	int running;
 
 	spin_lock_bh(&dev->tx_queue.lock);
-	qdisc = dev->qdisc;
-	dev->qdisc = &noop_qdisc;
-
-	qdisc_reset(qdisc);
+	dev_deactivate_queue(dev, &dev->tx_queue, &noop_qdisc);
 
 	skb = dev->gso_skb;
 	dev->gso_skb = NULL;
@@ -622,32 +634,44 @@ void dev_deactivate(struct net_device *dev)
 	} while (WARN_ON_ONCE(running));
 }
 
+static void dev_init_scheduler_queue(struct net_device *dev,
+				     struct netdev_queue *dev_queue,
+				     struct Qdisc *qdisc)
+{
+	dev_queue->qdisc = qdisc;
+	dev_queue->qdisc_sleeping = qdisc;
+	INIT_LIST_HEAD(&dev_queue->qdisc_list);
+}
+
 void dev_init_scheduler(struct net_device *dev)
 {
 	qdisc_lock_tree(dev);
-	dev->qdisc = &noop_qdisc;
-	dev->qdisc_sleeping = &noop_qdisc;
-	INIT_LIST_HEAD(&dev->qdisc_list);
+	dev_init_scheduler_queue(dev, &dev->tx_queue, &noop_qdisc);
+	dev_init_scheduler_queue(dev, &dev->rx_queue, NULL);
 	qdisc_unlock_tree(dev);
 
 	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
 }
 
-void dev_shutdown(struct net_device *dev)
+static void dev_shutdown_scheduler_queue(struct net_device *dev,
+					 struct netdev_queue *dev_queue,
+					 struct Qdisc *qdisc_default)
 {
-	struct Qdisc *qdisc;
+	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+
+	if (qdisc) {
+		dev_queue->qdisc = qdisc_default;
+		dev_queue->qdisc_sleeping = qdisc_default;
 
-	qdisc_lock_tree(dev);
-	qdisc = dev->qdisc_sleeping;
-	dev->qdisc = &noop_qdisc;
-	dev->qdisc_sleeping = &noop_qdisc;
-	qdisc_destroy(qdisc);
-#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
-	if ((qdisc = dev->qdisc_ingress) != NULL) {
-		dev->qdisc_ingress = NULL;
 		qdisc_destroy(qdisc);
 	}
-#endif
+}
+
+void dev_shutdown(struct net_device *dev)
+{
+	qdisc_lock_tree(dev);
+	dev_shutdown_scheduler_queue(dev, &dev->tx_queue, &noop_qdisc);
+	dev_shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
 	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
 	qdisc_unlock_tree(dev);
 }
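Elsewhere in the tree (this page is limited to net/sched/sch_generic.c), code that used to read dev->qdisc now reaches the same pointer through the embedded TX queue. A minimal, hypothetical illustration of the new access pattern; the helper name is not part of this patch:

/* Hypothetical helper, for illustration only: the old dev->qdisc
 * dereference becomes a lookup through the device's tx_queue. */
static struct Qdisc *dev_root_qdisc(struct net_device *dev)
{
	struct netdev_queue *txq = &dev->tx_queue;

	return txq->qdisc;	/* previously: dev->qdisc */
}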