Diffstat (limited to 'net/sched/sch_generic.c')
 net/sched/sch_generic.c | 138 ++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 88 insertions(+), 50 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 2aeb3a4386a1..b4c680900d7a 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -60,8 +60,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 
 	/* check the reason of requeuing without tx lock first */
 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-	if (!netif_tx_queue_stopped(txq) &&
-	    !netif_tx_queue_frozen(txq)) {
+	if (!netif_tx_queue_frozen_or_stopped(txq)) {
 		q->gso_skb = NULL;
 		q->q.qlen--;
 	} else
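
Note: this hunk and the @@ -122 / @@ -138 hunks below all fold the old
netif_tx_queue_stopped()/netif_tx_queue_frozen() pair into one helper.
A minimal sketch of that helper, assuming it tests both queue-state
bits with a single mask (it lives in include/linux/netdevice.h, not in
this file):

	static inline int
	netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
	{
		/* XOFF (stopped) and FROZEN tested in one load */
		return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
	}
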
@@ -88,8 +87,8 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 		 */
 		kfree_skb(skb);
 		if (net_ratelimit())
-			printk(KERN_WARNING "Dead loop on netdevice %s, "
-			       "fix it urgently!\n", dev_queue->dev->name);
+			pr_warning("Dead loop on netdevice %s, fix it urgently!\n",
+				   dev_queue->dev->name);
 		ret = qdisc_qlen(q);
 	} else {
 		/*
@@ -122,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 	spin_unlock(root_lock);
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
-	if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
+	if (!netif_tx_queue_frozen_or_stopped(txq))
 		ret = dev_hard_start_xmit(skb, dev, txq);
 
 	HARD_TX_UNLOCK(dev, txq);
@@ -138,14 +137,13 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 	} else {
 		/* Driver returned NETDEV_TX_BUSY - requeue skb */
 		if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
-			printk(KERN_WARNING "BUG %s code %d qlen %d\n",
-			       dev->name, ret, q->q.qlen);
+			pr_warning("BUG %s code %d qlen %d\n",
+				   dev->name, ret, q->q.qlen);
 
 		ret = dev_requeue_skb(skb, q);
 	}
 
-	if (ret && (netif_tx_queue_stopped(txq) ||
-		    netif_tx_queue_frozen(txq)))
+	if (ret && netif_tx_queue_frozen_or_stopped(txq))
 		ret = 0;
 
 	return ret;
@@ -253,9 +251,8 @@ static void dev_watchdog(unsigned long arg)
 		}
 
 		if (some_queue_timedout) {
-			char drivername[64];
 			WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
-				  dev->name, netdev_drivername(dev, drivername, 64), i);
+				  dev->name, netdev_drivername(dev), i);
 			dev->netdev_ops->ndo_tx_timeout(dev);
 		}
 		if (!mod_timer(&dev->watchdog_timer,
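
Note: netdev_drivername() no longer takes a caller-supplied buffer,
which is why the drivername[64] stack array above is dropped. A hedged
sketch of the new helper, assuming it returns the parent driver's name
directly (reconstructed for illustration, not quoted from the tree):

	const char *netdev_drivername(const struct net_device *dev)
	{
		const struct device_driver *driver;
		const struct device *parent = dev->dev.parent;

		if (!parent)
			return "";

		driver = parent->driver;
		if (driver && driver->name)
			return driver->name;
		return "";
	}
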
@@ -383,6 +380,7 @@ struct Qdisc noop_qdisc = {
 	.list = LIST_HEAD_INIT(noop_qdisc.list),
 	.q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 	.dev_queue = &noop_netdev_queue,
+	.busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
 };
 EXPORT_SYMBOL(noop_qdisc);
 
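
Note: noop_qdisc above and noqueue_qdisc below are defined statically,
so they never pass through qdisc_alloc() and its
spin_lock_init(&sch->busylock); the new .busylock initializers close
that gap. A loose sketch of why the lock must be valid, modeled on the
contention handling in __dev_xmit_skb() (the function name here is
hypothetical):

	static int example_xmit(struct sk_buff *skb, struct Qdisc *q,
				spinlock_t *root_lock)
	{
		bool contended = qdisc_is_running(q);
		int rc;

		/* contended senders queue on busylock before the root lock */
		if (unlikely(contended))
			spin_lock(&q->busylock);
		spin_lock(root_lock);
		rc = qdisc_enqueue_root(skb, q);
		spin_unlock(root_lock);
		if (unlikely(contended))
			spin_unlock(&q->busylock);
		return rc;
	}
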
@@ -409,11 +407,13 @@ static struct Qdisc noqueue_qdisc = {
 	.list = LIST_HEAD_INIT(noqueue_qdisc.list),
 	.q.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
 	.dev_queue = &noqueue_netdev_queue,
+	.busylock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.busylock),
 };
 
 
-static const u8 prio2band[TC_PRIO_MAX+1] =
-	{ 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 };
+static const u8 prio2band[TC_PRIO_MAX + 1] = {
+	1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
+};
 
 /* 3-band FIFO queue: old style, but should be a bit faster than
    generic prio+fifo combination.
@@ -445,7 +445,7 @@ static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
 	return priv->q + band;
 }
 
-static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
+static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
 {
 	if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
 		int band = prio2band[skb->priority & TC_PRIO_MAX];
@@ -460,7 +460,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 	return qdisc_drop(skb, qdisc);
 }
 
-static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
 {
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 	int band = bitmap2band[priv->bitmap];
@@ -479,7 +479,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
 	return NULL;
 }
 
-static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
 {
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 	int band = bitmap2band[priv->bitmap];
@@ -493,7 +493,7 @@ static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
 	return NULL;
 }
 
-static void pfifo_fast_reset(struct Qdisc* qdisc)
+static void pfifo_fast_reset(struct Qdisc *qdisc)
 {
 	int prio;
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
@@ -510,7 +510,7 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
 {
 	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
 
-	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
+	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
 	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
 	return skb->len;
 
@@ -526,6 +526,8 @@ static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
 	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
 		skb_queue_head_init(band2list(priv, prio));
 
+	/* Can by-pass the queue discipline */
+	qdisc->flags |= TCQ_F_CAN_BYPASS;
 	return 0;
 }
 
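
Note: setting TCQ_F_CAN_BYPASS here, in pfifo_fast_init(), marks every
pfifo_fast instance as bypass-capable instead of only the default one
(the old flag-setting code in attach_one_default_qdisc() is removed
further down). An abbreviated, hypothetical sketch of how the flag is
consumed, modeled on __dev_xmit_skb():

	static int example_xmit_bypass(struct sk_buff *skb, struct Qdisc *q)
	{
		int rc;

		if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		    qdisc_run_begin(q)) {
			/* empty, idle qdisc: hand the skb straight to the
			 * driver, e.g. via sch_direct_xmit(), skipping the
			 * enqueue/dequeue round trip
			 */
			qdisc_run_end(q);
			rc = NET_XMIT_SUCCESS;
		} else {
			rc = qdisc_enqueue_root(skb, q);
			qdisc_run(q);
		}
		return rc;
	}
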
@@ -540,25 +542,32 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 	.dump = pfifo_fast_dump,
 	.owner = THIS_MODULE,
 };
+EXPORT_SYMBOL(pfifo_fast_ops);
 
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 			  struct Qdisc_ops *ops)
 {
 	void *p;
 	struct Qdisc *sch;
-	unsigned int size;
+	unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
 	int err = -ENOBUFS;
 
-	/* ensure that the Qdisc and the private data are 64-byte aligned */
-	size = QDISC_ALIGN(sizeof(*sch));
-	size += ops->priv_size + (QDISC_ALIGNTO - 1);
+	p = kzalloc_node(size, GFP_KERNEL,
+			 netdev_queue_numa_node_read(dev_queue));
 
-	p = kzalloc(size, GFP_KERNEL);
 	if (!p)
 		goto errout;
 	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
-	sch->padded = (char *) sch - (char *) p;
-
+	/* if we got non aligned memory, ask more and do alignment ourself */
+	if (sch != p) {
+		kfree(p);
+		p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
+				 netdev_queue_numa_node_read(dev_queue));
+		if (!p)
+			goto errout;
+		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
+		sch->padded = (char *) sch - (char *) p;
+	}
 	INIT_LIST_HEAD(&sch->list);
 	skb_queue_head_init(&sch->q);
 	spin_lock_init(&sch->busylock);
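
Note: qdisc_alloc() now allocates on the transmit queue's NUMA node
and tries the exact size first; only if the slab returns memory that
is not already QDISC_ALIGN()-aligned does it retry with
QDISC_ALIGNTO - 1 bytes of slack. The assumed shape of the alignment
macro it relies on (from include/net/sch_generic.h, quoted from
memory):

	#define QDISC_ALIGN(len)	(((len) + QDISC_ALIGNTO - 1) & \
					 ~(QDISC_ALIGNTO - 1))
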
@@ -574,10 +583,8 @@ errout:
 	return ERR_PTR(err);
 }
 
-struct Qdisc * qdisc_create_dflt(struct net_device *dev,
-				 struct netdev_queue *dev_queue,
-				 struct Qdisc_ops *ops,
-				 unsigned int parentid)
+struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
+				struct Qdisc_ops *ops, unsigned int parentid)
 {
 	struct Qdisc *sch;
 
@@ -630,7 +637,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
 #ifdef CONFIG_NET_SCHED
 	qdisc_list_del(qdisc);
 
-	qdisc_put_stab(qdisc->stab);
+	qdisc_put_stab(rtnl_dereference(qdisc->stab));
 #endif
 	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
 	if (ops->reset)
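
Note: qdisc->stab is now an RCU-managed pointer, so a plain read would
trip sparse/lockdep RCU checking. rtnl_dereference() fetches it while
documenting that the RTNL lock, not rcu_read_lock(), protects the
access; its definition (from include/linux/rtnetlink.h, quoted from
memory, so treat as a sketch):

	#define rtnl_dereference(p)				\
		rcu_dereference_protected(p, lockdep_rtnl_is_held())
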
@@ -674,25 +681,21 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 
 	return oqdisc;
 }
+EXPORT_SYMBOL(dev_graft_qdisc);
 
 static void attach_one_default_qdisc(struct net_device *dev,
 				     struct netdev_queue *dev_queue,
 				     void *_unused)
 {
-	struct Qdisc *qdisc;
+	struct Qdisc *qdisc = &noqueue_qdisc;
 
 	if (dev->tx_queue_len) {
-		qdisc = qdisc_create_dflt(dev, dev_queue,
+		qdisc = qdisc_create_dflt(dev_queue,
 					  &pfifo_fast_ops, TC_H_ROOT);
 		if (!qdisc) {
-			printk(KERN_INFO "%s: activation failed\n", dev->name);
+			netdev_info(dev, "activation failed\n");
 			return;
 		}
-
-		/* Can by-pass the queue discipline for default qdisc */
-		qdisc->flags |= TCQ_F_CAN_BYPASS;
-	} else {
-		qdisc = &noqueue_qdisc;
 	}
 	dev_queue->qdisc_sleeping = qdisc;
 }
@@ -709,7 +712,7 @@ static void attach_default_qdiscs(struct net_device *dev)
 		dev->qdisc = txq->qdisc_sleeping;
 		atomic_inc(&dev->qdisc->refcnt);
 	} else {
-		qdisc = qdisc_create_dflt(dev, txq, &mq_qdisc_ops, TC_H_ROOT);
+		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
 		if (qdisc) {
 			qdisc->ops->attach(qdisc);
 			dev->qdisc = qdisc;
@@ -753,13 +756,15 @@ void dev_activate(struct net_device *dev)
 
 	need_watchdog = 0;
 	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
-	transition_one_qdisc(dev, &dev->rx_queue, NULL);
+	if (dev_ingress_queue(dev))
+		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);
 
 	if (need_watchdog) {
 		dev->trans_start = jiffies;
 		dev_watchdog_up(dev);
 	}
 }
+EXPORT_SYMBOL(dev_activate);
 
 static void dev_deactivate_queue(struct net_device *dev,
 				 struct netdev_queue *dev_queue,
@@ -809,20 +814,51 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 	return false;
 }
 
-void dev_deactivate(struct net_device *dev)
+/**
+ * dev_deactivate_many - deactivate transmissions on several devices
+ * @head: list of devices to deactivate
+ *
+ * This function returns only when all outstanding transmissions
+ * have completed, unless all devices are in dismantle phase.
+ */
+void dev_deactivate_many(struct list_head *head)
 {
-	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
-	dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);
+	struct net_device *dev;
+	bool sync_needed = false;
 
-	dev_watchdog_down(dev);
+	list_for_each_entry(dev, head, unreg_list) {
+		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
+					 &noop_qdisc);
+		if (dev_ingress_queue(dev))
+			dev_deactivate_queue(dev, dev_ingress_queue(dev),
+					     &noop_qdisc);
 
-	/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
-	synchronize_rcu();
+		dev_watchdog_down(dev);
+		sync_needed |= !dev->dismantle;
+	}
+
+	/* Wait for outstanding qdisc-less dev_queue_xmit calls.
+	 * This is avoided if all devices are in dismantle phase :
+	 * Caller will call synchronize_net() for us
+	 */
+	if (sync_needed)
+		synchronize_net();
 
 	/* Wait for outstanding qdisc_run calls. */
-	while (some_qdisc_is_busy(dev))
-		yield();
+	list_for_each_entry(dev, head, unreg_list)
+		while (some_qdisc_is_busy(dev))
+			yield();
+}
+
+void dev_deactivate(struct net_device *dev)
+{
+	LIST_HEAD(single);
+
+	list_add(&dev->unreg_list, &single);
+	dev_deactivate_many(&single);
+	list_del(&single);
 }
+EXPORT_SYMBOL(dev_deactivate);
 
 static void dev_init_scheduler_queue(struct net_device *dev,
 				     struct netdev_queue *dev_queue,
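
Note: dev_deactivate() is now a thin wrapper around
dev_deactivate_many(), so batched callers (e.g. a rollback path that
unregisters many devices at once) pay for a single grace period
instead of one synchronize call per device. A hypothetical usage
sketch:

	static void example_teardown(struct net_device *a,
				     struct net_device *b)
	{
		LIST_HEAD(many);

		list_add(&a->unreg_list, &many);
		list_add(&b->unreg_list, &many);
		dev_deactivate_many(&many);	/* one RCU sync for both */
	}
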
@@ -838,7 +874,8 @@ void dev_init_scheduler(struct net_device *dev)
 {
 	dev->qdisc = &noop_qdisc;
 	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
-	dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
+	if (dev_ingress_queue(dev))
+		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
 
 	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
 }
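
Note: the unconditional &dev->rx_queue references are gone because the
ingress queue is now allocated on demand, so dev_ingress_queue() can
return NULL and each caller (here, and in dev_shutdown() below) must
check it. A sketch of the accessor, assuming it simply dereferences
the RTNL-protected pointer:

	static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
	{
		return rtnl_dereference(dev->ingress_queue);
	}
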
@@ -861,7 +898,8 @@ static void shutdown_scheduler_queue(struct net_device *dev,
 void dev_shutdown(struct net_device *dev)
 {
 	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
-	shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
+	if (dev_ingress_queue(dev))
+		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
 	qdisc_destroy(dev->qdisc);
 	dev->qdisc = &noop_qdisc;
 