Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--	net/sched/sch_generic.c	| 41 ++++++++++++++++++++++++++++-------------
1 file changed, 28 insertions(+), 13 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5dbb3cd96e59..34dc598440a2 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -60,8 +60,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 
 	/* check the reason of requeuing without tx lock first */
 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-	if (!netif_tx_queue_stopped(txq) &&
-	    !netif_tx_queue_frozen(txq)) {
+	if (!netif_tx_queue_frozen_or_stopped(txq)) {
 		q->gso_skb = NULL;
 		q->q.qlen--;
 	} else
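This hunk, together with the two sch_direct_xmit() hunks below, collapses the paired netif_tx_queue_stopped() / netif_tx_queue_frozen() tests into a single netif_tx_queue_frozen_or_stopped() call. The helper's body is not part of this diff; a minimal sketch, assuming both conditions live as bits in netdev_queue->state (the bit names here are illustrative):

/* Sketch only: assumes __QUEUE_STATE_XOFF and __QUEUE_STATE_FROZEN are
 * the bits behind the old stopped/frozen tests. */
#define QUEUE_STATE_XOFF_OR_FROZEN \
	((1 << __QUEUE_STATE_XOFF) | (1 << __QUEUE_STATE_FROZEN))

static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	/* One load of ->state answers both questions. */
	return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
}

Besides being shorter at every call site, a single mask test reads ->state once instead of twice via two separate bit-test helpers.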
@@ -122,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 	spin_unlock(root_lock);
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
-	if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
+	if (!netif_tx_queue_frozen_or_stopped(txq))
 		ret = dev_hard_start_xmit(skb, dev, txq);
 
 	HARD_TX_UNLOCK(dev, txq);
@@ -144,8 +143,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 		ret = dev_requeue_skb(skb, q);
 	}
 
-	if (ret && (netif_tx_queue_stopped(txq) ||
-		    netif_tx_queue_frozen(txq)))
+	if (ret && netif_tx_queue_frozen_or_stopped(txq))
 		ret = 0;
 
 	return ret;
@@ -555,7 +553,9 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	size = QDISC_ALIGN(sizeof(*sch));
 	size += ops->priv_size + (QDISC_ALIGNTO - 1);
 
-	p = kzalloc(size, GFP_KERNEL);
+	p = kzalloc_node(size, GFP_KERNEL,
+			 netdev_queue_numa_node_read(dev_queue));
+
 	if (!p)
 		goto errout;
 	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
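This hunk makes qdisc_alloc() NUMA-aware: instead of a plain kzalloc(), the Qdisc (whose hot fields are touched on every transmitted packet) is allocated on the memory node recorded for the tx queue it will serve. The accessor is not shown in this diff; a plausible sketch, assuming struct netdev_queue carries a numa_node field set when the queue is configured:

/* Sketch only: field name and config guard are assumptions. */
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#ifdef CONFIG_NUMA
	return q->numa_node;	/* node chosen when the queue was set up */
#else
	return NUMA_NO_NODE;	/* no NUMA: allocator may pick any node */
#endif
}

Passing NUMA_NO_NODE makes kzalloc_node() behave like kzalloc(), so non-NUMA configurations are unaffected.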
@@ -810,20 +810,35 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 	return false;
 }
 
-void dev_deactivate(struct net_device *dev)
+void dev_deactivate_many(struct list_head *head)
 {
-	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
-	if (dev_ingress_queue(dev))
-		dev_deactivate_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
+	struct net_device *dev;
+
+	list_for_each_entry(dev, head, unreg_list) {
+		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
+					 &noop_qdisc);
+		if (dev_ingress_queue(dev))
+			dev_deactivate_queue(dev, dev_ingress_queue(dev),
+					     &noop_qdisc);
 
-	dev_watchdog_down(dev);
+		dev_watchdog_down(dev);
+	}
 
 	/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
 	synchronize_rcu();
 
 	/* Wait for outstanding qdisc_run calls. */
-	while (some_qdisc_is_busy(dev))
-		yield();
+	list_for_each_entry(dev, head, unreg_list)
+		while (some_qdisc_is_busy(dev))
+			yield();
+}
+
+void dev_deactivate(struct net_device *dev)
+{
+	LIST_HEAD(single);
+
+	list_add(&dev->unreg_list, &single);
+	dev_deactivate_many(&single);
 }
 
 static void dev_init_scheduler_queue(struct net_device *dev,
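The last hunk turns dev_deactivate() into a thin wrapper around a new dev_deactivate_many(), which walks a list of devices chained through their unreg_list members. The point of the batching is the synchronize_rcu() in the middle: it is paid once per list rather than once per device, which matters when tearing down many devices at once. A hypothetical caller (not from this diff) might batch like so:

/* Hypothetical: deactivate several devices behind one RCU grace period. */
static void deactivate_group(struct net_device **devs, int n)
{
	LIST_HEAD(head);
	int i;

	for (i = 0; i < n; i++)
		list_add(&devs[i]->unreg_list, &head);

	dev_deactivate_many(&head);
}

Note that the new dev_deactivate() uses exactly this pattern with a one-entry list, so single-device callers keep their old behavior.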