Diffstat (limited to 'net/sched/sch_generic.c')
 net/sched/sch_generic.c | 45 +++++++++++++++++++++++++++++++++++++++------
 1 file changed, 39 insertions(+), 6 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5173c1e1b19c..2aeb3a4386a1 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -24,7 +24,9 @@
 #include <linux/init.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
+#include <linux/slab.h>
 #include <net/pkt_sched.h>
+#include <net/dst.h>
 
 /* Main transmission queue. */
 
@@ -39,6 +41,7 @@
 
 static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
+	skb_dst_force(skb);
 	q->gso_skb = skb;
 	q->qstats.requeues++;
 	q->q.qlen++;	/* it's still part of the queue */
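
A requeued skb can sit in the qdisc long after the RCU read-side section that made a non-refcounted ("noref") dst legal, so the requeue path must upgrade the dst to a real reference first. A minimal sketch of the idea behind skb_dst_force(), assuming the _skb_refdst/SKB_DST_NOREF encoding introduced by this series (not a verbatim copy of the kernel helper):

/*
 * Sketch: if the skb's dst is held without a refcount (valid only
 * inside the current RCU section), take a real reference before the
 * skb is parked in a queue that outlives that section.
 */
static inline void skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		WARN_ON(!rcu_read_lock_held());
		skb->_skb_refdst &= ~SKB_DST_NOREF;	/* drop the noref marker */
		dst_hold(skb_dst(skb));			/* take a real refcount */
	}
}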
@@ -93,7 +96,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 		 * Another cpu is holding lock, requeue & delay xmits for
 		 * some time.
 		 */
-		__get_cpu_var(netdev_rx_stat).cpu_collision++;
+		__this_cpu_inc(softnet_data.cpu_collision);
 		ret = dev_requeue_skb(skb, q);
 	}
 
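
This hunk also moves the collision counter into the softnet_data per-cpu structure (netdev_rx_stat was folded into it) and switches from the __get_cpu_var() lvalue idiom to __this_cpu_inc(), which architectures can implement as a single per-cpu increment instruction. A minimal sketch of the idiom, using a hypothetical counter for illustration:

#include <linux/percpu.h>

/* Hypothetical per-cpu statistic, for illustration only. */
struct demo_stats {
	unsigned long collisions;
};
static DEFINE_PER_CPU(struct demo_stats, demo_stats);

static void demo_count_collision(void)
{
	/* old idiom: compute this cpu's copy, then increment it */
	/* __get_cpu_var(demo_stats).collisions++; */

	/* new idiom: let the arch emit a single per-cpu increment */
	__this_cpu_inc(demo_stats.collisions);
}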
@@ -178,7 +181,7 @@ static inline int qdisc_restart(struct Qdisc *q)
 	skb = dequeue_skb(q);
 	if (unlikely(!skb))
 		return 0;
-
+	WARN_ON_ONCE(skb_dst_is_noref(skb));
 	root_lock = qdisc_lock(q);
 	dev = qdisc_dev(q);
 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
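
By the time qdisc_restart() dequeues a packet we are outside any RCU section that could have protected a noref dst, so the new WARN_ON_ONCE() asserts that dev_requeue_skb()'s skb_dst_force() above (and the equivalent forcing on the enqueue side) did its job. A sketch of the predicate being tested, under the same encoding assumption as above:

/*
 * Sketch, not verbatim: a dst is "noref" when the flag bit is set
 * and a dst pointer is actually present.
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}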
@@ -202,7 +205,7 @@ void __qdisc_run(struct Qdisc *q)
 		}
 	}
 
-	clear_bit(__QDISC_STATE_RUNNING, &q->state);
+	qdisc_run_end(q);
 }
 
 unsigned long dev_trans_start(struct net_device *dev)
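
The open-coded clear_bit() gives way to the qdisc_run_end() accessor, which pairs with qdisc_run_begin() at the entry point so that exactly one cpu services a qdisc at a time. A sketch of the pairing, assuming accessor bodies equivalent to the bit operations they replace:

/* Sketch; bodies assumed equivalent to the bitops they replace. */
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	return !test_and_set_bit(__QDISC_STATE_RUNNING, &qdisc->state);
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	clear_bit(__QDISC_STATE_RUNNING, &qdisc->state);
}

static inline void qdisc_run(struct Qdisc *q)
{
	if (qdisc_run_begin(q))	/* we won the race: run the queue */
		__qdisc_run(q);	/* ...which ends with qdisc_run_end(q) */
}

Hiding the RUNNING transition behind accessors means a later patch can change the underlying representation without touching every caller.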
@@ -324,6 +327,24 @@ void netif_carrier_off(struct net_device *dev)
 }
 EXPORT_SYMBOL(netif_carrier_off);
 
+/**
+ * netif_notify_peers - notify network peers about existence of @dev
+ * @dev: network device
+ *
+ * Generate traffic such that interested network peers are aware of
+ * @dev, such as by generating a gratuitous ARP. This may be used when
+ * a device wants to inform the rest of the network about some sort of
+ * reconfiguration such as a failover event or virtual machine
+ * migration.
+ */
+void netif_notify_peers(struct net_device *dev)
+{
+	rtnl_lock();
+	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
+	rtnl_unlock();
+}
+EXPORT_SYMBOL(netif_notify_peers);
+
 /* "NOOP" scheduler: the best scheduler, recommended for all interfaces
    under all circumstances. It is difficult to invent anything faster or
    cheaper.
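
A usage sketch for the new export: a hypothetical driver that has just completed a failover or live migration would call the helper once the link is back, so peers refresh their ARP/ND caches. Note the helper takes the RTNL itself, so callers must not already hold it:

/* Hypothetical driver path, for illustration only. */
static void demo_failover_complete(struct net_device *dev)
{
	netif_carrier_on(dev);
	netif_notify_peers(dev);	/* e.g. emits a gratuitous ARP */
}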
@@ -528,7 +549,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	unsigned int size;
 	int err = -ENOBUFS;
 
-	/* ensure that the Qdisc and the private data are 32-byte aligned */
+	/* ensure that the Qdisc and the private data are 64-byte aligned */
 	size = QDISC_ALIGN(sizeof(*sch));
 	size += ops->priv_size + (QDISC_ALIGNTO - 1);
 
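
The comment catches up with QDISC_ALIGNTO, which is now 64 so that a Qdisc and the private data behind it start on their own cache line on common CPUs. The round-up arithmetic, assuming the usual power-of-two alignment macro from pkt_sched.h:

#define QDISC_ALIGNTO		64
#define QDISC_ALIGN(len)	(((len) + QDISC_ALIGNTO - 1) & \
				 ~(QDISC_ALIGNTO - 1))

/* e.g. sizeof(*sch) == 200 rounds up to 256, so the private area
 * appended after the struct is 64-byte aligned as well.
 */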
@@ -540,6 +561,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 
 	INIT_LIST_HEAD(&sch->list);
 	skb_queue_head_init(&sch->q);
+	spin_lock_init(&sch->busylock);
 	sch->ops = ops;
 	sch->enqueue = ops->enqueue;
 	sch->dequeue = ops->dequeue;
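
busylock is the contention shield initialized here: when many cpus hammer one device, contending senders serialize on busylock first, so only one of them at a time competes with the dequeue worker for the qdisc root lock. A simplified sketch of the intended pattern on the transmit side (hypothetical function, not the verbatim __dev_xmit_skb()):

static int demo_xmit(struct sk_buff *skb, struct Qdisc *q)
{
	spinlock_t *root_lock = qdisc_lock(q);
	bool contended = qdisc_is_running(q);
	int rc;

	if (unlikely(contended))
		spin_lock(&q->busylock);	/* park here, off the hot lock */

	spin_lock(root_lock);
	rc = q->enqueue(skb, q);		/* enqueue under the root lock */
	spin_unlock(root_lock);

	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}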
@@ -590,6 +612,13 @@ void qdisc_reset(struct Qdisc *qdisc)
 }
 EXPORT_SYMBOL(qdisc_reset);
 
+static void qdisc_rcu_free(struct rcu_head *head)
+{
+	struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);
+
+	kfree((char *) qdisc - qdisc->padded);
+}
+
 void qdisc_destroy(struct Qdisc *qdisc)
 {
 	const struct Qdisc_ops *ops = qdisc->ops;
@@ -613,7 +642,11 @@ void qdisc_destroy(struct Qdisc *qdisc)
 	dev_put(qdisc_dev(qdisc));
 
 	kfree_skb(qdisc->gso_skb);
-	kfree((char *) qdisc - qdisc->padded);
+	/*
+	 * gen_estimator est_timer() might access qdisc->q.lock,
+	 * wait a RCU grace period before freeing qdisc.
+	 */
+	call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
 }
 EXPORT_SYMBOL(qdisc_destroy);
 
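
The direct kfree() becomes the classic call_rcu() deferral: qdisc_destroy() returns immediately, but the memory is freed only after a grace period, so an est_timer() still dereferencing qdisc->q.lock on another cpu cannot touch freed memory. The generic shape of the pattern, with hypothetical names:

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical object, illustrating the embedded-rcu_head pattern. */
struct demo_obj {
	int value;
	struct rcu_head rcu_head;
};

static void demo_rcu_free(struct rcu_head *head)
{
	/* recover the enclosing object from its rcu_head member */
	kfree(container_of(head, struct demo_obj, rcu_head));
}

static void demo_destroy(struct demo_obj *obj)
{
	/* freed only after all current readers are done */
	call_rcu(&obj->rcu_head, demo_rcu_free);
}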
@@ -765,7 +798,7 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 
 		spin_lock_bh(root_lock);
 
-		val = (test_bit(__QDISC_STATE_RUNNING, &q->state) ||
+		val = (qdisc_is_running(q) ||
 		       test_bit(__QDISC_STATE_SCHED, &q->state));
 
 		spin_unlock_bh(root_lock);