Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--	net/sched/sch_generic.c | 41 +++++++++++++++++++++++++----------------
1 file changed, 25 insertions(+), 16 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index c3ed4d44fc14..7b5572d6beb5 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -44,23 +44,30 @@ static inline int qdisc_qlen(struct Qdisc *q)
 
 static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
-	if (unlikely(skb->next))
-		q->gso_skb = skb;
-	else
-		q->ops->requeue(skb, q);
-
+	q->gso_skb = skb;
+	q->qstats.requeues++;
 	__netif_schedule(q);
+
 	return 0;
 }
 
 static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 {
-	struct sk_buff *skb;
+	struct sk_buff *skb = q->gso_skb;
+
+	if (unlikely(skb)) {
+		struct net_device *dev = qdisc_dev(q);
+		struct netdev_queue *txq;
 
-	if ((skb = q->gso_skb))
-		q->gso_skb = NULL;
-	else
+		/* check the reason of requeuing without tx lock first */
+		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+		if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
+			q->gso_skb = NULL;
+		else
+			skb = NULL;
+	} else {
 		skb = q->dequeue(q);
+	}
 
 	return skb;
 }
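The dequeue side above now peeks at a parked gso_skb and only consumes it once the tx queue is running again; if the queue is still stopped or frozen, the packet stays parked and nothing is dequeued, instead of bouncing through the requeue path repeatedly. A minimal userspace model of that control flow, with hypothetical stand-ins (struct pkt, queue_stopped, head) replacing the kernel types and helpers:

#include <stddef.h>
#include <stdbool.h>

struct pkt { int id; };

struct model_qdisc {
	struct pkt *gso_skb;	/* packet parked by a failed transmit */
	bool queue_stopped;	/* models netif_tx_queue_stopped/frozen */
	struct pkt *head;	/* stand-in for q->dequeue(q) */
};

static struct pkt *model_dequeue(struct model_qdisc *q)
{
	struct pkt *skb = q->gso_skb;

	if (skb) {
		/* Check why we requeued before handing the packet back:
		 * if the tx queue is still stopped, leave gso_skb parked
		 * and return nothing. */
		if (!q->queue_stopped)
			q->gso_skb = NULL;
		else
			skb = NULL;
	} else {
		skb = q->head;	/* normal path: pull from the queue */
		q->head = NULL;
	}
	return skb;
}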
@@ -215,10 +222,9 @@ static void dev_watchdog(unsigned long arg)
 				   time_after(jiffies, (dev->trans_start +
 						       dev->watchdog_timeo))) {
 				char drivername[64];
-				printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
+				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
 				       dev->name, netdev_drivername(dev, drivername, 64));
 				dev->tx_timeout(dev);
-				WARN_ON_ONCE(1);
 			}
 			if (!mod_timer(&dev->watchdog_timer,
 				       round_jiffies(jiffies +
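Folding the printk() and the separate WARN_ON_ONCE(1) into one WARN_ONCE() emits the timeout message together with the backtrace, and only on the first occurrence. A userspace sketch of those once-only semantics; warn_once() here is a hypothetical stand-in, not the kernel macro:

#include <stdio.h>
#include <stdbool.h>

#define warn_once(cond, ...)				\
	({						\
		static bool __warned;			\
		bool __ret = (cond);			\
		if (__ret && !__warned) {		\
			__warned = true;		\
			fprintf(stderr, __VA_ARGS__);	\
		}					\
		__ret;					\
	})

int main(void)
{
	for (int i = 0; i < 3; i++)
		warn_once(1, "NETDEV WATCHDOG: eth0: transmit timed out\n");
	return 0;	/* message printed once, not three times */
}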
@@ -328,6 +334,7 @@ struct Qdisc noop_qdisc = {
 	.flags		=	TCQ_F_BUILTIN,
 	.ops		=	&noop_qdisc_ops,
 	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
+	.requeue.lock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 	.dev_queue	=	&noop_netdev_queue,
 };
@@ -353,6 +360,7 @@ static struct Qdisc noqueue_qdisc = {
 	.flags		=	TCQ_F_BUILTIN,
 	.ops		=	&noqueue_qdisc_ops,
 	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
+	.requeue.lock	=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
 	.q.lock		=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
 	.dev_queue	=	&noqueue_netdev_queue,
 };
@@ -473,6 +481,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	sch->padded = (char *) sch - (char *) p;
 
 	INIT_LIST_HEAD(&sch->list);
+	skb_queue_head_init(&sch->requeue);
 	skb_queue_head_init(&sch->q);
 	sch->ops = ops;
 	sch->enqueue = ops->enqueue;
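qdisc_alloc() now initializes the new per-qdisc requeue list next to the main queue. For reference, skb_queue_head_init() in kernels of this era amounts to the following (paraphrased from include/linux/skbuff.h, not copied verbatim): an empty sk_buff_head has prev and next pointing back at the head itself, with the list lock made ready for use.

static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	/* empty list: head points at itself, zero queued skbs */
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}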
@@ -526,10 +535,9 @@ void qdisc_destroy(struct Qdisc *qdisc)
 	    !atomic_dec_and_test(&qdisc->refcnt))
 		return;
 
-	if (qdisc->parent)
-		list_del(&qdisc->list);
-
 #ifdef CONFIG_NET_SCHED
+	qdisc_list_del(qdisc);
+
 	qdisc_put_stab(qdisc->stab);
 #endif
 	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
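The open-coded parent check and list_del() are replaced by qdisc_list_del(), a helper that lives in net/sched/sch_api.c rather than this file. A plausible shape for it, assuming it centralizes roughly the test the removed lines performed; the exact condition is not visible in this diff:

/* Hypothetical sketch of qdisc_list_del(); the real helper is defined
 * in net/sched/sch_api.c and its precise checks are not shown here. */
void qdisc_list_del(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
		list_del(&q->list);
}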
@@ -542,6 +550,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
 	dev_put(qdisc_dev(qdisc));
 
 	kfree_skb(qdisc->gso_skb);
+	__skb_queue_purge(&qdisc->requeue);
 
 	kfree((char *) qdisc - qdisc->padded);
 }
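Teardown mirrors the initialization added in qdisc_alloc(): any packets still sitting on the requeue list are freed alongside the parked gso_skb. __skb_queue_purge() is, in paraphrase, a dequeue-and-free loop:

/* Paraphrase of __skb_queue_purge(): pop and free every skb still on
 * the list. No locking is taken here; by this point the qdisc is dead
 * and nothing else can touch its queues. */
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}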
@@ -635,7 +644,7 @@ static void dev_deactivate_queue(struct net_device *dev,
 	if (!(qdisc->flags & TCQ_F_BUILTIN))
 		set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
 
-	dev_queue->qdisc = qdisc_default;
+	rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
 	qdisc_reset(qdisc);
 
 	spin_unlock_bh(qdisc_lock(qdisc));
@@ -710,7 +719,7 @@ static void shutdown_scheduler_queue(struct net_device *dev,
 	struct Qdisc *qdisc_default = _qdisc_default;
 
 	if (qdisc) {
-		dev_queue->qdisc = qdisc_default;
+		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
 		dev_queue->qdisc_sleeping = qdisc_default;
 
 		qdisc_destroy(qdisc);
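Both plain stores to dev_queue->qdisc become rcu_assign_pointer() so that lockless readers on the tx path see either the old qdisc or the fully initialized replacement, never a half-published pointer. A generic kernel-style sketch of the publish/read pairing this implies; the reader side is assumed, not shown in this diff:

#include <linux/rcupdate.h>
#include <linux/netdevice.h>

/* Generic RCU usage, not code from this file. */
static void publish_qdisc(struct netdev_queue *dev_queue,
			  struct Qdisc *new_qdisc)
{
	/* Orders new_qdisc's initialization before the pointer store,
	 * so a reader never dereferences a half-built qdisc. */
	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
}

static unsigned int read_qdisc_flags(struct netdev_queue *dev_queue)
{
	unsigned int flags;

	rcu_read_lock();
	flags = rcu_dereference(dev_queue->qdisc)->flags;
	rcu_read_unlock();
	return flags;
}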