Diffstat (limited to 'net/sched/sch_generic.c')

 net/sched/sch_generic.c | 59 +++++++++++++++++++++++++++++++++--------------------------
 1 file changed, 33 insertions(+), 26 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 34dc598440a2..c84b65920d1b 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -87,8 +87,8 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 		 */
 		kfree_skb(skb);
 		if (net_ratelimit())
-			printk(KERN_WARNING "Dead loop on netdevice %s, "
-			       "fix it urgently!\n", dev_queue->dev->name);
+			pr_warning("Dead loop on netdevice %s, fix it urgently!\n",
+				   dev_queue->dev->name);
 		ret = qdisc_qlen(q);
 	} else {
 		/*
@@ -137,8 +137,8 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 	} else {
 		/* Driver returned NETDEV_TX_BUSY - requeue skb */
 		if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
-			printk(KERN_WARNING "BUG %s code %d qlen %d\n",
-			       dev->name, ret, q->q.qlen);
+			pr_warning("BUG %s code %d qlen %d\n",
+				   dev->name, ret, q->q.qlen);
 
 		ret = dev_requeue_skb(skb, q);
 	}
@@ -412,8 +412,9 @@ static struct Qdisc noqueue_qdisc = {
 };
 
 
-static const u8 prio2band[TC_PRIO_MAX+1] =
-	{ 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 };
+static const u8 prio2band[TC_PRIO_MAX + 1] = {
+	1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
+};
 
 /* 3-band FIFO queue: old style, but should be a bit faster than
    generic prio+fifo combination.
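
prio2band folds the sixteen TC_PRIO_* values down to pfifo_fast's three bands, band 0 being served first. A standalone userspace sketch of the lookup; the table and TC_PRIO_MAX mirror the kernel source above, while the harness around them is illustrative only:

#include <stdio.h>

#define TC_PRIO_MAX 15

static const unsigned char prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};

int main(void)
{
	unsigned int priority;

	/* skb->priority is masked with TC_PRIO_MAX before indexing,
	 * so any 32-bit priority value is a safe table index. */
	for (priority = 0; priority <= TC_PRIO_MAX; priority++)
		printf("priority %2u -> band %u\n",
		       priority, prio2band[priority & TC_PRIO_MAX]);
	return 0;
}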
@@ -445,7 +446,7 @@ static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
 	return priv->q + band;
 }
 
-static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
+static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
 {
 	if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
 		int band = prio2band[skb->priority & TC_PRIO_MAX];
@@ -460,7 +461,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 	return qdisc_drop(skb, qdisc);
 }
 
-static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
 {
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 	int band = bitmap2band[priv->bitmap];
@@ -479,7 +480,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
 	return NULL;
 }
 
-static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
 {
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 	int band = bitmap2band[priv->bitmap];
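
bitmap2band is not visible in this hunk, but it is what keeps dequeue and peek O(1): priv->bitmap holds one bit per non-empty band, and an eight-entry table maps every possible occupancy pattern straight to the first band to serve, or -1 when all bands are empty. A hedged userspace sketch; the table contents match the kernel source of this era, the harness is illustrative:

#include <stdio.h>

/* Entry is the lowest-numbered (highest-priority) non-empty band,
 * or -1 when priv->bitmap is 0, i.e. every band is empty. */
static const int bitmap2band[] = { -1, 0, 1, 0, 2, 0, 1, 0 };

int main(void)
{
	unsigned int bitmap;

	for (bitmap = 0; bitmap < 8; bitmap++)
		printf("bands(2,1,0) = %u%u%u -> dequeue band %d\n",
		       (bitmap >> 2) & 1, (bitmap >> 1) & 1, bitmap & 1,
		       bitmap2band[bitmap]);
	return 0;
}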
@@ -493,7 +494,7 @@ static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
 	return NULL;
 }
 
-static void pfifo_fast_reset(struct Qdisc* qdisc)
+static void pfifo_fast_reset(struct Qdisc *qdisc)
 {
 	int prio;
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
@@ -510,7 +511,7 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
 {
 	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
 
-	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
+	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
 	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
 	return skb->len;
 
@@ -526,6 +527,8 @@ static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
 	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
 		skb_queue_head_init(band2list(priv, prio));
 
+	/* Can by-pass the queue discipline */
+	qdisc->flags |= TCQ_F_CAN_BYPASS;
 	return 0;
 }
 
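Setting TCQ_F_CAN_BYPASS here tells the core transmit path that an empty pfifo_fast qdisc may be skipped entirely, handing the skb straight to the driver instead of paying for an enqueue/dequeue round trip. A rough, self-contained sketch of that test; the structure and flag value are stand-ins, and the real check in net/core/dev.c also considers the qdisc running state:

#include <stdbool.h>
#include <stdio.h>

#define TCQ_F_CAN_BYPASS 0x4	/* stand-in for the kernel flag bit */

struct fake_qdisc {
	unsigned int flags;
	unsigned int qlen;	/* packets currently queued */
};

/* Only an empty, bypass-capable qdisc may be short-circuited;
 * anything already queued must keep its ordering through the qdisc. */
static bool can_bypass(const struct fake_qdisc *q)
{
	return (q->flags & TCQ_F_CAN_BYPASS) && q->qlen == 0;
}

int main(void)
{
	struct fake_qdisc q = { .flags = TCQ_F_CAN_BYPASS, .qlen = 0 };

	printf("empty qdisc: bypass=%d\n", can_bypass(&q));
	q.qlen = 3;
	printf("backlogged qdisc: bypass=%d\n", can_bypass(&q));
	return 0;
}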
@@ -540,27 +543,32 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 	.dump		=	pfifo_fast_dump,
 	.owner		=	THIS_MODULE,
 };
+EXPORT_SYMBOL(pfifo_fast_ops);
 
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 			  struct Qdisc_ops *ops)
 {
 	void *p;
 	struct Qdisc *sch;
-	unsigned int size;
+	unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
 	int err = -ENOBUFS;
 
-	/* ensure that the Qdisc and the private data are 64-byte aligned */
-	size = QDISC_ALIGN(sizeof(*sch));
-	size += ops->priv_size + (QDISC_ALIGNTO - 1);
-
 	p = kzalloc_node(size, GFP_KERNEL,
 			 netdev_queue_numa_node_read(dev_queue));
 
 	if (!p)
 		goto errout;
 	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
-	sch->padded = (char *) sch - (char *) p;
-
+	/* if we got non aligned memory, ask more and do alignment ourself */
+	if (sch != p) {
+		kfree(p);
+		p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
+				 netdev_queue_numa_node_read(dev_queue));
+		if (!p)
+			goto errout;
+		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
+		sch->padded = (char *) sch - (char *) p;
+	}
 	INIT_LIST_HEAD(&sch->list);
 	skb_queue_head_init(&sch->q);
 	spin_lock_init(&sch->busylock);
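
The reworked allocation banks on kzalloc_node() usually returning memory that is already suitably aligned, so the common case no longer over-allocates by QDISC_ALIGNTO - 1 bytes of slack; only an unaligned pointer triggers the free-and-retry path, which records the fix-up offset in sch->padded so the original pointer can be recovered at free time. The round-up itself is plain mask arithmetic; a userspace demonstration, assuming the 64-byte QDISC_ALIGNTO that the removed comment refers to:

#include <stdio.h>

#define QDISC_ALIGNTO	 64UL
#define QDISC_ALIGN(len) (((len) + QDISC_ALIGNTO - 1) & ~(QDISC_ALIGNTO - 1))

int main(void)
{
	unsigned long len;

	/* Round a few example lengths up to the next 64-byte boundary. */
	for (len = 60; len <= 200; len += 68)
		printf("%4lu -> %4lu (padding %lu)\n",
		       len, QDISC_ALIGN(len), QDISC_ALIGN(len) - len);
	return 0;
}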
@@ -630,7 +638,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
 #ifdef CONFIG_NET_SCHED
 	qdisc_list_del(qdisc);
 
-	qdisc_put_stab(qdisc->stab);
+	qdisc_put_stab(rtnl_dereference(qdisc->stab));
 #endif
 	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
 	if (ops->reset)
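
qdisc->stab is now an __rcu-annotated pointer, so a plain dereference would trip sparse; rtnl_dereference() documents that this writer-side path runs under the RTNL lock and fetches the pointer without needing an rcu_read_lock section. For reference, the helper is a thin wrapper (quoted from include/linux/rtnetlink.h of this era):

#define rtnl_dereference(p)					\
	rcu_dereference_protected(p, lockdep_rtnl_is_held())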
@@ -674,25 +682,21 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 
 	return oqdisc;
 }
+EXPORT_SYMBOL(dev_graft_qdisc);
 
 static void attach_one_default_qdisc(struct net_device *dev,
 				     struct netdev_queue *dev_queue,
 				     void *_unused)
 {
-	struct Qdisc *qdisc;
+	struct Qdisc *qdisc = &noqueue_qdisc;
 
 	if (dev->tx_queue_len) {
 		qdisc = qdisc_create_dflt(dev_queue,
 					  &pfifo_fast_ops, TC_H_ROOT);
 		if (!qdisc) {
-			printk(KERN_INFO "%s: activation failed\n", dev->name);
+			netdev_info(dev, "activation failed\n");
 			return;
 		}
-
-		/* Can by-pass the queue discipline for default qdisc */
-		qdisc->flags |= TCQ_F_CAN_BYPASS;
-	} else {
-		qdisc = &noqueue_qdisc;
 	}
 	dev_queue->qdisc_sleeping = qdisc;
 }
@@ -761,6 +765,7 @@ void dev_activate(struct net_device *dev)
 		dev_watchdog_up(dev);
 	}
 }
+EXPORT_SYMBOL(dev_activate);
 
 static void dev_deactivate_queue(struct net_device *dev,
 				 struct netdev_queue *dev_queue,
@@ -839,7 +844,9 @@ void dev_deactivate(struct net_device *dev)
 
 	list_add(&dev->unreg_list, &single);
 	dev_deactivate_many(&single);
+	list_del(&single);
 }
+EXPORT_SYMBOL(dev_deactivate);
 
 static void dev_init_scheduler_queue(struct net_device *dev,
 				     struct netdev_queue *dev_queue,
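
The added list_del(&single) closes a subtle hole: single is an on-stack list head, and after list_add() the device's unreg_list node points into that stack frame. Deleting the head re-links dev->unreg_list to itself before the frame disappears, so nothing is left dangling. A minimal userspace sketch of the pattern, with hand-rolled stand-ins for the <linux/list.h> primitives:

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void list_add(struct list_head *item, struct list_head *head)
{
	item->next = head->next;
	item->prev = head;
	head->next->prev = item;
	head->next = item;
}

static void list_del(struct list_head *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
}

static struct list_head unreg_list = { &unreg_list, &unreg_list };

static void deactivate(void)
{
	struct list_head single = { &single, &single };

	list_add(&unreg_list, &single);
	/* ... operate on the temporary one-element list ... */
	list_del(&single);	/* unlink the stack head before it dies */
}

int main(void)
{
	deactivate();
	printf("unreg_list self-linked again: %s\n",
	       unreg_list.next == &unreg_list &&
	       unreg_list.prev == &unreg_list ? "yes" : "no");
	return 0;
}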