author	David S. Miller <davem@davemloft.net>	2009-09-06 04:58:51 -0400
committer	David S. Miller <davem@davemloft.net>	2009-09-06 05:07:05 -0400
commit	6ec1c69a8f6492fd25722f4762721921da074c12 (patch)
tree	a78323d1f7f84acbe08c25d7300b935ae4bb7c62 /net/sched
parent	589983cd21f4a2e4ed74a958805a90fa676845c5 (diff)
net_sched: add classful multiqueue dummy scheduler
This patch adds a classful dummy scheduler which can be used as root qdisc
for multiqueue devices and exposes each device queue as a child class.

This allows to address queues individually and graft them similar to regular
classes. Additionally it presents an accumulated view of the statistics of
all real root qdiscs in the dummy root.

Two new callbacks are added to the qdisc_ops and qdisc_class_ops:

- cl_ops->select_queue selects the tx queue number for new child classes.

- qdisc_ops->attach() overrides root qdisc device grafting to attach
  non-shared qdiscs to the queues.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
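The declarations of these two callbacks live in include/net/sch_generic.h and
are therefore outside the net/sched-limited diff below. As a rough sketch only
(reconstructed from the call sites new->ops->attach(new) and
p->ops->cl_ops->select_queue(p, tcm) in this patch, not a verbatim copy of the
header hunk), the additions have this shape:

/* Sketch: assumed shape of the new callback slots, inferred from how
 * this patch invokes them; the real header change is not shown here
 * because the diffstat is limited to net/sched. */
struct Qdisc_class_ops {
	/* ... existing class callbacks (graft, leaf, get, put, walk, ...) ... */

	/* pick the tx queue index a new child qdisc is created on;
	 * called from tc_modify_qdisc() before qdisc_create() */
	unsigned int		(*select_queue)(struct Qdisc *, struct tcmsg *);
};

struct Qdisc_ops {
	/* ... existing fields (id, priv_size, enqueue, dequeue, ...) ... */

	/* graft per-queue child qdiscs when this qdisc becomes the device
	 * root; replaces the default per-txq grafting loop in qdisc_graft() */
	void			(*attach)(struct Qdisc *);
};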
Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/Makefile	  2
-rw-r--r--	net/sched/sch_api.c	 18
-rw-r--r--	net/sched/sch_generic.c	 32
-rw-r--r--	net/sched/sch_mq.c	234
4 files changed, 273 insertions, 13 deletions
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 54d950cd4b8d..f14e71bfa58f 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the Linux Traffic Control Unit.
 #
 
-obj-y	:= sch_generic.o
+obj-y	:= sch_generic.o sch_mq.o
 
 obj-$(CONFIG_NET_SCHED)	+= sch_api.o sch_blackhole.o
 obj-$(CONFIG_NET_CLS)		+= cls_api.o
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index d71f12be6e29..2a78d5410154 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -678,6 +678,11 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 		if (dev->flags & IFF_UP)
 			dev_deactivate(dev);
 
+		if (new && new->ops->attach) {
+			new->ops->attach(new);
+			num_q = 0;
+		}
+
 		for (i = 0; i < num_q; i++) {
 			struct netdev_queue *dev_queue = &dev->rx_queue;
 
@@ -692,7 +697,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 		}
 
 		notify_and_destroy(skb, n, classid, dev->qdisc, new);
-		if (new)
+		if (new && !new->ops->attach)
 			atomic_inc(&new->refcnt);
 		dev->qdisc = new ? : &noop_qdisc;
 
@@ -1095,10 +1100,16 @@ create_n_graft:
 		q = qdisc_create(dev, &dev->rx_queue,
 				 tcm->tcm_parent, tcm->tcm_parent,
 				 tca, &err);
-	else
-		q = qdisc_create(dev, netdev_get_tx_queue(dev, 0),
+	else {
+		unsigned int ntx = 0;
+
+		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
+			ntx = p->ops->cl_ops->select_queue(p, tcm);
+
+		q = qdisc_create(dev, netdev_get_tx_queue(dev, ntx),
 				 tcm->tcm_parent, tcm->tcm_handle,
 				 tca, &err);
+	}
 	if (q == NULL) {
 		if (err == -EAGAIN)
 			goto replay;
@@ -1674,6 +1685,7 @@ static int __init pktsched_init(void)
 {
 	register_qdisc(&pfifo_qdisc_ops);
 	register_qdisc(&bfifo_qdisc_ops);
+	register_qdisc(&mq_qdisc_ops);
 	proc_net_fops_create(&init_net, "psched", 0, &psched_fops);
 
 	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index e7c47ceb0098..4ae6aa562f2b 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -514,7 +514,7 @@ static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
 	return 0;
 }
 
-static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
+struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 	.id		=	"pfifo_fast",
 	.priv_size	=	sizeof(struct pfifo_fast_priv),
 	.enqueue	=	pfifo_fast_enqueue,
@@ -670,6 +670,26 @@ static void attach_one_default_qdisc(struct net_device *dev,
 	dev_queue->qdisc_sleeping = qdisc;
 }
 
+static void attach_default_qdiscs(struct net_device *dev)
+{
+	struct netdev_queue *txq;
+	struct Qdisc *qdisc;
+
+	txq = netdev_get_tx_queue(dev, 0);
+
+	if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
+		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
+		dev->qdisc = txq->qdisc_sleeping;
+		atomic_inc(&dev->qdisc->refcnt);
+	} else {
+		qdisc = qdisc_create_dflt(dev, txq, &mq_qdisc_ops, TC_H_ROOT);
+		if (qdisc) {
+			qdisc->ops->attach(qdisc);
+			dev->qdisc = qdisc;
+		}
+	}
+}
+
 static void transition_one_qdisc(struct net_device *dev,
 				 struct netdev_queue *dev_queue,
 				 void *_need_watchdog)
@@ -689,7 +709,6 @@ static void transition_one_qdisc(struct net_device *dev,
 
 void dev_activate(struct net_device *dev)
 {
-	struct netdev_queue *txq;
 	int need_watchdog;
 
 	/* No queueing discipline is attached to device;
@@ -698,13 +717,8 @@ void dev_activate(struct net_device *dev)
 	   virtual interfaces
 	 */
 
-	if (dev->qdisc == &noop_qdisc) {
-		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
-
-		txq = netdev_get_tx_queue(dev, 0);
-		dev->qdisc = txq->qdisc_sleeping;
-		atomic_inc(&dev->qdisc->refcnt);
-	}
+	if (dev->qdisc == &noop_qdisc)
+		attach_default_qdiscs(dev);
 
 	if (!netif_carrier_ok(dev))
 		/* Delay activation until next carrier-on event */
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
new file mode 100644
index 000000000000..c84dec9c8c7d
--- /dev/null
+++ b/net/sched/sch_mq.c
@@ -0,0 +1,234 @@
+/*
+ * net/sched/sch_mq.c		Classful multiqueue dummy scheduler
+ *
+ * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+
+struct mq_sched {
+	struct Qdisc		**qdiscs;
+};
+
+static void mq_destroy(struct Qdisc *sch)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	struct mq_sched *priv = qdisc_priv(sch);
+	unsigned int ntx;
+
+	if (!priv->qdiscs)
+		return;
+	for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
+		qdisc_destroy(priv->qdiscs[ntx]);
+	kfree(priv->qdiscs);
+}
+
+static int mq_init(struct Qdisc *sch, struct nlattr *opt)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	struct mq_sched *priv = qdisc_priv(sch);
+	struct netdev_queue *dev_queue;
+	struct Qdisc *qdisc;
+	unsigned int ntx;
+
+	if (sch->parent != TC_H_ROOT)
+		return -EOPNOTSUPP;
+
+	if (!netif_is_multiqueue(dev))
+		return -EOPNOTSUPP;
+
+	/* pre-allocate qdiscs, attachment can't fail */
+	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
+			       GFP_KERNEL);
+	if (priv->qdiscs == NULL)
+		return -ENOMEM;
+
+	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+		dev_queue = netdev_get_tx_queue(dev, ntx);
+		qdisc = qdisc_create_dflt(dev, dev_queue, &pfifo_fast_ops,
+					  TC_H_MAKE(TC_H_MAJ(sch->handle),
+						    TC_H_MIN(ntx + 1)));
+		if (qdisc == NULL)
+			goto err;
+		qdisc->flags |= TCQ_F_CAN_BYPASS;
+		priv->qdiscs[ntx] = qdisc;
+	}
+
+	return 0;
+
+err:
+	mq_destroy(sch);
+	return -ENOMEM;
+}
+
+static void mq_attach(struct Qdisc *sch)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	struct mq_sched *priv = qdisc_priv(sch);
+	struct Qdisc *qdisc;
+	unsigned int ntx;
+
+	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+		qdisc = priv->qdiscs[ntx];
+		qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
+		if (qdisc)
+			qdisc_destroy(qdisc);
+	}
+	kfree(priv->qdiscs);
+	priv->qdiscs = NULL;
+}
+
+static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	struct Qdisc *qdisc;
+	unsigned int ntx;
+
+	sch->q.qlen = 0;
+	memset(&sch->bstats, 0, sizeof(sch->bstats));
+	memset(&sch->qstats, 0, sizeof(sch->qstats));
+
+	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
+		spin_lock_bh(qdisc_lock(qdisc));
+		sch->q.qlen		+= qdisc->q.qlen;
+		sch->bstats.bytes	+= qdisc->bstats.bytes;
+		sch->bstats.packets	+= qdisc->bstats.packets;
+		sch->qstats.qlen	+= qdisc->qstats.qlen;
+		sch->qstats.backlog	+= qdisc->qstats.backlog;
+		sch->qstats.drops	+= qdisc->qstats.drops;
+		sch->qstats.requeues	+= qdisc->qstats.requeues;
+		sch->qstats.overlimits	+= qdisc->qstats.overlimits;
+		spin_unlock_bh(qdisc_lock(qdisc));
+	}
+	return 0;
+}
+
+static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	unsigned long ntx = cl - 1;
+
+	if (ntx >= dev->num_tx_queues)
+		return NULL;
+	return netdev_get_tx_queue(dev, ntx);
+}
+
+static unsigned int mq_select_queue(struct Qdisc *sch, struct tcmsg *tcm)
+{
+	unsigned int ntx = TC_H_MIN(tcm->tcm_parent);
+
+	if (!mq_queue_get(sch, ntx))
+		return 0;
+	return ntx - 1;
+}
+
+static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
+		    struct Qdisc **old)
+{
+	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
+	struct net_device *dev = qdisc_dev(sch);
+
+	if (dev->flags & IFF_UP)
+		dev_deactivate(dev);
+
+	*old = dev_graft_qdisc(dev_queue, new);
+
+	if (dev->flags & IFF_UP)
+		dev_activate(dev);
+	return 0;
+}
+
+static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
+{
+	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
+
+	return dev_queue->qdisc_sleeping;
+}
+
+static unsigned long mq_get(struct Qdisc *sch, u32 classid)
+{
+	unsigned int ntx = TC_H_MIN(classid);
+
+	if (!mq_queue_get(sch, ntx))
+		return 0;
+	return ntx;
+}
+
+static void mq_put(struct Qdisc *sch, unsigned long cl)
+{
+	return;
+}
+
+static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
+			 struct sk_buff *skb, struct tcmsg *tcm)
+{
+	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
+
+	tcm->tcm_parent = TC_H_ROOT;
+	tcm->tcm_handle |= TC_H_MIN(cl);
+	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
+	return 0;
+}
+
+static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+			       struct gnet_dump *d)
+{
+	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
+
+	sch = dev_queue->qdisc_sleeping;
+	if (gnet_stats_copy_basic(d, &sch->bstats) < 0 ||
+	    gnet_stats_copy_queue(d, &sch->qstats) < 0)
+		return -1;
+	return 0;
+}
+
+static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	unsigned int ntx;
+
+	if (arg->stop)
+		return;
+
+	arg->count = arg->skip;
+	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
+		if (arg->fn(sch, ntx + 1, arg) < 0) {
+			arg->stop = 1;
+			break;
+		}
+		arg->count++;
+	}
+}
+
+static const struct Qdisc_class_ops mq_class_ops = {
+	.select_queue	= mq_select_queue,
+	.graft		= mq_graft,
+	.leaf		= mq_leaf,
+	.get		= mq_get,
+	.put		= mq_put,
+	.walk		= mq_walk,
+	.dump		= mq_dump_class,
+	.dump_stats	= mq_dump_class_stats,
+};
+
+struct Qdisc_ops mq_qdisc_ops __read_mostly = {
+	.cl_ops		= &mq_class_ops,
+	.id		= "mq",
+	.priv_size	= sizeof(struct mq_sched),
+	.init		= mq_init,
+	.destroy	= mq_destroy,
+	.attach		= mq_attach,
+	.dump		= mq_dump,
+	.owner		= THIS_MODULE,
+};
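The class numbering used above is worth spelling out: mq_init() creates one
pfifo_fast child per tx queue with class minor ntx + 1, and mq_queue_get()
maps a class back to its queue with cl - 1, so class <handle>:N always refers
to tx queue N-1. A small standalone sketch illustrates the mapping; the TC_H_*
macros are redefined locally so it builds without kernel headers, and the
8001: handle and queue count are arbitrary example values:

/* Standalone sketch: how mq's class handles map to tx queue indices.
 * Mirrors TC_H_MAKE(TC_H_MAJ(sch->handle), TC_H_MIN(ntx + 1)) in
 * mq_init() and the "cl - 1" lookup in mq_queue_get(). */
#include <stdio.h>

#define TC_H_MAJ_MASK	0xFFFF0000U
#define TC_H_MIN_MASK	0x0000FFFFU
#define TC_H_MAJ(h)	((h) & TC_H_MAJ_MASK)
#define TC_H_MIN(h)	((h) & TC_H_MIN_MASK)
#define TC_H_MAKE(maj, min) (((maj) & TC_H_MAJ_MASK) | ((min) & TC_H_MIN_MASK))

int main(void)
{
	unsigned int handle = 0x80010000;	/* arbitrary example: root handle 8001: */
	unsigned int num_tx_queues = 4;		/* assumed queue count for the demo */
	unsigned int ntx;

	for (ntx = 0; ntx < num_tx_queues; ntx++) {
		unsigned int classid = TC_H_MAKE(TC_H_MAJ(handle),
						 TC_H_MIN(ntx + 1));

		/* mq_queue_get() does the inverse: queue = TC_H_MIN(classid) - 1 */
		printf("tx queue %u <-> class %x:%x\n",
		       ntx, TC_H_MAJ(classid) >> 16, TC_H_MIN(classid));
	}
	return 0;
}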