author	David S. Miller <davem@davemloft.net>	2008-07-15 05:52:19 -0400
committer	David S. Miller <davem@davemloft.net>	2008-07-17 22:21:04 -0400
commit	1d8ae3fdeb001b8f534a6782c261aba6ec1779f5 (patch)
tree	5c488f59f674ba2c42755a6c34f69ac80824f213 /net/sched
parent	09e83b5d7d1878065e2453239b49b684cd0fe4e5 (diff)
pkt_sched: Remove RR scheduler.
This actually fixes a bug added by the RR scheduler changes. The ->bands and ->prio2band parameters were being set outside of the sch_tree_lock() and thus could result in strange behavior and inconsistencies.

It might be possible, in the new design (where there will be one qdisc per device TX queue), to allow similar functionality via a TX hash algorithm for RR, but I really see no reason to export this aspect of how these multiqueue cards actually implement the scheduling of the individual DMA TX rings and the single physical MAC/PHY port.

Signed-off-by: David S. Miller <davem@davemloft.net>
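The pattern the fix restores is validate-then-commit: check the whole configuration first, then publish every field inside one critical section, so a concurrent reader can never observe a bands/prio2band pair that disagrees. A minimal userspace sketch of that pattern (struct prio_cfg and the pthread mutex are illustrative stand-ins for the qdisc private data and sch_tree_lock(); this is not kernel code):

#include <pthread.h>
#include <string.h>

#define TC_PRIO_MAX	15
#define TCQ_PRIO_BANDS	16

struct prio_cfg {
	pthread_mutex_t lock;	/* stand-in for sch_tree_lock() */
	int bands;
	unsigned char prio2band[TC_PRIO_MAX + 1];
};

/* Reject bad input before touching shared state, then commit both
 * fields under one lock so readers never see a half-applied update. */
static int prio_cfg_change(struct prio_cfg *q, int bands,
			   const unsigned char *priomap)
{
	int i;

	if (bands < 2 || bands > TCQ_PRIO_BANDS)
		return -1;	/* the kernel would return -EINVAL */
	for (i = 0; i <= TC_PRIO_MAX; i++)
		if (priomap[i] >= bands)
			return -1;

	pthread_mutex_lock(&q->lock);
	q->bands = bands;
	memcpy(q->prio2band, priomap, TC_PRIO_MAX + 1);
	pthread_mutex_unlock(&q->lock);
	return 0;
}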
Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/sch_prio.c	136
1 file changed, 16 insertions(+), 120 deletions(-)
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 39157f7bc046..536ca474dc69 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -24,11 +24,9 @@
 struct prio_sched_data
 {
 	int bands;
-	int curband; /* for round-robin */
 	struct tcf_proto *filter_list;
 	u8  prio2band[TC_PRIO_MAX+1];
 	struct Qdisc *queues[TCQ_PRIO_BANDS];
-	int mq;
 };


@@ -55,17 +53,14 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		if (!q->filter_list || err < 0) {
 			if (TC_H_MAJ(band))
 				band = 0;
-			band = q->prio2band[band&TC_PRIO_MAX];
-			goto out;
+			return q->queues[q->prio2band[band&TC_PRIO_MAX]];
 		}
 		band = res.classid;
 	}
 	band = TC_H_MIN(band) - 1;
 	if (band >= q->bands)
-		band = q->prio2band[0];
-out:
-	if (q->mq)
-		skb_set_queue_mapping(skb, band);
+		return q->queues[q->prio2band[0]];
+
 	return q->queues[band];
 }

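After this hunk, classification returns the target queue directly: an unresolved filter chain maps skb->priority through prio2band[], while a resolved classid selects the band by its minor number, falling back to prio2band[0] when the band is out of range. A self-contained sketch of just the band arithmetic (pick_band() and its parameters are hypothetical; the masks mirror TC_PRIO_MAX and TC_H_MIN above):

#define TC_PRIO_MAX	15

/* Returns the band index whose qdisc prio_classify() would pick.
 * 'classified' stands in for the !q->filter_list || err < 0 test. */
static int pick_band(int bands, const unsigned char *prio2band,
		     unsigned int priority, unsigned int classid,
		     int classified)
{
	unsigned int band;

	if (!classified)
		return prio2band[priority & TC_PRIO_MAX];

	band = (classid & 0xffff) - 1;		/* TC_H_MIN(classid) - 1 */
	if (band >= (unsigned int)bands)	/* minor 0 wraps and is caught too */
		return prio2band[0];
	return band;
}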
@@ -123,68 +118,23 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
 }


-static struct sk_buff *
-prio_dequeue(struct Qdisc* sch)
+static struct sk_buff *prio_dequeue(struct Qdisc* sch)
 {
-	struct sk_buff *skb;
 	struct prio_sched_data *q = qdisc_priv(sch);
 	int prio;
-	struct Qdisc *qdisc;

 	for (prio = 0; prio < q->bands; prio++) {
-		/* Check if the target subqueue is available before
-		 * pulling an skb.  This way we avoid excessive requeues
-		 * for slower queues.
-		 */
-		if (!__netif_subqueue_stopped(qdisc_dev(sch),
-					      (q->mq ? prio : 0))) {
-			qdisc = q->queues[prio];
-			skb = qdisc->dequeue(qdisc);
-			if (skb) {
-				sch->q.qlen--;
-				return skb;
-			}
+		struct Qdisc *qdisc = q->queues[prio];
+		struct sk_buff *skb = qdisc->dequeue(qdisc);
+		if (skb) {
+			sch->q.qlen--;
+			return skb;
 		}
 	}
 	return NULL;

 }

-static struct sk_buff *rr_dequeue(struct Qdisc* sch)
-{
-	struct sk_buff *skb;
-	struct prio_sched_data *q = qdisc_priv(sch);
-	struct Qdisc *qdisc;
-	int bandcount;
-
-	/* Only take one pass through the queues.  If nothing is available,
-	 * return nothing.
-	 */
-	for (bandcount = 0; bandcount < q->bands; bandcount++) {
-		/* Check if the target subqueue is available before
-		 * pulling an skb.  This way we avoid excessive requeues
-		 * for slower queues.  If the queue is stopped, try the
-		 * next queue.
-		 */
-		if (!__netif_subqueue_stopped(qdisc_dev(sch),
-					      (q->mq ? q->curband : 0))) {
-			qdisc = q->queues[q->curband];
-			skb = qdisc->dequeue(qdisc);
-			if (skb) {
-				sch->q.qlen--;
-				q->curband++;
-				if (q->curband >= q->bands)
-					q->curband = 0;
-				return skb;
-			}
-		}
-		q->curband++;
-		if (q->curband >= q->bands)
-			q->curband = 0;
-	}
-	return NULL;
-}
-
 static unsigned int prio_drop(struct Qdisc* sch)
 {
 	struct prio_sched_data *q = qdisc_priv(sch);
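What this hunk removes is the scheduling policy itself: prio_dequeue() restarts its scan at band 0 on every call (strict priority, so band 0 can starve the rest), while the deleted rr_dequeue() resumed one past the band it last served. A standalone comparison over plain int queues (illustrative types, not kernel code; a packet is a non-negative int, -1 means empty):

struct band {
	int *pkts;
	int head, tail;	/* pkts[head..tail) are queued */
};

static int band_pop(struct band *b)
{
	return (b->head == b->tail) ? -1 : b->pkts[b->head++];
}

/* Strict priority, as sch_prio keeps it: always scan from band 0. */
static int strict_dequeue(struct band *q, int bands)
{
	for (int i = 0; i < bands; i++) {
		int pkt = band_pop(&q[i]);
		if (pkt >= 0)
			return pkt;
	}
	return -1;
}

/* Round robin, as the removed "rr" qdisc did it: one pass starting
 * at curband, advancing past whichever band we serve or skip. */
static int rr_dequeue(struct band *q, int bands, int *curband)
{
	for (int n = 0; n < bands; n++) {
		int pkt = band_pop(&q[*curband]);
		*curband = (*curband + 1) % bands;
		if (pkt >= 0)
			return pkt;
	}
	return -1;
}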
@@ -229,45 +179,22 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
 {
 	struct prio_sched_data *q = qdisc_priv(sch);
 	struct tc_prio_qopt *qopt;
-	struct nlattr *tb[TCA_PRIO_MAX + 1];
-	int err;
 	int i;

-	err = nla_parse_nested_compat(tb, TCA_PRIO_MAX, opt, NULL, qopt,
-				      sizeof(*qopt));
-	if (err < 0)
-		return err;
-
-	q->bands = qopt->bands;
-	/* If we're multiqueue, make sure the number of incoming bands
-	 * matches the number of queues on the device we're associating with.
-	 * If the number of bands requested is zero, then set q->bands to
-	 * dev->egress_subqueue_count.  Also, the root qdisc must be the
-	 * only one that is enabled for multiqueue, since it's the only one
-	 * that interacts with the underlying device.
-	 */
-	q->mq = nla_get_flag(tb[TCA_PRIO_MQ]);
-	if (q->mq) {
-		if (sch->parent != TC_H_ROOT)
-			return -EINVAL;
-		if (netif_is_multiqueue(qdisc_dev(sch))) {
-			if (q->bands == 0)
-				q->bands = qdisc_dev(sch)->egress_subqueue_count;
-			else if (q->bands != qdisc_dev(sch)->egress_subqueue_count)
-				return -EINVAL;
-		} else
-			return -EOPNOTSUPP;
-	}
+	if (nla_len(opt) < sizeof(*qopt))
+		return -EINVAL;
+	qopt = nla_data(opt);

-	if (q->bands > TCQ_PRIO_BANDS || q->bands < 2)
+	if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
 		return -EINVAL;

 	for (i=0; i<=TC_PRIO_MAX; i++) {
-		if (qopt->priomap[i] >= q->bands)
+		if (qopt->priomap[i] >= qopt->bands)
 			return -EINVAL;
 	}

 	sch_tree_lock(sch);
+	q->bands = qopt->bands;
 	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);

 	for (i=q->bands; i<TCQ_PRIO_BANDS; i++) {
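With the TCA_PRIO_MQ attribute gone, the only configuration prio accepts again is the plain struct tc_prio_qopt: a band count in [2, TCQ_PRIO_BANDS] plus a 16-entry priomap whose entries must each be below the band count. A typical invocation (device name illustrative) would be:

	tc qdisc add dev eth0 root handle 1: prio bands 3 \
		priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1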
@@ -333,10 +260,6 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
 	nest = nla_nest_compat_start(skb, TCA_OPTIONS, sizeof(opt), &opt);
 	if (nest == NULL)
 		goto nla_put_failure;
-	if (q->mq) {
-		if (nla_put_flag(skb, TCA_PRIO_MQ) < 0)
-			goto nla_put_failure;
-	}
 	nla_nest_compat_end(skb, nest);

 	return skb->len;
@@ -509,44 +432,17 @@ static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
 	.owner		=	THIS_MODULE,
 };

-static struct Qdisc_ops rr_qdisc_ops __read_mostly = {
-	.next		=	NULL,
-	.cl_ops		=	&prio_class_ops,
-	.id		=	"rr",
-	.priv_size	=	sizeof(struct prio_sched_data),
-	.enqueue	=	prio_enqueue,
-	.dequeue	=	rr_dequeue,
-	.requeue	=	prio_requeue,
-	.drop		=	prio_drop,
-	.init		=	prio_init,
-	.reset		=	prio_reset,
-	.destroy	=	prio_destroy,
-	.change		=	prio_tune,
-	.dump		=	prio_dump,
-	.owner		=	THIS_MODULE,
-};
-
 static int __init prio_module_init(void)
 {
-	int err;
-
-	err = register_qdisc(&prio_qdisc_ops);
-	if (err < 0)
-		return err;
-	err = register_qdisc(&rr_qdisc_ops);
-	if (err < 0)
-		unregister_qdisc(&prio_qdisc_ops);
-	return err;
+	return register_qdisc(&prio_qdisc_ops);
 }

 static void __exit prio_module_exit(void)
 {
 	unregister_qdisc(&prio_qdisc_ops);
-	unregister_qdisc(&rr_qdisc_ops);
 }

 module_init(prio_module_init)
 module_exit(prio_module_exit)

 MODULE_LICENSE("GPL");
-MODULE_ALIAS("sch_rr");