Diffstat (limited to 'net/sched/sch_drr.c')
 -rw-r--r--   net/sched/sch_drr.c   519
 1 file changed, 519 insertions, 0 deletions
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
new file mode 100644
index 000000000000..f6b4fa97df70
--- /dev/null
+++ b/net/sched/sch_drr.c
@@ -0,0 +1,519 @@
/*
 * net/sched/sch_drr.c         Deficit Round Robin scheduler
 *
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

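/*
 * Per-class state: each class owns a child qdisc and carries its DRR
 * quantum (bytes credited per round) plus the current deficit counter.
 */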
struct drr_class {
	struct Qdisc_class_common common;
	unsigned int refcnt;
	unsigned int filter_cnt;

	struct gnet_stats_basic bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	struct list_head alist;
	struct Qdisc *qdisc;

	u32 quantum;
	u32 deficit;
};

struct drr_sched {
	struct list_head active;
	struct tcf_proto *filter_list;
	struct Qdisc_class_hash clhash;
};

static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct drr_class, common);
}

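/* Flush a class's child qdisc and propagate the queue-length change upward. */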
static void drr_purge_queue(struct drr_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	qdisc_tree_decrease_qlen(cl->qdisc, len);
}

static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
	[TCA_DRR_QUANTUM] = { .type = NLA_U32 },
};

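/*
 * Create a new class or update an existing one.  When no TCA_DRR_QUANTUM
 * attribute is supplied, the quantum defaults to the device MTU via
 * psched_mtu(), i.e. one full-sized packet per round.
 */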
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)*arg;
	struct nlattr *tb[TCA_DRR_MAX + 1];
	u32 quantum;
	int err;

	err = nla_parse_nested(tb, TCA_DRR_MAX, tca[TCA_OPTIONS], drr_policy);
	if (err < 0)
		return err;

	if (tb[TCA_DRR_QUANTUM]) {
		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
		if (quantum == 0)
			return -EINVAL;
	} else
		quantum = psched_mtu(qdisc_dev(sch));

	if (cl != NULL) {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		if (tb[TCA_DRR_QUANTUM])
			cl->quantum = quantum;
		sch_tree_unlock(sch);

		return 0;
	}

	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	cl->refcnt = 1;
	cl->common.classid = classid;
	cl->quantum = quantum;
	cl->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;

	if (tca[TCA_RATE]) {
		err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
					    qdisc_root_sleeping_lock(sch),
					    tca[TCA_RATE]);
		if (err) {
			qdisc_destroy(cl->qdisc);
			kfree(cl);
			return err;
		}
	}

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	qdisc_destroy(cl->qdisc);
	kfree(cl);
}

static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	drr_purge_queue(cl);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	if (--cl->refcnt == 0)
		drr_destroy_class(sch, cl);

	sch_tree_unlock(sch);
	return 0;
}

static unsigned long drr_get_class(struct Qdisc *sch, u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}

static void drr_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (--cl->refcnt == 0)
		drr_destroy_class(sch, cl);
}

static struct tcf_proto **drr_tcf_chain(struct Qdisc *sch, unsigned long cl)
{
	struct drr_sched *q = qdisc_priv(sch);

	if (cl)
		return NULL;

	return &q->filter_list;
}

static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl != NULL)
		cl->filter_cnt++;

	return (unsigned long)cl;
}

static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	cl->filter_cnt--;
}

static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	drr_purge_queue(cl);
	*old = cl->qdisc;
	cl->qdisc = new;
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	return cl->qdisc;
}

static void drr_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->qdisc->q.qlen == 0)
		list_del(&cl->alist);
}

static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	NLA_PUT_U32(skb, TCA_DRR_QUANTUM, cl->quantum);
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct tc_drr_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	if (cl->qdisc->q.qlen)
		xstats.deficit = cl->deficit;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *n;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

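/*
 * Select the class for an skb: try a direct classid match on skb->priority
 * first, then fall back to the attached classifier chain.
 */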
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		cl = drr_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
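			/* fall through */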
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct drr_class *)res.class;
		if (cl == NULL)
			cl = drr_find_class(sch, res.classid);
		return cl;
	}
	return NULL;
}

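/*
 * Enqueue into the class's child qdisc.  A class becoming backlogged is
 * appended to the active list with its deficit reset to one quantum.
 */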
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int len;
	int err;

	cl = drr_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}

	len = qdisc_pkt_len(skb);
	err = qdisc_enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			sch->qstats.drops++;
		}
		return err;
	}

	if (cl->qdisc->q.qlen == 1) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	cl->bstats.packets++;
	cl->bstats.bytes += len;
	sch->bstats.packets++;
	sch->bstats.bytes += len;

	sch->q.qlen++;
	return err;
}

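/*
 * Deficit Round Robin: serve the class at the head of the active list as
 * long as its deficit covers the head packet; otherwise grant it another
 * quantum and rotate it to the tail.  Classes whose child queue drains are
 * removed from the active list.
 */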
static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct sk_buff *skb;
	unsigned int len;

	if (list_empty(&q->active))
		goto out;
	while (1) {
		cl = list_first_entry(&q->active, struct drr_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (skb == NULL)
			goto out;

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);
			sch->q.qlen--;
			return skb;
		}

		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}

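/*
 * Drop a packet from the first active class whose child qdisc supports
 * ->drop and return the dropped packet's length.
 */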
static unsigned int drr_drop(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->active, alist) {
		if (cl->qdisc->ops->drop) {
			len = cl->qdisc->ops->drop(cl->qdisc);
			if (len > 0) {
				sch->q.qlen--;
				if (cl->qdisc->q.qlen == 0)
					list_del(&cl->alist);
				return len;
			}
		}
	}
	return 0;
}

static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct drr_sched *q = qdisc_priv(sch);
	int err;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	INIT_LIST_HEAD(&q->active);
	return 0;
}

static void drr_reset_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *n;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen)
				list_del(&cl->alist);
			qdisc_reset(cl->qdisc);
		}
	}
	sch->q.qlen = 0;
}

static void drr_destroy_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *n, *next;
	unsigned int i;

	tcf_destroy_chain(&q->filter_list);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
					  common.hnode)
			drr_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static const struct Qdisc_class_ops drr_class_ops = {
	.change		= drr_change_class,
	.delete		= drr_delete_class,
	.get		= drr_get_class,
	.put		= drr_put_class,
	.tcf_chain	= drr_tcf_chain,
	.bind_tcf	= drr_bind_tcf,
	.unbind_tcf	= drr_unbind_tcf,
	.graft		= drr_graft_class,
	.leaf		= drr_class_leaf,
	.qlen_notify	= drr_qlen_notify,
	.dump		= drr_dump_class,
	.dump_stats	= drr_dump_class_stats,
	.walk		= drr_walk,
};

static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
	.cl_ops		= &drr_class_ops,
	.id		= "drr",
	.priv_size	= sizeof(struct drr_sched),
	.enqueue	= drr_enqueue,
	.dequeue	= drr_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= drr_drop,
	.init		= drr_init_qdisc,
	.reset		= drr_reset_qdisc,
	.destroy	= drr_destroy_qdisc,
	.owner		= THIS_MODULE,
};

static int __init drr_init(void)
{
	return register_qdisc(&drr_qdisc_ops);
}

static void __exit drr_exit(void)
{
	unregister_qdisc(&drr_qdisc_ops);
}

module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");