author    | Stephen Hemminger <shemminger@osdl.org> | 2005-10-30 16:47:34 -0500
committer | Arnaldo Carvalho de Melo <acme@mandriva.com> | 2005-11-05 17:56:41 -0500
commit    | 300ce174ebc2fcf2b5111a50fa42f79d891927dd (patch)
tree      | ea7ac40eac2de90be9e5575759bab18029ae2fdf /net/sched
parent    | 07aaa11540828f4482c09e1a936a1f63cdb9fc9d (diff)
[NETEM]: Support time based reordering
Change netem to support packets getting reordered because of variations in
delay. Introduce a special-case version of FIFO that queues packets in order
based on the netem delay.

Since netem is classful, users who don't want jitter-based reordering can
simply insert a pfifo instead of the default.

This required changes to the generic skbuff code to allow finer-grained
manipulation of sk_buff_head: insertion into the middle of a queue and
walking it in reverse (sketched below).
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
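
The "insertion into the middle and reverse walk" mentioned above come from the
sk_buff_head helpers this series relies on, __skb_queue_after() and
skb_queue_reverse_walk(). Here is a minimal sketch of how they combine,
condensed from tfifo_enqueue() in the diff below; the wrapper name
queue_in_send_order() and the explicit `when` argument are illustrative only:

```c
/*
 * Sketch only (not part of this patch, assumes the usual sch_netem.c
 * includes): keep an sk_buff_head ordered by a per-skb send time.
 * Walk the queue backwards from the tail until a packet with a strictly
 * earlier timestamp is found, then splice the new skb in right after it.
 */
static void queue_in_send_order(struct sk_buff_head *list,
				struct sk_buff *nskb, psched_time_t when)
{
	struct sk_buff *skb;

	skb_queue_reverse_walk(list, skb) {
		const struct netem_skb_cb *cb =
			(const struct netem_skb_cb *)skb->cb;

		if (PSCHED_TLESS(cb->time_to_send, when))
			break;
	}
	/*
	 * If no earlier packet exists, the walk terminates with skb
	 * pointing at the list head itself, so __skb_queue_after()
	 * places the new skb at the front of the queue.
	 */
	__skb_queue_after(list, skb, nskb);
}
```

Note that the comparison is strict, so a packet whose send time equals one
already queued is inserted ahead of it.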
Diffstat (limited to 'net/sched')
-rw-r--r-- | net/sched/sch_netem.c | 85
1 file changed, 84 insertions(+), 1 deletion(-)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index d871fe7f81a9..7c10ef3457d7 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -300,11 +300,16 @@ static void netem_reset(struct Qdisc *sch)
 	del_timer_sync(&q->timer);
 }
 
+/* Pass size change message down to embedded FIFO */
 static int set_fifo_limit(struct Qdisc *q, int limit)
 {
 	struct rtattr *rta;
 	int ret = -ENOMEM;
 
+	/* Hack to avoid sending change message to non-FIFO */
+	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
+		return 0;
+
 	rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
 	if (rta) {
 		rta->rta_type = RTM_NEWQDISC;
@@ -436,6 +441,84 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
 	return 0;
 }
 
+/*
+ * Special case version of FIFO queue for use by netem.
+ * It queues in order based on timestamps in skb's
+ */
+struct fifo_sched_data {
+	u32 limit;
+};
+
+static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+{
+	struct fifo_sched_data *q = qdisc_priv(sch);
+	struct sk_buff_head *list = &sch->q;
+	const struct netem_skb_cb *ncb
+		= (const struct netem_skb_cb *)nskb->cb;
+	struct sk_buff *skb;
+
+	if (likely(skb_queue_len(list) < q->limit)) {
+		skb_queue_reverse_walk(list, skb) {
+			const struct netem_skb_cb *cb
+				= (const struct netem_skb_cb *)skb->cb;
+
+			if (PSCHED_TLESS(cb->time_to_send, ncb->time_to_send))
+				break;
+		}
+
+		__skb_queue_after(list, skb, nskb);
+
+		sch->qstats.backlog += nskb->len;
+		sch->bstats.bytes += nskb->len;
+		sch->bstats.packets++;
+
+		return NET_XMIT_SUCCESS;
+	}
+
+	return qdisc_drop(nskb, sch);
+}
+
+static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
+{
+	struct fifo_sched_data *q = qdisc_priv(sch);
+
+	if (opt) {
+		struct tc_fifo_qopt *ctl = RTA_DATA(opt);
+		if (RTA_PAYLOAD(opt) < sizeof(*ctl))
+			return -EINVAL;
+
+		q->limit = ctl->limit;
+	} else
+		q->limit = max_t(u32, sch->dev->tx_queue_len, 1);
+
+	return 0;
+}
+
+static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+	struct fifo_sched_data *q = qdisc_priv(sch);
+	struct tc_fifo_qopt opt = { .limit = q->limit };
+
+	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+	return skb->len;
+
+rtattr_failure:
+	return -1;
+}
+
+static struct Qdisc_ops tfifo_qdisc_ops = {
+	.id		= "tfifo",
+	.priv_size	= sizeof(struct fifo_sched_data),
+	.enqueue	= tfifo_enqueue,
+	.dequeue	= qdisc_dequeue_head,
+	.requeue	= qdisc_requeue,
+	.drop		= qdisc_queue_drop,
+	.init		= tfifo_init,
+	.reset		= qdisc_reset_queue,
+	.change		= tfifo_init,
+	.dump		= tfifo_dump,
+};
+
 static int netem_init(struct Qdisc *sch, struct rtattr *opt)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
@@ -448,7 +531,7 @@ static int netem_init(struct Qdisc *sch, struct rtattr *opt)
 	q->timer.function = netem_watchdog;
 	q->timer.data = (unsigned long) sch;
 
-	q->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops);
 	if (!q->qdisc) {
 		pr_debug("netem: qdisc create failed\n");
 		return -ENOMEM;
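
To see the resulting ordering without building a kernel, here is a
self-contained userspace analogue of the tfifo insert. It is an illustrative
demo only: struct pkt, struct pkt_queue, and tfifo_insert() are hypothetical
stand-ins for sk_buff, sk_buff_head, and tfifo_enqueue(), reproducing the
reverse walk with a strict earlier-than comparison.

```c
#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in for an skb carrying netem's time_to_send in its cb. */
struct pkt {
	long time_to_send;
	struct pkt *prev, *next;
};

/* Circular doubly-linked list with a sentinel, like sk_buff_head. */
struct pkt_queue {
	struct pkt head;
};

static void queue_init(struct pkt_queue *q)
{
	q->head.prev = q->head.next = &q->head;
}

/* Mirrors __skb_queue_after(): link nskb right behind prev. */
static void queue_after(struct pkt *prev, struct pkt *nskb)
{
	nskb->prev = prev;
	nskb->next = prev->next;
	prev->next->prev = nskb;
	prev->next = nskb;
}

/* Mirrors tfifo_enqueue(): reverse walk, strict "<", insert after. */
static void tfifo_insert(struct pkt_queue *q, struct pkt *nskb)
{
	struct pkt *p;

	for (p = q->head.prev; p != &q->head; p = p->prev)
		if (p->time_to_send < nskb->time_to_send)
			break;
	/* p is the sentinel when nskb has the earliest send time. */
	queue_after(p, nskb);
}

int main(void)
{
	long times[] = { 30, 10, 20, 20, 40 };
	struct pkt_queue q;
	struct pkt *p;
	size_t i;

	queue_init(&q);
	for (i = 0; i < sizeof(times) / sizeof(times[0]); i++) {
		struct pkt *n = calloc(1, sizeof(*n));
		if (!n)
			return 1;
		n->time_to_send = times[i];
		tfifo_insert(&q, n);
	}

	for (p = q.head.next; p != &q.head; p = p->next)
		printf("%ld ", p->time_to_send);
	printf("\n");	/* prints: 10 20 20 30 40 */
	return 0;
}
```

Compiled with any C compiler, this prints the queue sorted by send time
(10 20 20 30 40) regardless of arrival order; as with the patch's strict
PSCHED_TLESS() comparison, a later arrival that ties an existing send time
is placed ahead of it.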