author     Stephen Hemminger <shemminger@osdl.org>    2005-05-26 15:55:48 -0400
committer  David S. Miller <davem@davemloft.net>      2005-05-26 15:55:48 -0400
commit     0dca51d362b8e4af6b0dbc9e54d1e5165341918a (patch)
tree       ba19c8dc5601362fdd36c1c4f86f6246d9ed6564 /net/sched/sch_netem.c
parent     0f9f32ac65ee4a452a912a8440cebbc4dff73852 (diff)
[PKT_SCHED] netem: allow random reordering (with fix)
Here is a fixed-up version of the reorder feature of netem.
It is the same as the earlier patch, with the bugfix from Julio merged in.
It has the expected backwards-compatibility behaviour.
Go ahead and merge this one; the TCP strangeness I was seeing was due
to the reordering bug and the previous version of the TSO patch.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
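
For readers skimming the enqueue change below: a packet is delayed normally while the configured gap is still being filled, or when a draw against the reorder probability fails; otherwise it is requeued at the head of the inner qdisc with no added delay, so it overtakes the packets already queued. The following userspace sketch is not kernel code; rand() stands in for the kernel's correlated get_crandom(), and the struct and helper names are illustrative. It mirrors that decision, including the q->reorder = ~0 compatibility default that makes a bare gap setting behave like the old every-Nth-packet reordering.

```c
/* Minimal userspace sketch (not kernel code) of the decision this patch
 * adds to netem_enqueue().  rand() stands in for the kernel's correlated
 * get_crandom(); a probability of ~0 (UINT32_MAX) mirrors the
 * q->reorder = ~0 compatibility default used when only "gap" is set.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct netem_sim {
	uint32_t gap;      /* reorder a packet once this many have gone out normally */
	uint32_t reorder;  /* reorder probability, fixed-point over 0..UINT32_MAX */
	uint32_t counter;  /* packets sent normally since the last reorder */
};

/* Returns 1 if the packet should be delayed normally, 0 if it should be
 * pushed to the front of the queue (delivered out of order). */
static int send_normally(struct netem_sim *q)
{
	uint32_t rnd = (uint32_t)rand();	/* uncorrelated stand-in */

	if (q->gap == 0			/* not doing reordering */
	    || q->counter < q->gap	/* inside last reordering gap */
	    || q->reorder < rnd) {	/* probability draw failed */
		++q->counter;
		return 1;
	}
	q->counter = 0;			/* reorder: reset the gap counter */
	return 0;
}

int main(void)
{
	/* gap of 5 with 100% probability reproduces the pre-patch behaviour:
	 * after every 5 normally delayed packets, the next one jumps the queue. */
	struct netem_sim q = { .gap = 5, .reorder = ~0u, .counter = 0 };
	int reordered = 0;

	for (int i = 0; i < 20; i++)
		if (!send_normally(&q))
			reordered++;

	printf("%d of 20 packets reordered\n", reordered);
	return 0;
}
```

Lowering the probability below 100% makes the reordering point random once the gap has been filled, which is the new behaviour this patch enables.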
Diffstat (limited to 'net/sched/sch_netem.c')
-rw-r--r--  net/sched/sch_netem.c | 54
1 file changed, 42 insertions(+), 12 deletions(-)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 48360f7eec5d..bb9bf8d5003c 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -62,11 +62,12 @@ struct netem_sched_data {
 	u32 gap;
 	u32 jitter;
 	u32 duplicate;
+	u32 reorder;
 
 	struct crndstate {
 		unsigned long last;
 		unsigned long rho;
-	} delay_cor, loss_cor, dup_cor;
+	} delay_cor, loss_cor, dup_cor, reorder_cor;
 
 	struct disttable {
 		u32 size;
@@ -180,23 +181,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		q->duplicate = dupsave;
 	}
 
-	/*
-	 * Do re-ordering by putting one out of N packets at the front
-	 * of the queue.
-	 * gap == 0 is special case for no-reordering.
-	 */
-	if (q->gap == 0 || q->counter != q->gap) {
+	if (q->gap == 0			/* not doing reordering */
+	    || q->counter < q->gap	/* inside last reordering gap */
+	    || q->reorder < get_crandom(&q->reorder_cor)) {
 		psched_time_t now;
 		PSCHED_GET_TIME(now);
-		PSCHED_TADD2(now,
-			     tabledist(q->latency, q->jitter, &q->delay_cor, q->delay_dist),
+		PSCHED_TADD2(now, tabledist(q->latency, q->jitter,
+					    &q->delay_cor, q->delay_dist),
 			     cb->time_to_send);
-
 		++q->counter;
 		ret = q->qdisc->enqueue(skb, q->qdisc);
 	} else {
-		q->counter = 0;
+		/*
+		 * Do re-ordering by putting one out of N packets at the front
+		 * of the queue.
+		 */
 		PSCHED_GET_TIME(cb->time_to_send);
+		q->counter = 0;
 		ret = q->qdisc->ops->requeue(skb, q->qdisc);
 	}
 
@@ -351,6 +352,19 @@ static int get_correlation(struct Qdisc *sch, const struct rtattr *attr)
 	return 0;
 }
 
+static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
+{
+	struct netem_sched_data *q = qdisc_priv(sch);
+	const struct tc_netem_reorder *r = RTA_DATA(attr);
+
+	if (RTA_PAYLOAD(attr) != sizeof(*r))
+		return -EINVAL;
+
+	q->reorder = r->probability;
+	init_crandom(&q->reorder_cor, r->correlation);
+	return 0;
+}
+
 static int netem_change(struct Qdisc *sch, struct rtattr *opt)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
@@ -371,9 +385,15 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
 	q->jitter = qopt->jitter;
 	q->limit = qopt->limit;
 	q->gap = qopt->gap;
+	q->counter = 0;
 	q->loss = qopt->loss;
 	q->duplicate = qopt->duplicate;
 
+	/* for compatibility with earlier versions.
+	 * if gap is set, need to assume 100% probability
+	 */
+	q->reorder = ~0;
+
 	/* Handle nested options after initial queue options.
 	 * Should have put all options in nested format but too late now.
 	 */
@@ -395,6 +415,11 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
 			if (ret)
 				return ret;
 		}
+		if (tb[TCA_NETEM_REORDER-1]) {
+			ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
+			if (ret)
+				return ret;
+		}
 	}
 
 
@@ -412,7 +437,6 @@ static int netem_init(struct Qdisc *sch, struct rtattr *opt)
 	init_timer(&q->timer);
 	q->timer.function = netem_watchdog;
 	q->timer.data = (unsigned long) sch;
-	q->counter = 0;
 
 	q->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
 	if (!q->qdisc) {
@@ -444,6 +468,7 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 	struct rtattr *rta = (struct rtattr *) b;
 	struct tc_netem_qopt qopt;
 	struct tc_netem_corr cor;
+	struct tc_netem_reorder reorder;
 
 	qopt.latency = q->latency;
 	qopt.jitter = q->jitter;
@@ -457,6 +482,11 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 	cor.loss_corr = q->loss_cor.rho;
 	cor.dup_corr = q->dup_cor.rho;
 	RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);
+
+	reorder.probability = q->reorder;
+	reorder.correlation = q->reorder_cor.rho;
+	RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);
+
 	rta->rta_len = skb->tail - b;
 
 	return skb->len;
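
As a usage note, with a matching iproute2 a command along the lines of `tc qdisc add dev eth0 root netem delay 10ms reorder 25% 50%` should exercise this path (netem's reorder option takes a probability and an optional correlation, and a delay is needed so that non-reordered packets actually lag behind). The sketch below shows how a userspace tool might fill the nested TCA_NETEM_REORDER attribute that netem_change() now parses; it assumes the tc_netem_reorder layout added to linux/pkt_sched.h alongside this patch, and percent_to_u32() is an illustrative helper, not an existing API.

```c
/* Hedged sketch: filling the new nested TCA_NETEM_REORDER attribute from
 * userspace.  Assumes the tc_netem_reorder layout (probability, correlation)
 * from the companion linux/pkt_sched.h change; percent_to_u32() is an
 * illustrative helper, not part of iproute2.
 */
#include <linux/pkt_sched.h>
#include <math.h>
#include <stdint.h>

static uint32_t percent_to_u32(double percent)
{
	/* netem probabilities are fixed-point over the full u32 range:
	 * 0% -> 0, 100% -> UINT32_MAX (which is why ~0 is the kernel's
	 * 100% compatibility default). */
	return (uint32_t)rint((percent / 100.0) * UINT32_MAX);
}

static void fill_reorder(struct tc_netem_reorder *r,
			 double probability_pct, double correlation_pct)
{
	r->probability = percent_to_u32(probability_pct);
	r->correlation = percent_to_u32(correlation_pct);
}
```

The filled structure would then be appended as a TCA_NETEM_REORDER rtattr after the initial tc_netem_qopt, matching the "nested options after initial queue options" layout the existing code already uses for TCA_NETEM_CORR.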