author	Hagen Paul Pfeifer <hagen@jauu.net>	2011-11-30 07:20:26 -0500
committer	David S. Miller <davem@drr.davemloft.net>	2011-11-30 23:18:35 -0500
commit	7bc0f28c7a0cd19f40e5a6e4d0a117db9a4e4cd5 (patch)
tree	4cdbf530b8ed94f73907327dd21f8303085cfc09 /net/sched/sch_netem.c
parent	99d2f47aa9d3ad40daa6ee0770e91b95b71082f0 (diff)
netem: rate extension
Currently netem is not able to emulate channel bandwidth: only a static
delay (and optional random jitter) can be configured.
To emulate the channel rate, the token bucket filter (sch_tbf) can be used,
but TBF has some major emulation flaws. The buffer (token bucket depth/rate)
cannot be 0. Also, the idea behind TBF is that the credit (tokens in the
bucket) accumulates while no packet is transmitted, so that there is always a
"positive" credit for new packets. In real life this behavior contradicts the
laws of nature: nothing can travel faster than the speed of light. E.g. on an
emulated 1000 byte/s link, a small IPv4/TCP SYN packet of ~50 bytes requires
~0.05 seconds - not 0 seconds.
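For illustration, the serialization delay is simply packet length divided by
link rate - the same arithmetic that packet_len_2_sched_time() below performs
in nanoseconds:

    50 byte / 1000 byte/s = 0.05 s on the wire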
Netem is an excellent place to implement a rate-limiting feature: static
delay is already implemented, tfifo already carries time information, and the
user can skip the TBF configuration completely.
This patch implements a rate feature which can be configured via tc, e.g.:
tc qdisc add dev eth0 root netem rate 10kbit
To emulate a 5000 byte/s link with an additional static delay of 10ms:
tc qdisc add dev eth0 root netem delay 10ms rate 5KBps
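The active configuration can then be inspected with standard tc tooling, for
example:

tc qdisc show dev eth0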
Note: similar to TBF, the rate extension is bound to the kernel timing
system. Depending on the architecture's timer granularity, higher rates (e.g.
10mbit/s and above) tend to produce transmission bursts. Also note that
further queues live in the network adaptor; see ethtool(8).
Signed-off-by: Hagen Paul Pfeifer <hagen@jauu.net>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@drr.davemloft.net>
Diffstat (limited to 'net/sched/sch_netem.c')
-rw-r--r--	net/sched/sch_netem.c	40
1 file changed, 40 insertions(+), 0 deletions(-)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index eb3b9a86c6ed..9b7af9f1272f 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -79,6 +79,7 @@ struct netem_sched_data {
 	u32 duplicate;
 	u32 reorder;
 	u32 corrupt;
+	u32 rate;
 
 	struct crndstate {
 		u32 last;
@@ -298,6 +299,11 @@ static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
 	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
 }
 
+static psched_time_t packet_len_2_sched_time(unsigned int len, u32 rate)
+{
+	return PSCHED_NS2TICKS((u64)len * NSEC_PER_SEC / rate);
+}
+
 /*
  * Insert one skb into qdisc.
  * Note: parent depends on return value to account for queue length.
@@ -371,6 +377,24 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 				  &q->delay_cor, q->delay_dist);
 
 		now = psched_get_time();
+
+		if (q->rate) {
+			struct sk_buff_head *list = &q->qdisc->q;
+
+			delay += packet_len_2_sched_time(skb->len, q->rate);
+
+			if (!skb_queue_empty(list)) {
+				/*
+				 * Last packet in queue is reference point (now).
+				 * First packet in queue is already in flight,
+				 * calculate this time bonus and subtract
+				 * from delay.
+				 */
+				delay -= now - netem_skb_cb(skb_peek(list))->time_to_send;
+				now = netem_skb_cb(skb_peek_tail(list))->time_to_send;
+			}
+		}
+
 		cb->time_to_send = now + delay;
 		++q->counter;
 		ret = qdisc_enqueue(skb, q->qdisc);
@@ -535,6 +559,14 @@ static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
 	init_crandom(&q->corrupt_cor, r->correlation);
 }
 
+static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
+{
+	struct netem_sched_data *q = qdisc_priv(sch);
+	const struct tc_netem_rate *r = nla_data(attr);
+
+	q->rate = r->rate;
+}
+
 static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
@@ -594,6 +626,7 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
 	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
 	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
 	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
+	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
 	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
 };
 
@@ -666,6 +699,9 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
 	if (tb[TCA_NETEM_CORRUPT])
 		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
 
+	if (tb[TCA_NETEM_RATE])
+		get_rate(sch, tb[TCA_NETEM_RATE]);
+
 	q->loss_model = CLG_RANDOM;
 	if (tb[TCA_NETEM_LOSS])
 		ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);
@@ -846,6 +882,7 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 	struct tc_netem_corr cor;
 	struct tc_netem_reorder reorder;
 	struct tc_netem_corrupt corrupt;
+	struct tc_netem_rate rate;
 
 	qopt.latency = q->latency;
 	qopt.jitter = q->jitter;
@@ -868,6 +905,9 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 	corrupt.correlation = q->corrupt_cor.rho;
 	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
 
+	rate.rate = q->rate;
+	NLA_PUT(skb, TCA_NETEM_RATE, sizeof(rate), &rate);
+
 	if (dump_loss_model(q, skb) != 0)
 		goto nla_put_failure;
 
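For readers tracing the netem_enqueue() hunk above: a rate-limited packet is
conceptually anchored at the tail of the tfifo and then serializes for
len/rate. The following minimal userspace sketch shows that model only; it is
illustrative, not the kernel code (plain nanoseconds instead of psched ticks,
no static delay or jitter, and the in-flight "bonus" correction from the hunk
is omitted):

/*
 * Tail-anchored rate emulation sketch: while packets are still queued,
 * a new packet serializes after the current tail; on an empty queue it
 * serializes from "now".
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t serialize_ns(unsigned int len, uint32_t rate)
{
	/* same arithmetic as packet_len_2_sched_time() */
	return (uint64_t)len * NSEC_PER_SEC / rate;
}

int main(void)
{
	uint32_t rate = 1000;            /* emulated link: 1000 byte/s */
	uint64_t now = 0, tail_tts = 0;  /* time_to_send of the queue tail */
	unsigned int lens[] = { 50, 50, 1000 };

	for (unsigned int i = 0; i < 3; i++) {
		uint64_t start = tail_tts > now ? tail_tts : now;

		tail_tts = start + serialize_ns(lens[i], rate);
		printf("pkt%u (%4u bytes) leaves at %.3f s\n",
		       i + 1, lens[i], tail_tts / 1e9);
	}
	return 0;   /* prints 0.050 s, 0.100 s, 1.100 s */
}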