aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorHagen Paul Pfeifer <hagen@jauu.net>2011-12-12 09:30:00 -0500
committerDavid S. Miller <davem@davemloft.net>2011-12-12 19:44:48 -0500
commit90b41a1cd44cc4e507b554ae5a36562a1ba9a4e8 (patch)
tree4206cd6caab2dcec1d3937d5c0eed04085a153a4 /net
parentc7c6575f254e5621a8408c29bdab0d704c3fab6e (diff)
netem: add cell concept to simulate special MAC behavior
This extension can be used to simulate special link layer characteristics. Simulate because packet data is not modified, only the calculation base is changed to delay a packet based on the original packet size and artificial cell information. packet_overhead can be used to simulate a link layer header compression scheme (e.g. set packet_overhead to -20) or with a positive packet_overhead value an additional MAC header can be simulated. It is also possible to "replace" the 14 byte Ethernet header with something else. cell_size and cell_overhead can be used to simulate link layer schemes, based on cells, like some TDMA schemes. Another application area is MAC schemes using a link layer fragmentation with a (small) header each. Cell size is the maximum amount of data bytes within one cell. Cell overhead is an additional variable to change the per-cell-overhead (e.g. 5 byte header per fragment). Example (5 kbit/s, 20 byte per packet overhead, cell-size 100 byte, per cell overhead 5 byte): tc qdisc add dev eth0 root netem rate 5kbit 20 100 5 Signed-off-by: Hagen Paul Pfeifer <hagen@jauu.net> Signed-off-by: Florian Westphal <fw@strlen.de> Acked-by: Stephen Hemminger <shemminger@vyatta.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/sched/sch_netem.c33
1 files changed, 29 insertions, 4 deletions
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 3bfd73344f76..1fa2f903d221 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -22,6 +22,7 @@
22#include <linux/skbuff.h> 22#include <linux/skbuff.h>
23#include <linux/vmalloc.h> 23#include <linux/vmalloc.h>
24#include <linux/rtnetlink.h> 24#include <linux/rtnetlink.h>
25#include <linux/reciprocal_div.h>
25 26
26#include <net/netlink.h> 27#include <net/netlink.h>
27#include <net/pkt_sched.h> 28#include <net/pkt_sched.h>
@@ -80,6 +81,10 @@ struct netem_sched_data {
80 u32 reorder; 81 u32 reorder;
81 u32 corrupt; 82 u32 corrupt;
82 u32 rate; 83 u32 rate;
84 s32 packet_overhead;
85 u32 cell_size;
86 u32 cell_size_reciprocal;
87 s32 cell_overhead;
83 88
84 struct crndstate { 89 struct crndstate {
85 u32 last; 90 u32 last;
@@ -299,11 +304,23 @@ static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
299 return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu; 304 return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
300} 305}
301 306
302static psched_time_t packet_len_2_sched_time(unsigned int len, u32 rate) 307static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
303{ 308{
304 u64 ticks = (u64)len * NSEC_PER_SEC; 309 u64 ticks;
305 310
306 do_div(ticks, rate); 311 len += q->packet_overhead;
312
313 if (q->cell_size) {
314 u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);
315
316 if (len > cells * q->cell_size) /* extra cell needed for remainder */
317 cells++;
318 len = cells * (q->cell_size + q->cell_overhead);
319 }
320
321 ticks = (u64)len * NSEC_PER_SEC;
322
323 do_div(ticks, q->rate);
307 return PSCHED_NS2TICKS(ticks); 324 return PSCHED_NS2TICKS(ticks);
308} 325}
309 326
@@ -384,7 +401,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
384 if (q->rate) { 401 if (q->rate) {
385 struct sk_buff_head *list = &q->qdisc->q; 402 struct sk_buff_head *list = &q->qdisc->q;
386 403
387 delay += packet_len_2_sched_time(skb->len, q->rate); 404 delay += packet_len_2_sched_time(skb->len, q);
388 405
389 if (!skb_queue_empty(list)) { 406 if (!skb_queue_empty(list)) {
390 /* 407 /*
@@ -568,6 +585,11 @@ static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
568 const struct tc_netem_rate *r = nla_data(attr); 585 const struct tc_netem_rate *r = nla_data(attr);
569 586
570 q->rate = r->rate; 587 q->rate = r->rate;
588 q->packet_overhead = r->packet_overhead;
589 q->cell_size = r->cell_size;
590 if (q->cell_size)
591 q->cell_size_reciprocal = reciprocal_value(q->cell_size);
592 q->cell_overhead = r->cell_overhead;
571} 593}
572 594
573static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr) 595static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
@@ -909,6 +931,9 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
909 NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt); 931 NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
910 932
911 rate.rate = q->rate; 933 rate.rate = q->rate;
934 rate.packet_overhead = q->packet_overhead;
935 rate.cell_size = q->cell_size;
936 rate.cell_overhead = q->cell_overhead;
912 NLA_PUT(skb, TCA_NETEM_RATE, sizeof(rate), &rate); 937 NLA_PUT(skb, TCA_NETEM_RATE, sizeof(rate), &rate);
913 938
914 if (dump_loss_model(q, skb) != 0) 939 if (dump_loss_model(q, skb) != 0)