author    Stephen Hemminger <shemminger@osdl.org>      2005-12-21 22:03:44 -0500
committer David S. Miller <davem@sunset.davemloft.net> 2006-01-03 16:11:05 -0500
commit    c865e5d99e25a171e8262fc0f7ba608568633c64
tree      5d8cd6a5a4623d3497f2eb0c14e80511f5b2ef73 /net
parent    8cbb512e50fb702b5b1d444f76ebcdb53577b2ec
[PKT_SCHED] netem: packet corruption option
Here is a new feature for netem in 2.6.16: the ability to randomly corrupt
packets. A version was done by Hagen Paul Pfeifer, but I redid it to handle
backwards compatibility with the netlink interface and the presence of
hardware checksum offload. It is useful for testing hardware offload in
devices.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
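With an iproute2 build that understands the new option, the feature is driven
from tc as "corrupt PERCENT [CORRELATION]". For example (interface name
hypothetical):

    tc qdisc add dev eth0 root netem corrupt 0.1%        # ~1 packet in 1000 gets a single-bit error
    tc qdisc change dev eth0 root netem corrupt 5% 25%   # 5% rate, 25% correlated with the last decision
    tc qdisc show dev eth0                               # read back via netem_dump() below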
Diffstat (limited to 'net')
-rw-r--r--    net/sched/sch_netem.c    49
1 file changed, 46 insertions(+), 3 deletions(-)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 82fb07aa06a5..ba5283204837 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -25,7 +25,7 @@
 
 #include <net/pkt_sched.h>
 
-#define VERSION "1.1"
+#define VERSION "1.2"
 
 /* Network Emulation Queuing algorithm.
    ====================================
@@ -65,11 +65,12 @@ struct netem_sched_data {
         u32 jitter;
         u32 duplicate;
         u32 reorder;
+        u32 corrupt;
 
         struct crndstate {
                 unsigned long last;
                 unsigned long rho;
-        } delay_cor, loss_cor, dup_cor, reorder_cor;
+        } delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;
 
         struct disttable {
                 u32 size;
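The new corrupt_cor state reuses netem's correlated random-number machinery:
each draw blends a fresh random value with the previous result according to
rho, so corruption events can cluster the way real-world errors do. A minimal
userspace sketch of that idea, modelled on the existing get_crandom() helper
in sch_netem.c (rnd32() is a stand-in for the kernel's net_random()):

    #include <stdint.h>
    #include <stdlib.h>

    struct crndstate {
            uint32_t last;  /* previous draw, fed back into the next one */
            uint32_t rho;   /* correlation strength, 0..UINT32_MAX */
    };

    static uint32_t rnd32(void)
    {
            /* stand-in for the kernel's net_random(); not cryptographic */
            return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
    }

    static uint32_t get_crandom(struct crndstate *state)
    {
            uint64_t value, rho, answer;

            if (state->rho == 0)            /* no correlation: independent draws */
                    return rnd32();

            value = rnd32();
            rho = (uint64_t)state->rho + 1;
            /* weighted blend of the new value and the previous one;
             * the weights sum to 2^32, so the 64-bit product cannot overflow */
            answer = (value * ((1ULL << 32) - rho) + (uint64_t)state->last * rho) >> 32;
            state->last = (uint32_t)answer;
            return (uint32_t)answer;
    }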
@@ -183,6 +184,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                 q->duplicate = dupsave;
         }
 
+        /*
+         * Randomized packet corruption.
+         * Make copy if needed since we are modifying
+         * If packet is going to be hardware checksummed, then
+         * do it now in software before we mangle it.
+         */
+        if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
+                if (!(skb = skb_unshare(skb, GFP_ATOMIC))
+                    || (skb->ip_summed == CHECKSUM_HW
+                        && skb_checksum_help(skb, 0))) {
+                        sch->qstats.drops++;
+                        return NET_XMIT_DROP;
+                }
+
+                skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
+        }
+
         if (q->gap == 0                 /* not doing reordering */
             || q->counter < q->gap      /* inside last reordering gap */
             || q->reorder < get_crandom(&q->reorder_cor)) {
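The corruption itself is a single-bit error: one random bit in one random byte
of the linear packet data is flipped. skb_unshare() makes a private copy
before writing, and a pending hardware checksum (CHECKSUM_HW) is computed in
software first so the NIC cannot "repair" the mangled packet afterwards. The
bit flip in isolation, as a hypothetical userspace sketch where buf/len stand
in for skb->data/skb_headlen():

    #include <stddef.h>
    #include <stdlib.h>

    /* flip one random bit somewhere in buf[0..len-1] */
    static void corrupt_one_bit(unsigned char *buf, size_t len)
    {
            if (len == 0)
                    return;
            buf[rand() % len] ^= 1u << (rand() % 8);  /* single-bit error */
    }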
@@ -382,6 +400,20 @@ static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
         return 0;
 }
 
+static int get_corrupt(struct Qdisc *sch, const struct rtattr *attr)
+{
+        struct netem_sched_data *q = qdisc_priv(sch);
+        const struct tc_netem_corrupt *r = RTA_DATA(attr);
+
+        if (RTA_PAYLOAD(attr) != sizeof(*r))
+                return -EINVAL;
+
+        q->corrupt = r->probability;
+        init_crandom(&q->corrupt_cor, r->correlation);
+        return 0;
+}
+
+/* Parse netlink message to set options */
 static int netem_change(struct Qdisc *sch, struct rtattr *opt)
 {
         struct netem_sched_data *q = qdisc_priv(sch);
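get_corrupt() insists the attribute payload is exactly
sizeof(struct tc_netem_corrupt), rejecting malformed messages with -EINVAL.
That structure sits in include/linux/pkt_sched.h alongside the other netem
sub-options; as with netem's other knobs, both fields are 32-bit fixed-point
fractions where 0xFFFFFFFF means 100%:

    #include <linux/types.h>        /* __u32 */

    struct tc_netem_corrupt {
            __u32   probability;    /* chance a packet is corrupted (0 = never) */
            __u32   correlation;    /* correlation with the previous decision */
    };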
@@ -432,13 +464,19 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
                         if (ret)
                                 return ret;
                 }
+
                 if (tb[TCA_NETEM_REORDER-1]) {
                         ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
                         if (ret)
                                 return ret;
                 }
-        }
 
+                if (tb[TCA_NETEM_CORRUPT-1]) {
+                        ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT-1]);
+                        if (ret)
+                                return ret;
+                }
+        }
 
         return 0;
 }
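Backwards compatibility falls out of the tb[] check: an older tc that never
emits TCA_NETEM_CORRUPT simply leaves q->corrupt at zero, and the enqueue
path above never corrupts anything. A newer tc converts the percentage given
on the command line into the fixed-point u32 before sending it; a plausible
sketch of that conversion (the actual iproute2 helper may differ):

    #include <stdint.h>

    /* percent in [0,100] -> fixed-point u32 where 0xFFFFFFFF == 100% */
    static uint32_t percent_to_prob(double percent)
    {
            if (percent < 0.0)
                    percent = 0.0;
            if (percent > 100.0)
                    percent = 100.0;
            return (uint32_t)((percent / 100.0) * 4294967295.0);
    }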
@@ -564,6 +602,7 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
         struct tc_netem_qopt qopt;
         struct tc_netem_corr cor;
         struct tc_netem_reorder reorder;
+        struct tc_netem_corrupt corrupt;
 
         qopt.latency = q->latency;
         qopt.jitter = q->jitter;
@@ -582,6 +621,10 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
         reorder.correlation = q->reorder_cor.rho;
         RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);
 
+        corrupt.probability = q->corrupt;
+        corrupt.correlation = q->corrupt_cor.rho;
+        RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
+
         rta->rta_len = skb->tail - b;
 
         return skb->len;