Diffstat (limited to 'net/sched/sch_netem.c')
-rw-r--r--  net/sched/sch_netem.c | 411
1 file changed, 379 insertions(+), 32 deletions(-)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 6a3006b38dc5..69c35f6cd13f 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -19,12 +19,13 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/skbuff.h>
+#include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
 
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 
-#define VERSION "1.2"
+#define VERSION "1.3"
 
 /* Network Emulation Queuing algorithm.
    ====================================
@@ -47,6 +48,20 @@
    layering other disciplines. It does not need to do bandwidth
    control either since that can be handled by using token
    bucket or other rate control.
+
+   Correlated Loss Generator models
+
+   Added generation of correlated loss according to the
+   "Gilbert-Elliot" model, a 4-state Markov model.
+
+   References:
+   [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
+   [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
+   and intuitive loss model for packet networks and its implementation
+   in the Netem module in the Linux kernel", available in [1]
+
+   Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
+            Fabio Ludovici <fabio.ludovici at yahoo.it>
 */
 
 struct netem_sched_data {
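For readers new to the model: Gilbert-Elliot is a two-state Markov chain with a "good" and a "bad" state; p and r are the good-to-bad and bad-to-good transition probabilities, and each state drops packets with its own loss probability (1-k in the good state, 1-h in the bad state). Below is a minimal userspace sketch of the chain, purely illustrative and not part of the patch: drand48() stands in for the kernel's net_random(), and plain doubles stand in for its u32 fixed-point thresholds.

    #include <stdio.h>
    #include <stdlib.h>

    enum ge_state { GOOD, BAD };

    /* One packet step: maybe change state, then check loss using the
     * probability of the pre-transition state, mirroring the ordering
     * of the kernel generator added by this patch. */
    static int ge_lose(enum ge_state *st, double p, double r,
                       double h, double k)
    {
            if (*st == GOOD) {
                    if (drand48() < p)
                            *st = BAD;
                    return drand48() < (1.0 - k);   /* loss in good state */
            }
            if (drand48() < r)
                    *st = GOOD;
            return drand48() < (1.0 - h);           /* loss in bad state */
    }

    int main(void)
    {
            enum ge_state st = GOOD;
            long losses = 0, n = 1000000;

            for (long i = 0; i < n; i++)
                    losses += ge_lose(&st, 0.01, 0.10, 0.70, 1.0);
            printf("observed loss rate: %g\n", (double)losses / n);
            return 0;
    }

With p = 0.01 and r = 0.10 the chain spends about 91% of packets in the good state, so the observed rate should settle near 0.09 * 0.30 ≈ 2.7% for h = 0.70, k = 1.0.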
@@ -73,6 +88,26 @@ struct netem_sched_data {
         u32 size;
         s16 table[0];
     } *delay_dist;
+
+    enum {
+        CLG_RANDOM,
+        CLG_4_STATES,
+        CLG_GILB_ELL,
+    } loss_model;
+
+    /* Correlated Loss Generation models */
+    struct clgstate {
+        /* state of the Markov chain */
+        u8 state;
+
+        /* 4-states and Gilbert-Elliot models */
+        u32 a1;  /* p13 for 4-states or p for GE */
+        u32 a2;  /* p31 for 4-states or r for GE */
+        u32 a3;  /* p32 for 4-states or h for GE */
+        u32 a4;  /* p14 for 4-states or 1-k for GE */
+        u32 a5;  /* p23 used only in 4-states */
+    } clg;
+
 };
 
 /* Time stamp put into socket buffer control block */
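The a1..a5 fields hold probabilities in 32-bit fixed point: userspace maps a probability in [0, 1] onto the full u32 range, so the kernel can compare it directly against a uniform net_random() draw. A sketch of the conversion (the helper name is illustrative; iproute2 has its own percentage parser):

    #include <stdint.h>

    /* Illustrative only: encode a probability in [0.0, 1.0] as the u32
     * threshold compared against net_random(); rnd < threshold happens
     * with probability threshold / 2^32.
     * 0.0 -> 0 (never), 1.0 -> UINT32_MAX (practically always). */
    static uint32_t prob_to_u32(double p)
    {
            if (p <= 0.0)
                    return 0;
            if (p >= 1.0)
                    return UINT32_MAX;
            return (uint32_t)(p * (double)UINT32_MAX);
    }

For example, a 15% gap-to-burst transition would be stored as a1 = prob_to_u32(0.15).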
@@ -115,6 +150,122 @@ static u32 get_crandom(struct crndstate *state)
     return answer;
 }
 
+/* loss_4state - 4-state model loss generator
+ * Generates losses according to the 4-state Markov chain adopted in
+ * the GI (General and Intuitive) loss model.
+ */
+static bool loss_4state(struct netem_sched_data *q)
+{
+    struct clgstate *clg = &q->clg;
+    u32 rnd = net_random();
+
+    /*
+     * Makes a comparison between rnd and the transition
+     * probabilities outgoing from the current state, then decides the
+     * next state and whether the next packet is transmitted or lost.
+     * The four states correspond to:
+     *   1 => successfully transmitted packets within a gap period
+     *   4 => isolated losses within a gap period
+     *   3 => lost packets within a burst period
+     *   2 => successfully transmitted packets within a burst period
+     */
+    switch (clg->state) {
+    case 1:
+        if (rnd < clg->a4) {
+            clg->state = 4;
+            return true;
+        } else if (rnd < clg->a4 + clg->a1) {
+            /* thresholds must stack so that the 1->3
+             * transition gets probability a1, not a1 - a4
+             */
+            clg->state = 3;
+            return true;
+        } else {
+            clg->state = 1;
+        }
+        break;
+    case 2:
+        if (rnd < clg->a5) {
+            clg->state = 3;
+            return true;
+        } else {
+            clg->state = 2;
+        }
+        break;
+    case 3:
+        if (rnd < clg->a3) {
+            clg->state = 2;
+        } else if (rnd < clg->a3 + clg->a2) {
+            clg->state = 1;
+            return true;
+        } else {
+            clg->state = 3;
+            return true;
+        }
+        break;
+    case 4:
+        clg->state = 1;
+        break;
+    }
+
+    return false;
+}
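A quick way to sanity-check a parameter set is to compute the long-run loss rate the chain implies. The sketch below (illustrative, not part of the patch; probabilities as plain doubles, values arbitrary) power-iterates the per-packet transition matrix and then sums the probability mass of every branch that returns true in loss_4state() above:

    #include <stdio.h>

    int main(void)
    {
            /* example GI parameters, as plain probabilities */
            double p13 = 0.05, p31 = 0.20, p32 = 0.30, p14 = 0.01, p23 = 0.10;
            double pi[4] = { 1.0, 0.0, 0.0, 0.0 };  /* start in state 1 */

            for (int it = 0; it < 10000; it++) {
                    double nx[4];

                    nx[0] = pi[0] * (1.0 - p13 - p14) + pi[2] * p31 + pi[3];
                    nx[1] = pi[1] * (1.0 - p23) + pi[2] * p32;
                    nx[2] = pi[0] * p13 + pi[1] * p23
                          + pi[2] * (1.0 - p31 - p32);
                    nx[3] = pi[0] * p14;
                    for (int i = 0; i < 4; i++)
                            pi[i] = nx[i];
            }
            /* branches returning true: 1->4, 1->3, 2->3, 3->1, 3->3 */
            printf("long-run loss rate: %g\n",
                   pi[0] * (p13 + p14) + pi[1] * p23 +
                   pi[2] * (p31 + 1.0 - p31 - p32));
            return 0;
    }

Note that pi[2] * (p31 + 1.0 - p31 - p32) simplifies to pi[2] * (1.0 - p32): once in the burst-loss state, every packet except the one exiting to state 2 counts as lost.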
+
+/* loss_gilb_ell - Gilbert-Elliot model loss generator
+ * Generates losses according to the Gilbert-Elliot loss model or
+ * its special cases (Gilbert or Simple Gilbert)
+ *
+ * Makes a comparison between a random number and the transition
+ * probabilities outgoing from the current state, then decides the
+ * next state. A second random number is extracted and the comparison
+ * with the loss probability of the current state decides if the next
+ * packet will be transmitted or lost.
+ */
+static bool loss_gilb_ell(struct netem_sched_data *q)
+{
+    struct clgstate *clg = &q->clg;
+
+    switch (clg->state) {
+    case 1:
+        if (net_random() < clg->a1)
+            clg->state = 2;
+        if (net_random() < clg->a4)
+            return true;
+        /* without this break, state 1 would fall through
+         * and run the state 2 checks as well
+         */
+        break;
+    case 2:
+        if (net_random() < clg->a2)
+            clg->state = 1;
+        if (net_random() < clg->a3)
+            return true;
+    }
+
+    return false;
+}
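With the field conventions above (a1 = p, a2 = r, and the loss checks drawing against a4 in state 1 and a3 in state 2), the long-run loss rate has a closed form: the chain spends r/(p+r) of packets in state 1 and p/(p+r) in state 2. An illustrative helper, not part of the patch:

    /* Expected long-run Gilbert-Elliot loss rate, where loss_good and
     * loss_bad are the per-state loss probabilities the generator
     * above actually tests against. */
    static double gilb_ell_loss_rate(double p, double r,
                                     double loss_good, double loss_bad)
    {
            double pi1 = r / (p + r);   /* fraction of packets in state 1 */
            double pi2 = p / (p + r);   /* fraction of packets in state 2 */

            return pi1 * loss_good + pi2 * loss_bad;
    }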
+
+static bool loss_event(struct netem_sched_data *q)
+{
+    switch (q->loss_model) {
+    case CLG_RANDOM:
+        /* Random packet drop 0 => none, ~0 => all */
+        return q->loss && q->loss >= get_crandom(&q->loss_cor);
+
+    case CLG_4_STATES:
+        /* 4-state loss model (also used for the GI model):
+         * let the Markov chain decide whether this packet is lost
+         */
+        return loss_4state(q);
+
+    case CLG_GILB_ELL:
+        /* Gilbert-Elliot loss model:
+         * let the two-state chain decide whether this packet is lost
+         */
+        return loss_gilb_ell(q);
+    }
+
+    return false;   /* not reached */
+}
+
+
 /* tabledist - return a pseudo-randomly distributed value with mean mu and
  * std deviation sigma.  Uses table lookup to approximate the desired
  * distribution, and a uniformly-distributed pseudo-random source.
@@ -161,14 +312,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
     int ret;
     int count = 1;
 
-    pr_debug("netem_enqueue skb=%p\n", skb);
-
     /* Random duplication */
     if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
         ++count;
 
-    /* Random packet drop 0 => none, ~0 => all */
-    if (q->loss && q->loss >= get_crandom(&q->loss_cor))
+    /* Drop packet? */
+    if (loss_event(q))
         --count;
 
     if (count == 0) {
@@ -211,8 +360,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
     }
 
     cb = netem_skb_cb(skb);
-    if (q->gap == 0 ||        /* not doing reordering */
-        q->counter < q->gap ||    /* inside last reordering gap */
+    if (q->gap == 0 ||        /* not doing reordering */
+        q->counter < q->gap ||    /* inside last reordering gap */
         q->reorder < get_crandom(&q->reorder_cor)) {
         psched_time_t now;
         psched_tdiff_t delay;
@@ -238,17 +387,18 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         ret = NET_XMIT_SUCCESS;
     }
 
-    if (likely(ret == NET_XMIT_SUCCESS)) {
-        sch->q.qlen++;
-    } else if (net_xmit_drop_count(ret)) {
-        sch->qstats.drops++;
+    if (ret != NET_XMIT_SUCCESS) {
+        if (net_xmit_drop_count(ret)) {
+            sch->qstats.drops++;
+            return ret;
+        }
     }
 
-    pr_debug("netem: enqueue ret %d\n", ret);
-    return ret;
+    sch->q.qlen++;
+    return NET_XMIT_SUCCESS;
 }
 
-static unsigned int netem_drop(struct Qdisc* sch)
+static unsigned int netem_drop(struct Qdisc *sch)
 {
     struct netem_sched_data *q = qdisc_priv(sch);
     unsigned int len = 0;
@@ -265,7 +415,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
     struct netem_sched_data *q = qdisc_priv(sch);
     struct sk_buff *skb;
 
-    if (sch->flags & TCQ_F_THROTTLED)
+    if (qdisc_is_throttled(sch))
         return NULL;
 
     skb = q->qdisc->ops->peek(q->qdisc);
@@ -287,9 +437,10 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
         if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
             skb->tstamp.tv64 = 0;
 #endif
-        pr_debug("netem_dequeue: return skb=%p\n", skb);
-        qdisc_bstats_update(sch, skb);
+
         sch->q.qlen--;
+        qdisc_unthrottled(sch);
+        qdisc_bstats_update(sch, skb);
         return skb;
     }
 
@@ -308,6 +459,16 @@ static void netem_reset(struct Qdisc *sch)
     qdisc_watchdog_cancel(&q->watchdog);
 }
 
+static void dist_free(struct disttable *d)
+{
+    if (d) {
+        if (is_vmalloc_addr(d))
+            vfree(d);
+        else
+            kfree(d);
+    }
+}
+
 /*
  * Distribution data is a variable size payload containing
  * signed 16 bit values.
@@ -315,16 +476,20 @@ static void netem_reset(struct Qdisc *sch)
 static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
 {
     struct netem_sched_data *q = qdisc_priv(sch);
-    unsigned long n = nla_len(attr)/sizeof(__s16);
+    size_t n = nla_len(attr)/sizeof(__s16);
     const __s16 *data = nla_data(attr);
     spinlock_t *root_lock;
     struct disttable *d;
     int i;
+    size_t s;
 
-    if (n > 65536)
+    if (n > NETEM_DIST_MAX)
         return -EINVAL;
 
-    d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
+    s = sizeof(struct disttable) + n * sizeof(s16);
+    d = kmalloc(s, GFP_KERNEL);
+    if (!d)
+        d = vmalloc(s);
     if (!d)
         return -ENOMEM;
 
@@ -335,7 +500,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
     root_lock = qdisc_root_sleeping_lock(sch);
 
     spin_lock_bh(root_lock);
-    kfree(q->delay_dist);
+    dist_free(q->delay_dist);
     q->delay_dist = d;
     spin_unlock_bh(root_lock);
     return 0;
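The kmalloc-then-vmalloc fallback above, paired with the is_vmalloc_addr() test in dist_free(), was the idiomatic pattern for possibly-large allocations at the time. Later kernels fold the same logic into kvmalloc()/kvfree(); a sketch of the equivalent allocation in that style (assumes a kernel that provides kvmalloc, illustrative rather than part of this patch):

    #include <linux/mm.h>           /* kvmalloc(), kvfree() */

    /* Same policy as above: try kmalloc first, fall back to vmalloc
     * transparently; the matching kvfree() picks the right release
     * path, making a hand-rolled dist_free() unnecessary. */
    static struct disttable *dist_alloc(size_t n)
    {
            struct disttable *d;

            d = kvmalloc(sizeof(*d) + n * sizeof(d->table[0]), GFP_KERNEL);
            if (d)
                    d->size = n;
            return d;
    }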
@@ -369,10 +534,66 @@ static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
     init_crandom(&q->corrupt_cor, r->correlation);
 }
 
+static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
+{
+    struct netem_sched_data *q = qdisc_priv(sch);
+    const struct nlattr *la;
+    int rem;
+
+    nla_for_each_nested(la, attr, rem) {
+        u16 type = nla_type(la);
+
+        switch (type) {
+        case NETEM_LOSS_GI: {
+            const struct tc_netem_gimodel *gi = nla_data(la);
+
+            if (nla_len(la) != sizeof(struct tc_netem_gimodel)) {
+                pr_info("netem: incorrect gi model size\n");
+                return -EINVAL;
+            }
+
+            q->loss_model = CLG_4_STATES;
+
+            q->clg.state = 1;
+            q->clg.a1 = gi->p13;
+            q->clg.a2 = gi->p31;
+            q->clg.a3 = gi->p32;
+            q->clg.a4 = gi->p14;
+            q->clg.a5 = gi->p23;
+            break;
+        }
+
+        case NETEM_LOSS_GE: {
+            const struct tc_netem_gemodel *ge = nla_data(la);
+
+            if (nla_len(la) != sizeof(struct tc_netem_gemodel)) {
+                pr_info("netem: incorrect ge model size\n");
+                return -EINVAL;
+            }
+
+            q->loss_model = CLG_GILB_ELL;
+            q->clg.state = 1;
+            q->clg.a1 = ge->p;
+            q->clg.a2 = ge->r;
+            q->clg.a3 = ge->h;
+            q->clg.a4 = ge->k1;
+            break;
+        }
+
+        default:
+            pr_info("netem: unknown loss type %u\n", type);
+            return -EINVAL;
+        }
+    }
+
+    return 0;
+}
+
 static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
     [TCA_NETEM_CORR]    = { .len = sizeof(struct tc_netem_corr) },
     [TCA_NETEM_REORDER] = { .len = sizeof(struct tc_netem_reorder) },
     [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) },
+    [TCA_NETEM_LOSS]    = { .type = NLA_NESTED },
 };
 
 static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
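For reference, the nested TCA_NETEM_LOSS attribute carries one of two fixed-size payloads whose layouts come from the companion include/linux/pkt_sched.h change; they are reproduced here because the a1..a5 assignments above are hard to read without them (all fields use the same u32 fixed-point probability encoding as before):

    /* NETEM_LOSS_GI: transition probabilities of the 4-state model */
    struct tc_netem_gimodel {
            __u32   p13;
            __u32   p31;
            __u32   p32;
            __u32   p14;
            __u32   p23;
    };

    /* NETEM_LOSS_GE: Gilbert-Elliot and its special cases */
    struct tc_netem_gemodel {
            __u32   p;
            __u32   r;
            __u32   h;
            __u32   k1;
    };

In iproute2 these are filled in by the `loss state` and `loss gemodel` options of tc-netem, whose syntax landed alongside this kernel change.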
@@ -380,11 +601,15 @@ static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
 {
     int nested_len = nla_len(nla) - NLA_ALIGN(len);
 
-    if (nested_len < 0)
+    if (nested_len < 0) {
+        pr_info("netem: invalid attributes len %d\n", nested_len);
         return -EINVAL;
+    }
+
     if (nested_len >= nla_attr_size(0))
         return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
                  nested_len, policy);
+
     memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
     return 0;
 }
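parse_attr() exists because netem's TCA_OPTIONS is not an ordinary nested attribute: for backward compatibility it begins with a bare struct tc_netem_qopt, and any netlink attributes follow that fixed header. A sketch of the layout this function walks (comment only, assuming the usual netem option format):

    /*
     * TCA_OPTIONS payload as netem expects it:
     *
     *   +------------------------------+
     *   | struct tc_netem_qopt         |  fixed legacy header: latency,
     *   | (padded to NLA_ALIGN)        |  limit, loss, gap, duplicate,
     *   +------------------------------+  jitter
     *   | nlattr TCA_NETEM_CORR        |  optional attributes, parsed
     *   | nlattr TCA_NETEM_REORDER     |  by parse_attr() against
     *   | nlattr TCA_NETEM_CORRUPT     |  netem_policy
     *   | nlattr TCA_NETEM_LOSS        |  nested: NETEM_LOSS_GI / _GE
     *   +------------------------------+
     */

nested_len is the number of bytes left after the aligned header; a negative value means the message is shorter than the header itself, hence the new pr_info() diagnostic.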
@@ -407,7 +632,7 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
 
     ret = fifo_set_limit(q->qdisc, qopt->limit);
     if (ret) {
-        pr_debug("netem: can't set fifo limit\n");
+        pr_info("netem: can't set fifo limit\n");
         return ret;
     }
 
@@ -440,7 +665,11 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
     if (tb[TCA_NETEM_CORRUPT])
         get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
 
-    return 0;
+    q->loss_model = CLG_RANDOM;
+    if (tb[TCA_NETEM_LOSS])
+        ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);
+
+    return ret;
 }
 
 /*
@@ -535,16 +764,17 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt)
 
     qdisc_watchdog_init(&q->watchdog, sch);
 
+    q->loss_model = CLG_RANDOM;
     q->qdisc = qdisc_create_dflt(sch->dev_queue, &tfifo_qdisc_ops,
                      TC_H_MAKE(sch->handle, 1));
     if (!q->qdisc) {
-        pr_debug("netem: qdisc create failed\n");
+        pr_notice("netem: tfifo qdisc create failed\n");
         return -ENOMEM;
     }
 
     ret = netem_change(sch, opt);
     if (ret) {
-        pr_debug("netem: change failed\n");
+        pr_info("netem: change failed\n");
         qdisc_destroy(q->qdisc);
     }
     return ret;
@@ -556,14 +786,61 @@ static void netem_destroy(struct Qdisc *sch)
 
     qdisc_watchdog_cancel(&q->watchdog);
     qdisc_destroy(q->qdisc);
-    kfree(q->delay_dist);
+    dist_free(q->delay_dist);
+}
+
+static int dump_loss_model(const struct netem_sched_data *q,
+               struct sk_buff *skb)
+{
+    struct nlattr *nest;
+
+    nest = nla_nest_start(skb, TCA_NETEM_LOSS);
+    if (nest == NULL)
+        goto nla_put_failure;
+
+    switch (q->loss_model) {
+    case CLG_RANDOM:
+        /* legacy loss model */
+        nla_nest_cancel(skb, nest);
+        return 0;   /* no data */
+
+    case CLG_4_STATES: {
+        struct tc_netem_gimodel gi = {
+            .p13 = q->clg.a1,
+            .p31 = q->clg.a2,
+            .p32 = q->clg.a3,
+            .p14 = q->clg.a4,
+            .p23 = q->clg.a5,
+        };
+
+        NLA_PUT(skb, NETEM_LOSS_GI, sizeof(gi), &gi);
+        break;
+    }
+    case CLG_GILB_ELL: {
+        struct tc_netem_gemodel ge = {
+            .p = q->clg.a1,
+            .r = q->clg.a2,
+            .h = q->clg.a3,
+            .k1 = q->clg.a4,
+        };
+
+        NLA_PUT(skb, NETEM_LOSS_GE, sizeof(ge), &ge);
+        break;
+    }
+    }
+
+    nla_nest_end(skb, nest);
+    return 0;
+
+nla_put_failure:
+    nla_nest_cancel(skb, nest);
+    return -1;
 }
 
 static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
     const struct netem_sched_data *q = qdisc_priv(sch);
-    unsigned char *b = skb_tail_pointer(skb);
-    struct nlattr *nla = (struct nlattr *) b;
+    struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
     struct tc_netem_qopt qopt;
     struct tc_netem_corr cor;
     struct tc_netem_reorder reorder;
@@ -590,17 +867,87 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
     corrupt.correlation = q->corrupt_cor.rho;
     NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
 
-    nla->nla_len = skb_tail_pointer(skb) - b;
+    if (dump_loss_model(q, skb) != 0)
+        goto nla_put_failure;
 
-    return skb->len;
+    return nla_nest_end(skb, nla);
 
 nla_put_failure:
-    nlmsg_trim(skb, b);
+    nlmsg_trim(skb, nla);
     return -1;
 }
 
+static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
+              struct sk_buff *skb, struct tcmsg *tcm)
+{
+    struct netem_sched_data *q = qdisc_priv(sch);
+
+    if (cl != 1)    /* only one class */
+        return -ENOENT;
+
+    tcm->tcm_handle |= TC_H_MIN(1);
+    tcm->tcm_info = q->qdisc->handle;
+
+    return 0;
+}
+
+static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+             struct Qdisc **old)
+{
+    struct netem_sched_data *q = qdisc_priv(sch);
+
+    if (new == NULL)
+        new = &noop_qdisc;
+
+    sch_tree_lock(sch);
+    *old = q->qdisc;
+    q->qdisc = new;
+    qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+    qdisc_reset(*old);
+    sch_tree_unlock(sch);
+
+    return 0;
+}
+
+static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
+{
+    struct netem_sched_data *q = qdisc_priv(sch);
+    return q->qdisc;
+}
+
+static unsigned long netem_get(struct Qdisc *sch, u32 classid)
+{
+    return 1;
+}
+
+static void netem_put(struct Qdisc *sch, unsigned long arg)
+{
+}
+
+static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
+{
+    if (!walker->stop) {
+        if (walker->count >= walker->skip)
+            if (walker->fn(sch, 1, walker) < 0) {
+                walker->stop = 1;
+                return;
+            }
+        walker->count++;
+    }
+}
+
+static const struct Qdisc_class_ops netem_class_ops = {
+    .graft      = netem_graft,
+    .leaf       = netem_leaf,
+    .get        = netem_get,
+    .put        = netem_put,
+    .walk       = netem_walk,
+    .dump       = netem_dump_class,
+};
+
 static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
     .id         = "netem",
+    .cl_ops     = &netem_class_ops,
     .priv_size  = sizeof(struct netem_sched_data),
     .enqueue    = netem_enqueue,
     .dequeue    = netem_dequeue,