author     Thomas Graf <tgraf@suug.ch>            2005-11-05 15:14:25 -0500
committer  Thomas Graf <tgr@axs.localdomain>      2005-11-05 16:02:28 -0500
commit     1e4dfaf9b99a8b652e8421936fd5fe2459da8265 (patch)
tree       c2ecbf7558fcd34b054f28a797d6f3e88ab468a2 /net/sched/sch_gred.c
parent     6214e653cc578947bf83d6766339a18a41c5b923 (diff)
[PKT_SCHED]: GRED: Cleanup and remove unnecessary code
Removes unnecessary includes, initializers, and simplifies
the code a bit.
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
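
Editorial aside on the "initializers" part of the cleanup: the patch drops the explicit ".next = NULL" and ".cl_ops = NULL" entries from gred_qdisc_ops. A minimal user-space sketch of why that is safe follows; the struct ops type, its field names, and the demo values are stand-ins (not the kernel's struct Qdisc_ops), but the language rule shown is standard C: members omitted from a designated-initializer list are zero-initialized, so omitted pointer members are already NULL.

/*
 * Minimal sketch, not kernel code: members left out of a designated
 * initializer are zero-initialized, so explicit "= NULL" entries for
 * pointer members are redundant.
 */
#include <stdio.h>

struct ops {			/* stand-in, not struct Qdisc_ops */
	void *next;
	void *cl_ops;
	const char *id;
};

static struct ops demo_ops = {
	.id = "gred",		/* .next and .cl_ops are implicitly NULL */
};

int main(void)
{
	printf("next=%p cl_ops=%p id=%s\n",
	       demo_ops.next, demo_ops.cl_ops, demo_ops.id);
	return 0;
}
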
Diffstat (limited to 'net/sched/sch_gred.c')
-rw-r--r--   net/sched/sch_gred.c | 100
1 files changed, 31 insertions, 69 deletions
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 897e6df81b1f..1fb34be32f7c 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -15,50 +15,18 @@
  * from Ren Liu
  * - More error checks
  *
- *
- *
- * For all the glorious comments look at Alexey's sch_red.c
+ * For all the glorious comments look at include/net/red.h
  */
 
 #include <linux/config.h>
 #include <linux/module.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/inet.h>
 #include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/notifier.h>
-#include <net/ip.h>
-#include <net/route.h>
 #include <linux/skbuff.h>
-#include <net/sock.h>
 #include <net/pkt_sched.h>
 #include <net/red.h>
 
-#if 1 /* control */
-#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
-#else
-#define DPRINTK(format,args...)
-#endif
-
-#if 0 /* data */
-#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
-#else
-#define D2PRINTK(format,args...)
-#endif
-
 #define GRED_DEF_PRIO (MAX_DPs / 2)
 #define GRED_VQ_MASK (MAX_DPs - 1)
 
@@ -72,7 +40,7 @@ struct gred_sched_data
 	u32 bytesin;	/* bytes seen on virtualQ so far*/
 	u32 packetsin;	/* packets seen on virtualQ so far*/
 	u32 backlog;	/* bytes on the virtualQ */
-	u8 prio;	/* the prio of this vq */
+	u8 prio;	/* the prio of this vq */
 
 	struct red_parms parms;
 	struct red_stats stats;
@@ -87,8 +55,8 @@ struct gred_sched
 {
 	struct gred_sched_data *tab[MAX_DPs];
 	unsigned long flags;
-	u32 DPs;
-	u32 def;
+	u32 DPs;
+	u32 def;
 	struct red_parms wred_set;
 };
 
@@ -172,13 +140,11 @@ static inline void gred_store_wred_set(struct gred_sched *table,
 	table->wred_set.qavg = q->parms.qavg;
 }
 
-static int
-gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct gred_sched_data *q=NULL;
 	struct gred_sched *t= qdisc_priv(sch);
 	unsigned long qavg = 0;
-	int i=0;
 	u16 dp = tc_index_to_dp(skb);
 
 	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
@@ -200,26 +166,23 @@ gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
 	}
 
-	/* sum up all the qaves of prios <= to ours to get the new qave*/
+	/* sum up all the qaves of prios <= to ours to get the new qave */
 	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
-		for (i=0;i<t->DPs;i++) {
-			if ((!t->tab[i]) || (i==q->DP))
-				continue;
-
-			if (t->tab[i]->prio < q->prio &&
+		int i;
+
+		for (i = 0; i < t->DPs; i++) {
+			if (t->tab[i] && t->tab[i]->prio < q->prio &&
 			    !red_is_idling(&t->tab[i]->parms))
 				qavg +=t->tab[i]->parms.qavg;
 		}
 
 	}
 
 	q->packetsin++;
-	q->bytesin+=skb->len;
+	q->bytesin += skb->len;
 
-	if (gred_wred_mode(t)) {
-		qavg = 0;
+	if (gred_wred_mode(t))
 		gred_load_wred_set(t, q);
-	}
 
 	q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));
 
@@ -258,8 +221,7 @@ congestion_drop:
 	return NET_XMIT_CN;
 }
 
-static int
-gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
+static int gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct gred_sched *t = qdisc_priv(sch);
 	struct gred_sched_data *q;
@@ -279,16 +241,15 @@ gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
 	return qdisc_requeue(skb, sch);
 }
 
-static struct sk_buff *
-gred_dequeue(struct Qdisc* sch)
+static struct sk_buff *gred_dequeue(struct Qdisc* sch)
 {
 	struct sk_buff *skb;
-	struct gred_sched_data *q;
-	struct gred_sched *t= qdisc_priv(sch);
+	struct gred_sched *t = qdisc_priv(sch);
 
 	skb = qdisc_dequeue_head(sch);
 
 	if (skb) {
+		struct gred_sched_data *q;
 		u16 dp = tc_index_to_dp(skb);
 
 		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
@@ -315,13 +276,12 @@ gred_dequeue(struct Qdisc* sch)
 static unsigned int gred_drop(struct Qdisc* sch)
 {
 	struct sk_buff *skb;
-
-	struct gred_sched_data *q;
-	struct gred_sched *t= qdisc_priv(sch);
+	struct gred_sched *t = qdisc_priv(sch);
 
 	skb = qdisc_dequeue_tail(sch);
 	if (skb) {
 		unsigned int len = skb->len;
+		struct gred_sched_data *q;
 		u16 dp = tc_index_to_dp(skb);
 
 		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
@@ -351,15 +311,16 @@ static unsigned int gred_drop(struct Qdisc* sch)
 static void gred_reset(struct Qdisc* sch)
 {
 	int i;
-	struct gred_sched_data *q;
-	struct gred_sched *t= qdisc_priv(sch);
+	struct gred_sched *t = qdisc_priv(sch);
 
 	qdisc_reset_queue(sch);
 
-	for (i=0;i<t->DPs;i++) {
-		q= t->tab[i];
-		if (!q)
-			continue;
+	for (i = 0; i < t->DPs; i++) {
+		struct gred_sched_data *q = t->tab[i];
+
+		if (!q)
+			continue;
+
 		red_restart(&q->parms);
 		q->backlog = 0;
 	}
@@ -590,15 +551,13 @@ static void gred_destroy(struct Qdisc *sch)
 	struct gred_sched *table = qdisc_priv(sch);
 	int i;
 
-	for (i = 0;i < table->DPs; i++) {
+	for (i = 0; i < table->DPs; i++) {
 		if (table->tab[i])
 			gred_destroy_vq(table->tab[i]);
 	}
 }
 
 static struct Qdisc_ops gred_qdisc_ops = {
-	.next = NULL,
-	.cl_ops = NULL,
 	.id = "gred",
 	.priv_size = sizeof(struct gred_sched),
 	.enqueue = gred_enqueue,
@@ -617,10 +576,13 @@ static int __init gred_module_init(void)
 {
 	return register_qdisc(&gred_qdisc_ops);
 }
-static void __exit gred_module_exit(void)
+
+static void __exit gred_module_exit(void)
 {
 	unregister_qdisc(&gred_qdisc_ops);
 }
+
 module_init(gred_module_init)
 module_exit(gred_module_exit)
+
 MODULE_LICENSE("GPL");
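
Editorial aside on the gred_enqueue() hunk above: the old loop skipped i == q->DP explicitly, while the new loop relies on the prio comparison alone. The sketch below (plain user-space C; the vq struct, its fields, and the sample values are hypothetical, and the red_is_idling() check is left out for brevity) illustrates why the two loop shapes sum the same qavg values: a queue's own prio is never strictly less than itself, so the combined test skips it anyway.

/*
 * Hedged sketch, not kernel code: the old and new loop shapes from
 * gred_enqueue() compute the same sum, because vq[i].prio < vq[dp].prio
 * is always false for i == dp.
 */
#include <assert.h>
#include <stdio.h>

struct vq { int present; int prio; unsigned long qavg; };

static unsigned long sum_old(const struct vq *vq, int n, int dp)
{
	unsigned long qavg = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (!vq[i].present || i == dp)	/* old: explicit self-skip */
			continue;
		if (vq[i].prio < vq[dp].prio)
			qavg += vq[i].qavg;
	}
	return qavg;
}

static unsigned long sum_new(const struct vq *vq, int n, int dp)
{
	unsigned long qavg = 0;
	int i;

	for (i = 0; i < n; i++)			/* new: single combined test */
		if (vq[i].present && vq[i].prio < vq[dp].prio)
			qavg += vq[i].qavg;
	return qavg;
}

int main(void)
{
	struct vq vq[4] = {
		{ 1, 1, 10 }, { 0, 0, 0 }, { 1, 2, 20 }, { 1, 3, 30 },
	};
	int dp = 2;	/* enqueue on the prio-2 virtual queue */

	assert(sum_old(vq, 4, dp) == sum_new(vq, 4, dp));
	printf("qavg sum: %lu\n", sum_new(vq, 4, dp));
	return 0;
}
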