author		Thomas Graf <tgraf@suug.ch>	2005-11-05 15:14:16 -0500
committer	Thomas Graf <tgr@axs.localdomain>	2005-11-05 16:02:27 -0500
commit		22b33429ab93155895854e9518a253680a920493 (patch)
tree		9beebaec505ad2c5c4bc645c0880c488bcb53900 /net/sched/sch_gred.c
parent		f62d6b936df500247474c13360eb23e1b602bad0 (diff)
[PKT_SCHED]: GRED: Use new generic red interface
Simplifies code a lot by separating the red algorithm and the queueing
logic. We now differentiate between probability marks and forced marks
but sum them together again to not break backwards compatibility.

This brings GRED back to the level of RED and improves the accuracy of
the average queue length calculations when stab suggests a zero shift.

Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
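The core of the conversion is easiest to see in isolation. The sketch below shows the enqueue-time pattern that gred_enqueue() is moved onto, using only the generic helpers from include/net/red.h that this patch starts calling. It is an illustration, not code from the commit; the function name red_style_enqueue is hypothetical, and the real gred_enqueue() (in the diff below) additionally handles WRED mode and per-virtual-queue backlog accounting.

	#include <net/red.h>

	/* Hypothetical sketch of the generic RED enqueue pattern this
	 * patch adopts; illustration only, not part of the commit.
	 * Returns nonzero when the packet should be dropped.
	 */
	static int red_style_enqueue(struct red_parms *p, struct red_stats *st,
				     unsigned int backlog)
	{
		/* Re-estimate the average queue length from the current backlog. */
		p->qavg = red_calc_qavg(p, backlog);

		if (red_is_idling(p))
			red_end_of_idle_period(p);

		switch (red_action(p, p->qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:		/* probabilistic (early) drop */
			st->prob_drop++;
			return 1;

		case RED_HARD_MARK:		/* forced drop: qavg >= qth_max */
			st->forced_drop++;
			return 1;
		}

		return 0;			/* no drop: caller enqueues */
	}

The probability and forced counters stay separate internally (q->stats.prob_drop, q->stats.forced_drop); gred_dump() maps them back onto the existing opt.early and opt.forced fields, which is how the patch keeps the netlink layout seen by userspace unchanged.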
Diffstat (limited to 'net/sched/sch_gred.c')
-rw-r--r--	net/sched/sch_gred.c	| 224
1 file changed, 91 insertions(+), 133 deletions(-)
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index ca6cb271493b..a52490c7af3c 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -45,6 +45,7 @@
 #include <linux/skbuff.h>
 #include <net/sock.h>
 #include <net/pkt_sched.h>
+#include <net/red.h>
 
 #if 1 /* control */
 #define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
@@ -65,32 +66,15 @@ struct gred_sched;
 
 struct gred_sched_data
 {
-/* Parameters */
 	u32		limit;		/* HARD maximal queue length	*/
-	u32		qth_min;	/* Min average length threshold: A scaled */
-	u32		qth_max;	/* Max average length threshold: A scaled */
 	u32		DP;		/* the drop pramaters */
-	char		Wlog;		/* log(W)		*/
-	char		Plog;		/* random number bits	*/
-	u32		Scell_max;
-	u32		Rmask;
 	u32		bytesin;	/* bytes seen on virtualQ so far*/
 	u32		packetsin;	/* packets seen on virtualQ so far*/
 	u32		backlog;	/* bytes on the virtualQ */
-	u32		forced;		/* packets dropped for exceeding limits */
-	u32		early;		/* packets dropped as a warning */
-	u32		other;		/* packets dropped by invoking drop() */
-	u32		pdrop;		/* packets dropped because we exceeded physical queue limits */
-	char		Scell_log;
-	u8		Stab[256];
 	u8		prio;		/* the prio of this vq */
 
-/* Variables */
-	unsigned long	qave;		/* Average queue length: A scaled */
-	int		qcount;		/* Packets since last random number generation */
-	u32		qR;		/* Cached random number */
-
-	psched_time_t	qidlestart;	/* Start of idle period	*/
+	struct red_parms parms;
+	struct red_stats stats;
 };
 
 enum {
@@ -159,13 +143,22 @@ static inline int gred_wred_mode_check(struct Qdisc *sch)
 	return 0;
 }
 
+static inline unsigned int gred_backlog(struct gred_sched *table,
+					struct gred_sched_data *q,
+					struct Qdisc *sch)
+{
+	if (gred_wred_mode(table))
+		return sch->qstats.backlog;
+	else
+		return q->backlog;
+}
+
 static int
 gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
-	psched_time_t now;
 	struct gred_sched_data *q=NULL;
 	struct gred_sched *t= qdisc_priv(sch);
-	unsigned long	qave=0;
+	unsigned long qavg = 0;
 	int i=0;
 
 	if (!t->initd && skb_queue_len(&sch->q) < (sch->dev->tx_queue_len ? : 1)) {
@@ -195,8 +188,9 @@ gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		if ((!t->tab[i]) || (i==q->DP))
 			continue;
 
-		if ((t->tab[i]->prio < q->prio) && (PSCHED_IS_PASTPERFECT(t->tab[i]->qidlestart)))
-			qave +=t->tab[i]->qave;
+		if (t->tab[i]->prio < q->prio &&
+		    !red_is_idling(&t->tab[i]->parms))
+			qavg +=t->tab[i]->parms.qavg;
 	}
 
 	}
@@ -205,68 +199,49 @@ gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	q->bytesin+=skb->len;
 
 	if (gred_wred_mode(t)) {
-		qave=0;
-		q->qave=t->tab[t->def]->qave;
-		q->qidlestart=t->tab[t->def]->qidlestart;
+		qavg = 0;
+		q->parms.qavg = t->tab[t->def]->parms.qavg;
+		q->parms.qidlestart = t->tab[t->def]->parms.qidlestart;
 	}
 
-	if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
-		long us_idle;
-		PSCHED_GET_TIME(now);
-		us_idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max);
-		PSCHED_SET_PASTPERFECT(q->qidlestart);
+	q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));
 
-		q->qave >>= q->Stab[(us_idle>>q->Scell_log)&0xFF];
-	} else {
-		if (gred_wred_mode(t)) {
-			q->qave += sch->qstats.backlog - (q->qave >> q->Wlog);
-		} else {
-			q->qave += q->backlog - (q->qave >> q->Wlog);
-		}
-
-	}
-
+	if (red_is_idling(&q->parms))
+		red_end_of_idle_period(&q->parms);
 
 	if (gred_wred_mode(t))
-		t->tab[t->def]->qave=q->qave;
+		t->tab[t->def]->parms.qavg = q->parms.qavg;
 
-	if ((q->qave+qave) < q->qth_min) {
-		q->qcount = -1;
-enqueue:
-		if (q->backlog + skb->len <= q->limit) {
-			q->backlog += skb->len;
-do_enqueue:
-			__skb_queue_tail(&sch->q, skb);
-			sch->qstats.backlog += skb->len;
-			sch->bstats.bytes += skb->len;
-			sch->bstats.packets++;
-			return 0;
-		} else {
-			q->pdrop++;
-		}
+	switch (red_action(&q->parms, q->parms.qavg + qavg)) {
+		case RED_DONT_MARK:
+			break;
 
-drop:
-		kfree_skb(skb);
-		sch->qstats.drops++;
-		return NET_XMIT_DROP;
-	}
-	if ((q->qave+qave) >= q->qth_max) {
-		q->qcount = -1;
-		sch->qstats.overlimits++;
-		q->forced++;
-		goto drop;
+		case RED_PROB_MARK:
+			sch->qstats.overlimits++;
+			q->stats.prob_drop++;
+			goto drop;
+
+		case RED_HARD_MARK:
+			sch->qstats.overlimits++;
+			q->stats.forced_drop++;
+			goto drop;
 	}
-	if (++q->qcount) {
-		if ((((qave+q->qave) - q->qth_min)>>q->Wlog)*q->qcount < q->qR)
-			goto enqueue;
-		q->qcount = 0;
-		q->qR = net_random()&q->Rmask;
-		sch->qstats.overlimits++;
-		q->early++;
-		goto drop;
+
+	if (q->backlog + skb->len <= q->limit) {
+		q->backlog += skb->len;
+do_enqueue:
+		__skb_queue_tail(&sch->q, skb);
+		sch->qstats.backlog += skb->len;
+		sch->bstats.bytes += skb->len;
+		sch->bstats.packets++;
+		return 0;
 	}
-	q->qR = net_random()&q->Rmask;
-	goto enqueue;
+
+	q->stats.pdrop++;
+drop:
+	kfree_skb(skb);
+	sch->qstats.drops++;
+	return NET_XMIT_DROP;
 }
 
 static int
@@ -276,7 +251,9 @@ gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
 	struct gred_sched *t= qdisc_priv(sch);
 	q= t->tab[(skb->tc_index&0xf)];
 /* error checking here -- probably unnecessary */
-	PSCHED_SET_PASTPERFECT(q->qidlestart);
+
+	if (red_is_idling(&q->parms))
+		red_end_of_idle_period(&q->parms);
 
 	__skb_queue_head(&sch->q, skb);
 	sch->qstats.backlog += skb->len;
@@ -299,7 +276,7 @@ gred_dequeue(struct Qdisc* sch)
 		if (q) {
 			q->backlog -= skb->len;
 			if (!q->backlog && !gred_wred_mode(t))
-				PSCHED_GET_TIME(q->qidlestart);
+				red_start_of_idle_period(&q->parms);
 		} else {
 			D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",skb->tc_index&0xf);
 		}
@@ -312,7 +289,7 @@ gred_dequeue(struct Qdisc* sch)
 			D2PRINTK("no default VQ set: Results will be "
 			       "screwed up\n");
 		else
-			PSCHED_GET_TIME(q->qidlestart);
+			red_start_of_idle_period(&q->parms);
 	}
 
 	return NULL;
@@ -333,9 +310,9 @@ static unsigned int gred_drop(struct Qdisc* sch)
 		q= t->tab[(skb->tc_index&0xf)];
 		if (q) {
 			q->backlog -= len;
-			q->other++;
+			q->stats.other++;
 			if (!q->backlog && !gred_wred_mode(t))
-				PSCHED_GET_TIME(q->qidlestart);
+				red_start_of_idle_period(&q->parms);
 		} else {
 			D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",skb->tc_index&0xf);
 		}
@@ -350,7 +327,7 @@ static unsigned int gred_drop(struct Qdisc* sch)
 		return 0;
 	}
 
-	PSCHED_GET_TIME(q->qidlestart);
+	red_start_of_idle_period(&q->parms);
 	return 0;
 
 }
@@ -369,14 +346,12 @@ static void gred_reset(struct Qdisc* sch)
 		q= t->tab[i];
 		if (!q)
 			continue;
-		PSCHED_SET_PASTPERFECT(q->qidlestart);
-		q->qave = 0;
-		q->qcount = -1;
+		red_restart(&q->parms);
 		q->backlog = 0;
-		q->other=0;
-		q->forced=0;
-		q->pdrop=0;
-		q->early=0;
+		q->stats.other = 0;
+		q->stats.forced_drop = 0;
+		q->stats.prob_drop = 0;
+		q->stats.pdrop = 0;
 	}
 }
 
@@ -450,25 +425,19 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
 	q = table->tab[dp];
 	q->DP = dp;
 	q->prio = prio;
-
-	q->Wlog = ctl->Wlog;
-	q->Plog = ctl->Plog;
 	q->limit = ctl->limit;
-	q->Scell_log = ctl->Scell_log;
-	q->Rmask = ctl->Plog < 32 ? ((1<<ctl->Plog) - 1) : ~0UL;
-	q->Scell_max = (255<<q->Scell_log);
-	q->qth_min = ctl->qth_min<<ctl->Wlog;
-	q->qth_max = ctl->qth_max<<ctl->Wlog;
-	q->qave=0;
-	q->backlog=0;
-	q->qcount = -1;
-	q->other=0;
-	q->forced=0;
-	q->pdrop=0;
-	q->early=0;
-
-	PSCHED_SET_PASTPERFECT(q->qidlestart);
-	memcpy(q->Stab, stab, 256);
+
+	if (q->backlog == 0)
+		red_end_of_idle_period(&q->parms);
+
+	red_set_parms(&q->parms,
+		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
+		      ctl->Scell_log, stab);
+
+	q->stats.other = 0;
+	q->stats.forced_drop = 0;
+	q->stats.prob_drop = 0;
+	q->stats.pdrop = 0;
 
 	return 0;
 }
@@ -592,37 +561,26 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
 		opt.DP = q->DP;
 		opt.backlog = q->backlog;
 		opt.prio = q->prio;
-		opt.qth_min = q->qth_min >> q->Wlog;
-		opt.qth_max = q->qth_max >> q->Wlog;
-		opt.Wlog = q->Wlog;
-		opt.Plog = q->Plog;
-		opt.Scell_log = q->Scell_log;
-		opt.other = q->other;
-		opt.early = q->early;
-		opt.forced = q->forced;
-		opt.pdrop = q->pdrop;
+		opt.qth_min = q->parms.qth_min >> q->parms.Wlog;
+		opt.qth_max = q->parms.qth_max >> q->parms.Wlog;
+		opt.Wlog = q->parms.Wlog;
+		opt.Plog = q->parms.Plog;
+		opt.Scell_log = q->parms.Scell_log;
+		opt.other = q->stats.other;
+		opt.early = q->stats.prob_drop;
+		opt.forced = q->stats.forced_drop;
+		opt.pdrop = q->stats.pdrop;
 		opt.packets = q->packetsin;
 		opt.bytesin = q->bytesin;
 
-		if (q->qave) {
-			if (gred_wred_mode(table)) {
-				q->qidlestart=table->tab[table->def]->qidlestart;
-				q->qave=table->tab[table->def]->qave;
-			}
-			if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
-				long idle;
-				unsigned long qave;
-				psched_time_t now;
-				PSCHED_GET_TIME(now);
-				idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max);
-				qave  = q->qave >> q->Stab[(idle>>q->Scell_log)&0xFF];
-				opt.qave = qave >> q->Wlog;
-
-			} else {
-				opt.qave = q->qave >> q->Wlog;
-			}
-		}
+		if (gred_wred_mode(table)) {
+			q->parms.qidlestart =
+				table->tab[table->def]->parms.qidlestart;
+			q->parms.qavg = table->tab[table->def]->parms.qavg;
+		}
 
+		opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);
+
 append_opt:
 		RTA_APPEND(skb, sizeof(opt), &opt);
 	}