diff options
author | Thomas Graf <tgraf@suug.ch> | 2005-11-05 15:14:09 -0500 |
---|---|---|
committer | Thomas Graf <tgr@axs.localdomain> | 2005-11-05 16:02:25 -0500 |
commit | dea3f62852f98670b72ad355c67bd55c9af58530 (patch) | |
tree | 979515dfac29d831393bdc6c6226575122dcca79 /net | |
parent | dba051f36a47989b20b248248ffef7984a2f6013 (diff) |
[PKT_SCHED]: GRED: Cleanup equalize flag and add new WRED mode detection
Introduces a flags variable using bitops and transforms eqp to use
it. Converts the conditions of the form (wred && rio) to (wred)
since wred can only be enabled in rio mode anyway.
The patch also improves WRED mode detection. The current behaviour
does not allow WRED mode to be turned off again without removing
the whole qdisc first. The new algorithm checks each VQ against
each other looking for equal priorities every time a VQ is changed
or added. The performance is poor, O(n**2), but it's used only
during administrative tasks and the number of VQs is strictly
limited.
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Diffstat (limited to 'net')
-rw-r--r-- | net/sched/sch_gred.c | 87 |
1 file changed, 65 insertions, 22 deletions
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index 25c171c32715..4ced47bf6089 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c | |||
@@ -91,16 +91,57 @@ struct gred_sched_data | |||
91 | psched_time_t qidlestart; /* Start of idle period */ | 91 | psched_time_t qidlestart; /* Start of idle period */ |
92 | }; | 92 | }; |
93 | 93 | ||
94 | enum { | ||
95 | GRED_WRED_MODE = 1, | ||
96 | }; | ||
97 | |||
94 | struct gred_sched | 98 | struct gred_sched |
95 | { | 99 | { |
96 | struct gred_sched_data *tab[MAX_DPs]; | 100 | struct gred_sched_data *tab[MAX_DPs]; |
101 | unsigned long flags; | ||
97 | u32 DPs; | 102 | u32 DPs; |
98 | u32 def; | 103 | u32 def; |
99 | u8 initd; | 104 | u8 initd; |
100 | u8 grio; | 105 | u8 grio; |
101 | u8 eqp; | ||
102 | }; | 106 | }; |
103 | 107 | ||
108 | static inline int gred_wred_mode(struct gred_sched *table) | ||
109 | { | ||
110 | return test_bit(GRED_WRED_MODE, &table->flags); | ||
111 | } | ||
112 | |||
113 | static inline void gred_enable_wred_mode(struct gred_sched *table) | ||
114 | { | ||
115 | __set_bit(GRED_WRED_MODE, &table->flags); | ||
116 | } | ||
117 | |||
118 | static inline void gred_disable_wred_mode(struct gred_sched *table) | ||
119 | { | ||
120 | __clear_bit(GRED_WRED_MODE, &table->flags); | ||
121 | } | ||
122 | |||
123 | static inline int gred_wred_mode_check(struct Qdisc *sch) | ||
124 | { | ||
125 | struct gred_sched *table = qdisc_priv(sch); | ||
126 | int i; | ||
127 | |||
128 | /* Really ugly O(n^2), but it shouldn't be needed too frequently. */ | ||
129 | for (i = 0; i < table->DPs; i++) { | ||
130 | struct gred_sched_data *q = table->tab[i]; | ||
131 | int n; | ||
132 | |||
133 | if (q == NULL) | ||
134 | continue; | ||
135 | |||
136 | for (n = 0; n < table->DPs; n++) | ||
137 | if (table->tab[n] && table->tab[n] != q && | ||
138 | table->tab[n]->prio == q->prio) | ||
139 | return 1; | ||
140 | } | ||
141 | |||
142 | return 0; | ||
143 | } | ||
144 | |||
104 | static int | 145 | static int |
105 | gred_enqueue(struct sk_buff *skb, struct Qdisc* sch) | 146 | gred_enqueue(struct sk_buff *skb, struct Qdisc* sch) |
106 | { | 147 | { |
@@ -132,7 +173,7 @@ gred_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
132 | "general backlog %d\n",skb->tc_index&0xf,sch->handle,q->backlog, | 173 | "general backlog %d\n",skb->tc_index&0xf,sch->handle,q->backlog, |
133 | sch->qstats.backlog); | 174 | sch->qstats.backlog); |
134 | /* sum up all the qaves of prios <= to ours to get the new qave*/ | 175 | /* sum up all the qaves of prios <= to ours to get the new qave*/ |
135 | if (!t->eqp && t->grio) { | 176 | if (!gred_wred_mode(t) && t->grio) { |
136 | for (i=0;i<t->DPs;i++) { | 177 | for (i=0;i<t->DPs;i++) { |
137 | if ((!t->tab[i]) || (i==q->DP)) | 178 | if ((!t->tab[i]) || (i==q->DP)) |
138 | continue; | 179 | continue; |
@@ -146,7 +187,7 @@ gred_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
146 | q->packetsin++; | 187 | q->packetsin++; |
147 | q->bytesin+=skb->len; | 188 | q->bytesin+=skb->len; |
148 | 189 | ||
149 | if (t->eqp && t->grio) { | 190 | if (gred_wred_mode(t)) { |
150 | qave=0; | 191 | qave=0; |
151 | q->qave=t->tab[t->def]->qave; | 192 | q->qave=t->tab[t->def]->qave; |
152 | q->qidlestart=t->tab[t->def]->qidlestart; | 193 | q->qidlestart=t->tab[t->def]->qidlestart; |
@@ -160,7 +201,7 @@ gred_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
160 | 201 | ||
161 | q->qave >>= q->Stab[(us_idle>>q->Scell_log)&0xFF]; | 202 | q->qave >>= q->Stab[(us_idle>>q->Scell_log)&0xFF]; |
162 | } else { | 203 | } else { |
163 | if (t->eqp) { | 204 | if (gred_wred_mode(t)) { |
164 | q->qave += sch->qstats.backlog - (q->qave >> q->Wlog); | 205 | q->qave += sch->qstats.backlog - (q->qave >> q->Wlog); |
165 | } else { | 206 | } else { |
166 | q->qave += q->backlog - (q->qave >> q->Wlog); | 207 | q->qave += q->backlog - (q->qave >> q->Wlog); |
@@ -169,7 +210,7 @@ gred_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
169 | } | 210 | } |
170 | 211 | ||
171 | 212 | ||
172 | if (t->eqp && t->grio) | 213 | if (gred_wred_mode(t)) |
173 | t->tab[t->def]->qave=q->qave; | 214 | t->tab[t->def]->qave=q->qave; |
174 | 215 | ||
175 | if ((q->qave+qave) < q->qth_min) { | 216 | if ((q->qave+qave) < q->qth_min) { |
@@ -240,7 +281,7 @@ gred_dequeue(struct Qdisc* sch) | |||
240 | q= t->tab[(skb->tc_index&0xf)]; | 281 | q= t->tab[(skb->tc_index&0xf)]; |
241 | if (q) { | 282 | if (q) { |
242 | q->backlog -= skb->len; | 283 | q->backlog -= skb->len; |
243 | if (!q->backlog && !t->eqp) | 284 | if (!q->backlog && !gred_wred_mode(t)) |
244 | PSCHED_GET_TIME(q->qidlestart); | 285 | PSCHED_GET_TIME(q->qidlestart); |
245 | } else { | 286 | } else { |
246 | D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",skb->tc_index&0xf); | 287 | D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",skb->tc_index&0xf); |
@@ -248,7 +289,7 @@ gred_dequeue(struct Qdisc* sch) | |||
248 | return skb; | 289 | return skb; |
249 | } | 290 | } |
250 | 291 | ||
251 | if (t->eqp) { | 292 | if (gred_wred_mode(t)) { |
252 | q= t->tab[t->def]; | 293 | q= t->tab[t->def]; |
253 | if (!q) | 294 | if (!q) |
254 | D2PRINTK("no default VQ set: Results will be " | 295 | D2PRINTK("no default VQ set: Results will be " |
@@ -276,7 +317,7 @@ static unsigned int gred_drop(struct Qdisc* sch) | |||
276 | if (q) { | 317 | if (q) { |
277 | q->backlog -= len; | 318 | q->backlog -= len; |
278 | q->other++; | 319 | q->other++; |
279 | if (!q->backlog && !t->eqp) | 320 | if (!q->backlog && !gred_wred_mode(t)) |
280 | PSCHED_GET_TIME(q->qidlestart); | 321 | PSCHED_GET_TIME(q->qidlestart); |
281 | } else { | 322 | } else { |
282 | D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",skb->tc_index&0xf); | 323 | D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",skb->tc_index&0xf); |
@@ -330,7 +371,6 @@ static int gred_change(struct Qdisc *sch, struct rtattr *opt) | |||
330 | struct tc_gred_sopt *sopt; | 371 | struct tc_gred_sopt *sopt; |
331 | struct rtattr *tb[TCA_GRED_STAB]; | 372 | struct rtattr *tb[TCA_GRED_STAB]; |
332 | struct rtattr *tb2[TCA_GRED_DPS]; | 373 | struct rtattr *tb2[TCA_GRED_DPS]; |
333 | int i; | ||
334 | 374 | ||
335 | if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_STAB, opt)) | 375 | if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_STAB, opt)) |
336 | return -EINVAL; | 376 | return -EINVAL; |
@@ -344,7 +384,17 @@ static int gred_change(struct Qdisc *sch, struct rtattr *opt) | |||
344 | sopt = RTA_DATA(tb2[TCA_GRED_DPS-1]); | 384 | sopt = RTA_DATA(tb2[TCA_GRED_DPS-1]); |
345 | table->DPs=sopt->DPs; | 385 | table->DPs=sopt->DPs; |
346 | table->def=sopt->def_DP; | 386 | table->def=sopt->def_DP; |
347 | table->grio=sopt->grio; | 387 | |
388 | if (sopt->grio) { | ||
389 | table->grio = 1; | ||
390 | gred_disable_wred_mode(table); | ||
391 | if (gred_wred_mode_check(sch)) | ||
392 | gred_enable_wred_mode(table); | ||
393 | } else { | ||
394 | table->grio = 0; | ||
395 | gred_disable_wred_mode(table); | ||
396 | } | ||
397 | |||
348 | table->initd=0; | 398 | table->initd=0; |
349 | /* probably need to clear all the table DP entries as well */ | 399 | /* probably need to clear all the table DP entries as well */ |
350 | return 0; | 400 | return 0; |
@@ -413,17 +463,10 @@ static int gred_change(struct Qdisc *sch, struct rtattr *opt) | |||
413 | PSCHED_SET_PASTPERFECT(q->qidlestart); | 463 | PSCHED_SET_PASTPERFECT(q->qidlestart); |
414 | memcpy(q->Stab, RTA_DATA(tb[TCA_GRED_STAB-1]), 256); | 464 | memcpy(q->Stab, RTA_DATA(tb[TCA_GRED_STAB-1]), 256); |
415 | 465 | ||
416 | if ( table->initd && table->grio) { | 466 | if (table->grio) { |
417 | /* this looks ugly but it's not in the fast path */ | 467 | gred_disable_wred_mode(table); |
418 | for (i=0;i<table->DPs;i++) { | 468 | if (gred_wred_mode_check(sch)) |
419 | if ((!table->tab[i]) || (i==q->DP) ) | 469 | gred_enable_wred_mode(table); |
420 | continue; | ||
421 | if (table->tab[i]->prio == q->prio ){ | ||
422 | /* WRED mode detected */ | ||
423 | table->eqp=1; | ||
424 | break; | ||
425 | } | ||
426 | } | ||
427 | } | 470 | } |
428 | 471 | ||
429 | if (!table->initd) { | 472 | if (!table->initd) { |
@@ -541,7 +584,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
541 | dst->DP=q->DP; | 584 | dst->DP=q->DP; |
542 | dst->backlog=q->backlog; | 585 | dst->backlog=q->backlog; |
543 | if (q->qave) { | 586 | if (q->qave) { |
544 | if (table->eqp && table->grio) { | 587 | if (gred_wred_mode(table)) { |
545 | q->qidlestart=table->tab[table->def]->qidlestart; | 588 | q->qidlestart=table->tab[table->def]->qidlestart; |
546 | q->qave=table->tab[table->def]->qave; | 589 | q->qave=table->tab[table->def]->qave; |
547 | } | 590 | } |