author		Eric Dumazet <eric.dumazet@gmail.com>	2012-01-04 21:25:16 -0500
committer	David S. Miller <davem@davemloft.net>	2012-01-05 14:01:21 -0500
commit		eeca6688d6599c28bc449a45facb67d7f203be74
tree		5cabbf24a3c1ee2d7757c873ba6449296a8ef7b7
parent		18cb809850fb499ad9bf288696a95f4071f73931
net_sched: red: split red_parms into parms and vars
This patch splits the red_parms structure into two components: one
holding the RED 'constant' parameters, and one holding the variables.
This permits a size reduction of the GRED qdisc, and is a preliminary
step toward adding an optional RED unit to SFQ.
SFQRED will have a single red_parms structure shared by all flows, and a
private red_vars per flow.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
CC: Dave Taht <dave.taht@gmail.com>
CC: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	include/net/red.h	| 98
-rw-r--r--	net/sched/sch_choke.c	| 40
-rw-r--r--	net/sched/sch_gred.c	| 45
-rw-r--r--	net/sched/sch_red.c	| 29
4 files changed, 117 insertions(+), 95 deletions(-)
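The net effect on callers: a RED-based qdisc now embeds both halves and passes them separately to the helpers, with the parameter block typically taken as const. A minimal sketch of the resulting pattern (the example_* names are invented for illustration; the helper signatures are the ones introduced by the red.h hunks below):

	#include <net/red.h>

	/* Illustrative only -- any RED-based qdisc now carries both pieces. */
	struct example_sched_data {
		struct red_parms parms;	/* constants, written once by red_set_parms() */
		struct red_vars	 vars;	/* mutable EWMA state, reset by red_set_vars() */
	};

	static int example_enqueue_decision(struct example_sched_data *q,
					    unsigned int backlog)
	{
		/* parms is only read here; every update lands in vars */
		q->vars.qavg = red_calc_qavg(&q->parms, &q->vars, backlog);
		return red_action(&q->parms, &q->vars, q->vars.qavg);
	}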
diff --git a/include/net/red.h b/include/net/red.h
index ef715a16cce4..baab385a4736 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -137,7 +137,9 @@ struct red_parms {
 	u8		Wlog;		/* log(W)		*/
 	u8		Plog;		/* random number bits	*/
 	u8		Stab[RED_STAB_SIZE];
+};
 
+struct red_vars {
 	/* Variables */
 	int		qcount;		/* Number of packets since last random
 					   number generation */
@@ -152,6 +154,16 @@ static inline u32 red_maxp(u8 Plog)
 	return Plog < 32 ? (~0U >> Plog) : ~0U;
 }
 
+static inline void red_set_vars(struct red_vars *v)
+{
+	/* Reset average queue length, the value is strictly bound
+	 * to the parameters below, reseting hurts a bit but leaving
+	 * it might result in an unreasonable qavg for a while. --TGR
+	 */
+	v->qavg		= 0;
+
+	v->qcount	= -1;
+}
 
 static inline void red_set_parms(struct red_parms *p,
 				 u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
@@ -160,13 +172,6 @@ static inline void red_set_parms(struct red_parms *p,
 	int delta = qth_max - qth_min;
 	u32 max_p_delta;
 
-	/* Reset average queue length, the value is strictly bound
-	 * to the parameters below, reseting hurts a bit but leaving
-	 * it might result in an unreasonable qavg for a while. --TGR
-	 */
-	p->qavg		= 0;
-
-	p->qcount	= -1;
 	p->qth_min	= qth_min << Wlog;
 	p->qth_max	= qth_max << Wlog;
 	p->Wlog		= Wlog;
@@ -197,31 +202,32 @@ static inline void red_set_parms(struct red_parms *p,
 	memcpy(p->Stab, stab, sizeof(p->Stab));
 }
 
-static inline int red_is_idling(const struct red_parms *p)
+static inline int red_is_idling(const struct red_vars *v)
 {
-	return p->qidlestart.tv64 != 0;
+	return v->qidlestart.tv64 != 0;
 }
 
-static inline void red_start_of_idle_period(struct red_parms *p)
+static inline void red_start_of_idle_period(struct red_vars *v)
 {
-	p->qidlestart = ktime_get();
+	v->qidlestart = ktime_get();
 }
 
-static inline void red_end_of_idle_period(struct red_parms *p)
+static inline void red_end_of_idle_period(struct red_vars *v)
 {
-	p->qidlestart.tv64 = 0;
+	v->qidlestart.tv64 = 0;
 }
 
-static inline void red_restart(struct red_parms *p)
+static inline void red_restart(struct red_vars *v)
 {
-	red_end_of_idle_period(p);
-	p->qavg = 0;
-	p->qcount = -1;
+	red_end_of_idle_period(v);
+	v->qavg = 0;
+	v->qcount = -1;
 }
 
-static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms *p)
+static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms *p,
+							 const struct red_vars *v)
 {
-	s64 delta = ktime_us_delta(ktime_get(), p->qidlestart);
+	s64 delta = ktime_us_delta(ktime_get(), v->qidlestart);
 	long us_idle = min_t(s64, delta, p->Scell_max);
 	int shift;
 
@@ -248,7 +254,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms
 	shift = p->Stab[(us_idle >> p->Scell_log) & RED_STAB_MASK];
 
 	if (shift)
-		return p->qavg >> shift;
+		return v->qavg >> shift;
 	else {
 		/* Approximate initial part of exponent with linear function:
 		 *
@@ -257,16 +263,17 @@ static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms
 		 * Seems, it is the best solution to
 		 * problem of too coarse exponent tabulation.
 		 */
-		us_idle = (p->qavg * (u64)us_idle) >> p->Scell_log;
+		us_idle = (v->qavg * (u64)us_idle) >> p->Scell_log;
 
-		if (us_idle < (p->qavg >> 1))
-			return p->qavg - us_idle;
+		if (us_idle < (v->qavg >> 1))
+			return v->qavg - us_idle;
 		else
-			return p->qavg >> 1;
+			return v->qavg >> 1;
 	}
 }
 
 static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p,
+						       const struct red_vars *v,
 						       unsigned int backlog)
 {
 	/*
@@ -278,16 +285,17 @@ static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p
 	 *
 	 * --ANK (980924)
 	 */
-	return p->qavg + (backlog - (p->qavg >> p->Wlog));
+	return v->qavg + (backlog - (v->qavg >> p->Wlog));
 }
 
 static inline unsigned long red_calc_qavg(const struct red_parms *p,
+					  const struct red_vars *v,
 					  unsigned int backlog)
 {
-	if (!red_is_idling(p))
-		return red_calc_qavg_no_idle_time(p, backlog);
+	if (!red_is_idling(v))
+		return red_calc_qavg_no_idle_time(p, v, backlog);
 	else
-		return red_calc_qavg_from_idle_time(p);
+		return red_calc_qavg_from_idle_time(p, v);
 }
 
 
@@ -296,7 +304,9 @@ static inline u32 red_random(const struct red_parms *p)
 	return reciprocal_divide(net_random(), p->max_P_reciprocal);
 }
 
-static inline int red_mark_probability(const struct red_parms *p, unsigned long qavg)
+static inline int red_mark_probability(const struct red_parms *p,
+				       const struct red_vars *v,
+				       unsigned long qavg)
 {
 	/* The formula used below causes questions.
 
@@ -314,7 +324,7 @@ static inline int red_mark_probability(const struct red_parms *p, unsigned long
 
 	   Any questions? --ANK (980924)
 	 */
-	return !(((qavg - p->qth_min) >> p->Wlog) * p->qcount < p->qR);
+	return !(((qavg - p->qth_min) >> p->Wlog) * v->qcount < v->qR);
 }
 
 enum {
@@ -323,7 +333,7 @@ enum {
 	RED_ABOVE_MAX_TRESH,
 };
 
-static inline int red_cmp_thresh(struct red_parms *p, unsigned long qavg)
+static inline int red_cmp_thresh(const struct red_parms *p, unsigned long qavg)
 {
 	if (qavg < p->qth_min)
 		return RED_BELOW_MIN_THRESH;
@@ -339,27 +349,29 @@ enum {
 	RED_HARD_MARK,
 };
 
-static inline int red_action(struct red_parms *p, unsigned long qavg)
+static inline int red_action(const struct red_parms *p,
+			     struct red_vars *v,
+			     unsigned long qavg)
 {
 	switch (red_cmp_thresh(p, qavg)) {
 		case RED_BELOW_MIN_THRESH:
-			p->qcount = -1;
+			v->qcount = -1;
 			return RED_DONT_MARK;
 
 		case RED_BETWEEN_TRESH:
-			if (++p->qcount) {
-				if (red_mark_probability(p, qavg)) {
-					p->qcount = 0;
-					p->qR = red_random(p);
+			if (++v->qcount) {
+				if (red_mark_probability(p, v, qavg)) {
+					v->qcount = 0;
+					v->qR = red_random(p);
 					return RED_PROB_MARK;
 				}
 			} else
-				p->qR = red_random(p);
+				v->qR = red_random(p);
 
 			return RED_DONT_MARK;
 
 		case RED_ABOVE_MAX_TRESH:
-			p->qcount = -1;
+			v->qcount = -1;
 			return RED_HARD_MARK;
 	}
 
@@ -367,14 +379,14 @@ static inline int red_action(struct red_parms *p, unsigned long qavg)
 	return RED_DONT_MARK;
 }
 
-static inline void red_adaptative_algo(struct red_parms *p)
+static inline void red_adaptative_algo(struct red_parms *p, struct red_vars *v)
 {
 	unsigned long qavg;
 	u32 max_p_delta;
 
-	qavg = p->qavg;
-	if (red_is_idling(p))
-		qavg = red_calc_qavg_from_idle_time(p);
+	qavg = v->qavg;
+	if (red_is_idling(v))
+		qavg = red_calc_qavg_from_idle_time(p, v);
 
 	/* p->qavg is fixed point number with point at Wlog */
 	qavg >>= p->Wlog;
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index bef00acb8bd2..e465064d39a3 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -57,6 +57,7 @@ struct choke_sched_data {
 	struct red_parms parms;
 
 	/* Variables */
+	struct red_vars  vars;
 	struct tcf_proto *filter_list;
 	struct {
 		u32	prob_drop;	/* Early probability drops */
@@ -265,7 +266,7 @@ static bool choke_match_random(const struct choke_sched_data *q,
 static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct choke_sched_data *q = qdisc_priv(sch);
-	struct red_parms *p = &q->parms;
+	const struct red_parms *p = &q->parms;
 	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 
 	if (q->filter_list) {
@@ -276,13 +277,13 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	choke_skb_cb(skb)->keys_valid = 0;
 	/* Compute average queue usage (see RED) */
-	p->qavg = red_calc_qavg(p, sch->q.qlen);
-	if (red_is_idling(p))
-		red_end_of_idle_period(p);
+	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
+	if (red_is_idling(&q->vars))
+		red_end_of_idle_period(&q->vars);
 
 	/* Is queue small? */
-	if (p->qavg <= p->qth_min)
-		p->qcount = -1;
+	if (q->vars.qavg <= p->qth_min)
+		q->vars.qcount = -1;
 	else {
 		unsigned int idx;
 
@@ -294,8 +295,8 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 
 	/* Queue is large, always mark/drop */
-	if (p->qavg > p->qth_max) {
-		p->qcount = -1;
+	if (q->vars.qavg > p->qth_max) {
+		q->vars.qcount = -1;
 
 		sch->qstats.overlimits++;
 		if (use_harddrop(q) || !use_ecn(q) ||
@@ -305,10 +306,10 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 
 		q->stats.forced_mark++;
-	} else if (++p->qcount) {
-		if (red_mark_probability(p, p->qavg)) {
-			p->qcount = 0;
-			p->qR = red_random(p);
+	} else if (++q->vars.qcount) {
+		if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
+			q->vars.qcount = 0;
+			q->vars.qR = red_random(p);
 
 			sch->qstats.overlimits++;
 			if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
@@ -319,7 +320,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 			q->stats.prob_mark++;
 		}
 	} else
-		p->qR = red_random(p);
+		q->vars.qR = red_random(p);
 	}
 
 	/* Admit new packet */
@@ -353,8 +354,8 @@ static struct sk_buff *choke_dequeue(struct Qdisc *sch)
 	struct sk_buff *skb;
 
 	if (q->head == q->tail) {
-		if (!red_is_idling(&q->parms))
-			red_start_of_idle_period(&q->parms);
+		if (!red_is_idling(&q->vars))
+			red_start_of_idle_period(&q->vars);
 		return NULL;
 	}
 
@@ -377,8 +378,8 @@ static unsigned int choke_drop(struct Qdisc *sch)
 	if (len > 0)
 		q->stats.other++;
 	else {
-		if (!red_is_idling(&q->parms))
-			red_start_of_idle_period(&q->parms);
+		if (!red_is_idling(&q->vars))
+			red_start_of_idle_period(&q->vars);
 	}
 
 	return len;
@@ -388,7 +389,7 @@ static void choke_reset(struct Qdisc *sch)
 {
 	struct choke_sched_data *q = qdisc_priv(sch);
 
-	red_restart(&q->parms);
+	red_restart(&q->vars);
 }
 
 static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
@@ -482,9 +483,10 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
 		      ctl->Plog, ctl->Scell_log,
 		      nla_data(tb[TCA_CHOKE_STAB]),
 		      max_P);
+	red_set_vars(&q->vars);
 
 	if (q->head == q->tail)
-		red_end_of_idle_period(&q->parms);
+		red_end_of_idle_period(&q->vars);
 
 	sch_tree_unlock(sch);
 	choke_free(old);
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 53204de71c39..0b15236be7b6 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -41,6 +41,7 @@ struct gred_sched_data {
 	u8		prio;		/* the prio of this vq */
 
 	struct red_parms parms;
+	struct red_vars  vars;
 	struct red_stats stats;
 };
 
@@ -55,7 +56,7 @@ struct gred_sched {
 	u32		red_flags;
 	u32		DPs;
 	u32		def;
-	struct red_parms wred_set;
+	struct red_vars wred_set;
 };
 
 static inline int gred_wred_mode(struct gred_sched *table)
@@ -125,17 +126,17 @@ static inline u16 tc_index_to_dp(struct sk_buff *skb)
 	return skb->tc_index & GRED_VQ_MASK;
 }
 
-static inline void gred_load_wred_set(struct gred_sched *table,
+static inline void gred_load_wred_set(const struct gred_sched *table,
 				      struct gred_sched_data *q)
 {
-	q->parms.qavg = table->wred_set.qavg;
-	q->parms.qidlestart = table->wred_set.qidlestart;
+	q->vars.qavg = table->wred_set.qavg;
+	q->vars.qidlestart = table->wred_set.qidlestart;
 }
 
 static inline void gred_store_wred_set(struct gred_sched *table,
 				       struct gred_sched_data *q)
 {
-	table->wred_set.qavg = q->parms.qavg;
+	table->wred_set.qavg = q->vars.qavg;
 }
 
 static inline int gred_use_ecn(struct gred_sched *t)
@@ -170,7 +171,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 			goto drop;
 		}
 
-		/* fix tc_index? --could be controvesial but needed for
+		/* fix tc_index? --could be controversial but needed for
 		   requeueing */
 		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
 	}
@@ -181,8 +182,8 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 		for (i = 0; i < t->DPs; i++) {
 			if (t->tab[i] && t->tab[i]->prio < q->prio &&
-			    !red_is_idling(&t->tab[i]->parms))
-				qavg += t->tab[i]->parms.qavg;
+			    !red_is_idling(&t->tab[i]->vars))
+				qavg += t->tab[i]->vars.qavg;
 		}
 
 	}
@@ -193,15 +194,17 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (gred_wred_mode(t))
 		gred_load_wred_set(t, q);
 
-	q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));
+	q->vars.qavg = red_calc_qavg(&q->parms,
+				     &q->vars,
+				     gred_backlog(t, q, sch));
 
-	if (red_is_idling(&q->parms))
-		red_end_of_idle_period(&q->parms);
+	if (red_is_idling(&q->vars))
+		red_end_of_idle_period(&q->vars);
 
 	if (gred_wred_mode(t))
 		gred_store_wred_set(t, q);
 
-	switch (red_action(&q->parms, q->parms.qavg + qavg)) {
+	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
 	case RED_DONT_MARK:
 		break;
 
@@ -260,7 +263,7 @@ static struct sk_buff *gred_dequeue(struct Qdisc *sch)
 			q->backlog -= qdisc_pkt_len(skb);
 
 			if (!q->backlog && !gred_wred_mode(t))
-				red_start_of_idle_period(&q->parms);
+				red_start_of_idle_period(&q->vars);
 		}
 
 		return skb;
@@ -293,7 +296,7 @@ static unsigned int gred_drop(struct Qdisc *sch)
 			q->stats.other++;
 
 			if (!q->backlog && !gred_wred_mode(t))
-				red_start_of_idle_period(&q->parms);
+				red_start_of_idle_period(&q->vars);
 		}
 
 		qdisc_drop(skb, sch);
@@ -320,7 +323,7 @@ static void gred_reset(struct Qdisc *sch)
 		if (!q)
 			continue;
 
-		red_restart(&q->parms);
+		red_restart(&q->vars);
 		q->backlog = 0;
 	}
 }
@@ -398,12 +401,12 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
 	q->limit = ctl->limit;
 
 	if (q->backlog == 0)
-		red_end_of_idle_period(&q->parms);
+		red_end_of_idle_period(&q->vars);
 
 	red_set_parms(&q->parms,
 		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
 		      ctl->Scell_log, stab, max_P);
-
+	red_set_vars(&q->vars);
 	return 0;
 }
 
@@ -563,12 +566,12 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
 		opt.bytesin = q->bytesin;
 
 		if (gred_wred_mode(table)) {
-			q->parms.qidlestart =
-				table->tab[table->def]->parms.qidlestart;
-			q->parms.qavg = table->tab[table->def]->parms.qavg;
+			q->vars.qidlestart =
+				table->tab[table->def]->vars.qidlestart;
+			q->vars.qavg = table->tab[table->def]->vars.qavg;
 		}
 
-		opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);
+		opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg);
 
 append_opt:
 		if (nla_append(skb, sizeof(opt), &opt) < 0)
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index ce2256a17d7e..a5cc3012cf42 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -41,6 +41,7 @@ struct red_sched_data {
 	unsigned char		flags;
 	struct timer_list	adapt_timer;
 	struct red_parms	parms;
+	struct red_vars		vars;
 	struct red_stats	stats;
 	struct Qdisc		*qdisc;
 };
@@ -61,12 +62,14 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	struct Qdisc *child = q->qdisc;
 	int ret;
 
-	q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog);
+	q->vars.qavg = red_calc_qavg(&q->parms,
+				     &q->vars,
+				     child->qstats.backlog);
 
-	if (red_is_idling(&q->parms))
-		red_end_of_idle_period(&q->parms);
+	if (red_is_idling(&q->vars))
+		red_end_of_idle_period(&q->vars);
 
-	switch (red_action(&q->parms, q->parms.qavg)) {
+	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
 	case RED_DONT_MARK:
 		break;
 
@@ -117,8 +120,8 @@ static struct sk_buff *red_dequeue(struct Qdisc *sch)
 		qdisc_bstats_update(sch, skb);
 		sch->q.qlen--;
 	} else {
-		if (!red_is_idling(&q->parms))
-			red_start_of_idle_period(&q->parms);
+		if (!red_is_idling(&q->vars))
+			red_start_of_idle_period(&q->vars);
 	}
 	return skb;
 }
@@ -144,8 +147,8 @@ static unsigned int red_drop(struct Qdisc *sch)
 		return len;
 	}
 
-	if (!red_is_idling(&q->parms))
-		red_start_of_idle_period(&q->parms);
+	if (!red_is_idling(&q->vars))
+		red_start_of_idle_period(&q->vars);
 
 	return 0;
 }
@@ -156,7 +159,7 @@ static void red_reset(struct Qdisc *sch)
 
 	qdisc_reset(q->qdisc);
 	sch->q.qlen = 0;
-	red_restart(&q->parms);
+	red_restart(&q->vars);
 }
 
 static void red_destroy(struct Qdisc *sch)
@@ -212,17 +215,19 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
 		q->qdisc = child;
 	}
 
-	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
+	red_set_parms(&q->parms,
+		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
 		      ctl->Plog, ctl->Scell_log,
 		      nla_data(tb[TCA_RED_STAB]),
 		      max_P);
+	red_set_vars(&q->vars);
 
 	del_timer(&q->adapt_timer);
 	if (ctl->flags & TC_RED_ADAPTATIVE)
 		mod_timer(&q->adapt_timer, jiffies + HZ/2);
 
 	if (!q->qdisc->q.qlen)
-		red_start_of_idle_period(&q->parms);
+		red_start_of_idle_period(&q->vars);
 
 	sch_tree_unlock(sch);
 	return 0;
@@ -235,7 +240,7 @@ static inline void red_adaptative_timer(unsigned long arg)
 	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
 
 	spin_lock(root_lock);
-	red_adaptative_algo(&q->parms);
+	red_adaptative_algo(&q->parms, &q->vars);
 	mod_timer(&q->adapt_timer, jiffies + HZ/2);
 	spin_unlock(root_lock);
 }
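For reference, the SFQRED layout anticipated in the changelog would look roughly like this (a sketch only -- the example_sfq_* shapes below are hypothetical and not part of this patch): one red_parms shared by every flow, with only the small red_vars duplicated per flow.

	struct example_sfq_flow {
		struct red_vars vars;			/* private per-flow RED state */
	};

	struct example_sfq_sched_data {
		struct red_parms red_parms;		/* single copy shared by all flows */
		struct example_sfq_flow flows[128];	/* 128 is an arbitrary flow count */
	};

	static unsigned long example_flow_qavg(struct example_sfq_sched_data *q,
					       int i, unsigned int backlog)
	{
		return red_calc_qavg(&q->red_parms, &q->flows[i].vars, backlog);
	}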