path: root/net/sched/sch_choke.c
author	Eric Dumazet <eric.dumazet@gmail.com>	2012-01-04 21:25:16 -0500
committer	David S. Miller <davem@davemloft.net>	2012-01-05 14:01:21 -0500
commit	eeca6688d6599c28bc449a45facb67d7f203be74 (patch)
tree	5cabbf24a3c1ee2d7757c873ba6449296a8ef7b7 /net/sched/sch_choke.c
parent	18cb809850fb499ad9bf288696a95f4071f73931 (diff)
net_sched: red: split red_parms into parms and vars
This patch splits the red_parms structure into two components: one
holding the RED 'constant' parameters, and one containing the variables.

This permits a size reduction of the GRED qdisc, and is a preliminary
step to add an optional RED unit to SFQ.

SFQRED will have a single red_parms structure shared by all flows, and
a private red_vars per flow.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
CC: Dave Taht <dave.taht@gmail.com>
CC: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_choke.c')
-rw-r--r--	net/sched/sch_choke.c	40
1 file changed, 21 insertions(+), 19 deletions(-)
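For orientation, the split looks roughly like the sketch below. The names
qth_min, qth_max, max_P, Plog, Scell_log, qavg, qcount and qR all appear in
this diff; the types, the remaining members, and the comments are
illustrative, not copied from the patch:

/* RED 'constant' parameters: filled in via red_set_parms(), read-only
 * on the fast path, so a single instance can be shared (cf. SFQRED).
 */
struct red_parms {
	u32	qth_min;	/* min average-queue threshold */
	u32	qth_max;	/* max average-queue threshold */
	u32	max_P;		/* mark/drop probability at qth_max */
	/* ... Plog, Scell_log, and the other fixed parameters ... */
};

/* RED variables: mutable per-queue (later per-flow) state,
 * reset via red_set_vars() / red_restart().
 */
struct red_vars {
	int		qcount;	/* packets since last random mark/drop */
	u32		qR;	/* cached random value */
	unsigned long	qavg;	/* average queue length estimate */
	/* ... idle-period timestamp consulted by red_is_idling() ... */
};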
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index bef00acb8bd2..e465064d39a3 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -57,6 +57,7 @@ struct choke_sched_data {
 	struct red_parms parms;
 
 /* Variables */
+	struct red_vars  vars;
 	struct tcf_proto *filter_list;
 	struct {
 		u32	prob_drop;	/* Early probability drops */
@@ -265,7 +266,7 @@ static bool choke_match_random(const struct choke_sched_data *q,
 static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct choke_sched_data *q = qdisc_priv(sch);
-	struct red_parms *p = &q->parms;
+	const struct red_parms *p = &q->parms;
 	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 
 	if (q->filter_list) {
@@ -276,13 +277,13 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	choke_skb_cb(skb)->keys_valid = 0;
 	/* Compute average queue usage (see RED) */
-	p->qavg = red_calc_qavg(p, sch->q.qlen);
-	if (red_is_idling(p))
-		red_end_of_idle_period(p);
+	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
+	if (red_is_idling(&q->vars))
+		red_end_of_idle_period(&q->vars);
 
 	/* Is queue small? */
-	if (p->qavg <= p->qth_min)
-		p->qcount = -1;
+	if (q->vars.qavg <= p->qth_min)
+		q->vars.qcount = -1;
 	else {
 		unsigned int idx;
 
@@ -294,8 +295,8 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 
 		/* Queue is large, always mark/drop */
-		if (p->qavg > p->qth_max) {
-			p->qcount = -1;
+		if (q->vars.qavg > p->qth_max) {
+			q->vars.qcount = -1;
 
 			sch->qstats.overlimits++;
 			if (use_harddrop(q) || !use_ecn(q) ||
@@ -305,10 +306,10 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 			}
 
 			q->stats.forced_mark++;
-		} else if (++p->qcount) {
-			if (red_mark_probability(p, p->qavg)) {
-				p->qcount = 0;
-				p->qR = red_random(p);
+		} else if (++q->vars.qcount) {
+			if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
+				q->vars.qcount = 0;
+				q->vars.qR = red_random(p);
 
 				sch->qstats.overlimits++;
 				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
@@ -319,7 +320,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 				q->stats.prob_mark++;
 			}
 		} else
-			p->qR = red_random(p);
+			q->vars.qR = red_random(p);
 	}
 
 	/* Admit new packet */
@@ -353,8 +354,8 @@ static struct sk_buff *choke_dequeue(struct Qdisc *sch)
 	struct sk_buff *skb;
 
 	if (q->head == q->tail) {
-		if (!red_is_idling(&q->parms))
-			red_start_of_idle_period(&q->parms);
+		if (!red_is_idling(&q->vars))
+			red_start_of_idle_period(&q->vars);
 		return NULL;
 	}
 
@@ -377,8 +378,8 @@ static unsigned int choke_drop(struct Qdisc *sch)
 	if (len > 0)
 		q->stats.other++;
 	else {
-		if (!red_is_idling(&q->parms))
-			red_start_of_idle_period(&q->parms);
+		if (!red_is_idling(&q->vars))
+			red_start_of_idle_period(&q->vars);
 	}
 
 	return len;
@@ -388,7 +389,7 @@ static void choke_reset(struct Qdisc *sch)
 {
 	struct choke_sched_data *q = qdisc_priv(sch);
 
-	red_restart(&q->parms);
+	red_restart(&q->vars);
 }
 
 static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
@@ -482,9 +483,10 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
 		      ctl->Plog, ctl->Scell_log,
 		      nla_data(tb[TCA_CHOKE_STAB]),
 		      max_P);
+	red_set_vars(&q->vars);
 
 	if (q->head == q->tail)
-		red_end_of_idle_period(&q->parms);
+		red_end_of_idle_period(&q->vars);
 
 	sch_tree_unlock(sch);
 	choke_free(old);
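The SFQRED arrangement the commit message anticipates then falls out
naturally: one red_parms per qdisc, shared by all flows, and a private
red_vars embedded in each flow. A minimal sketch, assuming a hypothetical
per-flow sfq_slot structure (the names below are illustrative, not taken
from this patch):

struct sfq_slot {
	struct red_vars	 vars;		/* private RED state for this flow */
	/* ... per-flow queue bookkeeping ... */
};

struct sfq_sched_data {
	struct red_parms parms;		/* RED constants, shared ... */
	struct sfq_slot	 slots[128];	/* ... by every flow's vars
					 * (flow count illustrative) */
};

Each flow's enqueue path would then call
red_calc_qavg(&q->parms, &slot->vars, qlen), mirroring the
choke_enqueue() change above.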