author    David Ward <david.ward@ll.mit.edu>    2012-09-13 01:22:35 -0400
committer David S. Miller <davem@davemloft.net> 2012-09-13 16:10:13 -0400
commit    ba1bf474eae07a128fae6252bf7d68eaef0eacf8 (patch)
tree      b26f1d2656b2659935eff175c168604753f7a5e9 /net/sched/sch_gred.c
parent    1fe37b106b039d9358fd1211c39b1fa199e547a8 (diff)
net_sched: gred: actually perform idling in WRED mode
gred_dequeue() and gred_drop() do not seem to get called when the queue
is empty, meaning that we never start idling while in WRED mode. And
since qidlestart is not stored by gred_store_wred_set(), we would never
stop idling while in WRED mode if we ever started. This messes up the
average queue size calculation that influences packet marking/dropping
behavior.

Now, we start WRED mode idling as we are removing the last packet from
the queue. Also we now actually stop WRED mode idling when we are
enqueuing a packet.

Cc: Bruce Osler <brosler@cisco.com>
Signed-off-by: David Ward <david.ward@ll.mit.edu>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
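For context, RED's average queue size is an EWMA that is only updated on
packet events; once the queue goes empty there are no more events, so the
start of the idle period is timestamped (qidlestart) and the average is aged
down on the next enqueue as if packets had kept draining. A minimal sketch of
that mechanism, assuming a fixed EWMA weight and per-packet service time
(names and constants here are illustrative, not the kernel's
include/net/red.h implementation):

/* Sketch of RED idle-time averaging; not kernel code. */
#include <math.h>
#include <time.h>

struct red_vars_sketch {
	double qavg;        /* EWMA of the queue length */
	time_t qidlestart;  /* nonzero while the queue is idling */
};

static const double W_Q = 0.002;      /* EWMA weight (assumed) */
static const double PKT_TIME = 1e-3;  /* seconds per average packet (assumed) */

/* Dequeue of the last packet: timestamp the start of the idle period. */
static void start_of_idle_period(struct red_vars_sketch *v)
{
	v->qidlestart = time(NULL);
}

/* Next enqueue: age qavg as if m packets had drained during the idle
 * gap, qavg <- qavg * (1 - W_Q)^m, then stop idling. */
static void end_of_idle_period(struct red_vars_sketch *v)
{
	if (v->qidlestart != 0) {
		double m = difftime(time(NULL), v->qidlestart) / PKT_TIME;
		v->qavg *= pow(1.0 - W_Q, m);
		v->qidlestart = 0;
	}
}

In WRED mode this state is shared across all virtual queues via
gred_store_wred_set(), which is why the first hunk below also copies
qidlestart when syncing.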
Diffstat (limited to 'net/sched/sch_gred.c')
-rw-r--r--  net/sched/sch_gred.c | 26 +++++++++++++++-----------
1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index b2570b59d85e..d42234c0f13b 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -136,6 +136,7 @@ static inline void gred_store_wred_set(struct gred_sched *table,
 				 struct gred_sched_data *q)
 {
 	table->wred_set.qavg = q->vars.qavg;
+	table->wred_set.qidlestart = q->vars.qidlestart;
 }
 
 static inline int gred_use_ecn(struct gred_sched *t)
@@ -259,16 +260,18 @@ static struct sk_buff *gred_dequeue(struct Qdisc *sch)
 		} else {
 			q->backlog -= qdisc_pkt_len(skb);
 
-			if (!q->backlog && !gred_wred_mode(t))
-				red_start_of_idle_period(&q->vars);
+			if (gred_wred_mode(t)) {
+				if (!sch->qstats.backlog)
+					red_start_of_idle_period(&t->wred_set);
+			} else {
+				if (!q->backlog)
+					red_start_of_idle_period(&q->vars);
+			}
 		}
 
 		return skb;
 	}
 
-	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
-		red_start_of_idle_period(&t->wred_set);
-
 	return NULL;
 }
 
@@ -290,19 +293,20 @@ static unsigned int gred_drop(struct Qdisc *sch)
 			q->backlog -= len;
 			q->stats.other++;
 
-			if (!q->backlog && !gred_wred_mode(t))
-				red_start_of_idle_period(&q->vars);
+			if (gred_wred_mode(t)) {
+				if (!sch->qstats.backlog)
+					red_start_of_idle_period(&t->wred_set);
+			} else {
+				if (!q->backlog)
+					red_start_of_idle_period(&q->vars);
+			}
 		}
 
 		qdisc_drop(skb, sch);
 		return len;
 	}
 
-	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
-		red_start_of_idle_period(&t->wred_set);
-
 	return 0;
-
 }
 
 static void gred_reset(struct Qdisc *sch)
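After this patch, gred_dequeue() and gred_drop() share the same idling
decision, which can be distilled as follows (a hypothetical sketch;
should_start_idle_period() is not a kernel function):

#include <stdbool.h>

/* In WRED mode all virtual queues share one RED state, so idling may
 * only begin once the whole qdisc is empty (sch->qstats.backlog);
 * otherwise each virtual queue idles on its own backlog (q->backlog). */
static bool should_start_idle_period(bool wred_mode,
				     unsigned int qdisc_backlog,
				     unsigned int vq_backlog)
{
	return wred_mode ? (qdisc_backlog == 0) : (vq_backlog == 0);
}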