diff options
author | Eric Dumazet <edumazet@google.com> | 2012-05-16 00:39:09 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2012-05-16 15:30:26 -0400 |
commit | 865ec5523dadbedefbc5710a68969f686a28d928 (patch) | |
tree | b9f056cdea6922d5fc3ed035764660bb6fb152b3 | |
parent | c27b46e7f1cbf3be95a4cf5840c76a7b7d54b26f (diff) |
fq_codel: should use qdisc backlog as threshold
codel_should_drop() logic allows a packet to avoid being dropped if the
queue size is under the max packet size.
In fq_codel, we have two possible backlogs: the qdisc global one, and
the flow local one.
The meaningful one for codel_should_drop() should be the global backlog,
not the per-flow one, so that thin flows can have a non-zero drop/mark
probability.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Dave Taht <dave.taht@bufferbloat.net>
Cc: Kathleen Nichols <nichols@pollere.com>
Cc: Van Jacobson <van@pollere.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | include/net/codel.h | 15 | ||||
-rw-r--r-- | net/sched/sch_codel.c | 4 | ||||
-rw-r--r-- | net/sched/sch_fq_codel.c | 5 |
3 files changed, 12 insertions, 12 deletions
diff --git a/include/net/codel.h b/include/net/codel.h index 7546517326b5..550debfc2403 100644 --- a/include/net/codel.h +++ b/include/net/codel.h | |||
@@ -205,7 +205,7 @@ static codel_time_t codel_control_law(codel_time_t t, | |||
205 | 205 | ||
206 | 206 | ||
207 | static bool codel_should_drop(const struct sk_buff *skb, | 207 | static bool codel_should_drop(const struct sk_buff *skb, |
208 | unsigned int *backlog, | 208 | struct Qdisc *sch, |
209 | struct codel_vars *vars, | 209 | struct codel_vars *vars, |
210 | struct codel_params *params, | 210 | struct codel_params *params, |
211 | struct codel_stats *stats, | 211 | struct codel_stats *stats, |
@@ -219,13 +219,13 @@ static bool codel_should_drop(const struct sk_buff *skb, | |||
219 | } | 219 | } |
220 | 220 | ||
221 | vars->ldelay = now - codel_get_enqueue_time(skb); | 221 | vars->ldelay = now - codel_get_enqueue_time(skb); |
222 | *backlog -= qdisc_pkt_len(skb); | 222 | sch->qstats.backlog -= qdisc_pkt_len(skb); |
223 | 223 | ||
224 | if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket)) | 224 | if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket)) |
225 | stats->maxpacket = qdisc_pkt_len(skb); | 225 | stats->maxpacket = qdisc_pkt_len(skb); |
226 | 226 | ||
227 | if (codel_time_before(vars->ldelay, params->target) || | 227 | if (codel_time_before(vars->ldelay, params->target) || |
228 | *backlog <= stats->maxpacket) { | 228 | sch->qstats.backlog <= stats->maxpacket) { |
229 | /* went below - stay below for at least interval */ | 229 | /* went below - stay below for at least interval */ |
230 | vars->first_above_time = 0; | 230 | vars->first_above_time = 0; |
231 | return false; | 231 | return false; |
@@ -249,8 +249,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch, | |||
249 | struct codel_params *params, | 249 | struct codel_params *params, |
250 | struct codel_vars *vars, | 250 | struct codel_vars *vars, |
251 | struct codel_stats *stats, | 251 | struct codel_stats *stats, |
252 | codel_skb_dequeue_t dequeue_func, | 252 | codel_skb_dequeue_t dequeue_func) |
253 | u32 *backlog) | ||
254 | { | 253 | { |
255 | struct sk_buff *skb = dequeue_func(vars, sch); | 254 | struct sk_buff *skb = dequeue_func(vars, sch); |
256 | codel_time_t now; | 255 | codel_time_t now; |
@@ -261,7 +260,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch, | |||
261 | return skb; | 260 | return skb; |
262 | } | 261 | } |
263 | now = codel_get_time(); | 262 | now = codel_get_time(); |
264 | drop = codel_should_drop(skb, backlog, vars, params, stats, now); | 263 | drop = codel_should_drop(skb, sch, vars, params, stats, now); |
265 | if (vars->dropping) { | 264 | if (vars->dropping) { |
266 | if (!drop) { | 265 | if (!drop) { |
267 | /* sojourn time below target - leave dropping state */ | 266 | /* sojourn time below target - leave dropping state */ |
@@ -292,7 +291,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch, | |||
292 | qdisc_drop(skb, sch); | 291 | qdisc_drop(skb, sch); |
293 | stats->drop_count++; | 292 | stats->drop_count++; |
294 | skb = dequeue_func(vars, sch); | 293 | skb = dequeue_func(vars, sch); |
295 | if (!codel_should_drop(skb, backlog, | 294 | if (!codel_should_drop(skb, sch, |
296 | vars, params, stats, now)) { | 295 | vars, params, stats, now)) { |
297 | /* leave dropping state */ | 296 | /* leave dropping state */ |
298 | vars->dropping = false; | 297 | vars->dropping = false; |
@@ -313,7 +312,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch, | |||
313 | stats->drop_count++; | 312 | stats->drop_count++; |
314 | 313 | ||
315 | skb = dequeue_func(vars, sch); | 314 | skb = dequeue_func(vars, sch); |
316 | drop = codel_should_drop(skb, backlog, vars, params, | 315 | drop = codel_should_drop(skb, sch, vars, params, |
317 | stats, now); | 316 | stats, now); |
318 | } | 317 | } |
319 | vars->dropping = true; | 318 | vars->dropping = true; |
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c index 213ef60bced8..2f9ab17db85a 100644 --- a/net/sched/sch_codel.c +++ b/net/sched/sch_codel.c | |||
@@ -77,8 +77,8 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch) | |||
77 | struct codel_sched_data *q = qdisc_priv(sch); | 77 | struct codel_sched_data *q = qdisc_priv(sch); |
78 | struct sk_buff *skb; | 78 | struct sk_buff *skb; |
79 | 79 | ||
80 | skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, | 80 | skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue); |
81 | dequeue, &sch->qstats.backlog); | 81 | |
82 | /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0, | 82 | /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0, |
83 | * or HTB crashes. Defer it for next round. | 83 | * or HTB crashes. Defer it for next round. |
84 | */ | 84 | */ |
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 337ff204f272..9fc1c62ec80e 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c | |||
@@ -217,13 +217,14 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
217 | */ | 217 | */ |
218 | static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch) | 218 | static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch) |
219 | { | 219 | { |
220 | struct fq_codel_sched_data *q = qdisc_priv(sch); | ||
220 | struct fq_codel_flow *flow; | 221 | struct fq_codel_flow *flow; |
221 | struct sk_buff *skb = NULL; | 222 | struct sk_buff *skb = NULL; |
222 | 223 | ||
223 | flow = container_of(vars, struct fq_codel_flow, cvars); | 224 | flow = container_of(vars, struct fq_codel_flow, cvars); |
224 | if (flow->head) { | 225 | if (flow->head) { |
225 | skb = dequeue_head(flow); | 226 | skb = dequeue_head(flow); |
226 | sch->qstats.backlog -= qdisc_pkt_len(skb); | 227 | q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb); |
227 | sch->q.qlen--; | 228 | sch->q.qlen--; |
228 | } | 229 | } |
229 | return skb; | 230 | return skb; |
@@ -256,7 +257,7 @@ begin: | |||
256 | prev_ecn_mark = q->cstats.ecn_mark; | 257 | prev_ecn_mark = q->cstats.ecn_mark; |
257 | 258 | ||
258 | skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats, | 259 | skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats, |
259 | dequeue, &q->backlogs[flow - q->flows]); | 260 | dequeue); |
260 | 261 | ||
261 | flow->dropped += q->cstats.drop_count - prev_drop_count; | 262 | flow->dropped += q->cstats.drop_count - prev_drop_count; |
262 | flow->dropped += q->cstats.ecn_mark - prev_ecn_mark; | 263 | flow->dropped += q->cstats.ecn_mark - prev_ecn_mark; |