author     Eric Dumazet <edumazet@google.com>      2012-05-16 00:39:09 -0400
committer  David S. Miller <davem@davemloft.net>   2012-05-16 15:30:26 -0400
commit     865ec5523dadbedefbc5710a68969f686a28d928 (patch)
tree       b9f056cdea6922d5fc3ed035764660bb6fb152b3 /net/sched
parent     c27b46e7f1cbf3be95a4cf5840c76a7b7d54b26f (diff)
fq_codel: should use qdisc backlog as threshold
codel_should_drop() allows a packet to escape dropping when the queue size is under the max packet size. In fq_codel we have two possible backlogs: the qdisc-global one and the per-flow one. The meaningful one for codel_should_drop() is the global backlog, not the per-flow one, so that thin flows can have a non-zero drop/mark probability.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Dave Taht <dave.taht@bufferbloat.net>
Cc: Kathleen Nichols <nichols@pollere.com>
Cc: Van Jacobson <van@pollere.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
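To see the effect, here is a small standalone C illustration (hypothetical code, not a kernel excerpt; the function name and the numbers are invented) of the early-exit test in codel_should_drop(), which refuses to drop while the sojourn time is below target or while the measured backlog fits in a single max-size packet:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the early-exit test in codel_should_drop(). */
static bool drop_allowed(unsigned int sojourn_us, unsigned int target_us,
			 unsigned int backlog_bytes, unsigned int maxpacket)
{
	/* No drop while delay is below target, or while the backlog being
	 * measured is no larger than one max-size packet.
	 */
	if (sojourn_us < target_us || backlog_bytes <= maxpacket)
		return false;
	return true;
}

int main(void)
{
	/* A thin flow holding one 100-byte packet inside a qdisc with a
	 * 60000-byte total backlog, 1514-byte max packets, and a sojourn
	 * time well past target.
	 */
	printf("per-flow backlog: drop %sallowed\n",
	       drop_allowed(10000, 5000, 100, 1514) ? "" : "not ");
	printf("qdisc backlog:    drop %sallowed\n",
	       drop_allowed(10000, 5000, 60000, 1514) ? "" : "not ");
	return 0;
}

Measuring the per-flow backlog makes the thin flow permanently exempt from drop/mark; measuring the qdisc-wide backlog gives it a non-zero probability, which is the point of the patch.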
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/sch_codel.c     4
-rw-r--r--  net/sched/sch_fq_codel.c  5
2 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 213ef60bced8..2f9ab17db85a 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -77,8 +77,8 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
 	struct codel_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
 
-	skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats,
-			    dequeue, &sch->qstats.backlog);
+	skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
+
 	/* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
 	 * or HTB crashes. Defer it for next round.
 	 */
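The extra argument disappears because the companion change to include/net/codel.h (not shown here, since this view is limited to net/sched) drops the u32 *backlog parameter and reads sch->qstats.backlog inside the helper. Roughly, the resulting prototype looks like this (a sketch inferred from the callers in this diff, not a verbatim excerpt):

typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars,
						struct Qdisc *sch);

static struct sk_buff *codel_dequeue(struct Qdisc *sch,
				     struct codel_params *params,
				     struct codel_vars *vars,
				     struct codel_stats *stats,
				     codel_skb_dequeue_t dequeue_func);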
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 337ff204f272..9fc1c62ec80e 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -217,13 +217,14 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
  */
 static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
 {
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
 	struct fq_codel_flow *flow;
 	struct sk_buff *skb = NULL;
 
 	flow = container_of(vars, struct fq_codel_flow, cvars);
 	if (flow->head) {
 		skb = dequeue_head(flow);
-		sch->qstats.backlog -= qdisc_pkt_len(skb);
+		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
 		sch->q.qlen--;
 	}
 	return skb;
@@ -256,7 +257,7 @@ begin:
 	prev_ecn_mark = q->cstats.ecn_mark;
 
 	skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
-			    dequeue, &q->backlogs[flow - q->flows]);
+			    dequeue);
 
 	flow->dropped += q->cstats.drop_count - prev_drop_count;
 	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
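Net effect on byte accounting (a summary sketch, not a literal excerpt): the per-flow dequeue() callback above now maintains only the per-flow byte count, while the shared CoDel helper maintains and tests the qdisc-wide one.

/* Division of labor after this patch:
 *
 * fq_codel's dequeue() callback (first hunk above):
 *	q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
 *
 * codel_should_drop() in include/net/codel.h (companion change, not in
 * this net/sched-limited view):
 *	sch->qstats.backlog -= qdisc_pkt_len(skb);
 *	...
 *	then compares sch->qstats.backlog, the qdisc-wide backlog, against
 *	stats->maxpacket when deciding whether dropping is permitted.
 */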