diff options
author | Eric Dumazet <edumazet@google.com> | 2016-06-04 15:55:13 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2016-06-07 17:49:56 -0400 |
commit | 80e509db54c81247b32fcb75bb1730fc789b893d (patch) | |
tree | 6252613ac72781d36051be76b3248cce6f9c3e9a /net | |
parent | 5b6c1b4d46b0dae4edea636a776d09f2064f4cd7 (diff) |
fq_codel: fix NET_XMIT_CN behavior
My prior attempt to fix the backlogs of parents failed.
If we return NET_XMIT_CN, our parents won't increase their backlog,
so our qdisc_tree_reduce_backlog() should take this into account.
v2: Florian Westphal pointed out that we could drop the packet,
so we need to save qdisc_pkt_len(skb) in a temp variable before
calling fq_codel_drop()
Fixes: 9d18562a2278 ("fq_codel: add batch ability to fq_codel_drop()")
Fixes: 2ccccf5fb43f ("net_sched: update hierarchical backlog too")
Reported-by: Stas Nichiporovich <stasn77@gmail.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: WANG Cong <xiyou.wangcong@gmail.com>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Acked-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r-- | net/sched/sch_fq_codel.c | 24 |
1 file changed, 18 insertions, 6 deletions
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 6883a8971562..fff7867f4a4f 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c | |||
@@ -199,6 +199,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
199 | unsigned int idx, prev_backlog, prev_qlen; | 199 | unsigned int idx, prev_backlog, prev_qlen; |
200 | struct fq_codel_flow *flow; | 200 | struct fq_codel_flow *flow; |
201 | int uninitialized_var(ret); | 201 | int uninitialized_var(ret); |
202 | unsigned int pkt_len; | ||
202 | bool memory_limited; | 203 | bool memory_limited; |
203 | 204 | ||
204 | idx = fq_codel_classify(skb, sch, &ret); | 205 | idx = fq_codel_classify(skb, sch, &ret); |
@@ -230,6 +231,8 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
230 | prev_backlog = sch->qstats.backlog; | 231 | prev_backlog = sch->qstats.backlog; |
231 | prev_qlen = sch->q.qlen; | 232 | prev_qlen = sch->q.qlen; |
232 | 233 | ||
234 | /* save this packet length as it might be dropped by fq_codel_drop() */ | ||
235 | pkt_len = qdisc_pkt_len(skb); | ||
233 | /* fq_codel_drop() is quite expensive, as it performs a linear search | 236 | /* fq_codel_drop() is quite expensive, as it performs a linear search |
234 | * in q->backlogs[] to find a fat flow. | 237 | * in q->backlogs[] to find a fat flow. |
235 | * So instead of dropping a single packet, drop half of its backlog | 238 | * So instead of dropping a single packet, drop half of its backlog |
@@ -237,14 +240,23 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
237 | */ | 240 | */ |
238 | ret = fq_codel_drop(sch, q->drop_batch_size); | 241 | ret = fq_codel_drop(sch, q->drop_batch_size); |
239 | 242 | ||
240 | q->drop_overlimit += prev_qlen - sch->q.qlen; | 243 | prev_qlen -= sch->q.qlen; |
244 | prev_backlog -= sch->qstats.backlog; | ||
245 | q->drop_overlimit += prev_qlen; | ||
241 | if (memory_limited) | 246 | if (memory_limited) |
242 | q->drop_overmemory += prev_qlen - sch->q.qlen; | 247 | q->drop_overmemory += prev_qlen; |
243 | /* As we dropped packet(s), better let upper stack know this */ | ||
244 | qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen, | ||
245 | prev_backlog - sch->qstats.backlog); | ||
246 | 248 | ||
247 | return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS; | 249 | /* As we dropped packet(s), better let upper stack know this. |
250 | * If we dropped a packet for this flow, return NET_XMIT_CN, | ||
251 | * but in this case, our parents wont increase their backlogs. | ||
252 | */ | ||
253 | if (ret == idx) { | ||
254 | qdisc_tree_reduce_backlog(sch, prev_qlen - 1, | ||
255 | prev_backlog - pkt_len); | ||
256 | return NET_XMIT_CN; | ||
257 | } | ||
258 | qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog); | ||
259 | return NET_XMIT_SUCCESS; | ||
248 | } | 260 | } |
249 | 261 | ||
250 | /* This is the specific function called from codel_dequeue() | 262 | /* This is the specific function called from codel_dequeue() |