author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2016-06-20 11:25:44 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2016-06-20 11:25:44 -0400
commit		af52739b922f656eb1f39016fabaabe4baeda2e2 (patch)
tree		79a7aa810d0493cd0cf4adebac26d37f12e8b545 /net/sched/sch_fq_codel.c
parent		25ed6a5e97809129a1bc852b6b5c7d03baa112c4 (diff)
parent		33688abb2802ff3a230bd2441f765477b94cc89e (diff)
Merge 4.7-rc4 into staging-next
We want the fixes in here, and we can resolve a merge issue in
drivers/iio/industrialio-trigger.c
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'net/sched/sch_fq_codel.c')
-rw-r--r--	net/sched/sch_fq_codel.c	26
1 file changed, 19 insertions, 7 deletions
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 6883a8971562..da250b2e06ae 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -199,6 +199,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	unsigned int idx, prev_backlog, prev_qlen;
 	struct fq_codel_flow *flow;
 	int uninitialized_var(ret);
+	unsigned int pkt_len;
 	bool memory_limited;
 
 	idx = fq_codel_classify(skb, sch, &ret);
@@ -230,6 +231,8 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	prev_backlog = sch->qstats.backlog;
 	prev_qlen = sch->q.qlen;
 
+	/* save this packet length as it might be dropped by fq_codel_drop() */
+	pkt_len = qdisc_pkt_len(skb);
 	/* fq_codel_drop() is quite expensive, as it performs a linear search
 	 * in q->backlogs[] to find a fat flow.
 	 * So instead of dropping a single packet, drop half of its backlog
@@ -237,14 +240,23 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	 */
 	ret = fq_codel_drop(sch, q->drop_batch_size);
 
-	q->drop_overlimit += prev_qlen - sch->q.qlen;
+	prev_qlen -= sch->q.qlen;
+	prev_backlog -= sch->qstats.backlog;
+	q->drop_overlimit += prev_qlen;
 	if (memory_limited)
-		q->drop_overmemory += prev_qlen - sch->q.qlen;
-	/* As we dropped packet(s), better let upper stack know this */
-	qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
-				  prev_backlog - sch->qstats.backlog);
+		q->drop_overmemory += prev_qlen;
 
-	return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS;
+	/* As we dropped packet(s), better let upper stack know this.
+	 * If we dropped a packet for this flow, return NET_XMIT_CN,
+	 * but in this case, our parents wont increase their backlogs.
+	 */
+	if (ret == idx) {
+		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
+					  prev_backlog - pkt_len);
+		return NET_XMIT_CN;
+	}
+	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
+	return NET_XMIT_SUCCESS;
 }
 
 /* This is the specific function called from codel_dequeue()
@@ -649,7 +661,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 		qs.backlog = q->backlogs[idx];
 		qs.drops = flow->dropped;
 	}
-	if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0)
+	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
 		return -1;
 	if (idx < q->flows_cnt)
 		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
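
Note on the NET_XMIT_CN hunk: when fq_codel_drop() happens to drop packets from the same flow the incoming skb was just queued to (ret == idx), the caller sees NET_XMIT_CN, and parent qdiscs do not account for that skb at all. That is why qdisc_tree_reduce_backlog() is called with one packet and pkt_len bytes fewer than were actually dropped. The sketch below is a standalone model of that accounting, not kernel source; struct sketch_qdisc, parent_enqueue() and the sample numbers are illustrative assumptions.

#include <stdio.h>

#define NET_XMIT_SUCCESS 0x00
#define NET_XMIT_CN      0x02	/* skb dealt with, but this flow dropped packets */

/* Illustrative stand-in for a parent qdisc's counters. */
struct sketch_qdisc {
	unsigned int qlen;
	unsigned int backlog;
};

/* A parent only accounts for an skb when the child reports SUCCESS;
 * on NET_XMIT_CN the skb never enters the parent's counters. */
static void parent_enqueue(struct sketch_qdisc *p, int child_ret,
			   unsigned int pkt_len)
{
	if (child_ret == NET_XMIT_SUCCESS) {
		p->qlen++;
		p->backlog += pkt_len;
	}
}

int main(void)
{
	struct sketch_qdisc parent = { .qlen = 100, .backlog = 150000 };
	unsigned int pkt_len = 1500;	/* the skb fq_codel just enqueued */
	unsigned int dropped_qlen = 32;	/* deltas from fq_codel_drop(): */
	unsigned int dropped_bytes = 48000;	/* prev_qlen / prev_backlog */

	/* ret == idx case: the new skb was among the drops, so the child
	 * returns NET_XMIT_CN and the parent never counts it ... */
	parent_enqueue(&parent, NET_XMIT_CN, pkt_len);

	/* ... so the tree must be reduced by one packet and pkt_len bytes
	 * less; subtracting the full totals would leave the parent's
	 * counters short by exactly that uncounted skb. */
	parent.qlen    -= dropped_qlen - 1;	/* 100 - 31 = 69 */
	parent.backlog -= dropped_bytes - pkt_len;	/* 150000 - 46500 */

	printf("parent qlen=%u backlog=%u\n", parent.qlen, parent.backlog);
	return 0;
}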