author		Eric Dumazet <edumazet@google.com>	2013-09-27 17:20:01 -0400
committer	David S. Miller <davem@davemloft.net>	2013-09-30 15:51:23 -0400
commit		8d34ce10c59b40e0ce2685341c4e93416f505e45
tree		b74018d5a5aaab268f1e236eafade3730fdf89eb /net/sched/sch_fq.c
parent		b86783587b3d1d552326d955acee37eac48800f1
pkt_sched: fq: qdisc dismantle fixes
fq_reset() should drop all packets in the queue, including those of
throttled flows.

This patch moves code from fq_destroy() to fq_reset() to do the
cleaning.

fq_change() must stop calling fq_dequeue() if all remaining packets
belong to throttled flows.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
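To illustrate the fq_change() part of the fix, here is a minimal, self-contained userspace sketch; all names (toy_dequeue(), qlen, throttled) are hypothetical stand-ins, not kernel code. qlen mirrors sch->q.qlen, and toy_dequeue() returns NULL once only throttled packets remain, just as fq_dequeue() does for throttled flows; without the NULL check, the trim loop would spin forever.

#include <stdio.h>

/* Toy model of the fq_change() trim loop. qlen stands in for
 * sch->q.qlen; packets in throttled flows cannot be dequeued yet. */
static unsigned int qlen = 5;
static unsigned int throttled = 3;

static void *toy_dequeue(void)
{
	if (qlen > throttled) {
		qlen--;
		return (void *)1;	/* a dequeuable packet */
	}
	return NULL;		/* only throttled packets remain */
}

int main(void)
{
	unsigned int limit = 1, drop_count = 0;

	while (qlen > limit) {
		void *skb = toy_dequeue();

		if (!skb)	/* the fix: without this, the loop never exits */
			break;
		drop_count++;
	}
	printf("dropped %u, %u still queued (throttled)\n", drop_count, qlen);
	return 0;
}

Run as-is, this prints "dropped 2, 3 still queued (throttled)": once only throttled packets remain, the loop bails out instead of spinning with qlen still above the limit.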
Diffstat (limited to 'net/sched/sch_fq.c')
 net/sched/sch_fq.c | 57 ++++++++++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 37 insertions(+), 20 deletions(-)
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 32ad015ee8ce..fc6de56a331e 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -285,7 +285,7 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
 
 
 /* remove one skb from head of flow queue */
-static struct sk_buff *fq_dequeue_head(struct fq_flow *flow)
+static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
 {
 	struct sk_buff *skb = flow->head;
 
@@ -293,6 +293,8 @@ static struct sk_buff *fq_dequeue_head(struct fq_flow *flow)
 		flow->head = skb->next;
 		skb->next = NULL;
 		flow->qlen--;
+		sch->qstats.backlog -= qdisc_pkt_len(skb);
+		sch->q.qlen--;
 	}
 	return skb;
 }
@@ -419,7 +421,7 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch)
 	struct sk_buff *skb;
 	struct fq_flow *f;
 
-	skb = fq_dequeue_head(&q->internal);
+	skb = fq_dequeue_head(sch, &q->internal);
 	if (skb)
 		goto out;
 	fq_check_throttled(q, now);
@@ -449,7 +451,7 @@ begin:
 		goto begin;
 	}
 
-	skb = fq_dequeue_head(f);
+	skb = fq_dequeue_head(sch, f);
 	if (!skb) {
 		head->first = f->next;
 		/* force a pass through old_flows to prevent starvation */
@@ -490,19 +492,44 @@ begin:
 		}
 	}
 out:
-	sch->qstats.backlog -= qdisc_pkt_len(skb);
 	qdisc_bstats_update(sch, skb);
-	sch->q.qlen--;
 	qdisc_unthrottled(sch);
 	return skb;
 }
 
 static void fq_reset(struct Qdisc *sch)
 {
+	struct fq_sched_data *q = qdisc_priv(sch);
+	struct rb_root *root;
 	struct sk_buff *skb;
+	struct rb_node *p;
+	struct fq_flow *f;
+	unsigned int idx;
 
-	while ((skb = fq_dequeue(sch)) != NULL)
+	while ((skb = fq_dequeue_head(sch, &q->internal)) != NULL)
 		kfree_skb(skb);
+
+	if (!q->fq_root)
+		return;
+
+	for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
+		root = &q->fq_root[idx];
+		while ((p = rb_first(root)) != NULL) {
+			f = container_of(p, struct fq_flow, fq_node);
+			rb_erase(p, root);
+
+			while ((skb = fq_dequeue_head(sch, f)) != NULL)
+				kfree_skb(skb);
+
+			kmem_cache_free(fq_flow_cachep, f);
+		}
+	}
+	q->new_flows.first = NULL;
+	q->old_flows.first = NULL;
+	q->delayed = RB_ROOT;
+	q->flows = 0;
+	q->inactive_flows = 0;
+	q->throttled_flows = 0;
 }
 
 static void fq_rehash(struct fq_sched_data *q,
@@ -645,6 +672,8 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
 	while (sch->q.qlen > sch->limit) {
 		struct sk_buff *skb = fq_dequeue(sch);
 
+		if (!skb)
+			break;
 		kfree_skb(skb);
 		drop_count++;
 	}
@@ -657,21 +686,9 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
 static void fq_destroy(struct Qdisc *sch)
 {
 	struct fq_sched_data *q = qdisc_priv(sch);
-	struct rb_root *root;
-	struct rb_node *p;
-	unsigned int idx;
 
-	if (q->fq_root) {
-		for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
-			root = &q->fq_root[idx];
-			while ((p = rb_first(root)) != NULL) {
-				rb_erase(p, root);
-				kmem_cache_free(fq_flow_cachep,
-						container_of(p, struct fq_flow, fq_node));
-			}
-		}
-		kfree(q->fq_root);
-	}
+	fq_reset(sch);
+	kfree(q->fq_root);
 	qdisc_watchdog_cancel(&q->watchdog);
 }
 
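The fq_reset() half of the patch follows one accounting discipline: every removal now funnels through fq_dequeue_head(), the single place that decrements sch->q.qlen and sch->qstats.backlog, so draining throttled flows cannot leave the counters stale. A minimal userspace sketch of that discipline, with hypothetical names (struct sched, dequeue_head(), etc.) rather than the real kernel types:

#include <stdio.h>
#include <stdlib.h>

/* Toy packet queue; all names are illustrative, not kernel code. */
struct pkt { struct pkt *next; unsigned int len; };
struct sched { struct pkt *head; unsigned int qlen, backlog; };

/* Single removal path: the only place the counters are decremented,
 * mirroring what fq_dequeue_head() does for the qdisc after the patch. */
static struct pkt *dequeue_head(struct sched *s)
{
	struct pkt *p = s->head;

	if (p) {
		s->head = p->next;
		p->next = NULL;
		s->qlen--;
		s->backlog -= p->len;
	}
	return p;
}

/* reset() drains everything through the same helper, so the counters
 * end at zero without any duplicated bookkeeping in the caller. */
static void reset(struct sched *s)
{
	struct pkt *p;

	while ((p = dequeue_head(s)) != NULL)
		free(p);
}

int main(void)
{
	struct sched s = { NULL, 0, 0 };

	for (int i = 0; i < 3; i++) {
		struct pkt *p = malloc(sizeof(*p));

		p->len = 100;
		p->next = s.head;
		s.head = p;
		s.qlen++;
		s.backlog += p->len;
	}
	reset(&s);
	printf("qlen=%u backlog=%u\n", s.qlen, s.backlog); /* prints 0 0 */
	return 0;
}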