aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--include/uapi/linux/pkt_sched.h3
-rw-r--r--net/sched/sch_fq.c22
2 files changed, 21 insertions, 4 deletions
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 885001b62c83..a806687ad98f 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -768,6 +768,9 @@ enum {
768 TCA_FQ_FLOW_MAX_RATE, /* per flow max rate */ 768 TCA_FQ_FLOW_MAX_RATE, /* per flow max rate */
769 769
770 TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */ 770 TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */
771
772 TCA_FQ_FLOW_REFILL_DELAY, /* flow credit refill delay in usec */
773
771 __TCA_FQ_MAX 774 __TCA_FQ_MAX
772}; 775};
773 776
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 26906ab51c52..95d843961907 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -88,6 +88,7 @@ struct fq_sched_data {
88 struct fq_flow internal; /* for non classified or high prio packets */ 88 struct fq_flow internal; /* for non classified or high prio packets */
89 u32 quantum; 89 u32 quantum;
90 u32 initial_quantum; 90 u32 initial_quantum;
91 u32 flow_refill_delay;
91 u32 flow_max_rate; /* optional max rate per flow */ 92 u32 flow_max_rate; /* optional max rate per flow */
92 u32 flow_plimit; /* max packets per flow */ 93 u32 flow_plimit; /* max packets per flow */
93 struct rb_root *fq_root; 94 struct rb_root *fq_root;
@@ -114,6 +115,7 @@ static struct fq_flow detached, throttled;
114static void fq_flow_set_detached(struct fq_flow *f) 115static void fq_flow_set_detached(struct fq_flow *f)
115{ 116{
116 f->next = &detached; 117 f->next = &detached;
118 f->age = jiffies;
117} 119}
118 120
119static bool fq_flow_is_detached(const struct fq_flow *f) 121static bool fq_flow_is_detached(const struct fq_flow *f)
@@ -366,17 +368,20 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
366 } 368 }
367 369
368 f->qlen++; 370 f->qlen++;
369 flow_queue_add(f, skb);
370 if (skb_is_retransmit(skb)) 371 if (skb_is_retransmit(skb))
371 q->stat_tcp_retrans++; 372 q->stat_tcp_retrans++;
372 sch->qstats.backlog += qdisc_pkt_len(skb); 373 sch->qstats.backlog += qdisc_pkt_len(skb);
373 if (fq_flow_is_detached(f)) { 374 if (fq_flow_is_detached(f)) {
374 fq_flow_add_tail(&q->new_flows, f); 375 fq_flow_add_tail(&q->new_flows, f);
375 if (q->quantum > f->credit) 376 if (time_after(jiffies, f->age + q->flow_refill_delay))
376 f->credit = q->quantum; 377 f->credit = max_t(u32, f->credit, q->quantum);
377 q->inactive_flows--; 378 q->inactive_flows--;
378 qdisc_unthrottled(sch); 379 qdisc_unthrottled(sch);
379 } 380 }
381
382 /* Note: this overwrites f->age */
383 flow_queue_add(f, skb);
384
380 if (unlikely(f == &q->internal)) { 385 if (unlikely(f == &q->internal)) {
381 q->stat_internal_packets++; 386 q->stat_internal_packets++;
382 qdisc_unthrottled(sch); 387 qdisc_unthrottled(sch);
@@ -454,7 +459,6 @@ begin:
454 fq_flow_add_tail(&q->old_flows, f); 459 fq_flow_add_tail(&q->old_flows, f);
455 } else { 460 } else {
456 fq_flow_set_detached(f); 461 fq_flow_set_detached(f);
457 f->age = jiffies;
458 q->inactive_flows++; 462 q->inactive_flows++;
459 } 463 }
460 goto begin; 464 goto begin;
@@ -608,6 +612,7 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
608 [TCA_FQ_FLOW_DEFAULT_RATE] = { .type = NLA_U32 }, 612 [TCA_FQ_FLOW_DEFAULT_RATE] = { .type = NLA_U32 },
609 [TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 }, 613 [TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 },
610 [TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 }, 614 [TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 },
615 [TCA_FQ_FLOW_REFILL_DELAY] = { .type = NLA_U32 },
611}; 616};
612 617
613static int fq_change(struct Qdisc *sch, struct nlattr *opt) 618static int fq_change(struct Qdisc *sch, struct nlattr *opt)
@@ -664,6 +669,12 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
664 err = -EINVAL; 669 err = -EINVAL;
665 } 670 }
666 671
672 if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
673 u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);
674
675 q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
676 }
677
667 if (!err) 678 if (!err)
668 err = fq_resize(q, fq_log); 679 err = fq_resize(q, fq_log);
669 680
@@ -699,6 +710,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt)
699 q->flow_plimit = 100; 710 q->flow_plimit = 100;
700 q->quantum = 2 * psched_mtu(qdisc_dev(sch)); 711 q->quantum = 2 * psched_mtu(qdisc_dev(sch));
701 q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch)); 712 q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
713 q->flow_refill_delay = msecs_to_jiffies(40);
702 q->flow_max_rate = ~0U; 714 q->flow_max_rate = ~0U;
703 q->rate_enable = 1; 715 q->rate_enable = 1;
704 q->new_flows.first = NULL; 716 q->new_flows.first = NULL;
@@ -733,6 +745,8 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
733 nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) || 745 nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
734 nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) || 746 nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
735 nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) || 747 nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
748 nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
749 jiffies_to_usecs(q->flow_refill_delay)) ||
736 nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log)) 750 nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
737 goto nla_put_failure; 751 goto nla_put_failure;
738 752