author		Eric Dumazet <edumazet@google.com>	2013-10-08 18:16:00 -0400
committer	David S. Miller <davem@davemloft.net>	2013-10-08 21:54:01 -0400
commit		7eec4174ff29cd42f2acfae8112f51c228545d40 (patch)
tree		435e9d12eec1944c4302d95cedc7fa46deb73a04 /net
parent		2b1f18a4d6ae8057a93e736a34cdcca925279403 (diff)
pkt_sched: fq: fix non TCP flows pacing
Steinar reported FQ pacing was not working for UDP flows.

It looks like the initial sk->sk_pacing_rate value of 0 was a wrong choice.
We should init it to ~0U (unlimited).

Then, TCA_FQ_FLOW_DEFAULT_RATE should be removed because it makes no real
sense. The default rate is really unlimited, and we need to avoid a zero
divide.

Reported-by: Steinar H. Gunderson <sesse@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
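For readers following the logic, below is a minimal user-space sketch (not kernel code; the function name and simplified types are made up for illustration) of how the dequeue path selects a pacing rate after this patch: the socket rate now defaults to ~0U, so a plain min() with q->flow_max_rate yields "unlimited" instead of the old default of 0 that broke pacing for UDP flows.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the post-patch rate selection in fq_dequeue().
 * ~0U means "unlimited" both for sk->sk_pacing_rate (its new default set in
 * sock_init_data()) and for q->flow_max_rate, so a plain min() works and no
 * separate "default rate" fallback is needed.
 */
static uint32_t effective_rate(uint32_t sk_pacing_rate, uint32_t flow_max_rate,
			       int socket_is_paced)
{
	uint32_t rate = flow_max_rate;

	if (socket_is_paced)
		rate = rate < sk_pacing_rate ? rate : sk_pacing_rate;

	return rate;		/* caller skips pacing when rate == ~0U */
}

int main(void)
{
	/* UDP flow, no qdisc maxrate configured: both inputs are ~0U, so the
	 * flow is not throttled.  With the pre-patch default of 0 this path
	 * could end up not applying maxrate to UDP flows at all, which is
	 * the bug described above.
	 */
	printf("udp, no maxrate: 0x%x\n", effective_rate(~0U, ~0U, 1));

	/* Same flow once a maxrate is set on the qdisc: min() applies it. */
	printf("udp, maxrate 12500000 B/s: %u\n",
	       effective_rate(~0U, 12500000U, 1));
	return 0;
}
```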
Diffstat (limited to 'net')
-rw-r--r--	net/core/sock.c		 1
-rw-r--r--	net/sched/sch_fq.c	20
2 files changed, 10 insertions, 11 deletions
diff --git a/net/core/sock.c b/net/core/sock.c
index 5b6beba494a3..0b39e7ae4383 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2319,6 +2319,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 	sk->sk_ll_usec = sysctl_net_busy_read;
 #endif
 
+	sk->sk_pacing_rate = ~0U;
 	/*
 	 * Before updating sk_refcnt, we must commit prior changes to memory
 	 * (Documentation/RCU/rculist_nulls.txt for details)
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 48501a2baf75..a9dfdda9ed1d 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -472,20 +472,16 @@ begin:
 	if (f->credit > 0 || !q->rate_enable)
 		goto out;
 
-	if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) {
-		rate = skb->sk->sk_pacing_rate ?: q->flow_default_rate;
+	rate = q->flow_max_rate;
+	if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT)
+		rate = min(skb->sk->sk_pacing_rate, rate);
 
-		rate = min(rate, q->flow_max_rate);
-	} else {
-		rate = q->flow_max_rate;
-		if (rate == ~0U)
-			goto out;
-	}
-	if (rate) {
+	if (rate != ~0U) {
 		u32 plen = max(qdisc_pkt_len(skb), q->quantum);
 		u64 len = (u64)plen * NSEC_PER_SEC;
 
-		do_div(len, rate);
+		if (likely(rate))
+			do_div(len, rate);
 		/* Since socket rate can change later,
 		 * clamp the delay to 125 ms.
 		 * TODO: maybe segment the too big skb, as in commit
@@ -735,12 +731,14 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
 	if (opts == NULL)
 		goto nla_put_failure;
 
+	/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore,
+	 * do not bother giving its value
+	 */
 	if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
 	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
 	    nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
 	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
 	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
-	    nla_put_u32(skb, TCA_FQ_FLOW_DEFAULT_RATE, q->flow_default_rate) ||
 	    nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
 	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
 		goto nla_put_failure;
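For completeness, the throttle delay computed in the first sch_fq.c hunk is just max(qdisc_pkt_len(skb), q->quantum) * NSEC_PER_SEC / rate, clamped (per the in-code comment) to 125 ms. A hedged user-space sketch of that arithmetic; the function name and standalone constants are illustrative, not the kernel's code:

```c
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define MAX_DELAY_NS	(125ULL * 1000 * 1000)	/* "clamp the delay to 125 ms" */

/* Illustrative only: nanoseconds a packet of plen bytes is held when paced
 * at rate bytes/sec.  rate == 0 skips the division, mirroring the
 * likely(rate) guard in the patch, and then simply hits the 125 ms clamp.
 */
static uint64_t pacing_delay_ns(uint32_t plen, uint32_t quantum, uint32_t rate)
{
	uint64_t len;

	if (plen < quantum)	/* len uses max(qdisc_pkt_len(skb), q->quantum) */
		plen = quantum;

	len = (uint64_t)plen * NSEC_PER_SEC;
	if (rate)
		len /= rate;

	return len > MAX_DELAY_NS ? MAX_DELAY_NS : len;
}

int main(void)
{
	/* Example values: 1500-byte packet, 3028-byte quantum, 125000 B/s
	 * (1 Mbit/s) pacing rate: 3028 * 1e9 / 125000 ~= 24.2 ms between
	 * transmit opportunities.
	 */
	printf("%llu ns\n",
	       (unsigned long long)pacing_delay_ns(1500, 3028, 125000));
	return 0;
}
```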