author    Eric Dumazet <edumazet@google.com>    2018-05-02 13:03:30 -0400
committer David S. Miller <davem@davemloft.net> 2018-05-02 16:37:38 -0400
commit    7df40c2673a1307c3260aab6f9d4b9bf97ca8fd7 (patch)
tree      e2da27920cae457805549786292b4dc6e0b592d8
parent    30ca22e4a5d0063dd9a9cdf35cd139c5807cbeb3 (diff)
net_sched: fq: take care of throttled flows before reuse
Normally, a socket can not be freed/reused unless all its TX packets
left qdisc and were TX-completed. However connect(AF_UNSPEC) allows
this to happen.

With commit fc59d5bdf1e3 ("pkt_sched: fq: clear time_next_packet for
reused flows") we cleared f->time_next_packet but took no special
action if the flow was still in the throttled rb-tree.

Since f->time_next_packet is the key used in the rb-tree searches,
blindly clearing it might break rb-tree integrity. We need to make
sure the flow is no longer in the rb-tree to avoid this problem.

Fixes: fc59d5bdf1e3 ("pkt_sched: fq: clear time_next_packet for reused flows")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 net/sched/sch_fq.c | 37 +++++++++++++++++++++++++------------
 1 file changed, 25 insertions(+), 12 deletions(-)
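
To illustrate the rb-tree integrity argument above, here is a small standalone
C sketch -- not sch_fq or the kernel rbtree; the struct and the insert/lookup
helpers are simplified stand-ins for the q->delayed tree keyed on
f->time_next_packet. It shows that clearing a node's key while the node is
still linked leaves the tree shaped by the old key, so later searches take the
wrong branch and miss the node entirely.

/*
 * Standalone illustration only -- not kernel code.
 * A flow sits in an ordered tree keyed on time_next_packet; if that
 * key is cleared while the node is still linked, lookups break.
 */
#include <stdio.h>

struct flow {
	unsigned long long time_next_packet;	/* the search key */
	struct flow *left, *right;
};

/* Walk to the leaf slot where a node with this key belongs. */
static struct flow **find_slot(struct flow **root, unsigned long long key)
{
	while (*root)
		root = key >= (*root)->time_next_packet ?
		       &(*root)->right : &(*root)->left;
	return root;
}

static void insert(struct flow **root, struct flow *f)
{
	*find_slot(root, f->time_next_packet) = f;
}

/* Search repeats the comparisons the insert used. */
static struct flow *lookup(struct flow *root, unsigned long long key)
{
	while (root && root->time_next_packet != key)
		root = key >= root->time_next_packet ? root->right : root->left;
	return root;
}

int main(void)
{
	struct flow a = { .time_next_packet = 100 };
	struct flow b = { .time_next_packet = 200 };
	struct flow c = { .time_next_packet = 300 };
	struct flow *root = NULL;

	insert(&root, &b);
	insert(&root, &a);
	insert(&root, &c);

	/* Reused flow handled the old way: key cleared in place. */
	c.time_next_packet = 0;

	/*
	 * The tree is still shaped by the old key (c is b's right child)
	 * but c now compares as the smallest key, so no search finds it.
	 */
	printf("lookup(0)   = %p\n", (void *)lookup(root, 0));
	printf("lookup(300) = %p\n", (void *)lookup(root, 300));
	return 0;
}

The patch below takes the other route: fq_flow_unset_throttled() first removes
the flow from q->delayed, and only then is f->time_next_packet cleared.
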
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index a366e4c9413a..4808713c73b9 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -128,6 +128,28 @@ static bool fq_flow_is_detached(const struct fq_flow *f)
 	return f->next == &detached;
 }
 
+static bool fq_flow_is_throttled(const struct fq_flow *f)
+{
+	return f->next == &throttled;
+}
+
+static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
+{
+	if (head->first)
+		head->last->next = flow;
+	else
+		head->first = flow;
+	head->last = flow;
+	flow->next = NULL;
+}
+
+static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
+{
+	rb_erase(&f->rate_node, &q->delayed);
+	q->throttled_flows--;
+	fq_flow_add_tail(&q->old_flows, f);
+}
+
 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
 {
 	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
@@ -155,15 +177,6 @@ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
 
 static struct kmem_cache *fq_flow_cachep __read_mostly;
 
-static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
-{
-	if (head->first)
-		head->last->next = flow;
-	else
-		head->first = flow;
-	head->last = flow;
-	flow->next = NULL;
-}
 
 /* limit number of collected flows per round */
 #define FQ_GC_MAX 8
@@ -267,6 +280,8 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
 			     f->socket_hash != sk->sk_hash)) {
 			f->credit = q->initial_quantum;
 			f->socket_hash = sk->sk_hash;
+			if (fq_flow_is_throttled(f))
+				fq_flow_unset_throttled(q, f);
 			f->time_next_packet = 0ULL;
 		}
 		return f;
@@ -438,9 +453,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
 			q->time_next_delayed_flow = f->time_next_packet;
 			break;
 		}
-		rb_erase(p, &q->delayed);
-		q->throttled_flows--;
-		fq_flow_add_tail(&q->old_flows, f);
+		fq_flow_unset_throttled(q, f);
 	}
 }
 