author		Eric Dumazet <edumazet@google.com>	2012-07-29 19:20:37 -0400
committer	David S. Miller <davem@davemloft.net>	2012-07-30 17:53:22 -0400
commit		404e0a8b6a55d5e1cd138c6deb1bca9abdf75d8c
tree		38e9748d38c415cc97b973fecb9279cd43f76393 /net/core
parent		cca32e4bf999a34ac08d959f351f2b30bcd02460
net: ipv4: fix RCU races on dst refcounts
Commit c6cffba4ffa2 ("ipv4: Fix input route performance regression.")
added various fatal races with dst refcounts.
Crashes happen on TCP workloads if routes are added or deleted at the
same time.
The dst_free() calls from free_fib_info_rcu() are clearly racy.
We instead need regular dst refcounting (dst_release()) and must make
sure dst_release() is aware of RCU grace periods:
Add a DST_RCU_FREE flag so that dst_release() waits for an RCU grace
period before destroying a cached dst.
Introduce a new inet_sk_rx_dst_set() helper, using atomic_inc_not_zero()
to make sure we don't increment a zero refcount (on a dst currently
waiting out an RCU grace period before destruction).
rt_cache_route() must take a reference on the new cached route, and
release it if it was not able to install it.
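rt_cache_route() lives in net/ipv4/route.c, also outside this diffstat.
A sketch of the hold/release dance under the same assumptions (per-nexthop
cache slots named nh_rth_input/nh_rth_output, as in kernels of this era):

/* Sketch: publish the route with cmpxchg(). The reference taken via
 * dst_hold() stays with the cache slot on success; on failure it is
 * dropped again. The displaced route goes through dst_release(), which
 * now honors the RCU grace period via DST_RCU_FREE.
 */
static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
{
	struct rtable *orig, *prev, **p = &nh->nh_rth_output;

	if (rt_is_input_route(rt))
		p = &nh->nh_rth_input;

	orig = *p;

	rt->dst.flags |= DST_RCU_FREE;
	dst_hold(&rt->dst);
	prev = cmpxchg(p, orig, rt);
	if (prev == orig) {
		if (orig)
			dst_release(&orig->dst);
	} else {
		dst_release(&rt->dst);	/* lost the race: drop our ref */
	}
	return prev == orig;
}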
With this patch, my machines survive various benchmarks.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/dst.c	26
1 file changed, 21 insertions, 5 deletions
diff --git a/net/core/dst.c b/net/core/dst.c
index 069d51d29414..d9e33ebe170f 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -258,6 +258,15 @@ again:
 }
 EXPORT_SYMBOL(dst_destroy);
 
+static void dst_rcu_destroy(struct rcu_head *head)
+{
+	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
+
+	dst = dst_destroy(dst);
+	if (dst)
+		__dst_free(dst);
+}
+
 void dst_release(struct dst_entry *dst)
 {
 	if (dst) {
@@ -265,10 +274,14 @@ void dst_release(struct dst_entry *dst)
 
 		newrefcnt = atomic_dec_return(&dst->__refcnt);
 		WARN_ON(newrefcnt < 0);
-		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
-			dst = dst_destroy(dst);
-			if (dst)
-				__dst_free(dst);
+		if (unlikely(dst->flags & (DST_NOCACHE | DST_RCU_FREE)) && !newrefcnt) {
+			if (dst->flags & DST_RCU_FREE) {
+				call_rcu_bh(&dst->rcu_head, dst_rcu_destroy);
+			} else {
+				dst = dst_destroy(dst);
+				if (dst)
+					__dst_free(dst);
+			}
 		}
 	}
 }
@@ -320,11 +333,14 @@ EXPORT_SYMBOL(__dst_destroy_metrics_generic);
  */
 void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
 {
+	bool hold;
+
 	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
 	/* If dst not in cache, we must take a reference, because
 	 * dst_release() will destroy dst as soon as its refcount becomes zero
 	 */
-	if (unlikely(dst->flags & DST_NOCACHE)) {
+	hold = (dst->flags & (DST_NOCACHE | DST_RCU_FREE)) == DST_NOCACHE;
+	if (unlikely(hold)) {
 		dst_hold(dst);
 		skb_dst_set(skb, dst);
 	} else {
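A note on the last hunk: hold is true only when DST_NOCACHE is set and
DST_RCU_FREE is not. The apparent intent is that a DST_RCU_FREE dst
cannot be destroyed before the RCU read-side critical section asserted
by the WARN_ON() ends, so a noref skb can borrow it without touching
the refcount; only an uncached dst that is not RCU-freed still needs a
real reference.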