author		Eric W. Biederman <ebiederm@xmission.com>	2009-12-02 21:29:08 -0500
committer	David S. Miller <davem@davemloft.net>	2009-12-03 15:23:47 -0500
commit		575f4cd5a5b639457747434dbe18d175fa767db4 (patch)
tree		476224db152a2b5bef33046944cb77b5822fae26	/net/ipv4/inet_timewait_sock.c
parent		e9c5158ac26affd5d8ce006521bdfb7148090e18 (diff)
net: Use rcu lookups in inet_twsk_purge.
While we are looking up entries to free there is no reason to take
the lock in inet_twsk_purge.  We have to drop locks and restart
occasionally anyway, so adding a few more restarts in case we get on
the wrong list because of a timewait move is no big deal.  At the
same time, not taking the lock for long periods of time is much more
polite to the rest of the users of the hash table.

In my test configuration of killing 4k network namespaces this
change causes 4k back-to-back runs of inet_twsk_purge on an empty
hash table to go from roughly 20.7s to 3.3s, and the total time to
destroy 4k network namespaces goes from roughly 44s to 3.3s.

Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
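The key to making the lockless lookup safe is the refcount change in the
patch below: a reader traversing the chain under RCU can race with the
final release of a timewait socket, so the unconditional atomic_inc() is
replaced by atomic_inc_not_zero(), which refuses to pin an object whose
refcount has already hit zero.  A minimal userspace sketch of that idiom
using C11 atomics follows; the struct obj, obj_get(), and obj_put() names
are illustrative, not kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;	/* 0 means a free is already in flight */
	int id;
};

/* Take a reference only if the object is still live, mirroring
 * atomic_inc_not_zero(): an unconditional increment could "resurrect"
 * an object whose last reference was just dropped. */
static bool obj_get(struct obj *o)
{
	int old = atomic_load(&o->refcnt);

	while (old != 0)
		if (atomic_compare_exchange_weak(&o->refcnt, &old, old + 1))
			return true;	/* reference taken */
	return false;			/* object is dying; skip it */
}

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		free(o);		/* dropped the last reference */
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->refcnt, 1);
	o->id = 42;

	if (obj_get(o))
		printf("pinned obj %d, refcnt=%d\n", o->id,
		       atomic_load(&o->refcnt));

	obj_put(o);	/* release the lookup reference */
	obj_put(o);	/* release the original reference; frees o */
	return 0;
}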
Diffstat (limited to 'net/ipv4/inet_timewait_sock.c')
-rw-r--r--	net/ipv4/inet_timewait_sock.c	39
1 file changed, 24 insertions(+), 15 deletions(-)
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 1f5d508bb18b..d38ca7c77b93 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -427,31 +427,40 @@ void inet_twsk_purge(struct net *net, struct inet_hashinfo *hashinfo,
 	struct inet_timewait_sock *tw;
 	struct sock *sk;
 	struct hlist_nulls_node *node;
-	int h;
+	unsigned int slot;
 
-	local_bh_disable();
-	for (h = 0; h <= hashinfo->ehash_mask; h++) {
-		struct inet_ehash_bucket *head =
-			inet_ehash_bucket(hashinfo, h);
-		spinlock_t *lock = inet_ehash_lockp(hashinfo, h);
+	for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
+		struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
+restart_rcu:
+		rcu_read_lock();
 restart:
-		spin_lock(lock);
-		sk_nulls_for_each(sk, node, &head->twchain) {
-
+		sk_nulls_for_each_rcu(sk, node, &head->twchain) {
 			tw = inet_twsk(sk);
 			if (!net_eq(twsk_net(tw), net) ||
 			    tw->tw_family != family)
 				continue;
 
-			atomic_inc(&tw->tw_refcnt);
-			spin_unlock(lock);
+			if (unlikely(!atomic_inc_not_zero(&tw->tw_refcnt)))
+				continue;
+
+			if (unlikely(!net_eq(twsk_net(tw), net) ||
+				     tw->tw_family != family)) {
+				inet_twsk_put(tw);
+				goto restart;
+			}
+
+			rcu_read_unlock();
 			inet_twsk_deschedule(tw, twdr);
 			inet_twsk_put(tw);
-
-			goto restart;
+			goto restart_rcu;
 		}
-		spin_unlock(lock);
+		/* If the nulls value we got at the end of this lookup is
+		 * not the expected one, we must restart lookup.
+		 * We probably met an item that was moved to another chain.
+		 */
+		if (get_nulls_value(node) != slot)
+			goto restart;
+		rcu_read_unlock();
 	}
-	local_bh_enable();
 }
 EXPORT_SYMBOL_GPL(inet_twsk_purge);
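The get_nulls_value() check at the end of the new hunk works because the
"nulls" end marker of each hlist_nulls chain encodes the bucket it
terminates: if a lockless walk ends on a marker for a different bucket,
an entry was moved mid-traversal and the lookup must restart.  Below is
a minimal standalone sketch of that encoding, assuming the kernel's
((value << 1) | 1) convention; get_nulls_value_sketch() is a hypothetical
stand-in for the real helper.

#include <stdint.h>
#include <stdio.h>

/* Tail marker: an odd "pointer" value encoding the bucket number,
 * following the hlist_nulls convention ((value << 1) | 1). */
#define NULLS_MARKER(v)	((void *)((((uintptr_t)(v)) << 1) | 1))

static int is_a_nulls(const void *ptr)
{
	return ((uintptr_t)ptr & 1) != 0;	/* odd => end marker */
}

static uintptr_t get_nulls_value_sketch(const void *ptr)
{
	return (uintptr_t)ptr >> 1;		/* recover the bucket */
}

int main(void)
{
	unsigned int slot = 7;
	void *tail = NULLS_MARKER(slot);

	/* A traversal that ends on a tail encoding a different bucket
	 * has wandered onto another chain, so it must be restarted. */
	if (is_a_nulls(tail) && get_nulls_value_sketch(tail) != slot)
		puts("ended on another chain: restart lookup");
	else
		puts("ended on the expected chain");
	return 0;
}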