author     Eric Dumazet <dada1@cosmosbay.com>       2008-11-20 23:39:09 -0500
committer  David S. Miller <davem@davemloft.net>    2008-11-20 23:39:09 -0500
commit     9db66bdcc83749affe61c61eb8ff3cf08f42afec
tree       81bb20e4f569d3b44731498428277db9d77fa7a9 /net/ipv4/inet_timewait_sock.c
parent     b8c26a33c8b6f0a150e9cb38ed80b890be55395c
net: convert TCP/DCCP ehash rwlocks to spinlocks
Now that TCP & DCCP use RCU lookups, we can convert the ehash rwlocks to spinlocks.

/proc/net/tcp and other seq_file 'readers' can safely be converted to 'writers'.

This should speed up writers, since spin_lock()/spin_unlock() use only one atomic
operation instead of the two needed by write_lock()/write_unlock().

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
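As a rough illustration of the scheme the message relies on (a sketch only, not code
from this patch; the demo_* names are made up), readers walk an RCU nulls hash chain
locklessly, so the per-bucket lock only ever serializes writers and a plain spinlock
is enough:

/* Sketch only: toy bucket mirroring the ehash locking described above. */
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/rculist_nulls.h>
#include <linux/rcupdate.h>

struct demo_entry {
	struct hlist_nulls_node	node;
	unsigned int		key;
};

struct demo_bucket {
	struct hlist_nulls_head	chain;
	spinlock_t		lock;	/* taken by writers only */
};

static void demo_bucket_init(struct demo_bucket *b)
{
	spin_lock_init(&b->lock);
	INIT_HLIST_NULLS_HEAD(&b->chain, 0);	/* 0 = arbitrary nulls marker */
}

/* Writer side: what needed write_lock() before only needs a spinlock now. */
static void demo_insert(struct demo_bucket *b, struct demo_entry *e)
{
	spin_lock(&b->lock);
	hlist_nulls_add_head_rcu(&e->node, &b->chain);
	spin_unlock(&b->lock);
}

/*
 * Reader side: lockless RCU traversal, which is why the rwlock's read side
 * went unused.  The real ehash lookup additionally re-checks the nulls value
 * at the end of the chain and takes a reference before using the socket;
 * this toy version only reports whether the key is present.
 */
static bool demo_contains(struct demo_bucket *b, unsigned int key)
{
	struct demo_entry *e;
	struct hlist_nulls_node *n;
	bool found = false;

	rcu_read_lock();
	hlist_nulls_for_each_entry_rcu(e, n, &b->chain, node) {
		if (e->key == key) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

With no read-side lock left, the cheaper uncontended lock/unlock of a spinlock is the
writer speedup claimed above.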
Diffstat (limited to 'net/ipv4/inet_timewait_sock.c')
-rw-r--r--   net/ipv4/inet_timewait_sock.c   22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 60689951ecdb..8554d0ea1719 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -20,16 +20,16 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
 	struct inet_bind_hashbucket *bhead;
 	struct inet_bind_bucket *tb;
 	/* Unlink from established hashes. */
-	rwlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
+	spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
 
-	write_lock(lock);
+	spin_lock(lock);
 	if (hlist_nulls_unhashed(&tw->tw_node)) {
-		write_unlock(lock);
+		spin_unlock(lock);
 		return;
 	}
 	hlist_nulls_del_rcu(&tw->tw_node);
 	sk_nulls_node_init(&tw->tw_node);
-	write_unlock(lock);
+	spin_unlock(lock);
 
 	/* Disassociate with bind bucket. */
 	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
@@ -76,7 +76,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 	const struct inet_sock *inet = inet_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
-	rwlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
+	spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
 	struct inet_bind_hashbucket *bhead;
 	/* Step 1: Put TW into bind hash. Original socket stays there too.
 	   Note, that any socket with inet->num != 0 MUST be bound in
@@ -90,7 +90,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
 	spin_unlock(&bhead->lock);
 
-	write_lock(lock);
+	spin_lock(lock);
 
 	/*
 	 * Step 2: Hash TW into TIMEWAIT chain.
@@ -104,7 +104,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 	if (__sk_nulls_del_node_init_rcu(sk))
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 
-	write_unlock(lock);
+	spin_unlock(lock);
 }
 
 EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
@@ -427,9 +427,9 @@ void inet_twsk_purge(struct net *net, struct inet_hashinfo *hashinfo,
 	for (h = 0; h < (hashinfo->ehash_size); h++) {
 		struct inet_ehash_bucket *head =
 			inet_ehash_bucket(hashinfo, h);
-		rwlock_t *lock = inet_ehash_lockp(hashinfo, h);
+		spinlock_t *lock = inet_ehash_lockp(hashinfo, h);
 restart:
-		write_lock(lock);
+		spin_lock(lock);
 		sk_nulls_for_each(sk, node, &head->twchain) {
 
 			tw = inet_twsk(sk);
@@ -438,13 +438,13 @@ restart:
 				continue;
 
 			atomic_inc(&tw->tw_refcnt);
-			write_unlock(lock);
+			spin_unlock(lock);
 			inet_twsk_deschedule(tw, twdr);
 			inet_twsk_put(tw);
 
 			goto restart;
 		}
-		write_unlock(lock);
+		spin_unlock(lock);
 	}
 	local_bh_enable();
 }