author	Eric Dumazet <dada1@cosmosbay.com>	2008-11-20 23:39:09 -0500
committer	David S. Miller <davem@davemloft.net>	2008-11-20 23:39:09 -0500
commit	9db66bdcc83749affe61c61eb8ff3cf08f42afec (patch)
tree	81bb20e4f569d3b44731498428277db9d77fa7a9	/net/ipv6/inet6_hashtables.c
parent	b8c26a33c8b6f0a150e9cb38ed80b890be55395c (diff)
net: convert TCP/DCCP ehash rwlocks to spinlocks
Now that TCP & DCCP use RCU lookups, we can convert the ehash rwlocks to spinlocks.

/proc/net/tcp and other seq_file 'readers' can safely be converted to 'writers'.

This should speed up writers, since a spin_lock()/spin_unlock() pair uses only one atomic operation instead of the two needed for write_lock()/write_unlock().

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
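The pattern behind this conversion is worth spelling out: because lookups now walk the hash chains under rcu_read_lock() and never take the per-bucket lock, that lock only ever serializes mutators, so a plain spinlock_t suffices. Below is a minimal sketch of that pattern, not part of the patch; my_bucket, my_item, my_lookup and my_insert are illustrative names, not kernel symbols, and the real code additionally handles nulls-marker restarts and socket reference counting.

/*
 * Sketch of the RCU-reader / spinlock-writer pattern this patch
 * relies on. Illustrative names only, not kernel symbols.
 */
#include <linux/rcupdate.h>
#include <linux/rculist_nulls.h>
#include <linux/spinlock.h>

struct my_bucket {
	struct hlist_nulls_head	chain;
	spinlock_t		lock;	/* serializes writers only */
};

struct my_item {
	struct hlist_nulls_node	node;
	unsigned int		key;
};

/* Reader: lockless, safe against concurrent inserts and removals. */
static struct my_item *my_lookup(struct my_bucket *b, unsigned int key)
{
	struct my_item *item;
	const struct hlist_nulls_node *n;

	rcu_read_lock();
	hlist_nulls_for_each_entry_rcu(item, n, &b->chain, node) {
		if (item->key == key) {
			/* A real caller would take a reference here,
			 * before dropping the RCU read lock. */
			rcu_read_unlock();
			return item;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/* Writer: per the commit message, a spin_lock()/spin_unlock() pair
 * needs one atomic operation where write_lock()/write_unlock()
 * needed two. */
static void my_insert(struct my_bucket *b, struct my_item *item)
{
	spin_lock(&b->lock);
	hlist_nulls_add_head_rcu(&item->node, &b->chain);
	spin_unlock(&b->lock);
}

This also explains why the seq_file iterators mentioned above can become lock 'writers' without hurting the fast path: lookups no longer take the bucket lock at all, so the only contention left is between mutators and the occasional /proc walker.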
Diffstat (limited to 'net/ipv6/inet6_hashtables.c')
-rw-r--r--	net/ipv6/inet6_hashtables.c	15
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 21544b9be259..e0fd68187f83 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -38,14 +38,14 @@ void __inet6_hash(struct sock *sk)
 	} else {
 		unsigned int hash;
 		struct hlist_nulls_head *list;
-		rwlock_t *lock;
+		spinlock_t *lock;
 
 		sk->sk_hash = hash = inet6_sk_ehashfn(sk);
 		list = &inet_ehash_bucket(hashinfo, hash)->chain;
 		lock = inet_ehash_lockp(hashinfo, hash);
-		write_lock(lock);
+		spin_lock(lock);
 		__sk_nulls_add_node_rcu(sk, list);
-		write_unlock(lock);
+		spin_unlock(lock);
 	}
 
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
@@ -195,13 +195,12 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
 	const unsigned int hash = inet6_ehashfn(net, daddr, lport, saddr,
 						inet->dport);
 	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
-	rwlock_t *lock = inet_ehash_lockp(hinfo, hash);
+	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
 	struct sock *sk2;
 	const struct hlist_nulls_node *node;
 	struct inet_timewait_sock *tw;
 
-	prefetch(head->chain.first);
-	write_lock(lock);
+	spin_lock(lock);
 
 	/* Check TIME-WAIT sockets first. */
 	sk_nulls_for_each(sk2, node, &head->twchain) {
@@ -230,8 +229,8 @@ unique:
 	WARN_ON(!sk_unhashed(sk));
 	__sk_nulls_add_node_rcu(sk, &head->chain);
 	sk->sk_hash = hash;
+	spin_unlock(lock);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
-	write_unlock(lock);
 
 	if (twp != NULL) {
 		*twp = tw;
@@ -246,7 +245,7 @@ unique:
 	return 0;
 
 not_unique:
-	write_unlock(lock);
+	spin_unlock(lock);
 	return -EADDRNOTAVAIL;
 }
 