author    | Eric Dumazet <dada1@cosmosbay.com>    | 2008-11-20 23:39:09 -0500
committer | David S. Miller <davem@davemloft.net> | 2008-11-20 23:39:09 -0500
commit    | 9db66bdcc83749affe61c61eb8ff3cf08f42afec
tree      | 81bb20e4f569d3b44731498428277db9d77fa7a9 /net/ipv4/inet_hashtables.c
parent    | b8c26a33c8b6f0a150e9cb38ed80b890be55395c
net: convert TCP/DCCP ehash rwlocks to spinlocks
Now that TCP & DCCP use RCU lookups, we can convert the ehash rwlocks to spinlocks.
/proc/net/tcp and other seq_file 'readers' can safely be converted to 'writers', taking the spinlock where they previously took the read lock.
This should speed up writers, since spin_lock()/spin_unlock() uses a single atomic operation where write_lock()/write_unlock() needs two.
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
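To make the writer-side change concrete, here is a minimal userspace analogue of the conversion. This is not kernel code: pthread_rwlock_t and pthread_spinlock_t stand in for the kernel's rwlock_t and spinlock_t, and the bucket layout is invented for illustration. The one-atomic-versus-two claim in the message is about the kernel primitives (on x86, for instance, spin_unlock is a plain store while write_unlock is a locked add); the sketch only shows the structural change a writer sees.

/* Userspace sketch (build with: cc -pthread demo.c). The types and
 * bucket layout are illustrative stand-ins, not the kernel's. */
#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int key; };

/* Before: readers and writers both funnel through a bucket rwlock. */
struct bucket_rw {
	pthread_rwlock_t lock;
	struct node *chain;
};

/* After: lookups go lockless (RCU), so the bucket lock only ever sees
 * writers and a plain spinlock is enough. */
struct bucket_spin {
	pthread_spinlock_t lock;
	struct node *chain;
};

static void insert_rw(struct bucket_rw *b, struct node *n)
{
	pthread_rwlock_wrlock(&b->lock);   /* writer excludes readers too */
	n->next = b->chain;
	b->chain = n;
	pthread_rwlock_unlock(&b->lock);
}

static void insert_spin(struct bucket_spin *b, struct node *n)
{
	pthread_spin_lock(&b->lock);       /* writers only serialize here */
	n->next = b->chain;
	b->chain = n;
	pthread_spin_unlock(&b->lock);
}

int main(void)
{
	struct bucket_rw brw = { .chain = NULL };
	struct bucket_spin bsp = { .chain = NULL };
	struct node a = { .key = 1 }, b = { .key = 2 };

	pthread_rwlock_init(&brw.lock, NULL);
	pthread_spin_init(&bsp.lock, PTHREAD_PROCESS_PRIVATE);
	insert_rw(&brw, &a);
	insert_spin(&bsp, &b);
	printf("rw chain head: %d, spin chain head: %d\n",
	       brw.chain->key, bsp.chain->key);
	return 0;
}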
Diffstat (limited to 'net/ipv4/inet_hashtables.c')
-rw-r--r-- | net/ipv4/inet_hashtables.c | 21
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 377d004e5723..4c273a9981a6 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -271,13 +271,12 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
 	struct net *net = sock_net(sk);
 	unsigned int hash = inet_ehashfn(net, daddr, lport, saddr, inet->dport);
 	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
-	rwlock_t *lock = inet_ehash_lockp(hinfo, hash);
+	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
 	struct sock *sk2;
 	const struct hlist_nulls_node *node;
 	struct inet_timewait_sock *tw;
 
-	prefetch(head->chain.first);
-	write_lock(lock);
+	spin_lock(lock);
 
 	/* Check TIME-WAIT sockets first. */
 	sk_nulls_for_each(sk2, node, &head->twchain) {
@@ -308,8 +307,8 @@ unique:
 	sk->sk_hash = hash;
 	WARN_ON(!sk_unhashed(sk));
 	__sk_nulls_add_node_rcu(sk, &head->chain);
+	spin_unlock(lock);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
-	write_unlock(lock);
 
 	if (twp) {
 		*twp = tw;
@@ -325,7 +324,7 @@ unique:
 	return 0;
 
 not_unique:
-	write_unlock(lock);
+	spin_unlock(lock);
 	return -EADDRNOTAVAIL;
 }
 
@@ -340,7 +339,7 @@ void __inet_hash_nolisten(struct sock *sk)
 {
 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
 	struct hlist_nulls_head *list;
-	rwlock_t *lock;
+	spinlock_t *lock;
 	struct inet_ehash_bucket *head;
 
 	WARN_ON(!sk_unhashed(sk));
@@ -350,10 +349,10 @@ void __inet_hash_nolisten(struct sock *sk)
 	list = &head->chain;
 	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
 
-	write_lock(lock);
+	spin_lock(lock);
 	__sk_nulls_add_node_rcu(sk, list);
+	spin_unlock(lock);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
-	write_unlock(lock);
 }
 EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
 
@@ -402,12 +401,12 @@ void inet_unhash(struct sock *sk)
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 		spin_unlock_bh(&ilb->lock);
 	} else {
-		rwlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
+		spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
 
-		write_lock_bh(lock);
+		spin_lock_bh(lock);
 		if (__sk_nulls_del_node_init_rcu(sk))
 			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
-		write_unlock_bh(lock);
+		spin_unlock_bh(lock);
 	}
 }
 EXPORT_SYMBOL_GPL(inet_unhash);
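What makes the writer-only spinlock safe is the RCU read side introduced by the parent series: lookups walk the nulls chains under rcu_read_lock() with no bucket lock at all, which is why the diff publishes new entries with __sk_nulls_add_node_rcu(). Below is a much-simplified userspace sketch of that publish/lookup split, using C11 atomics as a stand-in for RCU; it shows only the release/acquire publication ordering, while real RCU additionally defers freeing until all readers are done (handled in the kernel by grace periods and the nulls end markers). Note also a small design point visible in the hunks above: the unlock now precedes sock_prot_inuse_add(), a counter update that does not need the bucket lock, so the critical section shrinks slightly.

/* C11-atomics stand-in for the RCU publish/lookup pattern; NOT the
 * kernel's implementation. Writers would still serialize insertions
 * against each other (the spinlock in this patch); readers take no
 * lock at all. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int key;
	struct node *_Atomic next;
};

static struct node *_Atomic chain;

/* Writer side: fill in the node, then publish it with a release store,
 * analogous to __sk_nulls_add_node_rcu(). */
static void publish(struct node *n, int key)
{
	n->key = key;
	atomic_store_explicit(&n->next,
			      atomic_load_explicit(&chain, memory_order_relaxed),
			      memory_order_relaxed);
	atomic_store_explicit(&chain, n, memory_order_release);
}

/* Reader side: lockless traversal, analogous to the RCU-protected
 * sk_nulls_for_each() lookup that replaced read_lock(). */
static struct node *lookup(int key)
{
	struct node *n = atomic_load_explicit(&chain, memory_order_acquire);

	for (; n; n = atomic_load_explicit(&n->next, memory_order_acquire))
		if (n->key == key)
			return n;
	return NULL;
}

int main(void)
{
	struct node *n = calloc(1, sizeof(*n));

	publish(n, 42);
	printf("lookup(42) %s\n", lookup(42) ? "found" : "missing");
	free(n);
	return 0;
}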