author    Eric Dumazet <dada1@cosmosbay.com>	2008-11-20 23:39:09 -0500
committer David S. Miller <davem@davemloft.net>	2008-11-20 23:39:09 -0500
commit    9db66bdcc83749affe61c61eb8ff3cf08f42afec
tree      81bb20e4f569d3b44731498428277db9d77fa7a9 /net/ipv4/tcp_ipv4.c
parent    b8c26a33c8b6f0a150e9cb38ed80b890be55395c
net: convert TCP/DCCP ehash rwlocks to spinlocks
Now that TCP & DCCP use RCU lookups, we can convert the ehash rwlocks to spinlocks.

/proc/net/tcp and other seq_file 'readers' can safely be converted to 'writers'.

This should speed up writers, since spin_lock()/spin_unlock() use only one atomic operation instead of the two needed by write_lock()/write_unlock().

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
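For illustration only (not part of the commit), a minimal sketch of the resulting bucket-walk pattern; the helper bucket_len() is hypothetical, while spin_lock_bh() and sk_nulls_for_each() are the real primitives the patch below uses:

#include <linux/spinlock.h>
#include <net/sock.h>

/* Hypothetical walker showing the post-patch locking pattern: with
 * RCU covering the lookup fast path, everyone still taking the
 * per-bucket ehash lock is effectively a writer, so a plain spinlock
 * (one atomic operation for the lock/unlock pair instead of the two
 * needed by write_lock()/write_unlock()) is sufficient and cheaper. */
static unsigned int bucket_len(struct hlist_nulls_head *chain,
			       spinlock_t *lock)
{
	struct sock *sk;
	struct hlist_nulls_node *node;
	unsigned int n = 0;

	spin_lock_bh(lock);		/* was read_lock_bh() */
	sk_nulls_for_each(sk, node, chain)
		n++;
	spin_unlock_bh(lock);		/* was read_unlock_bh() */
	return n;
}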
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--  net/ipv4/tcp_ipv4.c | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 330b08a1227..a81caa1be0c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1970,13 +1970,13 @@ static void *established_get_first(struct seq_file *seq)
 		struct sock *sk;
 		struct hlist_nulls_node *node;
 		struct inet_timewait_sock *tw;
-		rwlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
+		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
 
 		/* Lockless fast path for the common case of empty buckets */
 		if (empty_bucket(st))
 			continue;
 
-		read_lock_bh(lock);
+		spin_lock_bh(lock);
 		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
 			if (sk->sk_family != st->family ||
 			    !net_eq(sock_net(sk), net)) {
@@ -1995,7 +1995,7 @@ static void *established_get_first(struct seq_file *seq)
 			rc = tw;
 			goto out;
 		}
-		read_unlock_bh(lock);
+		spin_unlock_bh(lock);
 		st->state = TCP_SEQ_STATE_ESTABLISHED;
 	}
 out:
@@ -2023,7 +2023,7 @@ get_tw:
 			cur = tw;
 			goto out;
 		}
-		read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
+		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
 		st->state = TCP_SEQ_STATE_ESTABLISHED;
 
 		/* Look for next non empty bucket */
@@ -2033,7 +2033,7 @@ get_tw:
 		if (st->bucket >= tcp_hashinfo.ehash_size)
 			return NULL;
 
-		read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
+		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
 		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
 	} else
 		sk = sk_nulls_next(sk);
@@ -2134,7 +2134,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
 	case TCP_SEQ_STATE_TIME_WAIT:
 	case TCP_SEQ_STATE_ESTABLISHED:
 		if (v)
-			read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
+			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
 		break;
 	}
 }
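For context, the conversion is possible because the lookup fast path no longer takes the bucket lock at all. Below is a simplified, hypothetical sketch of such an RCU lookup over an hlist_nulls chain; match() stands in for the real key checks, and the real code (__inet_lookup_established()) additionally takes the refcount with atomic_inc_not_zero(), rechecks the keys, and restarts the walk if the nulls end-marker shows the entry migrated to another chain:

#include <linux/rcupdate.h>
#include <net/sock.h>

/* Hypothetical RCU lookup: no bucket lock is taken on this path,
 * which is why the lock can become writer-only. */
static struct sock *lookup_rcu(struct hlist_nulls_head *chain,
			       bool (*match)(const struct sock *sk))
{
	struct sock *sk;
	struct hlist_nulls_node *node;

	rcu_read_lock();
	sk_nulls_for_each_rcu(sk, node, chain) {
		if (match(sk)) {
			sock_hold(sk);	/* real code: atomic_inc_not_zero() */
			rcu_read_unlock();
			return sk;
		}
	}
	rcu_read_unlock();
	return NULL;
}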