author     Eric Dumazet <dada1@cosmosbay.com>            2007-11-07 05:40:20 -0500
committer  David S. Miller <davem@sunset.davemloft.net>  2007-11-07 07:15:11 -0500
commit     230140cffa7feae90ad50bf259db1fa07674f3a7 (patch)
tree       815472add31606423a508a17806b7884f0ab3e2e  /net/ipv4/inet_timewait_sock.c
parent     efac52762b1e3fe3035d29e82d8ee1aebc45e4a7 (diff)
[INET]: Remove per bucket rwlock in tcp/dccp ehash table.
As done two years ago on the IP route cache table (commit 22c047ccbc68fa8f3fa57f0e8f906479a062c426), we can avoid using one lock per hash bucket for the huge TCP/DCCP hash tables.

On a typical x86_64 platform, this saves about 2MB or 4MB of RAM, with little performance difference. (We hit a different cache line for the rwlock, but the bucket cache line then has a better sharing factor among cpus, since we dirty it less often.) For netstat or ss commands that want a full scan of the hash table, we perform fewer memory accesses.

Using a 'small' table of hashed rwlocks should be more than enough to provide correct SMP concurrency between different buckets, without using too much memory. Sizing of this table depends on num_possible_cpus() and various CONFIG settings.

This patch provides some locking abstraction that may ease future work using a different model for the TCP/DCCP table.

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Acked-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
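The hashed-lock scheme the message describes can be sketched in ordinary user-space C. This is only an illustration of the idea, not the kernel code: the names EHASH_LOCKS, EHASH_LOCKS_MASK and bucket_lock() are made up here, pthread rwlocks stand in for the kernel's rwlock_t, and the fixed table size of 256 replaces the kernel's sizing from num_possible_cpus() and CONFIG settings.

#include <pthread.h>
#include <stdio.h>

#define EHASH_LOCKS      256                  /* power of two, so '&' can replace '%' */
#define EHASH_LOCKS_MASK (EHASH_LOCKS - 1)

static pthread_rwlock_t ehash_locks[EHASH_LOCKS];

/* Map a bucket hash onto one of the shared locks; this is the role
 * inet_ehash_lockp() plays for the kernel's established-hash table. */
static pthread_rwlock_t *bucket_lock(unsigned int hash)
{
        return &ehash_locks[hash & EHASH_LOCKS_MASK];
}

int main(void)
{
        unsigned int hash = 123456u;          /* stands in for tw->tw_hash / sk->sk_hash */

        for (int i = 0; i < EHASH_LOCKS; i++)
                pthread_rwlock_init(&ehash_locks[i], NULL);

        /* Writers touching different buckets usually land on different locks,
         * so concurrency stays close to per-bucket locking while the lock
         * table itself stays tiny. */
        pthread_rwlock_wrlock(bucket_lock(hash));
        /* ... unlink/insert in the bucket's chain here ... */
        pthread_rwlock_unlock(bucket_lock(hash));

        printf("bucket hash %u maps to lock %td of %d\n",
               hash, bucket_lock(hash) - ehash_locks, EHASH_LOCKS);
        return 0;
}

Build with cc -pthread; many buckets share one lock, which is the memory/concurrency trade-off the commit message argues for.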
Diffstat (limited to 'net/ipv4/inet_timewait_sock.c')
-rw-r--r--  net/ipv4/inet_timewait_sock.c | 13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 4e189e28f306..a60b99e0ebdc 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -20,16 +20,16 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
         struct inet_bind_hashbucket *bhead;
         struct inet_bind_bucket *tb;
         /* Unlink from established hashes. */
-        struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, tw->tw_hash);
+        rwlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
 
-        write_lock(&ehead->lock);
+        write_lock(lock);
         if (hlist_unhashed(&tw->tw_node)) {
-                write_unlock(&ehead->lock);
+                write_unlock(lock);
                 return;
         }
         __hlist_del(&tw->tw_node);
         sk_node_init(&tw->tw_node);
-        write_unlock(&ehead->lock);
+        write_unlock(lock);
 
         /* Disassociate with bind bucket. */
         bhead = &hashinfo->bhash[inet_bhashfn(tw->tw_num, hashinfo->bhash_size)];
@@ -59,6 +59,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
         const struct inet_sock *inet = inet_sk(sk);
         const struct inet_connection_sock *icsk = inet_csk(sk);
         struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
+        rwlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
         struct inet_bind_hashbucket *bhead;
         /* Step 1: Put TW into bind hash. Original socket stays there too.
            Note, that any socket with inet->num != 0 MUST be bound in
@@ -71,7 +72,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
         inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
         spin_unlock(&bhead->lock);
 
-        write_lock(&ehead->lock);
+        write_lock(lock);
 
         /* Step 2: Remove SK from established hash. */
         if (__sk_del_node_init(sk))
@@ -81,7 +82,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
         inet_twsk_add_node(tw, &ehead->twchain);
         atomic_inc(&tw->tw_refcnt);
 
-        write_unlock(&ehead->lock);
+        write_unlock(lock);
 }
 
 EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
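The hunks above only touch the call sites: ehead is still used to reach the bucket's twchain, while the lock now comes from inet_ehash_lockp(). The helper itself and the allocation of the small lock table live in include/net/inet_hashtables.h, outside this per-file diffstat. A minimal sketch of what such a helper looks like; the ehash_locks and ehash_locks_mask field names are assumed here rather than shown by this diff:

/* Sketch only: the companion header change is not part of this per-file
 * diff, so the inet_hashinfo fields below are assumptions. */
static inline rwlock_t *inet_ehash_lockp(struct inet_hashinfo *hashinfo,
                                         unsigned int hash)
{
        /* Same hash value as the bucket lookup, masked down to the
         * (power-of-two sized) lock table. */
        return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}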