diff options
author | Eric Dumazet <dada1@cosmosbay.com> | 2008-11-20 03:40:07 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2008-11-20 03:40:07 -0500 |
commit | 5caea4ea7088e80ac5410d04660346094608b909 (patch) | |
tree | fad95133683c002d24ff5de7fb756dad806b41ed /include | |
parent | d8b83c57a7e497cba9b5cb156e63176323035785 (diff) |
net: listening_hash get a spinlock per bucket
This patch prepares RCU migration of listening_hash table for
TCP/DCCP protocols.
Since the listening_hash table is small (32 slots per protocol), we add
a spinlock for each slot, instead of a single rwlock for the whole table.
This should reduce hold times for readers and improve writer concurrency.
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include')
-rw-r--r-- | include/net/inet_hashtables.h | 45 |
1 files changed, 15 insertions, 30 deletions
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 481896045111..62d2dd0d7860 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h | |||
@@ -99,6 +99,11 @@ struct inet_bind_hashbucket { | |||
99 | struct hlist_head chain; | 99 | struct hlist_head chain; |
100 | }; | 100 | }; |
101 | 101 | ||
102 | struct inet_listen_hashbucket { | ||
103 | spinlock_t lock; | ||
104 | struct hlist_head head; | ||
105 | }; | ||
106 | |||
102 | /* This is for listening sockets, thus all sockets which possess wildcards. */ | 107 | /* This is for listening sockets, thus all sockets which possess wildcards. */ |
103 | #define INET_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */ | 108 | #define INET_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */ |
104 | 109 | ||
@@ -123,22 +128,21 @@ struct inet_hashinfo { | |||
123 | unsigned int bhash_size; | 128 | unsigned int bhash_size; |
124 | /* Note : 4 bytes padding on 64 bit arches */ | 129 | /* Note : 4 bytes padding on 64 bit arches */ |
125 | 130 | ||
126 | /* All sockets in TCP_LISTEN state will be in here. This is the only | 131 | struct kmem_cache *bind_bucket_cachep; |
127 | * table where wildcard'd TCP sockets can exist. Hash function here | ||
128 | * is just local port number. | ||
129 | */ | ||
130 | struct hlist_head listening_hash[INET_LHTABLE_SIZE]; | ||
131 | 132 | ||
132 | /* All the above members are written once at bootup and | 133 | /* All the above members are written once at bootup and |
133 | * never written again _or_ are predominantly read-access. | 134 | * never written again _or_ are predominantly read-access. |
134 | * | 135 | * |
135 | * Now align to a new cache line as all the following members | 136 | * Now align to a new cache line as all the following members |
136 | * are often dirty. | 137 | * might be often dirty. |
138 | */ | ||
139 | /* All sockets in TCP_LISTEN state will be in here. This is the only | ||
140 | * table where wildcard'd TCP sockets can exist. Hash function here | ||
141 | * is just local port number. | ||
137 | */ | 142 | */ |
138 | rwlock_t lhash_lock ____cacheline_aligned; | 143 | struct inet_listen_hashbucket listening_hash[INET_LHTABLE_SIZE] |
139 | atomic_t lhash_users; | 144 | ____cacheline_aligned_in_smp; |
140 | wait_queue_head_t lhash_wait; | 145 | |
141 | struct kmem_cache *bind_bucket_cachep; | ||
142 | }; | 146 | }; |
143 | 147 | ||
144 | static inline struct inet_ehash_bucket *inet_ehash_bucket( | 148 | static inline struct inet_ehash_bucket *inet_ehash_bucket( |
@@ -236,26 +240,7 @@ extern void __inet_inherit_port(struct sock *sk, struct sock *child); | |||
236 | 240 | ||
237 | extern void inet_put_port(struct sock *sk); | 241 | extern void inet_put_port(struct sock *sk); |
238 | 242 | ||
239 | extern void inet_listen_wlock(struct inet_hashinfo *hashinfo); | 243 | void inet_hashinfo_init(struct inet_hashinfo *h); |
240 | |||
241 | /* | ||
242 | * - We may sleep inside this lock. | ||
243 | * - If sleeping is not required (or called from BH), | ||
244 | * use plain read_(un)lock(&inet_hashinfo.lhash_lock). | ||
245 | */ | ||
246 | static inline void inet_listen_lock(struct inet_hashinfo *hashinfo) | ||
247 | { | ||
248 | /* read_lock synchronizes to candidates to writers */ | ||
249 | read_lock(&hashinfo->lhash_lock); | ||
250 | atomic_inc(&hashinfo->lhash_users); | ||
251 | read_unlock(&hashinfo->lhash_lock); | ||
252 | } | ||
253 | |||
254 | static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo) | ||
255 | { | ||
256 | if (atomic_dec_and_test(&hashinfo->lhash_users)) | ||
257 | wake_up(&hashinfo->lhash_wait); | ||
258 | } | ||
259 | 244 | ||
260 | extern void __inet_hash_nolisten(struct sock *sk); | 245 | extern void __inet_hash_nolisten(struct sock *sk); |
261 | extern void inet_hash(struct sock *sk); | 246 | extern void inet_hash(struct sock *sk); |