author	Eric Dumazet <dada1@cosmosbay.com>	2008-11-20 23:39:09 -0500
committer	David S. Miller <davem@davemloft.net>	2008-11-20 23:39:09 -0500
commit	9db66bdcc83749affe61c61eb8ff3cf08f42afec (patch)
tree	81bb20e4f569d3b44731498428277db9d77fa7a9 /include/net/inet_hashtables.h
parent	b8c26a33c8b6f0a150e9cb38ed80b890be55395c (diff)
net: convert TCP/DCCP ehash rwlocks to spinlocks
Now TCP & DCCP use RCU lookups, we can convert ehash rwlocks to spinlocks. /proc/net/tcp and other seq_file 'readers' can safely be converted to 'writers'. This should speedup writers, since spin_lock()/spin_unlock() only use one atomic operation instead of two for write_lock()/write_unlock() Signed-off-by: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net/inet_hashtables.h')
-rw-r--r--	include/net/inet_hashtables.h	14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 62d2dd0d7860..28b3ee3e8d6d 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -116,7 +116,7 @@ struct inet_hashinfo {
 	 * TIME_WAIT sockets use a separate chain (twchain).
 	 */
 	struct inet_ehash_bucket	*ehash;
-	rwlock_t			*ehash_locks;
+	spinlock_t			*ehash_locks;
 	unsigned int			ehash_size;
 	unsigned int			ehash_locks_mask;
 
@@ -152,7 +152,7 @@ static inline struct inet_ehash_bucket *inet_ehash_bucket(
 	return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)];
 }
 
-static inline rwlock_t *inet_ehash_lockp(
+static inline spinlock_t *inet_ehash_lockp(
 	struct inet_hashinfo *hashinfo,
 	unsigned int hash)
 {
@@ -177,16 +177,16 @@ static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
 		size = 4096;
 	if (sizeof(rwlock_t) != 0) {
 #ifdef CONFIG_NUMA
-		if (size * sizeof(rwlock_t) > PAGE_SIZE)
-			hashinfo->ehash_locks = vmalloc(size * sizeof(rwlock_t));
+		if (size * sizeof(spinlock_t) > PAGE_SIZE)
+			hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t));
 		else
 #endif
-			hashinfo->ehash_locks = kmalloc(size * sizeof(rwlock_t),
+			hashinfo->ehash_locks = kmalloc(size * sizeof(spinlock_t),
 							GFP_KERNEL);
 		if (!hashinfo->ehash_locks)
 			return ENOMEM;
 		for (i = 0; i < size; i++)
-			rwlock_init(&hashinfo->ehash_locks[i]);
+			spin_lock_init(&hashinfo->ehash_locks[i]);
 	}
 	hashinfo->ehash_locks_mask = size - 1;
 	return 0;
@@ -197,7 +197,7 @@ static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
 	if (hashinfo->ehash_locks) {
 #ifdef CONFIG_NUMA
 		unsigned int size = (hashinfo->ehash_locks_mask + 1) *
-					sizeof(rwlock_t);
+					sizeof(spinlock_t);
 		if (size > PAGE_SIZE)
 			vfree(hashinfo->ehash_locks);
 		else
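Usage sketch (illustrative, not part of this diff): how a caller such as a /proc/net/tcp style walker takes the per-bucket lock through inet_ehash_lockp() after the conversion. demo_walk_bucket is an invented name, the chain walk itself is elided, and details such as BH disabling are omitted.

#include <net/inet_hashtables.h>

/* Illustrative only -- not from this patch. */
static void demo_walk_bucket(struct inet_hashinfo *hashinfo, unsigned int slot)
{
	spinlock_t *lock = inet_ehash_lockp(hashinfo, slot);

	spin_lock(lock);	/* previously the read side of the rwlock */
	/* ... walk the bucket chain here ... */
	spin_unlock(lock);
}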