aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/inet_hashtables.c
diff options
context:
space:
mode:
authorPavel Emelyanov <xemul@openvz.org>2007-12-20 18:31:33 -0500
committerDavid S. Miller <davem@davemloft.net>2008-01-28 17:59:26 -0500
commit152da81deb9a4870aeac352336184b2b14d4b2ba (patch)
tree63db8e49be92e7f23667d8c3356177840287118f /net/ipv4/inet_hashtables.c
parentd6701191329b51793bc56724548f0863d2149c29 (diff)
[INET]: Uninline the __inet_hash function.
This one is used in quite many places in the networking code and seems to big to be inline. After the patch net/ipv4/build-in.o loses ~650 bytes: add/remove: 2/0 grow/shrink: 0/5 up/down: 461/-1114 (-653) function old new delta __inet_hash_nolisten - 282 +282 __inet_hash - 179 +179 tcp_sacktag_write_queue 2255 2254 -1 __inet_lookup_listener 284 274 -10 tcp_v4_syn_recv_sock 755 493 -262 tcp_v4_hash 389 35 -354 inet_hash_connect 1086 599 -487 This version addresses the issue pointed by Eric, that while being inline this function was optimized by gcc in respect to the 'listen_possible' argument. Signed-off-by: Pavel Emelyanov <xemul@openvz.org> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/inet_hashtables.c')
-rw-r--r--net/ipv4/inet_hashtables.c46
1 file changed, 44 insertions, 2 deletions
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 67704da04fc4..8dfd5a691e5f 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -267,6 +267,48 @@ static inline u32 inet_sk_port_offset(const struct sock *sk)
267 inet->dport);
268}
269
/*
 * __inet_hash_nolisten - hash a non-listening socket into the
 * established-hash (ehash) table.
 *
 * Computes sk->sk_hash with inet_sk_ehashfn(), selects the matching
 * ehash bucket and its per-bucket rwlock, then adds the socket to the
 * bucket chain and bumps the protocol in-use counter, all under the
 * bucket's write lock.
 *
 * The socket must not already be hashed (checked by BUG_TRAP below).
 */
270void __inet_hash_nolisten(struct inet_hashinfo *hashinfo, struct sock *sk)
271{
272 struct hlist_head *list;
273 rwlock_t *lock;
274 struct inet_ehash_bucket *head;
275
276 BUG_TRAP(sk_unhashed(sk));
277
278 sk->sk_hash = inet_sk_ehashfn(sk);
279 head = inet_ehash_bucket(hashinfo, sk->sk_hash);
280 list = &head->chain;
281 lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
282
283 write_lock(lock);
284 __sk_add_node(sk, list);
285 sock_prot_inc_use(sk->sk_prot);
286 write_unlock(lock);
287}
288EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
289
/*
 * __inet_hash - hash a socket into the appropriate lookup table.
 *
 * Non-listening sockets are delegated to __inet_hash_nolisten()
 * (this replaces the old 'listen_possible' inline argument).
 * Listening sockets are added to listening_hash[], indexed by
 * inet_sk_listen_hashfn(sk).
 *
 * Note the lock asymmetry for the listen path: the lock is acquired
 * via inet_listen_wlock(hashinfo) but released by a direct
 * write_unlock(lock) on the same &hashinfo->lhash_lock; waiters on
 * lhash_wait are then woken.
 */
290void __inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk)
291{
292 struct hlist_head *list;
293 rwlock_t *lock;
294
295 if (sk->sk_state != TCP_LISTEN) {
296 __inet_hash_nolisten(hashinfo, sk);
297 return;
298 }
299
300 BUG_TRAP(sk_unhashed(sk));
301 list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
302 lock = &hashinfo->lhash_lock;
303
304 inet_listen_wlock(hashinfo);
305 __sk_add_node(sk, list);
306 sock_prot_inc_use(sk->sk_prot);
307 write_unlock(lock);
308 wake_up(&hashinfo->lhash_wait);
309}
310EXPORT_SYMBOL_GPL(__inet_hash);
311
312/*
313 * Bind a port for a connect operation and hash it.
314 */
@@ -334,7 +376,7 @@ ok:
 		inet_bind_hash(sk, tb, port);
 		if (sk_unhashed(sk)) {
 			inet_sk(sk)->sport = htons(port);
-			__inet_hash(hinfo, sk, 0);
+			__inet_hash_nolisten(hinfo, sk);
 		}
 		spin_unlock(&head->lock);

@@ -351,7 +393,7 @@ ok:
 	tb = inet_csk(sk)->icsk_bind_hash;
 	spin_lock_bh(&head->lock);
 	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
-		__inet_hash(hinfo, sk, 0);
+		__inet_hash_nolisten(hinfo, sk);
 		spin_unlock_bh(&head->lock);
 		return 0;
 	} else {