author      Eric Dumazet <dada1@cosmosbay.com>             2007-03-04 19:05:44 -0500
committer   David S. Miller <davem@sunset.davemloft.net>   2007-04-26 01:23:27 -0400
commit      fa438ccfdfd3f6db02c13b61b21454eb81cd6a13
tree        a1759259d7543586185e2fb9db21461147944f18
parent      e317f6f69cb95527799d308a9421b7dc1252989a
[NET]: Keep sk_backlog near sk_lock
sk_backlog is a critical field of struct sock. (known famous words)

It is (ab)used in hot paths, in particular in release_sock(), tcp_recvmsg(),
tcp_v4_rcv() and sk_receive_skb().

It really makes sense to place it next to sk_lock, because sk_backlog is only
used after sk_lock has been locked, so its cache line is already in the L1
cache. This should reduce cache misses and sk_lock acquisition time.

(In theory we could move only the head pointer next to sk_lock and leave the
tail far away, since 'tail' is normally not as hot, but let's keep it simple.)
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
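
To make the locality argument concrete, here is a minimal user-space sketch of
the access pattern the commit message describes: a lock sits directly next to
the backlog head/tail pointers, packets that arrive while the "owner" holds the
lock are appended to the backlog, and the release path drains the backlog
immediately after touching the lock. This is not the kernel code; toy_sock,
pkt, toy_add_backlog and toy_release are invented names, and the real kernel
queues from softirq context under the bh spinlock rather than from the owner
thread as done here for brevity.

/* Toy user-space model of the sk_lock / sk_backlog access pattern.
 * All names are illustrative, not kernel APIs. Build with -lpthread.
 */
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct pkt {
	struct pkt *next;
	int payload;
};

struct toy_sock {
	pthread_mutex_t lock;		/* stand-in for sk_lock            */
	struct {			/* stand-in for sk_backlog:        */
		struct pkt *head;	/* placed right next to the lock,  */
		struct pkt *tail;	/* touched right after locking it  */
	} backlog;
};

/* Analogue of sk_add_backlog(): owner is busy, so append for later. */
static void toy_add_backlog(struct toy_sock *sk, struct pkt *p)
{
	p->next = NULL;
	if (!sk->backlog.tail)
		sk->backlog.head = p;
	else
		sk->backlog.tail->next = p;
	sk->backlog.tail = p;
}

/* Analogue of release_sock(): drain whatever piled up, then unlock. */
static void toy_release(struct toy_sock *sk)
{
	struct pkt *p = sk->backlog.head;	/* hot load right after the lock */

	sk->backlog.head = sk->backlog.tail = NULL;
	while (p) {
		struct pkt *next = p->next;

		printf("processing packet %d\n", p->payload);
		free(p);
		p = next;
	}
	pthread_mutex_unlock(&sk->lock);
}

int main(void)
{
	struct toy_sock sk = { .lock = PTHREAD_MUTEX_INITIALIZER };

	pthread_mutex_lock(&sk.lock);		/* "owner" holds the socket   */
	for (int i = 0; i < 3; i++) {		/* packets arrive meanwhile   */
		struct pkt *p = malloc(sizeof(*p));
		p->payload = i;
		toy_add_backlog(&sk, p);
	}
	toy_release(&sk);			/* drain on release           */
	return 0;
}

The point of the patch is visible in toy_release(): the lock and the backlog
head are touched back-to-back on every release, so keeping them in the same
cache line can save a miss on that path.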
-rw-r--r--   include/net/sock.h | 18 +++++++++---------
-rw-r--r--   net/core/sock.c    |  2 +-
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 2c7d60ca3548..a3366c3c837a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -202,6 +202,15 @@ struct sock {
 	unsigned short		sk_type;
 	int			sk_rcvbuf;
 	socket_lock_t		sk_lock;
+	/*
+	 * The backlog queue is special, it is always used with
+	 * the per-socket spinlock held and requires low latency
+	 * access. Therefore we special case it's implementation.
+	 */
+	struct {
+		struct sk_buff *head;
+		struct sk_buff *tail;
+	} sk_backlog;
 	wait_queue_head_t	*sk_sleep;
 	struct dst_entry	*sk_dst_cache;
 	struct xfrm_policy	*sk_policy[2];
@@ -221,15 +230,6 @@ struct sock {
 	int			sk_rcvlowat;
 	unsigned long		sk_flags;
 	unsigned long		sk_lingertime;
-	/*
-	 * The backlog queue is special, it is always used with
-	 * the per-socket spinlock held and requires low latency
-	 * access. Therefore we special case it's implementation.
-	 */
-	struct {
-		struct sk_buff *head;
-		struct sk_buff *tail;
-	} sk_backlog;
 	struct sk_buff_head	sk_error_queue;
 	struct proto		*sk_prot_creator;
 	rwlock_t		sk_callback_lock;
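
For a concrete (if artificial) picture of what the move above buys, the mock
below compares a "before" layout where the backlog sits well past the lock with
an "after" layout where it directly follows it, and prints which 64-byte cache
line each field lands in. The struct names and field sizes are invented for
illustration; they do not reflect the real struct sock offsets.

/* Mock layouts (NOT the real struct sock) showing why placing the backlog
 * next to the lock can put both in one 64-byte cache line.
 */
#include <stdio.h>
#include <stddef.h>

struct backlog { void *head, *tail; };

struct sock_before {
	char  common[48];	/* stand-in for the fields before sk_lock  */
	int   lock;		/* stand-in for socket_lock_t              */
	char  middle[160];	/* sk_sleep ... sk_lingertime and friends  */
	struct backlog bl;	/* old position: far from the lock         */
};

struct sock_after {
	char  common[48];
	int   lock;
	struct backlog bl;	/* new position: right after the lock      */
	char  middle[160];
};

int main(void)
{
	printf("before: lock@%zu backlog@%zu -> cache lines %zu and %zu\n",
	       offsetof(struct sock_before, lock),
	       offsetof(struct sock_before, bl),
	       offsetof(struct sock_before, lock) / 64,
	       offsetof(struct sock_before, bl) / 64);
	printf("after:  lock@%zu backlog@%zu -> cache lines %zu and %zu\n",
	       offsetof(struct sock_after, lock),
	       offsetof(struct sock_after, bl),
	       offsetof(struct sock_after, lock) / 64,
	       offsetof(struct sock_after, bl) / 64);
	return 0;
}

With these made-up sizes the "before" layout puts the lock and the backlog on
different cache lines while the "after" layout puts the lock and the backlog
head on the same one, which is exactly the effect the commit message is after.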
diff --git a/net/core/sock.c b/net/core/sock.c
index 27c4f62382bd..6d35d5775ba8 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -904,6 +904,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 		sk_node_init(&newsk->sk_node);
 		sock_lock_init(newsk);
 		bh_lock_sock(newsk);
+		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
 
 		atomic_set(&newsk->sk_rmem_alloc, 0);
 		atomic_set(&newsk->sk_wmem_alloc, 0);
@@ -923,7 +924,6 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 		newsk->sk_wmem_queued	= 0;
 		newsk->sk_forward_alloc = 0;
 		newsk->sk_send_head	= NULL;
-		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
 		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
 
 		sock_reset_flag(newsk, SOCK_DONE);
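
The net/core/sock.c hunk only relocates the explicit reset of the cloned
socket's backlog pointers, but it is a reminder of why that reset exists at
all: sk_clone() starts from a wholesale copy of the parent socket, so
inherited per-instance queue pointers must be nulled before the new socket is
used. A minimal user-space sketch of that clone-then-reset pattern follows;
toy_sock and toy_clone are illustrative names, not kernel code.

/* Sketch of the pattern in sk_clone(): the new object begins as a
 * byte-for-byte copy of the parent, so queue pointers are reset explicitly.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pkt { struct pkt *next; };

struct toy_sock {
	int refcnt;
	struct { struct pkt *head, *tail; } backlog;
};

static struct toy_sock *toy_clone(const struct toy_sock *parent)
{
	struct toy_sock *newsk = malloc(sizeof(*newsk));

	if (!newsk)
		return NULL;

	memcpy(newsk, parent, sizeof(*newsk));	/* analogous to copying the parent */

	/* Without this reset the clone would alias the parent's backlog
	 * and the two sockets would corrupt each other's queue. */
	newsk->backlog.head = newsk->backlog.tail = NULL;
	newsk->refcnt = 1;
	return newsk;
}

int main(void)
{
	struct pkt p = { NULL };
	struct toy_sock parent = { .refcnt = 1, .backlog = { &p, &p } };
	struct toy_sock *child = toy_clone(&parent);

	printf("parent backlog head %p, child backlog head %p\n",
	       (void *)parent.backlog.head, (void *)child->backlog.head);
	free(child);
	return 0;
}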