path: root/net
author	Eric Dumazet <eric.dumazet@gmail.com>	2009-10-06 20:28:29 -0400
committer	David S. Miller <davem@davemloft.net>	2009-10-06 20:28:29 -0400
commit	bcdce7195e0eab55b37dbd53be53057f38006380 (patch)
tree	b4480a2e43561adf2060d1b8da818535cc91dc04 /net
parent	bd32cafc4707ccc1e66fafdb47fac42217569070 (diff)
net: speedup sk_wake_async()
An incoming datagram must bring a *lot* of cache lines into the cpu
cache, in particular (other parts omitted: hash chains, ip route
cache...), on 32bit arches:

offsetof(struct sock, sk_rcvbuf)        = 0x30  (read)
offsetof(struct sock, sk_lock)          = 0x34  (rw)
offsetof(struct sock, sk_sleep)         = 0x50  (read)
offsetof(struct sock, sk_rmem_alloc)    = 0x64  (rw)
offsetof(struct sock, sk_receive_queue) = 0x74  (rw)
offsetof(struct sock, sk_forward_alloc) = 0x98  (rw)
offsetof(struct sock, sk_callback_lock) = 0xcc  (rw)
offsetof(struct sock, sk_drops)         = 0xd8  (read if we add dropcount support, rw if frame dropped)
offsetof(struct sock, sk_filter)        = 0xf8  (read)
offsetof(struct sock, sk_socket)        = 0x138 (read)
offsetof(struct sock, sk_data_ready)    = 0x15c (read)

We can avoid referencing sk->sk_socket and socket->fasync_list on
sockets that have no fasync() structures. (The socket->fasync_list
pointer is probably already in cache, because it shares a cache line
with socket->wait, i.e. the location pointed to by sk->sk_sleep.)

This avoids one cache line load per incoming packet in the common
case (no fasync()).

We can leave sk->sk_socket in a cold location (or even move it there
in a future patch).

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
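(The 'net' path filter below hides the include/net/sock.h half of this
commit. A minimal sketch of the fast path the SOCK_FASYNC flag enables,
reconstructed from the commit description above, so treat the exact
shape as approximate: the flag bit lives in struct sock, which is
already hot in cache, so the common no-fasync() case never touches
sk->sk_socket or socket->fasync_list.)

	/* Sketch: test one flag bit instead of chasing sk->sk_socket
	 * through a cold cache line on every wakeup. */
	static inline void sk_wake_async(struct sock *sk, int how, int band)
	{
		if (sock_flag(sk, SOCK_FASYNC))	/* set/cleared by sock_fasync() below */
			sock_wake_async(sk->sk_socket, how, band);
	}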
Diffstat (limited to 'net')
-rw-r--r--	net/socket.c	3
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/net/socket.c b/net/socket.c
index 75655365b5fd..d53ad11558c3 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1100,11 +1100,14 @@ static int sock_fasync(int fd, struct file *filp, int on)
 		fna->fa_next = sock->fasync_list;
 		write_lock_bh(&sk->sk_callback_lock);
 		sock->fasync_list = fna;
+		sock_set_flag(sk, SOCK_FASYNC);
 		write_unlock_bh(&sk->sk_callback_lock);
 	} else {
 		if (fa != NULL) {
 			write_lock_bh(&sk->sk_callback_lock);
 			*prev = fa->fa_next;
+			if (!sock->fasync_list)
+				sock_reset_flag(sk, SOCK_FASYNC);
 			write_unlock_bh(&sk->sk_callback_lock);
 			kfree(fa);
 		}
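(For context, sock_fasync() runs when user space arms SIGIO delivery on
a socket descriptor. A minimal user-space sketch using only standard
fcntl() calls, nothing specific to this patch: F_SETFL with O_ASYNC
ends up invoking the socket's fasync handler, i.e. sock_fasync() above,
which now also sets or clears SOCK_FASYNC on the struct sock.)

	#include <fcntl.h>
	#include <unistd.h>

	/* Request SIGIO on socket activity for the calling process. */
	static int arm_sigio(int sockfd)
	{
		int flags;

		if (fcntl(sockfd, F_SETOWN, getpid()) < 0)	/* deliver SIGIO to us */
			return -1;
		flags = fcntl(sockfd, F_GETFL, 0);
		if (flags < 0)
			return -1;
		return fcntl(sockfd, F_SETFL, flags | O_ASYNC);	/* calls ->fasync() */
	}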