author		Eric Dumazet <edumazet@google.com>	2015-03-22 13:22:21 -0400
committer	David S. Miller <davem@davemloft.net>	2015-03-23 16:52:26 -0400
commit		b282705336e03fc7b9377a278939594870a40f96 (patch)
tree		fbb9b0bf127fb3910e65b6ff6566fc12396385e4 /net/ipv4/inet_diag.c
parent		8b929ab12fb2ab960adb3c3ec8d107fef5ff3243 (diff)
net: convert syn_wait_lock to a spinlock
This is low-hanging fruit, as we'll get rid of syn_wait_lock eventually.
We hold syn_wait_lock for such short sections that it makes no sense to use
a read/write lock. A spin lock is simply faster.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
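
The mechanical part of the conversion, visible in the inet_diag.c hunks below, is replacing each read_lock_bh()/read_unlock_bh() pair on syn_wait_lock with spin_lock_bh()/spin_unlock_bh(); the declaration change from rwlock_t to spinlock_t and the writer-side conversions land in other files of the same patch, since this page is limited to net/ipv4/inet_diag.c. A minimal sketch of the pattern, with hypothetical struct and function names (demo_queue, demo_peek), not the patch itself:

/*
 * Sketch of the rwlock_t -> spinlock_t conversion pattern.
 * The _bh variants still disable bottom halves, so the softirq
 * safety of the original code is preserved.
 */
#include <linux/spinlock.h>

struct demo_queue {
	spinlock_t	syn_wait_lock;	/* was: rwlock_t syn_wait_lock; */
	void		*listen_opt;
};

static void demo_queue_init(struct demo_queue *q)
{
	spin_lock_init(&q->syn_wait_lock);	/* was: rwlock_init() */
	q->listen_opt = NULL;
}

static void *demo_peek(struct demo_queue *q)
{
	void *opt;

	spin_lock_bh(&q->syn_wait_lock);	/* was: read_lock_bh() */
	opt = q->listen_opt;
	spin_unlock_bh(&q->syn_wait_lock);	/* was: read_unlock_bh() */

	return opt;
}

Because the critical sections only dereference a pointer and check a queue length, the reader parallelism a read/write lock buys never materializes, while a plain spinlock avoids the rwlock's extra atomic overhead.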
Diffstat (limited to 'net/ipv4/inet_diag.c')
-rw-r--r--	net/ipv4/inet_diag.c	4
1 file changed, 2 insertions, 2 deletions
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index f984b2001d0a..76322c9867d5 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -728,7 +728,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
 
 	entry.family = sk->sk_family;
 
-	read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+	spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 
 	lopt = icsk->icsk_accept_queue.listen_opt;
 	if (!lopt || !listen_sock_qlen(lopt))
@@ -776,7 +776,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
 	}
 
 out:
-	read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+	spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 
 	return err;
 }