author		Eric Dumazet <dada1@cosmosbay.com>	2008-11-20 03:40:07 -0500
committer	David S. Miller <davem@davemloft.net>	2008-11-20 03:40:07 -0500
commit		5caea4ea7088e80ac5410d04660346094608b909 (patch)
tree		fad95133683c002d24ff5de7fb756dad806b41ed /net/ipv4/inet_diag.c
parent		d8b83c57a7e497cba9b5cb156e63176323035785 (diff)
net: listening_hash get a spinlock per bucket
This patch prepares the RCU migration of the listening_hash table for the
TCP/DCCP protocols.
Since the listening_hash table is small (32 slots per protocol), we add
a spinlock per slot instead of a single rwlock for the whole table.
This should reduce reader hold times and improve writer concurrency.
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
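The per-slot bucket type itself is defined outside the file shown below (in include/net/inet_hashtables.h, which this diffstat does not cover); a minimal sketch of the shape the hunks in inet_diag.c rely on, with the lock and head field names taken from their use as ilb->lock and ilb->head in the diff:

/* Sketch of the per-slot bucket this patch introduces; the actual
 * definition lives in include/net/inet_hashtables.h, not in this diffstat.
 */
struct inet_listen_hashbucket {
	spinlock_t		lock;	/* protects only this slot's chain */
	struct hlist_head	head;	/* listening sockets hashed to this slot */
};

/* struct inet_hashinfo then carries one such bucket per slot instead of a
 * single table-wide rwlock:
 *
 *	struct inet_listen_hashbucket listening_hash[INET_LHTABLE_SIZE];
 */

With 32 independently locked slots, a writer adding or removing a listener only contends with readers walking the same slot.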
Diffstat (limited to 'net/ipv4/inet_diag.c')
-rw-r--r--	net/ipv4/inet_diag.c	12
1 file changed, 7 insertions, 5 deletions
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 41b36720e977..1cb154ed75ad 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -718,13 +718,15 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
 			goto skip_listen_ht;
 
-		inet_listen_lock(hashinfo);
 		for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
 			struct sock *sk;
 			struct hlist_node *node;
+			struct inet_listen_hashbucket *ilb;
 
 			num = 0;
-			sk_for_each(sk, node, &hashinfo->listening_hash[i]) {
+			ilb = &hashinfo->listening_hash[i];
+			spin_lock_bh(&ilb->lock);
+			sk_for_each(sk, node, &ilb->head) {
 				struct inet_sock *inet = inet_sk(sk);
 
 				if (num < s_num) {
@@ -742,7 +744,7 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 					goto syn_recv;
 
 				if (inet_csk_diag_dump(sk, skb, cb) < 0) {
-					inet_listen_unlock(hashinfo);
+					spin_unlock_bh(&ilb->lock);
 					goto done;
 				}
 
@@ -751,7 +753,7 @@ syn_recv:
 					goto next_listen;
 
 				if (inet_diag_dump_reqs(skb, sk, cb) < 0) {
-					inet_listen_unlock(hashinfo);
+					spin_unlock_bh(&ilb->lock);
 					goto done;
 				}
 
@@ -760,12 +762,12 @@ next_listen:
 				cb->args[4] = 0;
 				++num;
 			}
+			spin_unlock_bh(&ilb->lock);
 
 			s_num = 0;
 			cb->args[3] = 0;
 			cb->args[4] = 0;
 		}
-		inet_listen_unlock(hashinfo);
 skip_listen_ht:
 		cb->args[0] = 1;
 		s_i = num = s_num = 0;
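Read together, the hunks turn the listening-hash walk in inet_diag_dump() into a per-bucket critical section. A simplified view of the resulting loop (bookkeeping around cb->args and the syn_recv/next_listen labels trimmed):

/* Sketch of the post-patch walk: each slot is locked only while its own
 * chain is being dumped, and every early exit drops that slot's lock.
 */
for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
	struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[i];
	struct hlist_node *node;
	struct sock *sk;

	spin_lock_bh(&ilb->lock);
	sk_for_each(sk, node, &ilb->head) {
		if (inet_csk_diag_dump(sk, skb, cb) < 0) {
			/* never return to netlink with a bucket lock held */
			spin_unlock_bh(&ilb->lock);
			goto done;
		}
	}
	spin_unlock_bh(&ilb->lock);	/* held only while this slot is walked */
}

Where the old code held inet_listen_lock() across the whole table scan, lock hold time is now bounded by the length of a single slot's chain.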