diff options
author | Stephen Hemminger <shemminger@vyatta.com> | 2010-02-22 02:57:19 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2010-02-22 18:45:56 -0500 |
commit | 7f6b9dbd5afbd966a82dcbafc5ed62305eb9d479 (patch) | |
tree | 7d6795317f67d79919df5bb6b52121ad94d8ff37 /net | |
parent | 808f5114a9206fee855117d416440e1071ab375c (diff) |
af_key: locking change
Get rid of custom locking that was using wait queue, lock, and atomic
to basically build a queued mutex. Use RCU for read side.
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r-- | net/key/af_key.c | 76 |
1 file changed, 16 insertions(+), 60 deletions(-)
diff --git a/net/key/af_key.c b/net/key/af_key.c index a20d2fa88db9..da2fe5f57619 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -41,9 +41,7 @@ struct netns_pfkey { | |||
41 | struct hlist_head table; | 41 | struct hlist_head table; |
42 | atomic_t socks_nr; | 42 | atomic_t socks_nr; |
43 | }; | 43 | }; |
44 | static DECLARE_WAIT_QUEUE_HEAD(pfkey_table_wait); | 44 | static DEFINE_MUTEX(pfkey_mutex); |
45 | static DEFINE_RWLOCK(pfkey_table_lock); | ||
46 | static atomic_t pfkey_table_users = ATOMIC_INIT(0); | ||
47 | 45 | ||
48 | struct pfkey_sock { | 46 | struct pfkey_sock { |
49 | /* struct sock must be the first member of struct pfkey_sock */ | 47 | /* struct sock must be the first member of struct pfkey_sock */ |
@@ -108,50 +106,6 @@ static void pfkey_sock_destruct(struct sock *sk) | |||
108 | atomic_dec(&net_pfkey->socks_nr); | 106 | atomic_dec(&net_pfkey->socks_nr); |
109 | } | 107 | } |
110 | 108 | ||
111 | static void pfkey_table_grab(void) | ||
112 | { | ||
113 | write_lock_bh(&pfkey_table_lock); | ||
114 | |||
115 | if (atomic_read(&pfkey_table_users)) { | ||
116 | DECLARE_WAITQUEUE(wait, current); | ||
117 | |||
118 | add_wait_queue_exclusive(&pfkey_table_wait, &wait); | ||
119 | for(;;) { | ||
120 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
121 | if (atomic_read(&pfkey_table_users) == 0) | ||
122 | break; | ||
123 | write_unlock_bh(&pfkey_table_lock); | ||
124 | schedule(); | ||
125 | write_lock_bh(&pfkey_table_lock); | ||
126 | } | ||
127 | |||
128 | __set_current_state(TASK_RUNNING); | ||
129 | remove_wait_queue(&pfkey_table_wait, &wait); | ||
130 | } | ||
131 | } | ||
132 | |||
133 | static __inline__ void pfkey_table_ungrab(void) | ||
134 | { | ||
135 | write_unlock_bh(&pfkey_table_lock); | ||
136 | wake_up(&pfkey_table_wait); | ||
137 | } | ||
138 | |||
139 | static __inline__ void pfkey_lock_table(void) | ||
140 | { | ||
141 | /* read_lock() synchronizes us to pfkey_table_grab */ | ||
142 | |||
143 | read_lock(&pfkey_table_lock); | ||
144 | atomic_inc(&pfkey_table_users); | ||
145 | read_unlock(&pfkey_table_lock); | ||
146 | } | ||
147 | |||
148 | static __inline__ void pfkey_unlock_table(void) | ||
149 | { | ||
150 | if (atomic_dec_and_test(&pfkey_table_users)) | ||
151 | wake_up(&pfkey_table_wait); | ||
152 | } | ||
153 | |||
154 | |||
155 | static const struct proto_ops pfkey_ops; | 109 | static const struct proto_ops pfkey_ops; |
156 | 110 | ||
157 | static void pfkey_insert(struct sock *sk) | 111 | static void pfkey_insert(struct sock *sk) |
@@ -159,16 +113,16 @@ static void pfkey_insert(struct sock *sk) | |||
159 | struct net *net = sock_net(sk); | 113 | struct net *net = sock_net(sk); |
160 | struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); | 114 | struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); |
161 | 115 | ||
162 | pfkey_table_grab(); | 116 | mutex_lock(&pfkey_mutex); |
163 | sk_add_node(sk, &net_pfkey->table); | 117 | sk_add_node_rcu(sk, &net_pfkey->table); |
164 | pfkey_table_ungrab(); | 118 | mutex_unlock(&pfkey_mutex); |
165 | } | 119 | } |
166 | 120 | ||
167 | static void pfkey_remove(struct sock *sk) | 121 | static void pfkey_remove(struct sock *sk) |
168 | { | 122 | { |
169 | pfkey_table_grab(); | 123 | mutex_lock(&pfkey_mutex); |
170 | sk_del_node_init(sk); | 124 | sk_del_node_init_rcu(sk); |
171 | pfkey_table_ungrab(); | 125 | mutex_unlock(&pfkey_mutex); |
172 | } | 126 | } |
173 | 127 | ||
174 | static struct proto key_proto = { | 128 | static struct proto key_proto = { |
@@ -223,6 +177,8 @@ static int pfkey_release(struct socket *sock) | |||
223 | sock_orphan(sk); | 177 | sock_orphan(sk); |
224 | sock->sk = NULL; | 178 | sock->sk = NULL; |
225 | skb_queue_purge(&sk->sk_write_queue); | 179 | skb_queue_purge(&sk->sk_write_queue); |
180 | |||
181 | synchronize_rcu(); | ||
226 | sock_put(sk); | 182 | sock_put(sk); |
227 | 183 | ||
228 | return 0; | 184 | return 0; |
@@ -277,8 +233,8 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, | |||
277 | if (!skb) | 233 | if (!skb) |
278 | return -ENOMEM; | 234 | return -ENOMEM; |
279 | 235 | ||
280 | pfkey_lock_table(); | 236 | rcu_read_lock(); |
281 | sk_for_each(sk, node, &net_pfkey->table) { | 237 | sk_for_each_rcu(sk, node, &net_pfkey->table) { |
282 | struct pfkey_sock *pfk = pfkey_sk(sk); | 238 | struct pfkey_sock *pfk = pfkey_sk(sk); |
283 | int err2; | 239 | int err2; |
284 | 240 | ||
@@ -309,7 +265,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, | |||
309 | if ((broadcast_flags & BROADCAST_REGISTERED) && err) | 265 | if ((broadcast_flags & BROADCAST_REGISTERED) && err) |
310 | err = err2; | 266 | err = err2; |
311 | } | 267 | } |
312 | pfkey_unlock_table(); | 268 | rcu_read_unlock(); |
313 | 269 | ||
314 | if (one_sk != NULL) | 270 | if (one_sk != NULL) |
315 | err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk); | 271 | err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk); |
@@ -3702,8 +3658,8 @@ static void *pfkey_seq_start(struct seq_file *f, loff_t *ppos) | |||
3702 | struct net *net = seq_file_net(f); | 3658 | struct net *net = seq_file_net(f); |
3703 | struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); | 3659 | struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); |
3704 | 3660 | ||
3705 | read_lock(&pfkey_table_lock); | 3661 | rcu_read_lock(); |
3706 | return seq_hlist_start_head(&net_pfkey->table, *ppos); | 3662 | return seq_hlist_start_head_rcu(&net_pfkey->table, *ppos); |
3707 | } | 3663 | } |
3708 | 3664 | ||
3709 | static void *pfkey_seq_next(struct seq_file *f, void *v, loff_t *ppos) | 3665 | static void *pfkey_seq_next(struct seq_file *f, void *v, loff_t *ppos) |
@@ -3711,12 +3667,12 @@ static void *pfkey_seq_next(struct seq_file *f, void *v, loff_t *ppos) | |||
3711 | struct net *net = seq_file_net(f); | 3667 | struct net *net = seq_file_net(f); |
3712 | struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); | 3668 | struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); |
3713 | 3669 | ||
3714 | return seq_hlist_next(v, &net_pfkey->table, ppos); | 3670 | return seq_hlist_next_rcu(v, &net_pfkey->table, ppos); |
3715 | } | 3671 | } |
3716 | 3672 | ||
3717 | static void pfkey_seq_stop(struct seq_file *f, void *v) | 3673 | static void pfkey_seq_stop(struct seq_file *f, void *v) |
3718 | { | 3674 | { |
3719 | read_unlock(&pfkey_table_lock); | 3675 | rcu_read_unlock(); |
3720 | } | 3676 | } |
3721 | 3677 | ||
3722 | static const struct seq_operations pfkey_seq_ops = { | 3678 | static const struct seq_operations pfkey_seq_ops = { |