author	Ursula Braun <braunu@de.ibm.com>	2007-07-14 22:04:25 -0400
committer	David S. Miller <davem@davemloft.net>	2007-07-14 22:04:25 -0400
commit	febca281f677a775c61cd0572c2f35e4ead9e7d5 (patch)
tree	64a58deba476ff3dbc7468d6d2e8e33e1351bf68 /net/iucv
parent	13fdc9a74df0fec70f421c6891e184ed8c3b9088 (diff)
[AF_IUCV]: Add lock when updating accept_q
The accept_queue of an af_iucv socket can be corrupted if entries are
added and removed at the same time (a connect request arriving for one
client while an accept call is being processed for another client).

Solution: take a lock whenever accept_q is updated.

Signed-off-by: Ursula Braun <braunu@de.ibm.com>
Acked-by: Frank Pavlic <fpavlic@de.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
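The pattern the patch applies can be illustrated outside the kernel. The sketch below is not part of the patch: a pthread mutex stands in for the kernel spinlock accept_q_lock, and conn, accept_q, accept_enqueue and accept_unlink are invented names. The point it shows is the one the fix makes: every add and every remove on the shared accept queue happens under the same lock, so a concurrent enqueue and unlink cannot corrupt the list.

/*
 * Minimal userspace sketch of the locking pattern the patch introduces
 * (not kernel code): a pthread mutex replaces the kernel spinlock, and
 * conn/accept_q/accept_enqueue/accept_unlink are illustrative names.
 */
#include <pthread.h>
#include <stdlib.h>

struct conn {
	struct conn *next, *prev;		/* doubly linked, like struct list_head */
};

static struct conn accept_q = { &accept_q, &accept_q };	/* empty circular list */
static pthread_mutex_t accept_q_lock = PTHREAD_MUTEX_INITIALIZER;

static void accept_enqueue(struct conn *c)
{
	pthread_mutex_lock(&accept_q_lock);	/* kernel: spin_lock_irqsave() */
	c->prev = accept_q.prev;		/* add at tail, like list_add_tail() */
	c->next = &accept_q;
	accept_q.prev->next = c;
	accept_q.prev = c;
	pthread_mutex_unlock(&accept_q_lock);	/* kernel: spin_unlock_irqrestore() */
}

static void accept_unlink(struct conn *c)
{
	pthread_mutex_lock(&accept_q_lock);	/* same lock on the delete path */
	c->prev->next = c->next;		/* unlink and re-init, like list_del_init() */
	c->next->prev = c->prev;
	c->next = c->prev = c;
	pthread_mutex_unlock(&accept_q_lock);
}

int main(void)
{
	struct conn *c = malloc(sizeof(*c));

	accept_enqueue(c);
	accept_unlink(c);
	free(c);
	return 0;
}

The kernel fix uses the interrupt-disabling spinlock variants, presumably because the enqueue side runs from the IUCV connection callback rather than from process context, while the unlink side runs under accept() in process context.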
Diffstat (limited to 'net/iucv')
-rw-r--r--	net/iucv/af_iucv.c	16
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index d9e9ddb8eac5..53ae14c35f70 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -219,6 +219,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
 
 	sock_init_data(sock, sk);
 	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
+	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
 	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
 	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
 	iucv_sk(sk)->send_tag = 0;
@@ -274,15 +275,25 @@ void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
 
 void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
 {
+	unsigned long flags;
+	struct iucv_sock *par = iucv_sk(parent);
+
 	sock_hold(sk);
-	list_add_tail(&iucv_sk(sk)->accept_q, &iucv_sk(parent)->accept_q);
+	spin_lock_irqsave(&par->accept_q_lock, flags);
+	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
+	spin_unlock_irqrestore(&par->accept_q_lock, flags);
 	iucv_sk(sk)->parent = parent;
 	parent->sk_ack_backlog++;
 }
 
 void iucv_accept_unlink(struct sock *sk)
 {
+	unsigned long flags;
+	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);
+
+	spin_lock_irqsave(&par->accept_q_lock, flags);
 	list_del_init(&iucv_sk(sk)->accept_q);
+	spin_unlock_irqrestore(&par->accept_q_lock, flags);
 	iucv_sk(sk)->parent->sk_ack_backlog--;
 	iucv_sk(sk)->parent = NULL;
 	sock_put(sk);
@@ -298,8 +309,8 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
 		lock_sock(sk);
 
 		if (sk->sk_state == IUCV_CLOSED) {
-			release_sock(sk);
 			iucv_accept_unlink(sk);
+			release_sock(sk);
 			continue;
 		}
 
@@ -879,6 +890,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
 	/* Find out if this path belongs to af_iucv. */
 	read_lock(&iucv_sk_list.lock);
 	iucv = NULL;
+	sk = NULL;
 	sk_for_each(sk, node, &iucv_sk_list.head)
 		if (sk->sk_state == IUCV_LISTEN &&
 		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {