 include/net/iucv/af_iucv.h |  1 +
 net/iucv/af_iucv.c         | 16 ++++++++++++++--
 2 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index f9bd11be1891..b6c468cd7f5b 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -60,6 +60,7 @@ struct iucv_sock {
 	char			dst_user_id[8];
 	char			dst_name[8];
 	struct list_head	accept_q;
+	spinlock_t		accept_q_lock;
 	struct sock		*parent;
 	struct iucv_path	*path;
 	struct sk_buff_head	send_skb_q;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index d9e9ddb8eac5..53ae14c35f70 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -219,6 +219,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
 
 	sock_init_data(sock, sk);
 	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
+	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
 	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
 	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
 	iucv_sk(sk)->send_tag = 0;
@@ -274,15 +275,25 @@ void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
 
 void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
 {
+	unsigned long flags;
+	struct iucv_sock *par = iucv_sk(parent);
+
 	sock_hold(sk);
-	list_add_tail(&iucv_sk(sk)->accept_q, &iucv_sk(parent)->accept_q);
+	spin_lock_irqsave(&par->accept_q_lock, flags);
+	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
+	spin_unlock_irqrestore(&par->accept_q_lock, flags);
 	iucv_sk(sk)->parent = parent;
 	parent->sk_ack_backlog++;
 }
 
 void iucv_accept_unlink(struct sock *sk)
 {
+	unsigned long flags;
+	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);
+
+	spin_lock_irqsave(&par->accept_q_lock, flags);
 	list_del_init(&iucv_sk(sk)->accept_q);
+	spin_unlock_irqrestore(&par->accept_q_lock, flags);
 	iucv_sk(sk)->parent->sk_ack_backlog--;
 	iucv_sk(sk)->parent = NULL;
 	sock_put(sk);
@@ -298,8 +309,8 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
 		lock_sock(sk);
 
 		if (sk->sk_state == IUCV_CLOSED) {
-			release_sock(sk);
 			iucv_accept_unlink(sk);
+			release_sock(sk);
 			continue;
 		}
 
@@ -879,6 +890,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
 	/* Find out if this path belongs to af_iucv. */
 	read_lock(&iucv_sk_list.lock);
 	iucv = NULL;
+	sk = NULL;
 	sk_for_each(sk, node, &iucv_sk_list.head)
 		if (sk->sk_state == IUCV_LISTEN &&
 		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
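
For reference, the pattern the patch applies to accept_q is the standard spinlock-protected list manipulation. Below is a minimal, illustrative sketch of that pattern, assuming a kernel build context; the demo_* names are hypothetical and are not part of the patch above.

/*
 * Illustrative sketch only: hypothetical demo_* names, not part of the
 * patch.  Mirrors the accept_q locking pattern: a spinlock guards a
 * list that may be touched outside plain process context, hence
 * spin_lock_irqsave()/spin_unlock_irqrestore() rather than spin_lock().
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_queue {
	struct list_head	head;
	spinlock_t		lock;	/* protects head, like accept_q_lock */
};

struct demo_entry {
	struct list_head	node;
};

static void demo_queue_init(struct demo_queue *q)
{
	INIT_LIST_HEAD(&q->head);
	spin_lock_init(&q->lock);
}

static void demo_enqueue(struct demo_queue *q, struct demo_entry *e)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	list_add_tail(&e->node, &q->head);
	spin_unlock_irqrestore(&q->lock, flags);
}

static void demo_unlink(struct demo_queue *q, struct demo_entry *e)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	list_del_init(&e->node);
	spin_unlock_irqrestore(&q->lock, flags);
}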