author     Ursula Braun <ursula.braun@de.ibm.com>    2011-12-19 17:56:31 -0500
committer  David S. Miller <davem@davemloft.net>     2011-12-20 14:05:03 -0500
commit     aac6399c6a08334282653a86ce760cff3e1755b7 (patch)
tree       58cc7b284f22ce3cc0a5491c6af9e22ab2385b33 /net/iucv/af_iucv.c
parent     9e8ba5f3ec35cba4fd8a8bebda548c4db2651e40 (diff)
af_iucv: get rid of state IUCV_SEVERED
af_iucv distinguishes unnecessarily between the states IUCV_SEVERED and IUCV_DISCONN. This patch removes state IUCV_SEVERED. While simplifying af_iucv, it also removes the second invocation of cpcmd.

Signed-off-by: Ursula Braun <ursula.braun@de.ibm.com>
Signed-off-by: Frank Blaschka <frank.blaschka@de.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
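To see the gist of the simplification without reading every hunk, the sketch below is a minimal, self-contained C illustration of the pattern the patch collapses. It uses hypothetical stand-in types (enum iucv_state, struct demo_sock) rather than the kernel's struct sock and struct iucv_sock: before the patch, the disconnect callbacks chose between IUCV_SEVERED and IUCV_DISCONN depending on whether the socket still sat on its parent's accept queue; after the patch they set IUCV_DISCONN unconditionally.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in types for illustration only; not the kernel definitions. */
enum iucv_state { IUCV_CONNECTED, IUCV_DISCONN, IUCV_SEVERED };

struct demo_sock {
	enum iucv_state state;
	bool on_accept_q;	/* models !list_empty(&iucv->accept_q) */
};

/* Old behaviour: two states distinguished only by accept-queue membership. */
static void disconnect_old(struct demo_sock *sk)
{
	sk->state = sk->on_accept_q ? IUCV_SEVERED : IUCV_DISCONN;
}

/* New behaviour: every severed connection is simply IUCV_DISCONN. */
static void disconnect_new(struct demo_sock *sk)
{
	sk->state = IUCV_DISCONN;
}

int main(void)
{
	struct demo_sock sk = { IUCV_CONNECTED, true };

	disconnect_old(&sk);
	printf("old model: state=%d\n", sk.state);	/* IUCV_SEVERED */

	disconnect_new(&sk);
	printf("new model: state=%d\n", sk.state);	/* IUCV_DISCONN */
	return 0;
}

Consumers that previously had to treat IUCV_SEVERED and IUCV_DISCONN alike (recvmsg, poll, shutdown, the PM callbacks) now only check IUCV_DISCONN, which is what the hunks below do.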
Diffstat (limited to 'net/iucv/af_iucv.c')
-rw-r--r--  net/iucv/af_iucv.c  35
1 file changed, 8 insertions(+), 27 deletions(-)
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 109e5123c9f1..d5c5b8fd1d01 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -178,7 +178,6 @@ static int afiucv_pm_freeze(struct device *dev)
 		iucv_skb_queue_purge(&iucv->send_skb_q);
 		skb_queue_purge(&iucv->backlog_skb_q);
 		switch (sk->sk_state) {
-		case IUCV_SEVERED:
 		case IUCV_DISCONN:
 		case IUCV_CLOSING:
 		case IUCV_CONNECTED:
@@ -223,7 +222,6 @@ static int afiucv_pm_restore_thaw(struct device *dev)
 			sk->sk_state_change(sk);
 			break;
 		case IUCV_DISCONN:
-		case IUCV_SEVERED:
 		case IUCV_CLOSING:
 		case IUCV_LISTEN:
 		case IUCV_BOUND:
@@ -661,16 +659,12 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
 		}
 
 		if (sk->sk_state == IUCV_CONNECTED ||
-		    sk->sk_state == IUCV_SEVERED ||
-		    sk->sk_state == IUCV_DISCONN ||	/* due to PM restore */
+		    sk->sk_state == IUCV_DISCONN ||
 		    !newsock) {
 			iucv_accept_unlink(sk);
 			if (newsock)
 				sock_graft(sk, newsock);
 
-			if (sk->sk_state == IUCV_SEVERED)
-				sk->sk_state = IUCV_DISCONN;
-
 			release_sock(sk);
 			return sk;
 		}
@@ -760,16 +754,13 @@ done:
 static int iucv_sock_autobind(struct sock *sk)
 {
 	struct iucv_sock *iucv = iucv_sk(sk);
-	char query_buffer[80];
 	char name[12];
 	int err = 0;
 
-	/* Set the userid and name */
-	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
-	if (unlikely(err))
+	if (unlikely(!pr_iucv))
 		return -EPROTO;
 
-	memcpy(iucv->src_user_id, query_buffer, 8);
+	memcpy(iucv->src_user_id, iucv_userid, 8);
 
 	write_lock_bh(&iucv_sk_list.lock);
 
@@ -1345,7 +1336,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	int blen;
 	int err = 0;
 
-	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
+	if ((sk->sk_state == IUCV_DISCONN) &&
 	    skb_queue_empty(&iucv->backlog_skb_q) &&
 	    skb_queue_empty(&sk->sk_receive_queue) &&
 	    list_empty(&iucv->message_q.list))
@@ -1492,7 +1483,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
 	if (sk->sk_state == IUCV_CLOSED)
 		mask |= POLLHUP;
 
-	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
+	if (sk->sk_state == IUCV_DISCONN)
 		mask |= POLLIN;
 
 	if (sock_writeable(sk))
@@ -1519,7 +1510,6 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
 	switch (sk->sk_state) {
 	case IUCV_DISCONN:
 	case IUCV_CLOSING:
-	case IUCV_SEVERED:
 	case IUCV_CLOSED:
 		err = -ENOTCONN;
 		goto fail;
@@ -1874,10 +1864,7 @@ static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
 {
 	struct sock *sk = path->private;
 
-	if (!list_empty(&iucv_sk(sk)->accept_q))
-		sk->sk_state = IUCV_SEVERED;
-	else
-		sk->sk_state = IUCV_DISCONN;
+	sk->sk_state = IUCV_DISCONN;
 
 	sk->sk_state_change(sk);
 }
@@ -2037,10 +2024,7 @@ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
 	/* other end of connection closed */
 	if (iucv) {
 		bh_lock_sock(sk);
-		if (!list_empty(&iucv->accept_q))
-			sk->sk_state = IUCV_SEVERED;
-		else
-			sk->sk_state = IUCV_DISCONN;
+		sk->sk_state = IUCV_DISCONN;
 		sk->sk_state_change(sk);
 		bh_unlock_sock(sk);
 	}
@@ -2269,10 +2253,7 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
 			__skb_unlink(this, list);
 			dev_put(this->dev);
 			kfree_skb(this);
-			if (!list_empty(&iucv->accept_q))
-				sk->sk_state = IUCV_SEVERED;
-			else
-				sk->sk_state = IUCV_DISCONN;
+			sk->sk_state = IUCV_DISCONN;
 			sk->sk_state_change(sk);
 			break;
 		}