author     Ursula Braun <ursula.braun@de.ibm.com>    2012-03-06 21:06:24 -0500
committer  David S. Miller <davem@davemloft.net>     2012-03-08 01:52:24 -0500
commit     82492a355fac112908271faa74f473a38c1fb647 (patch)
tree       725f392bf364d81f098f2de14490e2a876603f75 /net/iucv
parent     9fbd87d413921f36d2f55cee1d082323e6eb1d5f (diff)
af_iucv: add shutdown for HS transport
AF_IUCV sockets offer a shutdown function. This patch makes sure
shutdown works for the HS transport as well.

Signed-off-by: Ursula Braun <ursula.braun@de.ibm.com>
Signed-off-by: Frank Blaschka <frank.blaschka@de.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
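As context, a minimal user-space sketch of the half-close pattern this patch makes work over the HS transport: shut down the send direction of a connected AF_IUCV socket, then drain the receive side until the peer closes. The helper name finish_sending and the already-connected descriptor fd are illustrative assumptions, not part of this patch.

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>

/* Illustrative sketch, not from this patch: half-close an
 * already-connected AF_IUCV socket "fd", then drain the
 * receive side until the peer shuts down. */
static int finish_sending(int fd)
{
	char buf[256];
	ssize_t n;

	/* Stop sending but keep receiving.  With this patch, the
	 * kernel signals the shutdown to the peer on the HS
	 * transport too (the new AF_IUCV_FLAG_SHT control frame). */
	if (shutdown(fd, SHUT_WR) < 0) {
		perror("shutdown");
		return -1;
	}

	/* A zero-length message from the peer marks its shutdown;
	 * recvmsg then sets RCV_SHUTDOWN (see the iucv_sock_recvmsg
	 * hunk below) and read() returns 0 here. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		;
	if (n < 0) {
		perror("read");
		return -1;
	}
	return 0;
}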
Diffstat (limited to 'net/iucv')
-rw-r--r--  net/iucv/af_iucv.c | 79 ++++++++++++++++++++++++++++++++++++++++++++++++++++---------------------------
1 file changed, 52 insertions(+), 27 deletions(-)
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 31537c5eb17..07d7d55a1b9 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -165,8 +165,6 @@ static int afiucv_pm_freeze(struct device *dev)
 	read_lock(&iucv_sk_list.lock);
 	sk_for_each(sk, node, &iucv_sk_list.head) {
 		iucv = iucv_sk(sk);
-		skb_queue_purge(&iucv->send_skb_q);
-		skb_queue_purge(&iucv->backlog_skb_q);
 		switch (sk->sk_state) {
 		case IUCV_DISCONN:
 		case IUCV_CLOSING:
@@ -405,7 +403,19 @@ static struct sock *__iucv_get_sock_by_name(char *nm)
 static void iucv_sock_destruct(struct sock *sk)
 {
 	skb_queue_purge(&sk->sk_receive_queue);
-	skb_queue_purge(&sk->sk_write_queue);
+	skb_queue_purge(&sk->sk_error_queue);
+
+	sk_mem_reclaim(sk);
+
+	if (!sock_flag(sk, SOCK_DEAD)) {
+		pr_err("Attempt to release alive iucv socket %p\n", sk);
+		return;
+	}
+
+	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
+	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+	WARN_ON(sk->sk_wmem_queued);
+	WARN_ON(sk->sk_forward_alloc);
 }
 
 /* Cleanup Listen */
@@ -1342,6 +1352,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 
 	rlen  = skb->len;		/* real length of skb */
 	copied = min_t(unsigned int, rlen, len);
+	if (!rlen)
+		sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
 
 	cskb = skb;
 	if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
@@ -1493,42 +1505,47 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
 
 	lock_sock(sk);
 	switch (sk->sk_state) {
+	case IUCV_LISTEN:
 	case IUCV_DISCONN:
 	case IUCV_CLOSING:
 	case IUCV_CLOSED:
 		err = -ENOTCONN;
 		goto fail;
-
 	default:
-		sk->sk_shutdown |= how;
 		break;
 	}
 
 	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
-		txmsg.class = 0;
-		txmsg.tag = 0;
-		err = pr_iucv->message_send(iucv->path, &txmsg, IUCV_IPRMDATA,
-					0, (void *) iprm_shutdown, 8);
-		if (err) {
-			switch (err) {
-			case 1:
-				err = -ENOTCONN;
-				break;
-			case 2:
-				err = -ECONNRESET;
-				break;
-			default:
-				err = -ENOTCONN;
-				break;
+		if (iucv->transport == AF_IUCV_TRANS_IUCV) {
+			txmsg.class = 0;
+			txmsg.tag = 0;
+			err = pr_iucv->message_send(iucv->path, &txmsg,
+				IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
+			if (err) {
+				switch (err) {
+				case 1:
+					err = -ENOTCONN;
+					break;
+				case 2:
+					err = -ECONNRESET;
+					break;
+				default:
+					err = -ENOTCONN;
+					break;
+				}
 			}
-		}
+		} else
+			iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
 	}
 
+	sk->sk_shutdown |= how;
 	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
-		err = pr_iucv->path_quiesce(iucv->path, NULL);
-		if (err)
-			err = -ENOTCONN;
-
+		if (iucv->transport == AF_IUCV_TRANS_IUCV) {
+			err = pr_iucv->path_quiesce(iucv->path, NULL);
+			if (err)
+				err = -ENOTCONN;
+/*			skb_queue_purge(&sk->sk_receive_queue); */
+		}
 		skb_queue_purge(&sk->sk_receive_queue);
 	}
 
@@ -2066,8 +2083,13 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
 		return NET_RX_SUCCESS;
 	}
 
+	if (sk->sk_shutdown & RCV_SHUTDOWN) {
+		kfree_skb(skb);
+		return NET_RX_SUCCESS;
+	}
+
 	/* write stuff from iucv_msg to skb cb */
-	if (skb->len <= sizeof(struct af_iucv_trans_hdr)) {
+	if (skb->len < sizeof(struct af_iucv_trans_hdr)) {
 		kfree_skb(skb);
 		return NET_RX_SUCCESS;
 	}
@@ -2173,7 +2195,10 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
 			kfree_skb(skb);
 			break;
 		}
-		/* fall through */
+		/* fall through and receive non-zero length data */
+	case (AF_IUCV_FLAG_SHT):
+		/* shutdown request */
+		/* fall through and receive zero length data */
 	case 0:
 		/* plain data frame */
 		memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,