aboutsummaryrefslogtreecommitdiffstats
path: root/net/iucv
diff options
context:
space:
mode:
Diffstat (limited to 'net/iucv')
-rw-r--r--net/iucv/af_iucv.c404
-rw-r--r--net/iucv/iucv.c43
2 files changed, 386 insertions, 61 deletions
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index b51c9187c347..0fc00087ea8b 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -29,10 +29,7 @@
29#include <net/iucv/iucv.h> 29#include <net/iucv/iucv.h>
30#include <net/iucv/af_iucv.h> 30#include <net/iucv/af_iucv.h>
31 31
32#define CONFIG_IUCV_SOCK_DEBUG 1 32#define VERSION "1.1"
33
34#define IPRMDATA 0x80
35#define VERSION "1.0"
36 33
37static char iucv_userid[80]; 34static char iucv_userid[80];
38 35
@@ -44,6 +41,19 @@ static struct proto iucv_proto = {
44 .obj_size = sizeof(struct iucv_sock), 41 .obj_size = sizeof(struct iucv_sock),
45}; 42};
46 43
44/* special AF_IUCV IPRM messages */
45static const u8 iprm_shutdown[8] =
46 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
47
48#define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class))
49
50/* macros to set/get socket control buffer at correct offset */
51#define CB_TAG(skb) ((skb)->cb) /* iucv message tag */
52#define CB_TAG_LEN (sizeof(((struct iucv_message *) 0)->tag))
53#define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
54#define CB_TRGCLS_LEN (TRGCLS_SIZE)
55
56
47static void iucv_sock_kill(struct sock *sk); 57static void iucv_sock_kill(struct sock *sk);
48static void iucv_sock_close(struct sock *sk); 58static void iucv_sock_close(struct sock *sk);
49 59
@@ -54,6 +64,7 @@ static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
54static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8], 64static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
55 u8 ipuser[16]); 65 u8 ipuser[16]);
56static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]); 66static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
67static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);
57 68
58static struct iucv_sock_list iucv_sk_list = { 69static struct iucv_sock_list iucv_sk_list = {
59 .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock), 70 .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
@@ -65,7 +76,8 @@ static struct iucv_handler af_iucv_handler = {
65 .path_complete = iucv_callback_connack, 76 .path_complete = iucv_callback_connack,
66 .path_severed = iucv_callback_connrej, 77 .path_severed = iucv_callback_connrej,
67 .message_pending = iucv_callback_rx, 78 .message_pending = iucv_callback_rx,
68 .message_complete = iucv_callback_txdone 79 .message_complete = iucv_callback_txdone,
80 .path_quiesced = iucv_callback_shutdown,
69}; 81};
70 82
71static inline void high_nmcpy(unsigned char *dst, char *src) 83static inline void high_nmcpy(unsigned char *dst, char *src)
@@ -78,6 +90,37 @@ static inline void low_nmcpy(unsigned char *dst, char *src)
78 memcpy(&dst[8], src, 8); 90 memcpy(&dst[8], src, 8);
79} 91}
80 92
93/**
94 * iucv_msg_length() - Returns the length of an iucv message.
95 * @msg: Pointer to struct iucv_message, MUST NOT be NULL
96 *
97 * The function returns the length of the specified iucv message @msg of data
98 * stored in a buffer and of data stored in the parameter list (PRMDATA).
99 *
100 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
101 * data:
102 * PRMDATA[0..6] socket data (max 7 bytes);
103 * PRMDATA[7] socket data length value (len is 0xff - PRMDATA[7])
104 *
105 * The socket data length is computed by subtracting the socket data length
106 * value from 0xFF.
107 * If the socket data len is greater than 7, then PRMDATA can be used for special
108 * notifications (see iucv_sock_shutdown); and further,
109 * if the socket data len is > 7, the function returns 8.
110 *
111 * Use this function to allocate socket buffers to store iucv message data.
112 */
113static inline size_t iucv_msg_length(struct iucv_message *msg)
114{
115 size_t datalen;
116
117 if (msg->flags & IUCV_IPRMDATA) {
118 datalen = 0xff - msg->rmmsg[7];
119 return (datalen < 8) ? datalen : 8;
120 }
121 return msg->length;
122}
123
81/* Timers */ 124/* Timers */
82static void iucv_sock_timeout(unsigned long arg) 125static void iucv_sock_timeout(unsigned long arg)
83{ 126{
@@ -225,6 +268,8 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
225 spin_lock_init(&iucv_sk(sk)->message_q.lock); 268 spin_lock_init(&iucv_sk(sk)->message_q.lock);
226 skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q); 269 skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
227 iucv_sk(sk)->send_tag = 0; 270 iucv_sk(sk)->send_tag = 0;
271 iucv_sk(sk)->flags = 0;
272 iucv_sk(sk)->msglimit = IUCV_QUEUELEN_DEFAULT;
228 iucv_sk(sk)->path = NULL; 273 iucv_sk(sk)->path = NULL;
229 memset(&iucv_sk(sk)->src_user_id , 0, 32); 274 memset(&iucv_sk(sk)->src_user_id , 0, 32);
230 275
@@ -248,11 +293,22 @@ static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
248{ 293{
249 struct sock *sk; 294 struct sock *sk;
250 295
251 if (sock->type != SOCK_STREAM) 296 if (protocol && protocol != PF_IUCV)
252 return -ESOCKTNOSUPPORT; 297 return -EPROTONOSUPPORT;
253 298
254 sock->state = SS_UNCONNECTED; 299 sock->state = SS_UNCONNECTED;
255 sock->ops = &iucv_sock_ops; 300
301 switch (sock->type) {
302 case SOCK_STREAM:
303 sock->ops = &iucv_sock_ops;
304 break;
305 case SOCK_SEQPACKET:
306 /* currently, proto ops can handle both sk types */
307 sock->ops = &iucv_sock_ops;
308 break;
309 default:
310 return -ESOCKTNOSUPPORT;
311 }
256 312
257 sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL); 313 sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
258 if (!sk) 314 if (!sk)
@@ -463,11 +519,9 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
463 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND) 519 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
464 return -EBADFD; 520 return -EBADFD;
465 521
466 if (sk->sk_type != SOCK_STREAM) 522 if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
467 return -EINVAL; 523 return -EINVAL;
468 524
469 iucv = iucv_sk(sk);
470
471 if (sk->sk_state == IUCV_OPEN) { 525 if (sk->sk_state == IUCV_OPEN) {
472 err = iucv_sock_autobind(sk); 526 err = iucv_sock_autobind(sk);
473 if (unlikely(err)) 527 if (unlikely(err))
@@ -486,8 +540,8 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
486 540
487 iucv = iucv_sk(sk); 541 iucv = iucv_sk(sk);
488 /* Create path. */ 542 /* Create path. */
489 iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT, 543 iucv->path = iucv_path_alloc(iucv->msglimit,
490 IPRMDATA, GFP_KERNEL); 544 IUCV_IPRMDATA, GFP_KERNEL);
491 if (!iucv->path) { 545 if (!iucv->path) {
492 err = -ENOMEM; 546 err = -ENOMEM;
493 goto done; 547 goto done;
@@ -521,8 +575,7 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
521 } 575 }
522 576
523 if (sk->sk_state == IUCV_DISCONN) { 577 if (sk->sk_state == IUCV_DISCONN) {
524 release_sock(sk); 578 err = -ECONNREFUSED;
525 return -ECONNREFUSED;
526 } 579 }
527 580
528 if (err) { 581 if (err) {
@@ -545,7 +598,10 @@ static int iucv_sock_listen(struct socket *sock, int backlog)
545 lock_sock(sk); 598 lock_sock(sk);
546 599
547 err = -EINVAL; 600 err = -EINVAL;
548 if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM) 601 if (sk->sk_state != IUCV_BOUND)
602 goto done;
603
604 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
549 goto done; 605 goto done;
550 606
551 sk->sk_max_ack_backlog = backlog; 607 sk->sk_max_ack_backlog = backlog;
@@ -636,6 +692,30 @@ static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
636 return 0; 692 return 0;
637} 693}
638 694
695/**
696 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
697 * @path: IUCV path
698 * @msg: Pointer to a struct iucv_message
699 * @skb: The socket data to send, skb->len MUST BE <= 7
700 *
701 * Send the socket data in the parameter list in the iucv message
702 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
703 * list and the socket data len at index 7 (last byte).
704 * See also iucv_msg_length().
705 *
706 * Returns the error code from the iucv_message_send() call.
707 */
708static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
709 struct sk_buff *skb)
710{
711 u8 prmdata[8];
712
713 memcpy(prmdata, (void *) skb->data, skb->len);
714 prmdata[7] = 0xff - (u8) skb->len;
715 return iucv_message_send(path, msg, IUCV_IPRMDATA, 0,
716 (void *) prmdata, 8);
717}
718
639static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, 719static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
640 struct msghdr *msg, size_t len) 720 struct msghdr *msg, size_t len)
641{ 721{
@@ -643,6 +723,8 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
643 struct iucv_sock *iucv = iucv_sk(sk); 723 struct iucv_sock *iucv = iucv_sk(sk);
644 struct sk_buff *skb; 724 struct sk_buff *skb;
645 struct iucv_message txmsg; 725 struct iucv_message txmsg;
726 struct cmsghdr *cmsg;
727 int cmsg_done;
646 char user_id[9]; 728 char user_id[9];
647 char appl_id[9]; 729 char appl_id[9];
648 int err; 730 int err;
@@ -654,6 +736,10 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
654 if (msg->msg_flags & MSG_OOB) 736 if (msg->msg_flags & MSG_OOB)
655 return -EOPNOTSUPP; 737 return -EOPNOTSUPP;
656 738
739 /* SOCK_SEQPACKET: we do not support segmented records */
740 if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
741 return -EOPNOTSUPP;
742
657 lock_sock(sk); 743 lock_sock(sk);
658 744
659 if (sk->sk_shutdown & SEND_SHUTDOWN) { 745 if (sk->sk_shutdown & SEND_SHUTDOWN) {
@@ -662,6 +748,52 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
662 } 748 }
663 749
664 if (sk->sk_state == IUCV_CONNECTED) { 750 if (sk->sk_state == IUCV_CONNECTED) {
751 /* initialize defaults */
752 cmsg_done = 0; /* check for duplicate headers */
753 txmsg.class = 0;
754
755 /* iterate over control messages */
756 for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
757 cmsg = CMSG_NXTHDR(msg, cmsg)) {
758
759 if (!CMSG_OK(msg, cmsg)) {
760 err = -EINVAL;
761 goto out;
762 }
763
764 if (cmsg->cmsg_level != SOL_IUCV)
765 continue;
766
767 if (cmsg->cmsg_type & cmsg_done) {
768 err = -EINVAL;
769 goto out;
770 }
771 cmsg_done |= cmsg->cmsg_type;
772
773 switch (cmsg->cmsg_type) {
774 case SCM_IUCV_TRGCLS:
775 if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
776 err = -EINVAL;
777 goto out;
778 }
779
780 /* set iucv message target class */
781 memcpy(&txmsg.class,
782 (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
783
784 break;
785
786 default:
787 err = -EINVAL;
788 goto out;
789 break;
790 }
791 }
792
793 /* allocate one skb for each iucv message:
794 * this is fine for SOCK_SEQPACKET (unless we want to support
795 * segmented records using the MSG_EOR flag), but
796 * for SOCK_STREAM we might want to improve it in future */
665 if (!(skb = sock_alloc_send_skb(sk, len, 797 if (!(skb = sock_alloc_send_skb(sk, len,
666 msg->msg_flags & MSG_DONTWAIT, 798 msg->msg_flags & MSG_DONTWAIT,
667 &err))) 799 &err)))
@@ -672,13 +804,33 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
672 goto fail; 804 goto fail;
673 } 805 }
674 806
675 txmsg.class = 0; 807 /* increment and save iucv message tag for msg_completion cbk */
676 memcpy(&txmsg.class, skb->data, skb->len >= 4 ? 4 : skb->len);
677 txmsg.tag = iucv->send_tag++; 808 txmsg.tag = iucv->send_tag++;
678 memcpy(skb->cb, &txmsg.tag, 4); 809 memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
679 skb_queue_tail(&iucv->send_skb_q, skb); 810 skb_queue_tail(&iucv->send_skb_q, skb);
680 err = iucv_message_send(iucv->path, &txmsg, 0, 0, 811
681 (void *) skb->data, skb->len); 812 if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
813 && skb->len <= 7) {
814 err = iucv_send_iprm(iucv->path, &txmsg, skb);
815
816 /* on success: there is no message_complete callback
817 * for an IPRMDATA msg; remove skb from send queue */
818 if (err == 0) {
819 skb_unlink(skb, &iucv->send_skb_q);
820 kfree_skb(skb);
821 }
822
823 /* this error should never happen since the
824 * IUCV_IPRMDATA path flag is set... sever path */
825 if (err == 0x15) {
826 iucv_path_sever(iucv->path, NULL);
827 skb_unlink(skb, &iucv->send_skb_q);
828 err = -EPIPE;
829 goto fail;
830 }
831 } else
832 err = iucv_message_send(iucv->path, &txmsg, 0, 0,
833 (void *) skb->data, skb->len);
682 if (err) { 834 if (err) {
683 if (err == 3) { 835 if (err == 3) {
684 user_id[8] = 0; 836 user_id[8] = 0;
@@ -725,6 +877,10 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
725 if (!nskb) 877 if (!nskb)
726 return -ENOMEM; 878 return -ENOMEM;
727 879
880 /* copy target class to control buffer of new skb */
881 memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);
882
883 /* copy data fragment */
728 memcpy(nskb->data, skb->data + copied, size); 884 memcpy(nskb->data, skb->data + copied, size);
729 copied += size; 885 copied += size;
730 dataleft -= size; 886 dataleft -= size;
@@ -744,19 +900,33 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
744 struct iucv_message *msg) 900 struct iucv_message *msg)
745{ 901{
746 int rc; 902 int rc;
903 unsigned int len;
904
905 len = iucv_msg_length(msg);
906
907 /* store msg target class in the second 4 bytes of skb ctrl buffer */
908 /* Note: the first 4 bytes are reserved for msg tag */
909 memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);
747 910
748 if (msg->flags & IPRMDATA) { 911 /* check for special IPRM messages (e.g. iucv_sock_shutdown) */
749 skb->data = NULL; 912 if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
750 skb->len = 0; 913 if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
914 skb->data = NULL;
915 skb->len = 0;
916 }
751 } else { 917 } else {
752 rc = iucv_message_receive(path, msg, 0, skb->data, 918 rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA,
753 msg->length, NULL); 919 skb->data, len, NULL);
754 if (rc) { 920 if (rc) {
755 kfree_skb(skb); 921 kfree_skb(skb);
756 return; 922 return;
757 } 923 }
758 if (skb->truesize >= sk->sk_rcvbuf / 4) { 924 /* we need to fragment iucv messages for SOCK_STREAM only;
759 rc = iucv_fragment_skb(sk, skb, msg->length); 925 * for SOCK_SEQPACKET, it is only relevant if we support
926 * record segmentation using MSG_EOR (see also recvmsg()) */
927 if (sk->sk_type == SOCK_STREAM &&
928 skb->truesize >= sk->sk_rcvbuf / 4) {
929 rc = iucv_fragment_skb(sk, skb, len);
760 kfree_skb(skb); 930 kfree_skb(skb);
761 skb = NULL; 931 skb = NULL;
762 if (rc) { 932 if (rc) {
@@ -767,7 +937,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
767 } else { 937 } else {
768 skb_reset_transport_header(skb); 938 skb_reset_transport_header(skb);
769 skb_reset_network_header(skb); 939 skb_reset_network_header(skb);
770 skb->len = msg->length; 940 skb->len = len;
771 } 941 }
772 } 942 }
773 943
@@ -782,7 +952,7 @@ static void iucv_process_message_q(struct sock *sk)
782 struct sock_msg_q *p, *n; 952 struct sock_msg_q *p, *n;
783 953
784 list_for_each_entry_safe(p, n, &iucv->message_q.list, list) { 954 list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
785 skb = alloc_skb(p->msg.length, GFP_ATOMIC | GFP_DMA); 955 skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
786 if (!skb) 956 if (!skb)
787 break; 957 break;
788 iucv_process_message(sk, skb, p->path, &p->msg); 958 iucv_process_message(sk, skb, p->path, &p->msg);
@@ -799,7 +969,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
799 int noblock = flags & MSG_DONTWAIT; 969 int noblock = flags & MSG_DONTWAIT;
800 struct sock *sk = sock->sk; 970 struct sock *sk = sock->sk;
801 struct iucv_sock *iucv = iucv_sk(sk); 971 struct iucv_sock *iucv = iucv_sk(sk);
802 int target, copied = 0; 972 unsigned int copied, rlen;
803 struct sk_buff *skb, *rskb, *cskb; 973 struct sk_buff *skb, *rskb, *cskb;
804 int err = 0; 974 int err = 0;
805 975
@@ -823,25 +993,45 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
823 return err; 993 return err;
824 } 994 }
825 995
826 copied = min_t(unsigned int, skb->len, len); 996 rlen = skb->len; /* real length of skb */
997 copied = min_t(unsigned int, rlen, len);
827 998
828 cskb = skb; 999 cskb = skb;
829 if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) { 1000 if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
830 skb_queue_head(&sk->sk_receive_queue, skb); 1001 if (!(flags & MSG_PEEK))
831 if (copied == 0) 1002 skb_queue_head(&sk->sk_receive_queue, skb);
832 return -EFAULT; 1003 return -EFAULT;
833 goto done;
834 } 1004 }
835 1005
836 len -= copied; 1006 /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
1007 if (sk->sk_type == SOCK_SEQPACKET) {
1008 if (copied < rlen)
1009 msg->msg_flags |= MSG_TRUNC;
1010 /* each iucv message contains a complete record */
1011 msg->msg_flags |= MSG_EOR;
1012 }
1013
1014 /* create control message to store iucv msg target class:
1015 * get the trgcls from the control buffer of the skb due to
1016 * fragmentation of original iucv message. */
1017 err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
1018 CB_TRGCLS_LEN, CB_TRGCLS(skb));
1019 if (err) {
1020 if (!(flags & MSG_PEEK))
1021 skb_queue_head(&sk->sk_receive_queue, skb);
1022 return err;
1023 }
837 1024
838 /* Mark read part of skb as used */ 1025 /* Mark read part of skb as used */
839 if (!(flags & MSG_PEEK)) { 1026 if (!(flags & MSG_PEEK)) {
840 skb_pull(skb, copied);
841 1027
842 if (skb->len) { 1028 /* SOCK_STREAM: re-queue skb if it contains unreceived data */
843 skb_queue_head(&sk->sk_receive_queue, skb); 1029 if (sk->sk_type == SOCK_STREAM) {
844 goto done; 1030 skb_pull(skb, copied);
1031 if (skb->len) {
1032 skb_queue_head(&sk->sk_receive_queue, skb);
1033 goto done;
1034 }
845 } 1035 }
846 1036
847 kfree_skb(skb); 1037 kfree_skb(skb);
@@ -866,7 +1056,11 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
866 } 1056 }
867 1057
868done: 1058done:
869 return err ? : copied; 1059 /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
1060 if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
1061 copied = rlen;
1062
1063 return copied;
870} 1064}
871 1065
872static inline unsigned int iucv_accept_poll(struct sock *parent) 1066static inline unsigned int iucv_accept_poll(struct sock *parent)
@@ -928,7 +1122,6 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
928 struct iucv_sock *iucv = iucv_sk(sk); 1122 struct iucv_sock *iucv = iucv_sk(sk);
929 struct iucv_message txmsg; 1123 struct iucv_message txmsg;
930 int err = 0; 1124 int err = 0;
931 u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
932 1125
933 how++; 1126 how++;
934 1127
@@ -953,7 +1146,7 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
953 txmsg.class = 0; 1146 txmsg.class = 0;
954 txmsg.tag = 0; 1147 txmsg.tag = 0;
955 err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0, 1148 err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
956 (void *) prmmsg, 8); 1149 (void *) iprm_shutdown, 8);
957 if (err) { 1150 if (err) {
958 switch (err) { 1151 switch (err) {
959 case 1: 1152 case 1:
@@ -1007,6 +1200,98 @@ static int iucv_sock_release(struct socket *sock)
1007 return err; 1200 return err;
1008} 1201}
1009 1202
1203/* getsockopt and setsockopt */
1204static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
1205 char __user *optval, int optlen)
1206{
1207 struct sock *sk = sock->sk;
1208 struct iucv_sock *iucv = iucv_sk(sk);
1209 int val;
1210 int rc;
1211
1212 if (level != SOL_IUCV)
1213 return -ENOPROTOOPT;
1214
1215 if (optlen < sizeof(int))
1216 return -EINVAL;
1217
1218 if (get_user(val, (int __user *) optval))
1219 return -EFAULT;
1220
1221 rc = 0;
1222
1223 lock_sock(sk);
1224 switch (optname) {
1225 case SO_IPRMDATA_MSG:
1226 if (val)
1227 iucv->flags |= IUCV_IPRMDATA;
1228 else
1229 iucv->flags &= ~IUCV_IPRMDATA;
1230 break;
1231 case SO_MSGLIMIT:
1232 switch (sk->sk_state) {
1233 case IUCV_OPEN:
1234 case IUCV_BOUND:
1235 if (val < 1 || val > (u16)(~0))
1236 rc = -EINVAL;
1237 else
1238 iucv->msglimit = val;
1239 break;
1240 default:
1241 rc = -EINVAL;
1242 break;
1243 }
1244 break;
1245 default:
1246 rc = -ENOPROTOOPT;
1247 break;
1248 }
1249 release_sock(sk);
1250
1251 return rc;
1252}
1253
1254static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
1255 char __user *optval, int __user *optlen)
1256{
1257 struct sock *sk = sock->sk;
1258 struct iucv_sock *iucv = iucv_sk(sk);
1259 int val, len;
1260
1261 if (level != SOL_IUCV)
1262 return -ENOPROTOOPT;
1263
1264 if (get_user(len, optlen))
1265 return -EFAULT;
1266
1267 if (len < 0)
1268 return -EINVAL;
1269
1270 len = min_t(unsigned int, len, sizeof(int));
1271
1272 switch (optname) {
1273 case SO_IPRMDATA_MSG:
1274 val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
1275 break;
1276 case SO_MSGLIMIT:
1277 lock_sock(sk);
1278 val = (iucv->path != NULL) ? iucv->path->msglim /* connected */
1279 : iucv->msglimit; /* default */
1280 release_sock(sk);
1281 break;
1282 default:
1283 return -ENOPROTOOPT;
1284 }
1285
1286 if (put_user(len, optlen))
1287 return -EFAULT;
1288 if (copy_to_user(optval, &val, len))
1289 return -EFAULT;
1290
1291 return 0;
1292}
1293
1294
1010/* Callback wrappers - called from iucv base support */ 1295/* Callback wrappers - called from iucv base support */
1011static int iucv_callback_connreq(struct iucv_path *path, 1296static int iucv_callback_connreq(struct iucv_path *path,
1012 u8 ipvmid[8], u8 ipuser[16]) 1297 u8 ipvmid[8], u8 ipuser[16])
@@ -1060,7 +1345,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
1060 } 1345 }
1061 1346
1062 /* Create the new socket */ 1347 /* Create the new socket */
1063 nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC); 1348 nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
1064 if (!nsk) { 1349 if (!nsk) {
1065 err = iucv_path_sever(path, user_data); 1350 err = iucv_path_sever(path, user_data);
1066 iucv_path_free(path); 1351 iucv_path_free(path);
@@ -1083,7 +1368,9 @@ static int iucv_callback_connreq(struct iucv_path *path,
1083 memcpy(nuser_data + 8, niucv->src_name, 8); 1368 memcpy(nuser_data + 8, niucv->src_name, 8);
1084 ASCEBC(nuser_data + 8, 8); 1369 ASCEBC(nuser_data + 8, 8);
1085 1370
1086 path->msglim = IUCV_QUEUELEN_DEFAULT; 1371 /* set message limit for path based on msglimit of accepting socket */
1372 niucv->msglimit = iucv->msglimit;
1373 path->msglim = iucv->msglimit;
1087 err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk); 1374 err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
1088 if (err) { 1375 if (err) {
1089 err = iucv_path_sever(path, user_data); 1376 err = iucv_path_sever(path, user_data);
@@ -1131,11 +1418,11 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1131 goto save_message; 1418 goto save_message;
1132 1419
1133 len = atomic_read(&sk->sk_rmem_alloc); 1420 len = atomic_read(&sk->sk_rmem_alloc);
1134 len += msg->length + sizeof(struct sk_buff); 1421 len += iucv_msg_length(msg) + sizeof(struct sk_buff);
1135 if (len > sk->sk_rcvbuf) 1422 if (len > sk->sk_rcvbuf)
1136 goto save_message; 1423 goto save_message;
1137 1424
1138 skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA); 1425 skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
1139 if (!skb) 1426 if (!skb)
1140 goto save_message; 1427 goto save_message;
1141 1428
@@ -1170,7 +1457,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
1170 spin_lock_irqsave(&list->lock, flags); 1457 spin_lock_irqsave(&list->lock, flags);
1171 1458
1172 while (list_skb != (struct sk_buff *)list) { 1459 while (list_skb != (struct sk_buff *)list) {
1173 if (!memcmp(&msg->tag, list_skb->cb, 4)) { 1460 if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
1174 this = list_skb; 1461 this = list_skb;
1175 break; 1462 break;
1176 } 1463 }
@@ -1206,6 +1493,21 @@ static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
1206 sk->sk_state_change(sk); 1493 sk->sk_state_change(sk);
1207} 1494}
1208 1495
1496/* called if the other communication side shuts down its RECV direction;
1497 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
1498 */
1499static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
1500{
1501 struct sock *sk = path->private;
1502
1503 bh_lock_sock(sk);
1504 if (sk->sk_state != IUCV_CLOSED) {
1505 sk->sk_shutdown |= SEND_SHUTDOWN;
1506 sk->sk_state_change(sk);
1507 }
1508 bh_unlock_sock(sk);
1509}
1510
1209static struct proto_ops iucv_sock_ops = { 1511static struct proto_ops iucv_sock_ops = {
1210 .family = PF_IUCV, 1512 .family = PF_IUCV,
1211 .owner = THIS_MODULE, 1513 .owner = THIS_MODULE,
@@ -1222,8 +1524,8 @@ static struct proto_ops iucv_sock_ops = {
1222 .mmap = sock_no_mmap, 1524 .mmap = sock_no_mmap,
1223 .socketpair = sock_no_socketpair, 1525 .socketpair = sock_no_socketpair,
1224 .shutdown = iucv_sock_shutdown, 1526 .shutdown = iucv_sock_shutdown,
1225 .setsockopt = sock_no_setsockopt, 1527 .setsockopt = iucv_sock_setsockopt,
1226 .getsockopt = sock_no_getsockopt 1528 .getsockopt = iucv_sock_getsockopt,
1227}; 1529};
1228 1530
1229static struct net_proto_family iucv_sock_family_ops = { 1531static struct net_proto_family iucv_sock_family_ops = {
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index a35240f61ec3..61e8038a55ee 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -280,6 +280,7 @@ union iucv_param {
280 * Anchor for per-cpu IUCV command parameter block. 280 * Anchor for per-cpu IUCV command parameter block.
281 */ 281 */
282static union iucv_param *iucv_param[NR_CPUS]; 282static union iucv_param *iucv_param[NR_CPUS];
283static union iucv_param *iucv_param_irq[NR_CPUS];
283 284
284/** 285/**
285 * iucv_call_b2f0 286 * iucv_call_b2f0
@@ -358,7 +359,7 @@ static void iucv_allow_cpu(void *data)
358 * 0x10 - Flag to allow priority message completion interrupts 359 * 0x10 - Flag to allow priority message completion interrupts
359 * 0x08 - Flag to allow IUCV control interrupts 360 * 0x08 - Flag to allow IUCV control interrupts
360 */ 361 */
361 parm = iucv_param[cpu]; 362 parm = iucv_param_irq[cpu];
362 memset(parm, 0, sizeof(union iucv_param)); 363 memset(parm, 0, sizeof(union iucv_param));
363 parm->set_mask.ipmask = 0xf8; 364 parm->set_mask.ipmask = 0xf8;
364 iucv_call_b2f0(IUCV_SETMASK, parm); 365 iucv_call_b2f0(IUCV_SETMASK, parm);
@@ -379,7 +380,7 @@ static void iucv_block_cpu(void *data)
379 union iucv_param *parm; 380 union iucv_param *parm;
380 381
381 /* Disable all iucv interrupts. */ 382 /* Disable all iucv interrupts. */
382 parm = iucv_param[cpu]; 383 parm = iucv_param_irq[cpu];
383 memset(parm, 0, sizeof(union iucv_param)); 384 memset(parm, 0, sizeof(union iucv_param));
384 iucv_call_b2f0(IUCV_SETMASK, parm); 385 iucv_call_b2f0(IUCV_SETMASK, parm);
385 386
@@ -403,7 +404,7 @@ static void iucv_declare_cpu(void *data)
403 return; 404 return;
404 405
405 /* Declare interrupt buffer. */ 406 /* Declare interrupt buffer. */
406 parm = iucv_param[cpu]; 407 parm = iucv_param_irq[cpu];
407 memset(parm, 0, sizeof(union iucv_param)); 408 memset(parm, 0, sizeof(union iucv_param));
408 parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]); 409 parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]);
409 rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm); 410 rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
@@ -460,7 +461,7 @@ static void iucv_retrieve_cpu(void *data)
460 iucv_block_cpu(NULL); 461 iucv_block_cpu(NULL);
461 462
462 /* Retrieve interrupt buffer. */ 463 /* Retrieve interrupt buffer. */
463 parm = iucv_param[cpu]; 464 parm = iucv_param_irq[cpu];
464 iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm); 465 iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);
465 466
466 /* Clear indication that an iucv buffer exists for this cpu. */ 467 /* Clear indication that an iucv buffer exists for this cpu. */
@@ -574,11 +575,22 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
574 iucv_irq_data[cpu] = NULL; 575 iucv_irq_data[cpu] = NULL;
575 return NOTIFY_BAD; 576 return NOTIFY_BAD;
576 } 577 }
578 iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
579 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
580 if (!iucv_param_irq[cpu]) {
581 kfree(iucv_param[cpu]);
582 iucv_param[cpu] = NULL;
583 kfree(iucv_irq_data[cpu]);
584 iucv_irq_data[cpu] = NULL;
585 return NOTIFY_BAD;
586 }
577 break; 587 break;
578 case CPU_UP_CANCELED: 588 case CPU_UP_CANCELED:
579 case CPU_UP_CANCELED_FROZEN: 589 case CPU_UP_CANCELED_FROZEN:
580 case CPU_DEAD: 590 case CPU_DEAD:
581 case CPU_DEAD_FROZEN: 591 case CPU_DEAD_FROZEN:
592 kfree(iucv_param_irq[cpu]);
593 iucv_param_irq[cpu] = NULL;
582 kfree(iucv_param[cpu]); 594 kfree(iucv_param[cpu]);
583 iucv_param[cpu] = NULL; 595 iucv_param[cpu] = NULL;
584 kfree(iucv_irq_data[cpu]); 596 kfree(iucv_irq_data[cpu]);
@@ -625,7 +637,7 @@ static int iucv_sever_pathid(u16 pathid, u8 userdata[16])
625{ 637{
626 union iucv_param *parm; 638 union iucv_param *parm;
627 639
628 parm = iucv_param[smp_processor_id()]; 640 parm = iucv_param_irq[smp_processor_id()];
629 memset(parm, 0, sizeof(union iucv_param)); 641 memset(parm, 0, sizeof(union iucv_param));
630 if (userdata) 642 if (userdata)
631 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); 643 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
@@ -918,10 +930,8 @@ int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
918 if (iucv_active_cpu != smp_processor_id()) 930 if (iucv_active_cpu != smp_processor_id())
919 spin_lock_bh(&iucv_table_lock); 931 spin_lock_bh(&iucv_table_lock);
920 rc = iucv_sever_pathid(path->pathid, userdata); 932 rc = iucv_sever_pathid(path->pathid, userdata);
921 if (!rc) { 933 iucv_path_table[path->pathid] = NULL;
922 iucv_path_table[path->pathid] = NULL; 934 list_del_init(&path->list);
923 list_del_init(&path->list);
924 }
925 if (iucv_active_cpu != smp_processor_id()) 935 if (iucv_active_cpu != smp_processor_id())
926 spin_unlock_bh(&iucv_table_lock); 936 spin_unlock_bh(&iucv_table_lock);
927 preempt_enable(); 937 preempt_enable();
@@ -1378,6 +1388,8 @@ static void iucv_path_complete(struct iucv_irq_data *data)
1378 struct iucv_path_complete *ipc = (void *) data; 1388 struct iucv_path_complete *ipc = (void *) data;
1379 struct iucv_path *path = iucv_path_table[ipc->ippathid]; 1389 struct iucv_path *path = iucv_path_table[ipc->ippathid];
1380 1390
1391 if (path)
1392 path->flags = ipc->ipflags1;
1381 if (path && path->handler && path->handler->path_complete) 1393 if (path && path->handler && path->handler->path_complete)
1382 path->handler->path_complete(path, ipc->ipuser); 1394 path->handler->path_complete(path, ipc->ipuser);
1383} 1395}
@@ -1413,7 +1425,7 @@ static void iucv_path_severed(struct iucv_irq_data *data)
1413 else { 1425 else {
1414 iucv_sever_pathid(path->pathid, NULL); 1426 iucv_sever_pathid(path->pathid, NULL);
1415 iucv_path_table[path->pathid] = NULL; 1427 iucv_path_table[path->pathid] = NULL;
1416 list_del_init(&path->list); 1428 list_del(&path->list);
1417 iucv_path_free(path); 1429 iucv_path_free(path);
1418 } 1430 }
1419} 1431}
@@ -1717,6 +1729,13 @@ static int __init iucv_init(void)
1717 rc = -ENOMEM; 1729 rc = -ENOMEM;
1718 goto out_free; 1730 goto out_free;
1719 } 1731 }
1732 iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
1733 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
1734 if (!iucv_param_irq[cpu]) {
1735 rc = -ENOMEM;
1736 goto out_free;
1737 }
1738
1720 } 1739 }
1721 rc = register_hotcpu_notifier(&iucv_cpu_notifier); 1740 rc = register_hotcpu_notifier(&iucv_cpu_notifier);
1722 if (rc) 1741 if (rc)
@@ -1734,6 +1753,8 @@ out_cpu:
1734 unregister_hotcpu_notifier(&iucv_cpu_notifier); 1753 unregister_hotcpu_notifier(&iucv_cpu_notifier);
1735out_free: 1754out_free:
1736 for_each_possible_cpu(cpu) { 1755 for_each_possible_cpu(cpu) {
1756 kfree(iucv_param_irq[cpu]);
1757 iucv_param_irq[cpu] = NULL;
1737 kfree(iucv_param[cpu]); 1758 kfree(iucv_param[cpu]);
1738 iucv_param[cpu] = NULL; 1759 iucv_param[cpu] = NULL;
1739 kfree(iucv_irq_data[cpu]); 1760 kfree(iucv_irq_data[cpu]);
@@ -1764,6 +1785,8 @@ static void __exit iucv_exit(void)
1764 spin_unlock_irq(&iucv_queue_lock); 1785 spin_unlock_irq(&iucv_queue_lock);
1765 unregister_hotcpu_notifier(&iucv_cpu_notifier); 1786 unregister_hotcpu_notifier(&iucv_cpu_notifier);
1766 for_each_possible_cpu(cpu) { 1787 for_each_possible_cpu(cpu) {
1788 kfree(iucv_param_irq[cpu]);
1789 iucv_param_irq[cpu] = NULL;
1767 kfree(iucv_param[cpu]); 1790 kfree(iucv_param[cpu]);
1768 iucv_param[cpu] = NULL; 1791 iucv_param[cpu] = NULL;
1769 kfree(iucv_irq_data[cpu]); 1792 kfree(iucv_irq_data[cpu]);