path: root/net/iucv/af_iucv.c
author    Hendrik Brueckner <brueckner@linux.vnet.ibm.com>    2009-04-21 19:26:25 -0400
committer David S. Miller <davem@davemloft.net>    2009-04-23 07:04:36 -0400
commit    aa8e71f58ab8e01d63c33df40ff1bcb997c9df92 (patch)
tree      5f489c3dd44e519f331af87305b12ed0e2ee64cc /net/iucv/af_iucv.c
parent    44b1e6b5f9a93cc2ba024e09cf137d5f1b5f8426 (diff)
af_iucv: Provide new socket type SOCK_SEQPACKET
This patch provides the socket type SOCK_SEQPACKET in addition to SOCK_STREAM. AF_IUCV sockets of type SOCK_SEQPACKET support a 1:1 mapping of socket read or write operations to complete IUCV messages. Socket data or IUCV message data is not fragmented, as is the case for SOCK_STREAM sockets. The intention is to help application developers who write applications or device drivers using the native IUCV interfaces (Linux kernel or z/VM IUCV interfaces).

Signed-off-by: Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
Signed-off-by: Ursula Braun <ursula.braun@de.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/iucv/af_iucv.c')
-rw-r--r--    net/iucv/af_iucv.c    73
1 file changed, 56 insertions(+), 17 deletions(-)
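For illustration only (not part of the patch): a minimal userspace sketch of the socket types accepted after this change, matching the new iucv_sock_create() logic in the diff below. It assumes AF_IUCV is defined by the system headers and that the kernel has af_iucv support; the variable names and error handling are hypothetical.

    #include <errno.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            /* both types are served by the same iucv_sock_ops */
            int stream = socket(AF_IUCV, SOCK_STREAM, 0);
            int seqpkt = socket(AF_IUCV, SOCK_SEQPACKET, 0);

            /* any other socket type hits the default: case -> ESOCKTNOSUPPORT */
            if (socket(AF_IUCV, SOCK_DGRAM, 0) < 0 && errno == ESOCKTNOSUPPORT)
                    printf("SOCK_DGRAM rejected, as expected\n");

            /* a protocol other than 0 or PF_IUCV -> EPROTONOSUPPORT */
            if (socket(AF_IUCV, SOCK_SEQPACKET, 1) < 0 && errno == EPROTONOSUPPORT)
                    printf("unsupported protocol rejected, as expected\n");

            if (stream >= 0)
                    close(stream);
            if (seqpkt >= 0)
                    close(seqpkt);
            return 0;
    }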
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 47c5c8d3703f..95e38d3d2d74 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -289,11 +289,22 @@ static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
 {
 	struct sock *sk;
 
-	if (sock->type != SOCK_STREAM)
-		return -ESOCKTNOSUPPORT;
+	if (protocol && protocol != PF_IUCV)
+		return -EPROTONOSUPPORT;
 
 	sock->state = SS_UNCONNECTED;
-	sock->ops = &iucv_sock_ops;
+
+	switch (sock->type) {
+	case SOCK_STREAM:
+		sock->ops = &iucv_sock_ops;
+		break;
+	case SOCK_SEQPACKET:
+		/* currently, proto ops can handle both sk types */
+		sock->ops = &iucv_sock_ops;
+		break;
+	default:
+		return -ESOCKTNOSUPPORT;
+	}
 
 	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
 	if (!sk)
@@ -504,11 +515,9 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
 	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
 		return -EBADFD;
 
-	if (sk->sk_type != SOCK_STREAM)
+	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
 		return -EINVAL;
 
-	iucv = iucv_sk(sk);
-
 	if (sk->sk_state == IUCV_OPEN) {
 		err = iucv_sock_autobind(sk);
 		if (unlikely(err))
@@ -585,7 +594,10 @@ static int iucv_sock_listen(struct socket *sock, int backlog)
 	lock_sock(sk);
 
 	err = -EINVAL;
-	if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
+	if (sk->sk_state != IUCV_BOUND)
+		goto done;
+
+	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
 		goto done;
 
 	sk->sk_max_ack_backlog = backlog;
@@ -720,6 +732,10 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 	if (msg->msg_flags & MSG_OOB)
 		return -EOPNOTSUPP;
 
+	/* SOCK_SEQPACKET: we do not support segmented records */
+	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
+		return -EOPNOTSUPP;
+
 	lock_sock(sk);
 
 	if (sk->sk_shutdown & SEND_SHUTDOWN) {
@@ -770,6 +786,10 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 		}
 	}
 
+	/* allocate one skb for each iucv message:
+	 * this is fine for SOCK_SEQPACKET (unless we want to support
+	 * segmented records using the MSG_EOR flag), but
+	 * for SOCK_STREAM we might want to improve it in future */
 	if (!(skb = sock_alloc_send_skb(sk, len,
 				msg->msg_flags & MSG_DONTWAIT,
 				&err)))
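A hedged send-side sketch of the consequence of the two sendmsg() hunks above: as of this patch, a SOCK_SEQPACKET sender must mark every record with MSG_EOR, since segmented records are not supported. The 'fd' descriptor and the record buffer are hypothetical.

    #include <sys/socket.h>

    /* 'fd' is assumed to be a connected AF_IUCV SOCK_SEQPACKET socket */
    static ssize_t send_record(int fd, const void *rec, size_t len)
    {
            /*
             * One complete record per call; each send maps to one IUCV message.
             * Calling send(fd, rec, len, 0) instead, i.e. without MSG_EOR,
             * would be rejected with EOPNOTSUPP because segmented records
             * are not supported.
             */
            return send(fd, rec, len, MSG_EOR);
    }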
@@ -897,7 +917,11 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
 			kfree_skb(skb);
 			return;
 		}
-		if (skb->truesize >= sk->sk_rcvbuf / 4) {
+		/* we need to fragment iucv messages for SOCK_STREAM only;
+		 * for SOCK_SEQPACKET, it is only relevant if we support
+		 * record segmentation using MSG_EOR (see also recvmsg()) */
+		if (sk->sk_type == SOCK_STREAM &&
+		    skb->truesize >= sk->sk_rcvbuf / 4) {
 			rc = iucv_fragment_skb(sk, skb, len);
 			kfree_skb(skb);
 			skb = NULL;
@@ -941,7 +965,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	int noblock = flags & MSG_DONTWAIT;
 	struct sock *sk = sock->sk;
 	struct iucv_sock *iucv = iucv_sk(sk);
-	int target, copied = 0;
+	int target;
+	unsigned int copied, rlen;
 	struct sk_buff *skb, *rskb, *cskb;
 	int err = 0;
 
@@ -963,7 +988,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 		return err;
 	}
 
-	copied = min_t(unsigned int, skb->len, len);
+	rlen = skb->len;	/* real length of skb */
+	copied = min_t(unsigned int, rlen, len);
 
 	cskb = skb;
 	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
@@ -973,7 +999,13 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 		goto done;
 	}
 
-	len -= copied;
+	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
+	if (sk->sk_type == SOCK_SEQPACKET) {
+		if (copied < rlen)
+			msg->msg_flags |= MSG_TRUNC;
+		/* each iucv message contains a complete record */
+		msg->msg_flags |= MSG_EOR;
+	}
 
 	/* create control message to store iucv msg target class:
 	 * get the trgcls from the control buffer of the skb due to
@@ -988,11 +1020,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 
 	/* Mark read part of skb as used */
 	if (!(flags & MSG_PEEK)) {
-		skb_pull(skb, copied);
 
-		if (skb->len) {
-			skb_queue_head(&sk->sk_receive_queue, skb);
-			goto done;
+		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
+		if (sk->sk_type == SOCK_STREAM) {
+			skb_pull(skb, copied);
+			if (skb->len) {
+				skb_queue_head(&sk->sk_receive_queue, skb);
+				goto done;
+			}
 		}
 
 		kfree_skb(skb);
@@ -1019,7 +1054,11 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 		skb_queue_head(&sk->sk_receive_queue, skb);
 
 done:
-	return err ? : copied;
+	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
+	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
+		copied = rlen;
+
+	return copied;
 }
 
 static inline unsigned int iucv_accept_poll(struct sock *parent)
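Receive-side counterpart, again only a sketch based on the recvmsg() hunks above: each recvmsg() on a SOCK_SEQPACKET socket returns one complete IUCV message with MSG_EOR set; if the buffer is too small, MSG_TRUNC is set and the full record length is returned. The 'fd' descriptor and buffer are hypothetical.

    #include <stdio.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    /* 'fd' is assumed to be a connected AF_IUCV SOCK_SEQPACKET socket */
    static ssize_t recv_record(int fd, char *buf, size_t buflen)
    {
            struct iovec iov = { .iov_base = buf, .iov_len = buflen };
            struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
            ssize_t rlen = recvmsg(fd, &msg, 0);

            if (rlen < 0)
                    return rlen;

            /* each IUCV message is delivered as a complete record */
            if (!(msg.msg_flags & MSG_EOR))
                    fprintf(stderr, "unexpected: record not terminated\n");

            /* buffer too small: only buflen bytes were copied, but the
             * return value is the real record length ('copied = rlen') */
            if (msg.msg_flags & MSG_TRUNC)
                    fprintf(stderr, "record truncated: full length %zd, buffer %zu\n",
                            rlen, buflen);

            return rlen;
    }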
@@ -1281,7 +1320,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
 	}
 
 	/* Create the new socket */
-	nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
+	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
 	if (!nsk) {
 		err = iucv_path_sever(path, user_data);
 		iucv_path_free(path);
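One more hedged sketch, tied to the hunk above: because the child socket is now allocated with the parent's sk_type instead of a hard-coded SOCK_STREAM, a connection accepted on a SOCK_SEQPACKET listener is itself SOCK_SEQPACKET, which userspace can confirm with SO_TYPE. Listener setup (bind to an IUCV name, listen) is omitted; 'listen_fd' is hypothetical.

    #include <stdio.h>
    #include <sys/socket.h>

    /* 'listen_fd' is assumed to be a listening AF_IUCV SOCK_SEQPACKET socket */
    static int accept_seqpacket(int listen_fd)
    {
            int type, new_fd = accept(listen_fd, NULL, NULL);
            socklen_t len = sizeof(type);

            if (new_fd < 0)
                    return new_fd;

            /* the child inherits sk->sk_type from the listening socket */
            if (getsockopt(new_fd, SOL_SOCKET, SO_TYPE, &type, &len) == 0 &&
                type == SOCK_SEQPACKET)
                    printf("accepted socket is SOCK_SEQPACKET\n");

            return new_fd;
    }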