author	Ursula Braun <ursula.braun@de.ibm.com>	2011-08-07 21:33:54 -0400
committer	David S. Miller <davem@davemloft.net>	2011-08-13 04:10:16 -0400
commit	3881ac441f642d56503818123446f7298442236b (patch)
tree	79dfd48212f559c18e70ed5d142101ce37897152 /net/iucv/af_iucv.c
parent	4dc83dfd3efa015628ebaa7245d342c8d5ca0298 (diff)
af_iucv: add HiperSockets transport
The current transport mechanism for af_iucv is the z/VM offered communications facility IUCV. To provide equivalent support when running Linux in an LPAR, HiperSockets transport is added to the AF_IUCV address family. It requires explicit binding of an AF_IUCV socket to a HiperSockets device. A new packet_type ETH_P_AF_IUCV is announced. An af_iucv specific transport header is defined preceding the skb data. A small protocol is implemented for connecting and for flow control/congestion management.

Signed-off-by: Ursula Braun <ursula.braun@de.ibm.com>
Signed-off-by: Frank Blaschka <frank.blaschka@de.ibm.com>
Reviewed-by: Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/iucv/af_iucv.c')
-rw-r--r--	net/iucv/af_iucv.c	749
1 file changed, 677 insertions(+), 72 deletions(-)
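
The flow control/congestion management mentioned in the commit message shows up in the diff below as a message window: iucv_below_msglim() gates sending, recvmsg() confirms consumed messages with a WIN frame once half of the announced window is used, and afiucv_hs_callback_win() credits the sender again. The following minimal sketch models that logic only; the struct and helper names are invented for illustration and merely mirror the counters visible in the patch (msg_sent, msg_recv, pendings, msglimit, msglimit_peer). It is not the kernel code:

#include <stdbool.h>

/* Simplified model of the AF_IUCV HiperSockets window handling
 * (illustration only, not the kernel implementation). */
struct hs_flow {
	int msg_sent;       /* sent but not yet confirmed by the peer */
	int msg_recv;       /* received but not yet confirmed to the peer */
	int pendings;       /* transmits the device reported as pending */
	int msglimit;       /* local window announced in the SYN frame */
	int msglimit_peer;  /* peer window taken from SYN / SYN|ACK */
};

/* mirrors iucv_below_msglim(): may another message be handed to the device? */
static bool hs_may_send(const struct hs_flow *f)
{
	return f->msg_sent < f->msglimit_peer && f->pendings <= 0;
}

/* mirrors the recvmsg() path: confirm consumed messages with a WIN frame
 * once half of the announced window has been used up */
static bool hs_should_send_win(const struct hs_flow *f)
{
	return f->msg_recv >= f->msglimit / 2;
}

/* mirrors afiucv_hs_callback_win(): the peer confirmed 'window' messages */
static void hs_on_win(struct hs_flow *f, int window)
{
	f->msg_sent -= window;
}
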
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 1d8cb72da3a4..c39f3a43cd80 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -27,10 +27,9 @@
 #include <asm/cpcmd.h>
 #include <linux/kmod.h>
 
-#include <net/iucv/iucv.h>
 #include <net/iucv/af_iucv.h>
 
-#define VERSION "1.1"
+#define VERSION "1.2"
 
 static char iucv_userid[80];
 
@@ -92,6 +91,12 @@ do { \
 static void iucv_sock_kill(struct sock *sk);
 static void iucv_sock_close(struct sock *sk);
 
+static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
+	struct packet_type *pt, struct net_device *orig_dev);
+static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
+	struct sk_buff *skb, u8 flags);
+static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
+
 /* Call Back functions */
 static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
 static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
@@ -296,7 +301,11 @@ static inline int iucv_below_msglim(struct sock *sk)
 
 	if (sk->sk_state != IUCV_CONNECTED)
 		return 1;
-	return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+	if (iucv->transport == AF_IUCV_TRANS_IUCV)
+		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+	else
+		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
+			(atomic_read(&iucv->pendings) <= 0));
 }
 
 /**
@@ -314,6 +323,79 @@ static void iucv_sock_wake_msglim(struct sock *sk)
 	rcu_read_unlock();
 }
 
+/**
+ * afiucv_hs_send() - send a message through HiperSockets transport
+ */
+static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
+		   struct sk_buff *skb, u8 flags)
+{
+	struct net *net = sock_net(sock);
+	struct iucv_sock *iucv = iucv_sk(sock);
+	struct af_iucv_trans_hdr *phs_hdr;
+	struct sk_buff *nskb;
+	int err, confirm_recv = 0;
+
+	memset(skb->head, 0, ETH_HLEN);
+	phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
+					sizeof(struct af_iucv_trans_hdr));
+	skb_reset_mac_header(skb);
+	skb_reset_network_header(skb);
+	skb_push(skb, ETH_HLEN);
+	skb_reset_mac_header(skb);
+	memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));
+
+	phs_hdr->magic = ETH_P_AF_IUCV;
+	phs_hdr->version = 1;
+	phs_hdr->flags = flags;
+	if (flags == AF_IUCV_FLAG_SYN)
+		phs_hdr->window = iucv->msglimit;
+	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
+		confirm_recv = atomic_read(&iucv->msg_recv);
+		phs_hdr->window = confirm_recv;
+		if (confirm_recv)
+			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
+	}
+	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
+	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
+	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
+	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
+	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
+	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
+	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
+	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
+	if (imsg)
+		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
+
+	rcu_read_lock();
+	skb->dev = dev_get_by_index_rcu(net, sock->sk_bound_dev_if);
+	rcu_read_unlock();
+	if (!skb->dev)
+		return -ENODEV;
+	if (!(skb->dev->flags & IFF_UP))
+		return -ENETDOWN;
+	if (skb->len > skb->dev->mtu) {
+		if (sock->sk_type == SOCK_SEQPACKET)
+			return -EMSGSIZE;
+		else
+			skb_trim(skb, skb->dev->mtu);
+	}
+	skb->protocol = ETH_P_AF_IUCV;
+	skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
+	nskb = skb_clone(skb, GFP_ATOMIC);
+	if (!nskb)
+		return -ENOMEM;
+	skb_queue_tail(&iucv->send_skb_q, nskb);
+	err = dev_queue_xmit(skb);
+	if (err) {
+		skb_unlink(nskb, &iucv->send_skb_q);
+		kfree_skb(nskb);
+	} else {
+		atomic_sub(confirm_recv, &iucv->msg_recv);
+		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
+	}
+	return err;
+}
+
 /* Timers */
 static void iucv_sock_timeout(unsigned long arg)
 {
@@ -382,6 +464,8 @@ static void iucv_sock_close(struct sock *sk)
 	unsigned char user_data[16];
 	struct iucv_sock *iucv = iucv_sk(sk);
 	unsigned long timeo;
+	int err, blen;
+	struct sk_buff *skb;
 
 	iucv_sock_clear_timer(sk);
 	lock_sock(sk);
@@ -392,6 +476,20 @@ static void iucv_sock_close(struct sock *sk)
 		break;
 
 	case IUCV_CONNECTED:
+		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
+			/* send fin */
+			blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
+			skb = sock_alloc_send_skb(sk, blen, 1, &err);
+			if (skb) {
+				skb_reserve(skb,
+					sizeof(struct af_iucv_trans_hdr) +
+					ETH_HLEN);
+				err = afiucv_hs_send(NULL, sk, skb,
+						AF_IUCV_FLAG_FIN);
+			}
+			sk->sk_state = IUCV_DISCONN;
+			sk->sk_state_change(sk);
+		}
 	case IUCV_DISCONN:
 		sk->sk_state = IUCV_CLOSING;
 		sk->sk_state_change(sk);
@@ -461,10 +559,18 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
 	spin_lock_init(&iucv->message_q.lock);
 	skb_queue_head_init(&iucv->backlog_skb_q);
 	iucv->send_tag = 0;
+	atomic_set(&iucv->pendings, 0);
 	iucv->flags = 0;
-	iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
+	iucv->msglimit = 0;
+	atomic_set(&iucv->msg_sent, 0);
+	atomic_set(&iucv->msg_recv, 0);
 	iucv->path = NULL;
+	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
 	memset(&iucv->src_user_id , 0, 32);
+	if (pr_iucv)
+		iucv->transport = AF_IUCV_TRANS_IUCV;
+	else
+		iucv->transport = AF_IUCV_TRANS_HIPER;
 
 	sk->sk_destruct = iucv_sock_destruct;
 	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
@@ -595,7 +701,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
 	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
 	struct sock *sk = sock->sk;
 	struct iucv_sock *iucv;
-	int err;
+	int err = 0;
+	struct net_device *dev;
+	char uid[9];
 
 	/* Verify the input sockaddr */
 	if (!addr || addr->sa_family != AF_IUCV)
@@ -614,19 +722,46 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
 		err = -EADDRINUSE;
 		goto done_unlock;
 	}
-	if (iucv->path) {
-		err = 0;
+	if (iucv->path)
 		goto done_unlock;
-	}
 
 	/* Bind the socket */
-	memcpy(iucv->src_name, sa->siucv_name, 8);
 
-	/* Copy the user id */
-	memcpy(iucv->src_user_id, iucv_userid, 8);
-	sk->sk_state = IUCV_BOUND;
-	err = 0;
+	if (pr_iucv)
+		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
+			goto vm_bind; /* VM IUCV transport */
 
+	/* try hiper transport */
+	memcpy(uid, sa->siucv_user_id, sizeof(uid));
+	ASCEBC(uid, 8);
+	rcu_read_lock();
+	for_each_netdev_rcu(&init_net, dev) {
+		if (!memcmp(dev->perm_addr, uid, 8)) {
+			memcpy(iucv->src_name, sa->siucv_name, 8);
+			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
+			sock->sk->sk_bound_dev_if = dev->ifindex;
+			sk->sk_state = IUCV_BOUND;
+			iucv->transport = AF_IUCV_TRANS_HIPER;
+			if (!iucv->msglimit)
+				iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
+			rcu_read_unlock();
+			goto done_unlock;
+		}
+	}
+	rcu_read_unlock();
+vm_bind:
+	if (pr_iucv) {
+		/* use local userid for backward compat */
+		memcpy(iucv->src_name, sa->siucv_name, 8);
+		memcpy(iucv->src_user_id, iucv_userid, 8);
+		sk->sk_state = IUCV_BOUND;
+		iucv->transport = AF_IUCV_TRANS_IUCV;
+		if (!iucv->msglimit)
+			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
+		goto done_unlock;
+	}
+	/* found no dev to bind */
+	err = -ENODEV;
 done_unlock:
 	/* Release the socket list lock */
 	write_unlock_bh(&iucv_sk_list.lock);
@@ -662,12 +797,33 @@ static int iucv_sock_autobind(struct sock *sk)
 
 	memcpy(&iucv->src_name, name, 8);
 
+	if (!iucv->msglimit)
+		iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
+
 	return err;
 }
 
-/* Connect an unconnected socket */
-static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
-			     int alen, int flags)
+static int afiucv_hs_connect(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+	struct sk_buff *skb;
+	int blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
+	int err = 0;
+
+	/* send syn */
+	skb = sock_alloc_send_skb(sk, blen, 1, &err);
+	if (!skb) {
+		err = -ENOMEM;
+		goto done;
+	}
+	skb->dev = NULL;
+	skb_reserve(skb, blen);
+	err = afiucv_hs_send(NULL, sk, skb, AF_IUCV_FLAG_SYN);
+done:
+	return err;
+}
+
+static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
 {
 	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
 	struct sock *sk = sock->sk;
@@ -675,27 +831,6 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
 	unsigned char user_data[16];
 	int err;
 
-	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
-		return -EINVAL;
-
-	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
-		return -EBADFD;
-
-	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
-		return -EINVAL;
-
-	if (sk->sk_state == IUCV_OPEN) {
-		err = iucv_sock_autobind(sk);
-		if (unlikely(err))
-			return err;
-	}
-
-	lock_sock(sk);
-
-	/* Set the destination information */
-	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
-	memcpy(iucv->dst_name, sa->siucv_name, 8);
-
 	high_nmcpy(user_data, sa->siucv_name);
 	low_nmcpy(user_data, iucv->src_name);
 	ASCEBC(user_data, sizeof(user_data));
@@ -728,20 +863,61 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
 		err = -ECONNREFUSED;
 		break;
 	}
-		goto done;
 	}
+done:
+	return err;
+}
 
-	if (sk->sk_state != IUCV_CONNECTED) {
+/* Connect an unconnected socket */
+static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
+			     int alen, int flags)
+{
+	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
+	struct sock *sk = sock->sk;
+	struct iucv_sock *iucv = iucv_sk(sk);
+	int err;
+
+	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
+		return -EINVAL;
+
+	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
+		return -EBADFD;
+
+	if (sk->sk_state == IUCV_OPEN &&
+	    iucv->transport == AF_IUCV_TRANS_HIPER)
+		return -EBADFD; /* explicit bind required */
+
+	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
+		return -EINVAL;
+
+	if (sk->sk_state == IUCV_OPEN) {
+		err = iucv_sock_autobind(sk);
+		if (unlikely(err))
+			return err;
+	}
+
+	lock_sock(sk);
+
+	/* Set the destination information */
+	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
+	memcpy(iucv->dst_name, sa->siucv_name, 8);
+
+	if (iucv->transport == AF_IUCV_TRANS_HIPER)
+		err = afiucv_hs_connect(sock);
+	else
+		err = afiucv_path_connect(sock, addr);
+	if (err)
+		goto done;
+
+	if (sk->sk_state != IUCV_CONNECTED)
 		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
 							    IUCV_DISCONN),
 				     sock_sndtimeo(sk, flags & O_NONBLOCK));
-	}
 
-	if (sk->sk_state == IUCV_DISCONN) {
+	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
 		err = -ECONNREFUSED;
-	}
 
-	if (err) {
+	if (err && iucv->transport == AF_IUCV_TRANS_IUCV) {
 		pr_iucv->path_sever(iucv->path, NULL);
 		iucv_path_free(iucv->path);
 		iucv->path = NULL;
@@ -965,9 +1141,16 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 	 * this is fine for SOCK_SEQPACKET (unless we want to support
 	 * segmented records using the MSG_EOR flag), but
 	 * for SOCK_STREAM we might want to improve it in future */
-	skb = sock_alloc_send_skb(sk, len, noblock, &err);
+	if (iucv->transport == AF_IUCV_TRANS_HIPER)
+		skb = sock_alloc_send_skb(sk,
+			len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
+			noblock, &err);
+	else
+		skb = sock_alloc_send_skb(sk, len, noblock, &err);
 	if (!skb)
 		goto out;
+	if (iucv->transport == AF_IUCV_TRANS_HIPER)
+		skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
 	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
 		err = -EFAULT;
 		goto fail;
@@ -988,6 +1171,15 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 	/* increment and save iucv message tag for msg_completion cbk */
 	txmsg.tag = iucv->send_tag++;
 	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
+	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
+		atomic_inc(&iucv->msg_sent);
+		err = afiucv_hs_send(&txmsg, sk, skb, 0);
+		if (err) {
+			atomic_dec(&iucv->msg_sent);
+			goto fail;
+		}
+		goto release;
+	}
 	skb_queue_tail(&iucv->send_skb_q, skb);
 
 	if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
@@ -1028,6 +1220,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 		goto fail;
 	}
 
+release:
 	release_sock(sk);
 	return len;
 
@@ -1160,7 +1353,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	struct sock *sk = sock->sk;
 	struct iucv_sock *iucv = iucv_sk(sk);
 	unsigned int copied, rlen;
-	struct sk_buff *skb, *rskb, *cskb;
+	struct sk_buff *skb, *rskb, *cskb, *sskb;
+	int blen;
 	int err = 0;
 
 	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
@@ -1185,7 +1379,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	copied = min_t(unsigned int, rlen, len);
 
 	cskb = skb;
-	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
+	if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
 		if (!(flags & MSG_PEEK))
 			skb_queue_head(&sk->sk_receive_queue, skb);
 		return -EFAULT;
@@ -1223,6 +1417,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	}
 
 	kfree_skb(skb);
+	atomic_inc(&iucv->msg_recv);
 
 	/* Queue backlog skbs */
 	spin_lock_bh(&iucv->message_q.lock);
@@ -1239,6 +1434,24 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 		if (skb_queue_empty(&iucv->backlog_skb_q)) {
 			if (!list_empty(&iucv->message_q.list))
 				iucv_process_message_q(sk);
+			if (atomic_read(&iucv->msg_recv) >=
+							iucv->msglimit / 2) {
+				/* send WIN to peer */
+				blen = sizeof(struct af_iucv_trans_hdr) +
+					ETH_HLEN;
+				sskb = sock_alloc_send_skb(sk, blen, 1, &err);
+				if (sskb) {
+					skb_reserve(sskb,
+						sizeof(struct af_iucv_trans_hdr)
+						+ ETH_HLEN);
+					err = afiucv_hs_send(NULL, sk, sskb,
+							AF_IUCV_FLAG_WIN);
+				}
+				if (err) {
+					sk->sk_state = IUCV_DISCONN;
+					sk->sk_state_change(sk);
+				}
+			}
 		}
 		spin_unlock_bh(&iucv->message_q.lock);
 	}
@@ -1698,6 +1911,389 @@ static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
 	bh_unlock_sock(sk);
 }
 
+/***************** HiperSockets transport callbacks ********************/
+static void afiucv_swap_src_dest(struct sk_buff *skb)
+{
+	struct af_iucv_trans_hdr *trans_hdr =
+				(struct af_iucv_trans_hdr *)skb->data;
+	char tmpID[8];
+	char tmpName[8];
+
+	ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
+	ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
+	ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
+	ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
+	memcpy(tmpID, trans_hdr->srcUserID, 8);
+	memcpy(tmpName, trans_hdr->srcAppName, 8);
+	memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
+	memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
+	memcpy(trans_hdr->destUserID, tmpID, 8);
+	memcpy(trans_hdr->destAppName, tmpName, 8);
+	skb_push(skb, ETH_HLEN);
+	memset(skb->data, 0, ETH_HLEN);
+}
+
+/**
+ * afiucv_hs_callback_syn - react on received SYN
+ **/
+static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
+{
+	struct sock *nsk;
+	struct iucv_sock *iucv, *niucv;
+	struct af_iucv_trans_hdr *trans_hdr;
+	int err;
+
+	iucv = iucv_sk(sk);
+	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
+	if (!iucv) {
+		/* no sock - connection refused */
+		afiucv_swap_src_dest(skb);
+		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
+		err = dev_queue_xmit(skb);
+		goto out;
+	}
+
+	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
+	bh_lock_sock(sk);
+	if ((sk->sk_state != IUCV_LISTEN) ||
+	    sk_acceptq_is_full(sk) ||
+	    !nsk) {
+		/* error on server socket - connection refused */
+		if (nsk)
+			sk_free(nsk);
+		afiucv_swap_src_dest(skb);
+		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
+		err = dev_queue_xmit(skb);
+		bh_unlock_sock(sk);
+		goto out;
+	}
+
+	niucv = iucv_sk(nsk);
+	iucv_sock_init(nsk, sk);
+	niucv->transport = AF_IUCV_TRANS_HIPER;
+	niucv->msglimit = iucv->msglimit;
+	if (!trans_hdr->window)
+		niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
+	else
+		niucv->msglimit_peer = trans_hdr->window;
+	memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
+	memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
+	memcpy(niucv->src_name, iucv->src_name, 8);
+	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
+	nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
+	afiucv_swap_src_dest(skb);
+	trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
+	trans_hdr->window = niucv->msglimit;
+	/* if receiver acks the xmit connection is established */
+	err = dev_queue_xmit(skb);
+	if (!err) {
+		iucv_accept_enqueue(sk, nsk);
+		nsk->sk_state = IUCV_CONNECTED;
+		sk->sk_data_ready(sk, 1);
+	} else
+		iucv_sock_kill(nsk);
+	bh_unlock_sock(sk);
+
+out:
+	return NET_RX_SUCCESS;
+}
+
+/**
+ * afiucv_hs_callback_synack() - react on received SYN-ACK
+ **/
+static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
+{
+	struct iucv_sock *iucv = iucv_sk(sk);
+	struct af_iucv_trans_hdr *trans_hdr =
+				(struct af_iucv_trans_hdr *)skb->data;
+
+	if (!iucv)
+		goto out;
+	if (sk->sk_state != IUCV_BOUND)
+		goto out;
+	bh_lock_sock(sk);
+	iucv->msglimit_peer = trans_hdr->window;
+	sk->sk_state = IUCV_CONNECTED;
+	sk->sk_state_change(sk);
+	bh_unlock_sock(sk);
+out:
+	kfree_skb(skb);
+	return NET_RX_SUCCESS;
+}
+
+/**
+ * afiucv_hs_callback_synfin() - react on received SYN_FIN
+ **/
+static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
+{
+	struct iucv_sock *iucv = iucv_sk(sk);
+
+	if (!iucv)
+		goto out;
+	if (sk->sk_state != IUCV_BOUND)
+		goto out;
+	bh_lock_sock(sk);
+	sk->sk_state = IUCV_DISCONN;
+	sk->sk_state_change(sk);
+	bh_unlock_sock(sk);
+out:
+	kfree_skb(skb);
+	return NET_RX_SUCCESS;
+}
+
+/**
+ * afiucv_hs_callback_fin() - react on received FIN
+ **/
+static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
+{
+	struct iucv_sock *iucv = iucv_sk(sk);
+
+	/* other end of connection closed */
+	if (iucv) {
+		bh_lock_sock(sk);
+		if (!list_empty(&iucv->accept_q))
+			sk->sk_state = IUCV_SEVERED;
+		else
+			sk->sk_state = IUCV_DISCONN;
+		sk->sk_state_change(sk);
+		bh_unlock_sock(sk);
+	}
+	kfree_skb(skb);
+	return NET_RX_SUCCESS;
+}
+
+/**
+ * afiucv_hs_callback_win() - react on received WIN
+ **/
+static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
+{
+	struct iucv_sock *iucv = iucv_sk(sk);
+	struct af_iucv_trans_hdr *trans_hdr =
+				(struct af_iucv_trans_hdr *)skb->data;
+
+	if (!iucv)
+		return NET_RX_SUCCESS;
+
+	if (sk->sk_state != IUCV_CONNECTED)
+		return NET_RX_SUCCESS;
+
+	atomic_sub(trans_hdr->window, &iucv->msg_sent);
+	iucv_sock_wake_msglim(sk);
+	return NET_RX_SUCCESS;
+}
+
+/**
+ * afiucv_hs_callback_rx() - react on received data
+ **/
+static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
+{
+	struct iucv_sock *iucv = iucv_sk(sk);
+
+	if (!iucv) {
+		kfree_skb(skb);
+		return NET_RX_SUCCESS;
+	}
+
+	if (sk->sk_state != IUCV_CONNECTED) {
+		kfree_skb(skb);
+		return NET_RX_SUCCESS;
+	}
+
+	/* write stuff from iucv_msg to skb cb */
+	if (skb->len <= sizeof(struct af_iucv_trans_hdr)) {
+		kfree_skb(skb);
+		return NET_RX_SUCCESS;
+	}
+	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
+	skb_reset_transport_header(skb);
+	skb_reset_network_header(skb);
+	spin_lock(&iucv->message_q.lock);
+	if (skb_queue_empty(&iucv->backlog_skb_q)) {
+		if (sock_queue_rcv_skb(sk, skb)) {
+			/* handle rcv queue full */
+			skb_queue_tail(&iucv->backlog_skb_q, skb);
+		}
+	} else
+		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
+	spin_unlock(&iucv->message_q.lock);
+	return NET_RX_SUCCESS;
+}
+
+/**
+ * afiucv_hs_rcv() - base function for arriving data through HiperSockets
+ *                   transport
+ *                   called from netif RX softirq
+ **/
+static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
+	struct packet_type *pt, struct net_device *orig_dev)
+{
+	struct hlist_node *node;
+	struct sock *sk;
+	struct iucv_sock *iucv;
+	struct af_iucv_trans_hdr *trans_hdr;
+	char nullstring[8];
+	int err = 0;
+
+	skb_pull(skb, ETH_HLEN);
+	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
+	EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
+	EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
+	EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
+	EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
+	memset(nullstring, 0, sizeof(nullstring));
+	iucv = NULL;
+	sk = NULL;
+	read_lock(&iucv_sk_list.lock);
+	sk_for_each(sk, node, &iucv_sk_list.head) {
+		if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
+			if ((!memcmp(&iucv_sk(sk)->src_name,
+				     trans_hdr->destAppName, 8)) &&
+			    (!memcmp(&iucv_sk(sk)->src_user_id,
+				     trans_hdr->destUserID, 8)) &&
+			    (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
+			    (!memcmp(&iucv_sk(sk)->dst_user_id,
+				     nullstring, 8))) {
+				iucv = iucv_sk(sk);
+				break;
+			}
+		} else {
+			if ((!memcmp(&iucv_sk(sk)->src_name,
+				     trans_hdr->destAppName, 8)) &&
+			    (!memcmp(&iucv_sk(sk)->src_user_id,
+				     trans_hdr->destUserID, 8)) &&
+			    (!memcmp(&iucv_sk(sk)->dst_name,
+				     trans_hdr->srcAppName, 8)) &&
+			    (!memcmp(&iucv_sk(sk)->dst_user_id,
+				     trans_hdr->srcUserID, 8))) {
+				iucv = iucv_sk(sk);
+				break;
+			}
+		}
+	}
+	read_unlock(&iucv_sk_list.lock);
+	if (!iucv)
+		sk = NULL;
+
+	/* no sock
+	   how should we send with no sock
+	   1) send without sock no send rc checking?
+	   2) introduce default sock to handle this cases
+
+	   SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
+	   data -> send FIN
+	   SYN|ACK, SYN|FIN, FIN -> no action? */
+
+	switch (trans_hdr->flags) {
+	case AF_IUCV_FLAG_SYN:
+		/* connect request */
+		err = afiucv_hs_callback_syn(sk, skb);
+		break;
+	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
+		/* connect request confirmed */
+		err = afiucv_hs_callback_synack(sk, skb);
+		break;
+	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
+		/* connect request refused */
+		err = afiucv_hs_callback_synfin(sk, skb);
+		break;
+	case (AF_IUCV_FLAG_FIN):
+		/* close request */
+		err = afiucv_hs_callback_fin(sk, skb);
+		break;
+	case (AF_IUCV_FLAG_WIN):
+		err = afiucv_hs_callback_win(sk, skb);
+		if (skb->len > sizeof(struct af_iucv_trans_hdr))
+			err = afiucv_hs_callback_rx(sk, skb);
+		else
+			kfree(skb);
+		break;
+	case 0:
+		/* plain data frame */
+		err = afiucv_hs_callback_rx(sk, skb);
+		break;
+	default:
+		;
+	}
+
+	return err;
+}
+
+/**
+ * afiucv_hs_callback_txnotify() - handle send notifcations from HiperSockets
+ *                                 transport
+ **/
+static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
+					enum iucv_tx_notify n)
+{
+	struct sock *isk = skb->sk;
+	struct sock *sk = NULL;
+	struct iucv_sock *iucv = NULL;
+	struct sk_buff_head *list;
+	struct sk_buff *list_skb;
+	struct sk_buff *this = NULL;
+	unsigned long flags;
+	struct hlist_node *node;
+
+	read_lock(&iucv_sk_list.lock);
+	sk_for_each(sk, node, &iucv_sk_list.head)
+		if (sk == isk) {
+			iucv = iucv_sk(sk);
+			break;
+		}
+	read_unlock(&iucv_sk_list.lock);
+
+	if (!iucv)
+		return;
+
+	bh_lock_sock(sk);
+	list = &iucv->send_skb_q;
+	list_skb = list->next;
+	if (skb_queue_empty(list))
+		goto out_unlock;
+
+	spin_lock_irqsave(&list->lock, flags);
+	while (list_skb != (struct sk_buff *)list) {
+		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
+			this = list_skb;
+			switch (n) {
+			case TX_NOTIFY_OK:
+				__skb_unlink(this, list);
+				iucv_sock_wake_msglim(sk);
+				kfree_skb(this);
+				break;
+			case TX_NOTIFY_PENDING:
+				atomic_inc(&iucv->pendings);
+				break;
+			case TX_NOTIFY_DELAYED_OK:
+				__skb_unlink(this, list);
+				atomic_dec(&iucv->pendings);
+				if (atomic_read(&iucv->pendings) <= 0)
+					iucv_sock_wake_msglim(sk);
+				kfree_skb(this);
+				break;
+			case TX_NOTIFY_UNREACHABLE:
+			case TX_NOTIFY_DELAYED_UNREACHABLE:
+			case TX_NOTIFY_TPQFULL: /* not yet used */
+			case TX_NOTIFY_GENERALERROR:
+			case TX_NOTIFY_DELAYED_GENERALERROR:
+				__skb_unlink(this, list);
+				kfree_skb(this);
+				if (!list_empty(&iucv->accept_q))
+					sk->sk_state = IUCV_SEVERED;
+				else
+					sk->sk_state = IUCV_DISCONN;
+				sk->sk_state_change(sk);
+				break;
+			}
+			break;
+		}
+		list_skb = list_skb->next;
+	}
+	spin_unlock_irqrestore(&list->lock, flags);
+
+out_unlock:
+	bh_unlock_sock(sk);
+}
 static const struct proto_ops iucv_sock_ops = {
 	.family		= PF_IUCV,
 	.owner		= THIS_MODULE,
@@ -1724,7 +2320,12 @@ static const struct net_proto_family iucv_sock_family_ops = {
 	.create	= iucv_sock_create,
 };
 
-static int __init afiucv_iucv_init(void)
+static struct packet_type iucv_packet_type = {
+	.type = cpu_to_be16(ETH_P_AF_IUCV),
+	.func = afiucv_hs_rcv,
+};
+
+static int afiucv_iucv_init(void)
 {
 	int err;
 
@@ -1763,24 +2364,22 @@ static int __init afiucv_init(void)
 {
 	int err;
 
-	if (!MACHINE_IS_VM) {
-		pr_err("The af_iucv module cannot be loaded"
-		       " without z/VM\n");
-		err = -EPROTONOSUPPORT;
-		goto out;
-	}
-	cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
-	if (unlikely(err)) {
-		WARN_ON(err);
-		err = -EPROTONOSUPPORT;
-		goto out;
-	}
+	if (MACHINE_IS_VM) {
+		cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
+		if (unlikely(err)) {
+			WARN_ON(err);
+			err = -EPROTONOSUPPORT;
+			goto out;
+		}
 
 	pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
 	if (!pr_iucv) {
 		printk(KERN_WARNING "iucv_if lookup failed\n");
-		err = -EPROTONOSUPPORT;
-		goto out;
+		memset(&iucv_userid, 0, sizeof(iucv_userid));
+	}
+	} else {
+		memset(&iucv_userid, 0, sizeof(iucv_userid));
+		pr_iucv = NULL;
 	}
 
 	err = proto_register(&iucv_proto, 0);
@@ -1790,10 +2389,12 @@ static int __init afiucv_init(void)
 	if (err)
 		goto out_proto;
 
-	err = afiucv_iucv_init();
-	if (err)
-		goto out_sock;
-
+	if (pr_iucv) {
+		err = afiucv_iucv_init();
+		if (err)
+			goto out_sock;
+	}
+	dev_add_pack(&iucv_packet_type);
 	return 0;
 
 out_sock:
@@ -1808,10 +2409,13 @@ out:
 
 static void __exit afiucv_exit(void)
 {
-	device_unregister(af_iucv_dev);
-	driver_unregister(&af_iucv_driver);
-	pr_iucv->iucv_unregister(&af_iucv_handler, 0);
-	symbol_put(iucv_if);
+	if (pr_iucv) {
+		device_unregister(af_iucv_dev);
+		driver_unregister(&af_iucv_driver);
+		pr_iucv->iucv_unregister(&af_iucv_handler, 0);
+		symbol_put(iucv_if);
+	}
+	dev_remove_pack(&iucv_packet_type);
 	sock_unregister(PF_IUCV);
 	proto_unregister(&iucv_proto);
 }
@@ -1824,3 +2428,4 @@ MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
 MODULE_VERSION(VERSION);
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_NETPROTO(PF_IUCV);
+
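
As the commit message notes, the HiperSockets transport is only used after an explicit bind of the AF_IUCV socket; connect() on an unbound socket falls back to the z/VM IUCV path, or fails with -EBADFD when no IUCV support is present. A minimal userspace sketch of that bind/connect sequence follows. It is an illustration, not part of the patch: the address layout mirrors the kernel's struct sockaddr_iucv, AF_IUCV is assumed to be 32 as defined in linux/socket.h, and the identifiers ("HSDEVID1", "MYAPP", ...) are placeholders that depend on the local HiperSockets configuration:

#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_IUCV
#define AF_IUCV 32			/* from linux/socket.h */
#endif

/* mirrors the kernel's struct sockaddr_iucv; userspace programs
 * typically carry their own copy of this definition */
struct sockaddr_iucv {
	sa_family_t    siucv_family;
	unsigned short siucv_port;	/* reserved */
	unsigned int   siucv_addr;	/* reserved */
	char           siucv_nodeid[8];	/* reserved */
	char           siucv_user_id[8];/* device/guest identifier */
	char           siucv_name[8];	/* application name */
};

/* IUCV-style fields are 8 characters, blank padded, no NUL terminator */
static void set_field(char *dst, const char *src)
{
	size_t n = strlen(src);

	memset(dst, ' ', 8);
	memcpy(dst, src, n > 8 ? 8 : n);
}

int main(void)
{
	struct sockaddr_iucv local = { .siucv_family = AF_IUCV };
	struct sockaddr_iucv peer  = { .siucv_family = AF_IUCV };
	int fd = socket(AF_IUCV, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;
	/* placeholder names; real values depend on the configuration */
	set_field(local.siucv_user_id, "HSDEVID1");	/* HiperSockets device */
	set_field(local.siucv_name, "MYAPP");
	set_field(peer.siucv_user_id, "PEERID01");
	set_field(peer.siucv_name, "SRVAPP");

	/* the explicit bind selects the HiperSockets device for this socket */
	if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0)
		goto out;
	if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0)
		goto out;
	/* ...send()/recv() as with any SOCK_STREAM socket... */
out:
	close(fd);
	return 0;
}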