aboutsummaryrefslogtreecommitdiffstats
path: root/net/iucv
diff options
context:
space:
mode:
Diffstat (limited to 'net/iucv')
-rw-r--r--net/iucv/Kconfig14
-rw-r--r--net/iucv/af_iucv.c870
-rw-r--r--net/iucv/iucv.c23
3 files changed, 784 insertions, 123 deletions
diff --git a/net/iucv/Kconfig b/net/iucv/Kconfig
index 16ce9cd4f39e..497fbe732def 100644
--- a/net/iucv/Kconfig
+++ b/net/iucv/Kconfig
@@ -1,15 +1,17 @@
1config IUCV 1config IUCV
2 tristate "IUCV support (S390 - z/VM only)"
3 depends on S390 2 depends on S390
3 def_tristate y if S390
4 prompt "IUCV support (S390 - z/VM only)"
4 help 5 help
5 Select this option if you want to use inter-user communication 6 Select this option if you want to use inter-user communication
6 under VM or VIF. If you run on z/VM, say "Y" to enable a fast 7 under VM or VIF. If you run on z/VM, say "Y" to enable a fast
7 communication link between VM guests. 8 communication link between VM guests.
8 9
9config AFIUCV 10config AFIUCV
10 tristate "AF_IUCV support (S390 - z/VM only)" 11 depends on S390
11 depends on IUCV 12 def_tristate m if QETH_L3 || IUCV
13 prompt "AF_IUCV Socket support (S390 - z/VM and HiperSockets transport)"
12 help 14 help
13 Select this option if you want to use inter-user communication under 15 Select this option if you want to use AF_IUCV socket applications
14 VM or VIF sockets. If you run on z/VM, say "Y" to enable a fast 16 based on z/VM inter-user communication vehicle or based on
15 communication link between VM guests. 17 HiperSockets.
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index e2013e434d03..274d150320c0 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -27,10 +27,9 @@
27#include <asm/cpcmd.h> 27#include <asm/cpcmd.h>
28#include <linux/kmod.h> 28#include <linux/kmod.h>
29 29
30#include <net/iucv/iucv.h>
31#include <net/iucv/af_iucv.h> 30#include <net/iucv/af_iucv.h>
32 31
33#define VERSION "1.1" 32#define VERSION "1.2"
34 33
35static char iucv_userid[80]; 34static char iucv_userid[80];
36 35
@@ -42,6 +41,8 @@ static struct proto iucv_proto = {
42 .obj_size = sizeof(struct iucv_sock), 41 .obj_size = sizeof(struct iucv_sock),
43}; 42};
44 43
44static struct iucv_interface *pr_iucv;
45
45/* special AF_IUCV IPRM messages */ 46/* special AF_IUCV IPRM messages */
46static const u8 iprm_shutdown[8] = 47static const u8 iprm_shutdown[8] =
47 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}; 48 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
@@ -90,6 +91,12 @@ do { \
90static void iucv_sock_kill(struct sock *sk); 91static void iucv_sock_kill(struct sock *sk);
91static void iucv_sock_close(struct sock *sk); 92static void iucv_sock_close(struct sock *sk);
92 93
94static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
95 struct packet_type *pt, struct net_device *orig_dev);
96static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
97 struct sk_buff *skb, u8 flags);
98static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
99
93/* Call Back functions */ 100/* Call Back functions */
94static void iucv_callback_rx(struct iucv_path *, struct iucv_message *); 101static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
95static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *); 102static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
@@ -165,7 +172,7 @@ static int afiucv_pm_freeze(struct device *dev)
165 case IUCV_CLOSING: 172 case IUCV_CLOSING:
166 case IUCV_CONNECTED: 173 case IUCV_CONNECTED:
167 if (iucv->path) { 174 if (iucv->path) {
168 err = iucv_path_sever(iucv->path, NULL); 175 err = pr_iucv->path_sever(iucv->path, NULL);
169 iucv_path_free(iucv->path); 176 iucv_path_free(iucv->path);
170 iucv->path = NULL; 177 iucv->path = NULL;
171 } 178 }
@@ -229,7 +236,7 @@ static const struct dev_pm_ops afiucv_pm_ops = {
229static struct device_driver af_iucv_driver = { 236static struct device_driver af_iucv_driver = {
230 .owner = THIS_MODULE, 237 .owner = THIS_MODULE,
231 .name = "afiucv", 238 .name = "afiucv",
232 .bus = &iucv_bus, 239 .bus = NULL,
233 .pm = &afiucv_pm_ops, 240 .pm = &afiucv_pm_ops,
234}; 241};
235 242
@@ -294,7 +301,11 @@ static inline int iucv_below_msglim(struct sock *sk)
294 301
295 if (sk->sk_state != IUCV_CONNECTED) 302 if (sk->sk_state != IUCV_CONNECTED)
296 return 1; 303 return 1;
297 return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim); 304 if (iucv->transport == AF_IUCV_TRANS_IUCV)
305 return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
306 else
307 return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
308 (atomic_read(&iucv->pendings) <= 0));
298} 309}
299 310
300/** 311/**
@@ -312,6 +323,79 @@ static void iucv_sock_wake_msglim(struct sock *sk)
312 rcu_read_unlock(); 323 rcu_read_unlock();
313} 324}
314 325
326/**
327 * afiucv_hs_send() - send a message through HiperSockets transport
328 */
329static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
330 struct sk_buff *skb, u8 flags)
331{
332 struct net *net = sock_net(sock);
333 struct iucv_sock *iucv = iucv_sk(sock);
334 struct af_iucv_trans_hdr *phs_hdr;
335 struct sk_buff *nskb;
336 int err, confirm_recv = 0;
337
338 memset(skb->head, 0, ETH_HLEN);
339 phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
340 sizeof(struct af_iucv_trans_hdr));
341 skb_reset_mac_header(skb);
342 skb_reset_network_header(skb);
343 skb_push(skb, ETH_HLEN);
344 skb_reset_mac_header(skb);
345 memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));
346
347 phs_hdr->magic = ETH_P_AF_IUCV;
348 phs_hdr->version = 1;
349 phs_hdr->flags = flags;
350 if (flags == AF_IUCV_FLAG_SYN)
351 phs_hdr->window = iucv->msglimit;
352 else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
353 confirm_recv = atomic_read(&iucv->msg_recv);
354 phs_hdr->window = confirm_recv;
355 if (confirm_recv)
356 phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
357 }
358 memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
359 memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
360 memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
361 memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
362 ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
363 ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
364 ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
365 ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
366 if (imsg)
367 memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
368
369 rcu_read_lock();
370 skb->dev = dev_get_by_index_rcu(net, sock->sk_bound_dev_if);
371 rcu_read_unlock();
372 if (!skb->dev)
373 return -ENODEV;
374 if (!(skb->dev->flags & IFF_UP))
375 return -ENETDOWN;
376 if (skb->len > skb->dev->mtu) {
377 if (sock->sk_type == SOCK_SEQPACKET)
378 return -EMSGSIZE;
379 else
380 skb_trim(skb, skb->dev->mtu);
381 }
382 skb->protocol = ETH_P_AF_IUCV;
383 skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
384 nskb = skb_clone(skb, GFP_ATOMIC);
385 if (!nskb)
386 return -ENOMEM;
387 skb_queue_tail(&iucv->send_skb_q, nskb);
388 err = dev_queue_xmit(skb);
389 if (err) {
390 skb_unlink(nskb, &iucv->send_skb_q);
391 kfree_skb(nskb);
392 } else {
393 atomic_sub(confirm_recv, &iucv->msg_recv);
394 WARN_ON(atomic_read(&iucv->msg_recv) < 0);
395 }
396 return err;
397}
398
315/* Timers */ 399/* Timers */
316static void iucv_sock_timeout(unsigned long arg) 400static void iucv_sock_timeout(unsigned long arg)
317{ 401{
@@ -380,6 +464,8 @@ static void iucv_sock_close(struct sock *sk)
380 unsigned char user_data[16]; 464 unsigned char user_data[16];
381 struct iucv_sock *iucv = iucv_sk(sk); 465 struct iucv_sock *iucv = iucv_sk(sk);
382 unsigned long timeo; 466 unsigned long timeo;
467 int err, blen;
468 struct sk_buff *skb;
383 469
384 iucv_sock_clear_timer(sk); 470 iucv_sock_clear_timer(sk);
385 lock_sock(sk); 471 lock_sock(sk);
@@ -390,6 +476,20 @@ static void iucv_sock_close(struct sock *sk)
390 break; 476 break;
391 477
392 case IUCV_CONNECTED: 478 case IUCV_CONNECTED:
479 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
480 /* send fin */
481 blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
482 skb = sock_alloc_send_skb(sk, blen, 1, &err);
483 if (skb) {
484 skb_reserve(skb,
485 sizeof(struct af_iucv_trans_hdr) +
486 ETH_HLEN);
487 err = afiucv_hs_send(NULL, sk, skb,
488 AF_IUCV_FLAG_FIN);
489 }
490 sk->sk_state = IUCV_DISCONN;
491 sk->sk_state_change(sk);
492 }
393 case IUCV_DISCONN: 493 case IUCV_DISCONN:
394 sk->sk_state = IUCV_CLOSING; 494 sk->sk_state = IUCV_CLOSING;
395 sk->sk_state_change(sk); 495 sk->sk_state_change(sk);
@@ -412,7 +512,7 @@ static void iucv_sock_close(struct sock *sk)
412 low_nmcpy(user_data, iucv->src_name); 512 low_nmcpy(user_data, iucv->src_name);
413 high_nmcpy(user_data, iucv->dst_name); 513 high_nmcpy(user_data, iucv->dst_name);
414 ASCEBC(user_data, sizeof(user_data)); 514 ASCEBC(user_data, sizeof(user_data));
415 iucv_path_sever(iucv->path, user_data); 515 pr_iucv->path_sever(iucv->path, user_data);
416 iucv_path_free(iucv->path); 516 iucv_path_free(iucv->path);
417 iucv->path = NULL; 517 iucv->path = NULL;
418 } 518 }
@@ -444,23 +544,33 @@ static void iucv_sock_init(struct sock *sk, struct sock *parent)
444static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio) 544static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
445{ 545{
446 struct sock *sk; 546 struct sock *sk;
547 struct iucv_sock *iucv;
447 548
448 sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto); 549 sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
449 if (!sk) 550 if (!sk)
450 return NULL; 551 return NULL;
552 iucv = iucv_sk(sk);
451 553
452 sock_init_data(sock, sk); 554 sock_init_data(sock, sk);
453 INIT_LIST_HEAD(&iucv_sk(sk)->accept_q); 555 INIT_LIST_HEAD(&iucv->accept_q);
454 spin_lock_init(&iucv_sk(sk)->accept_q_lock); 556 spin_lock_init(&iucv->accept_q_lock);
455 skb_queue_head_init(&iucv_sk(sk)->send_skb_q); 557 skb_queue_head_init(&iucv->send_skb_q);
456 INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list); 558 INIT_LIST_HEAD(&iucv->message_q.list);
457 spin_lock_init(&iucv_sk(sk)->message_q.lock); 559 spin_lock_init(&iucv->message_q.lock);
458 skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q); 560 skb_queue_head_init(&iucv->backlog_skb_q);
459 iucv_sk(sk)->send_tag = 0; 561 iucv->send_tag = 0;
460 iucv_sk(sk)->flags = 0; 562 atomic_set(&iucv->pendings, 0);
461 iucv_sk(sk)->msglimit = IUCV_QUEUELEN_DEFAULT; 563 iucv->flags = 0;
462 iucv_sk(sk)->path = NULL; 564 iucv->msglimit = 0;
463 memset(&iucv_sk(sk)->src_user_id , 0, 32); 565 atomic_set(&iucv->msg_sent, 0);
566 atomic_set(&iucv->msg_recv, 0);
567 iucv->path = NULL;
568 iucv->sk_txnotify = afiucv_hs_callback_txnotify;
569 memset(&iucv->src_user_id , 0, 32);
570 if (pr_iucv)
571 iucv->transport = AF_IUCV_TRANS_IUCV;
572 else
573 iucv->transport = AF_IUCV_TRANS_HIPER;
464 574
465 sk->sk_destruct = iucv_sock_destruct; 575 sk->sk_destruct = iucv_sock_destruct;
466 sk->sk_sndtimeo = IUCV_CONN_TIMEOUT; 576 sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
@@ -591,7 +701,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
591 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; 701 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
592 struct sock *sk = sock->sk; 702 struct sock *sk = sock->sk;
593 struct iucv_sock *iucv; 703 struct iucv_sock *iucv;
594 int err; 704 int err = 0;
705 struct net_device *dev;
706 char uid[9];
595 707
596 /* Verify the input sockaddr */ 708 /* Verify the input sockaddr */
597 if (!addr || addr->sa_family != AF_IUCV) 709 if (!addr || addr->sa_family != AF_IUCV)
@@ -610,19 +722,46 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
610 err = -EADDRINUSE; 722 err = -EADDRINUSE;
611 goto done_unlock; 723 goto done_unlock;
612 } 724 }
613 if (iucv->path) { 725 if (iucv->path)
614 err = 0;
615 goto done_unlock; 726 goto done_unlock;
616 }
617 727
618 /* Bind the socket */ 728 /* Bind the socket */
619 memcpy(iucv->src_name, sa->siucv_name, 8);
620 729
621 /* Copy the user id */ 730 if (pr_iucv)
622 memcpy(iucv->src_user_id, iucv_userid, 8); 731 if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
623 sk->sk_state = IUCV_BOUND; 732 goto vm_bind; /* VM IUCV transport */
624 err = 0;
625 733
734 /* try hiper transport */
735 memcpy(uid, sa->siucv_user_id, sizeof(uid));
736 ASCEBC(uid, 8);
737 rcu_read_lock();
738 for_each_netdev_rcu(&init_net, dev) {
739 if (!memcmp(dev->perm_addr, uid, 8)) {
740 memcpy(iucv->src_name, sa->siucv_name, 8);
741 memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
742 sock->sk->sk_bound_dev_if = dev->ifindex;
743 sk->sk_state = IUCV_BOUND;
744 iucv->transport = AF_IUCV_TRANS_HIPER;
745 if (!iucv->msglimit)
746 iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
747 rcu_read_unlock();
748 goto done_unlock;
749 }
750 }
751 rcu_read_unlock();
752vm_bind:
753 if (pr_iucv) {
754 /* use local userid for backward compat */
755 memcpy(iucv->src_name, sa->siucv_name, 8);
756 memcpy(iucv->src_user_id, iucv_userid, 8);
757 sk->sk_state = IUCV_BOUND;
758 iucv->transport = AF_IUCV_TRANS_IUCV;
759 if (!iucv->msglimit)
760 iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
761 goto done_unlock;
762 }
763 /* found no dev to bind */
764 err = -ENODEV;
626done_unlock: 765done_unlock:
627 /* Release the socket list lock */ 766 /* Release the socket list lock */
628 write_unlock_bh(&iucv_sk_list.lock); 767 write_unlock_bh(&iucv_sk_list.lock);
@@ -658,45 +797,44 @@ static int iucv_sock_autobind(struct sock *sk)
658 797
659 memcpy(&iucv->src_name, name, 8); 798 memcpy(&iucv->src_name, name, 8);
660 799
800 if (!iucv->msglimit)
801 iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
802
661 return err; 803 return err;
662} 804}
663 805
664/* Connect an unconnected socket */ 806static int afiucv_hs_connect(struct socket *sock)
665static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
666 int alen, int flags)
667{ 807{
668 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
669 struct sock *sk = sock->sk; 808 struct sock *sk = sock->sk;
670 struct iucv_sock *iucv; 809 struct sk_buff *skb;
671 unsigned char user_data[16]; 810 int blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
672 int err; 811 int err = 0;
673
674 if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
675 return -EINVAL;
676
677 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
678 return -EBADFD;
679
680 if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
681 return -EINVAL;
682 812
683 if (sk->sk_state == IUCV_OPEN) { 813 /* send syn */
684 err = iucv_sock_autobind(sk); 814 skb = sock_alloc_send_skb(sk, blen, 1, &err);
685 if (unlikely(err)) 815 if (!skb) {
686 return err; 816 err = -ENOMEM;
817 goto done;
687 } 818 }
819 skb->dev = NULL;
820 skb_reserve(skb, blen);
821 err = afiucv_hs_send(NULL, sk, skb, AF_IUCV_FLAG_SYN);
822done:
823 return err;
824}
688 825
689 lock_sock(sk); 826static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
690 827{
691 /* Set the destination information */ 828 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
692 memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8); 829 struct sock *sk = sock->sk;
693 memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8); 830 struct iucv_sock *iucv = iucv_sk(sk);
831 unsigned char user_data[16];
832 int err;
694 833
695 high_nmcpy(user_data, sa->siucv_name); 834 high_nmcpy(user_data, sa->siucv_name);
696 low_nmcpy(user_data, iucv_sk(sk)->src_name); 835 low_nmcpy(user_data, iucv->src_name);
697 ASCEBC(user_data, sizeof(user_data)); 836 ASCEBC(user_data, sizeof(user_data));
698 837
699 iucv = iucv_sk(sk);
700 /* Create path. */ 838 /* Create path. */
701 iucv->path = iucv_path_alloc(iucv->msglimit, 839 iucv->path = iucv_path_alloc(iucv->msglimit,
702 IUCV_IPRMDATA, GFP_KERNEL); 840 IUCV_IPRMDATA, GFP_KERNEL);
@@ -704,8 +842,9 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
704 err = -ENOMEM; 842 err = -ENOMEM;
705 goto done; 843 goto done;
706 } 844 }
707 err = iucv_path_connect(iucv->path, &af_iucv_handler, 845 err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
708 sa->siucv_user_id, NULL, user_data, sk); 846 sa->siucv_user_id, NULL, user_data,
847 sk);
709 if (err) { 848 if (err) {
710 iucv_path_free(iucv->path); 849 iucv_path_free(iucv->path);
711 iucv->path = NULL; 850 iucv->path = NULL;
@@ -724,21 +863,62 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
724 err = -ECONNREFUSED; 863 err = -ECONNREFUSED;
725 break; 864 break;
726 } 865 }
727 goto done;
728 } 866 }
867done:
868 return err;
869}
729 870
730 if (sk->sk_state != IUCV_CONNECTED) { 871/* Connect an unconnected socket */
872static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
873 int alen, int flags)
874{
875 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
876 struct sock *sk = sock->sk;
877 struct iucv_sock *iucv = iucv_sk(sk);
878 int err;
879
880 if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
881 return -EINVAL;
882
883 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
884 return -EBADFD;
885
886 if (sk->sk_state == IUCV_OPEN &&
887 iucv->transport == AF_IUCV_TRANS_HIPER)
888 return -EBADFD; /* explicit bind required */
889
890 if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
891 return -EINVAL;
892
893 if (sk->sk_state == IUCV_OPEN) {
894 err = iucv_sock_autobind(sk);
895 if (unlikely(err))
896 return err;
897 }
898
899 lock_sock(sk);
900
901 /* Set the destination information */
902 memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
903 memcpy(iucv->dst_name, sa->siucv_name, 8);
904
905 if (iucv->transport == AF_IUCV_TRANS_HIPER)
906 err = afiucv_hs_connect(sock);
907 else
908 err = afiucv_path_connect(sock, addr);
909 if (err)
910 goto done;
911
912 if (sk->sk_state != IUCV_CONNECTED)
731 err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED, 913 err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
732 IUCV_DISCONN), 914 IUCV_DISCONN),
733 sock_sndtimeo(sk, flags & O_NONBLOCK)); 915 sock_sndtimeo(sk, flags & O_NONBLOCK));
734 }
735 916
736 if (sk->sk_state == IUCV_DISCONN) { 917 if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
737 err = -ECONNREFUSED; 918 err = -ECONNREFUSED;
738 }
739 919
740 if (err) { 920 if (err && iucv->transport == AF_IUCV_TRANS_IUCV) {
741 iucv_path_sever(iucv->path, NULL); 921 pr_iucv->path_sever(iucv->path, NULL);
742 iucv_path_free(iucv->path); 922 iucv_path_free(iucv->path);
743 iucv->path = NULL; 923 iucv->path = NULL;
744 } 924 }
@@ -833,20 +1013,21 @@ static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
833{ 1013{
834 struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr; 1014 struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
835 struct sock *sk = sock->sk; 1015 struct sock *sk = sock->sk;
1016 struct iucv_sock *iucv = iucv_sk(sk);
836 1017
837 addr->sa_family = AF_IUCV; 1018 addr->sa_family = AF_IUCV;
838 *len = sizeof(struct sockaddr_iucv); 1019 *len = sizeof(struct sockaddr_iucv);
839 1020
840 if (peer) { 1021 if (peer) {
841 memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8); 1022 memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
842 memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8); 1023 memcpy(siucv->siucv_name, iucv->dst_name, 8);
843 } else { 1024 } else {
844 memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8); 1025 memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
845 memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8); 1026 memcpy(siucv->siucv_name, iucv->src_name, 8);
846 } 1027 }
847 memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port)); 1028 memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
848 memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr)); 1029 memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
849 memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid)); 1030 memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
850 1031
851 return 0; 1032 return 0;
852} 1033}
@@ -871,7 +1052,7 @@ static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
871 1052
872 memcpy(prmdata, (void *) skb->data, skb->len); 1053 memcpy(prmdata, (void *) skb->data, skb->len);
873 prmdata[7] = 0xff - (u8) skb->len; 1054 prmdata[7] = 0xff - (u8) skb->len;
874 return iucv_message_send(path, msg, IUCV_IPRMDATA, 0, 1055 return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
875 (void *) prmdata, 8); 1056 (void *) prmdata, 8);
876} 1057}
877 1058
@@ -960,9 +1141,16 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
960 * this is fine for SOCK_SEQPACKET (unless we want to support 1141 * this is fine for SOCK_SEQPACKET (unless we want to support
961 * segmented records using the MSG_EOR flag), but 1142 * segmented records using the MSG_EOR flag), but
962 * for SOCK_STREAM we might want to improve it in future */ 1143 * for SOCK_STREAM we might want to improve it in future */
963 skb = sock_alloc_send_skb(sk, len, noblock, &err); 1144 if (iucv->transport == AF_IUCV_TRANS_HIPER)
1145 skb = sock_alloc_send_skb(sk,
1146 len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
1147 noblock, &err);
1148 else
1149 skb = sock_alloc_send_skb(sk, len, noblock, &err);
964 if (!skb) 1150 if (!skb)
965 goto out; 1151 goto out;
1152 if (iucv->transport == AF_IUCV_TRANS_HIPER)
1153 skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
966 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { 1154 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
967 err = -EFAULT; 1155 err = -EFAULT;
968 goto fail; 1156 goto fail;
@@ -983,6 +1171,15 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
983 /* increment and save iucv message tag for msg_completion cbk */ 1171 /* increment and save iucv message tag for msg_completion cbk */
984 txmsg.tag = iucv->send_tag++; 1172 txmsg.tag = iucv->send_tag++;
985 memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN); 1173 memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
1174 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1175 atomic_inc(&iucv->msg_sent);
1176 err = afiucv_hs_send(&txmsg, sk, skb, 0);
1177 if (err) {
1178 atomic_dec(&iucv->msg_sent);
1179 goto fail;
1180 }
1181 goto release;
1182 }
986 skb_queue_tail(&iucv->send_skb_q, skb); 1183 skb_queue_tail(&iucv->send_skb_q, skb);
987 1184
988 if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) 1185 if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
@@ -999,13 +1196,13 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
999 /* this error should never happen since the 1196 /* this error should never happen since the
1000 * IUCV_IPRMDATA path flag is set... sever path */ 1197 * IUCV_IPRMDATA path flag is set... sever path */
1001 if (err == 0x15) { 1198 if (err == 0x15) {
1002 iucv_path_sever(iucv->path, NULL); 1199 pr_iucv->path_sever(iucv->path, NULL);
1003 skb_unlink(skb, &iucv->send_skb_q); 1200 skb_unlink(skb, &iucv->send_skb_q);
1004 err = -EPIPE; 1201 err = -EPIPE;
1005 goto fail; 1202 goto fail;
1006 } 1203 }
1007 } else 1204 } else
1008 err = iucv_message_send(iucv->path, &txmsg, 0, 0, 1205 err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
1009 (void *) skb->data, skb->len); 1206 (void *) skb->data, skb->len);
1010 if (err) { 1207 if (err) {
1011 if (err == 3) { 1208 if (err == 3) {
@@ -1023,6 +1220,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
1023 goto fail; 1220 goto fail;
1024 } 1221 }
1025 1222
1223release:
1026 release_sock(sk); 1224 release_sock(sk);
1027 return len; 1225 return len;
1028 1226
@@ -1095,8 +1293,9 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
1095 skb->len = 0; 1293 skb->len = 0;
1096 } 1294 }
1097 } else { 1295 } else {
1098 rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA, 1296 rc = pr_iucv->message_receive(path, msg,
1099 skb->data, len, NULL); 1297 msg->flags & IUCV_IPRMDATA,
1298 skb->data, len, NULL);
1100 if (rc) { 1299 if (rc) {
1101 kfree_skb(skb); 1300 kfree_skb(skb);
1102 return; 1301 return;
@@ -1110,7 +1309,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
1110 kfree_skb(skb); 1309 kfree_skb(skb);
1111 skb = NULL; 1310 skb = NULL;
1112 if (rc) { 1311 if (rc) {
1113 iucv_path_sever(path, NULL); 1312 pr_iucv->path_sever(path, NULL);
1114 return; 1313 return;
1115 } 1314 }
1116 skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q); 1315 skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
@@ -1154,7 +1353,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1154 struct sock *sk = sock->sk; 1353 struct sock *sk = sock->sk;
1155 struct iucv_sock *iucv = iucv_sk(sk); 1354 struct iucv_sock *iucv = iucv_sk(sk);
1156 unsigned int copied, rlen; 1355 unsigned int copied, rlen;
1157 struct sk_buff *skb, *rskb, *cskb; 1356 struct sk_buff *skb, *rskb, *cskb, *sskb;
1357 int blen;
1158 int err = 0; 1358 int err = 0;
1159 1359
1160 if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) && 1360 if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
@@ -1179,7 +1379,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1179 copied = min_t(unsigned int, rlen, len); 1379 copied = min_t(unsigned int, rlen, len);
1180 1380
1181 cskb = skb; 1381 cskb = skb;
1182 if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) { 1382 if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
1183 if (!(flags & MSG_PEEK)) 1383 if (!(flags & MSG_PEEK))
1184 skb_queue_head(&sk->sk_receive_queue, skb); 1384 skb_queue_head(&sk->sk_receive_queue, skb);
1185 return -EFAULT; 1385 return -EFAULT;
@@ -1217,6 +1417,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1217 } 1417 }
1218 1418
1219 kfree_skb(skb); 1419 kfree_skb(skb);
1420 atomic_inc(&iucv->msg_recv);
1220 1421
1221 /* Queue backlog skbs */ 1422 /* Queue backlog skbs */
1222 spin_lock_bh(&iucv->message_q.lock); 1423 spin_lock_bh(&iucv->message_q.lock);
@@ -1233,6 +1434,24 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1233 if (skb_queue_empty(&iucv->backlog_skb_q)) { 1434 if (skb_queue_empty(&iucv->backlog_skb_q)) {
1234 if (!list_empty(&iucv->message_q.list)) 1435 if (!list_empty(&iucv->message_q.list))
1235 iucv_process_message_q(sk); 1436 iucv_process_message_q(sk);
1437 if (atomic_read(&iucv->msg_recv) >=
1438 iucv->msglimit / 2) {
1439 /* send WIN to peer */
1440 blen = sizeof(struct af_iucv_trans_hdr) +
1441 ETH_HLEN;
1442 sskb = sock_alloc_send_skb(sk, blen, 1, &err);
1443 if (sskb) {
1444 skb_reserve(sskb,
1445 sizeof(struct af_iucv_trans_hdr)
1446 + ETH_HLEN);
1447 err = afiucv_hs_send(NULL, sk, sskb,
1448 AF_IUCV_FLAG_WIN);
1449 }
1450 if (err) {
1451 sk->sk_state = IUCV_DISCONN;
1452 sk->sk_state_change(sk);
1453 }
1454 }
1236 } 1455 }
1237 spin_unlock_bh(&iucv->message_q.lock); 1456 spin_unlock_bh(&iucv->message_q.lock);
1238 } 1457 }
@@ -1327,8 +1546,8 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
1327 if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) { 1546 if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
1328 txmsg.class = 0; 1547 txmsg.class = 0;
1329 txmsg.tag = 0; 1548 txmsg.tag = 0;
1330 err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0, 1549 err = pr_iucv->message_send(iucv->path, &txmsg, IUCV_IPRMDATA,
1331 (void *) iprm_shutdown, 8); 1550 0, (void *) iprm_shutdown, 8);
1332 if (err) { 1551 if (err) {
1333 switch (err) { 1552 switch (err) {
1334 case 1: 1553 case 1:
@@ -1345,7 +1564,7 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
1345 } 1564 }
1346 1565
1347 if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) { 1566 if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
1348 err = iucv_path_quiesce(iucv_sk(sk)->path, NULL); 1567 err = pr_iucv->path_quiesce(iucv->path, NULL);
1349 if (err) 1568 if (err)
1350 err = -ENOTCONN; 1569 err = -ENOTCONN;
1351 1570
@@ -1372,7 +1591,7 @@ static int iucv_sock_release(struct socket *sock)
1372 1591
1373 /* Unregister with IUCV base support */ 1592 /* Unregister with IUCV base support */
1374 if (iucv_sk(sk)->path) { 1593 if (iucv_sk(sk)->path) {
1375 iucv_path_sever(iucv_sk(sk)->path, NULL); 1594 pr_iucv->path_sever(iucv_sk(sk)->path, NULL);
1376 iucv_path_free(iucv_sk(sk)->path); 1595 iucv_path_free(iucv_sk(sk)->path);
1377 iucv_sk(sk)->path = NULL; 1596 iucv_sk(sk)->path = NULL;
1378 } 1597 }
@@ -1514,14 +1733,14 @@ static int iucv_callback_connreq(struct iucv_path *path,
1514 high_nmcpy(user_data, iucv->dst_name); 1733 high_nmcpy(user_data, iucv->dst_name);
1515 ASCEBC(user_data, sizeof(user_data)); 1734 ASCEBC(user_data, sizeof(user_data));
1516 if (sk->sk_state != IUCV_LISTEN) { 1735 if (sk->sk_state != IUCV_LISTEN) {
1517 err = iucv_path_sever(path, user_data); 1736 err = pr_iucv->path_sever(path, user_data);
1518 iucv_path_free(path); 1737 iucv_path_free(path);
1519 goto fail; 1738 goto fail;
1520 } 1739 }
1521 1740
1522 /* Check for backlog size */ 1741 /* Check for backlog size */
1523 if (sk_acceptq_is_full(sk)) { 1742 if (sk_acceptq_is_full(sk)) {
1524 err = iucv_path_sever(path, user_data); 1743 err = pr_iucv->path_sever(path, user_data);
1525 iucv_path_free(path); 1744 iucv_path_free(path);
1526 goto fail; 1745 goto fail;
1527 } 1746 }
@@ -1529,7 +1748,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
1529 /* Create the new socket */ 1748 /* Create the new socket */
1530 nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC); 1749 nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
1531 if (!nsk) { 1750 if (!nsk) {
1532 err = iucv_path_sever(path, user_data); 1751 err = pr_iucv->path_sever(path, user_data);
1533 iucv_path_free(path); 1752 iucv_path_free(path);
1534 goto fail; 1753 goto fail;
1535 } 1754 }
@@ -1553,9 +1772,9 @@ static int iucv_callback_connreq(struct iucv_path *path,
1553 /* set message limit for path based on msglimit of accepting socket */ 1772 /* set message limit for path based on msglimit of accepting socket */
1554 niucv->msglimit = iucv->msglimit; 1773 niucv->msglimit = iucv->msglimit;
1555 path->msglim = iucv->msglimit; 1774 path->msglim = iucv->msglimit;
1556 err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk); 1775 err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
1557 if (err) { 1776 if (err) {
1558 err = iucv_path_sever(path, user_data); 1777 err = pr_iucv->path_sever(path, user_data);
1559 iucv_path_free(path); 1778 iucv_path_free(path);
1560 iucv_sock_kill(nsk); 1779 iucv_sock_kill(nsk);
1561 goto fail; 1780 goto fail;
@@ -1589,7 +1808,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1589 int len; 1808 int len;
1590 1809
1591 if (sk->sk_shutdown & RCV_SHUTDOWN) { 1810 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1592 iucv_message_reject(path, msg); 1811 pr_iucv->message_reject(path, msg);
1593 return; 1812 return;
1594 } 1813 }
1595 1814
@@ -1600,7 +1819,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1600 goto save_message; 1819 goto save_message;
1601 1820
1602 len = atomic_read(&sk->sk_rmem_alloc); 1821 len = atomic_read(&sk->sk_rmem_alloc);
1603 len += iucv_msg_length(msg) + sizeof(struct sk_buff); 1822 len += SKB_TRUESIZE(iucv_msg_length(msg));
1604 if (len > sk->sk_rcvbuf) 1823 if (len > sk->sk_rcvbuf)
1605 goto save_message; 1824 goto save_message;
1606 1825
@@ -1692,6 +1911,389 @@ static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
1692 bh_unlock_sock(sk); 1911 bh_unlock_sock(sk);
1693} 1912}
1694 1913
1914/***************** HiperSockets transport callbacks ********************/
1915static void afiucv_swap_src_dest(struct sk_buff *skb)
1916{
1917 struct af_iucv_trans_hdr *trans_hdr =
1918 (struct af_iucv_trans_hdr *)skb->data;
1919 char tmpID[8];
1920 char tmpName[8];
1921
1922 ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
1923 ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
1924 ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
1925 ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
1926 memcpy(tmpID, trans_hdr->srcUserID, 8);
1927 memcpy(tmpName, trans_hdr->srcAppName, 8);
1928 memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
1929 memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
1930 memcpy(trans_hdr->destUserID, tmpID, 8);
1931 memcpy(trans_hdr->destAppName, tmpName, 8);
1932 skb_push(skb, ETH_HLEN);
1933 memset(skb->data, 0, ETH_HLEN);
1934}
1935
1936/**
1937 * afiucv_hs_callback_syn - react on received SYN
1938 **/
1939static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
1940{
1941 struct sock *nsk;
1942 struct iucv_sock *iucv, *niucv;
1943 struct af_iucv_trans_hdr *trans_hdr;
1944 int err;
1945
1946 iucv = iucv_sk(sk);
1947 trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
1948 if (!iucv) {
1949 /* no sock - connection refused */
1950 afiucv_swap_src_dest(skb);
1951 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1952 err = dev_queue_xmit(skb);
1953 goto out;
1954 }
1955
1956 nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
1957 bh_lock_sock(sk);
1958 if ((sk->sk_state != IUCV_LISTEN) ||
1959 sk_acceptq_is_full(sk) ||
1960 !nsk) {
1961 /* error on server socket - connection refused */
1962 if (nsk)
1963 sk_free(nsk);
1964 afiucv_swap_src_dest(skb);
1965 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1966 err = dev_queue_xmit(skb);
1967 bh_unlock_sock(sk);
1968 goto out;
1969 }
1970
1971 niucv = iucv_sk(nsk);
1972 iucv_sock_init(nsk, sk);
1973 niucv->transport = AF_IUCV_TRANS_HIPER;
1974 niucv->msglimit = iucv->msglimit;
1975 if (!trans_hdr->window)
1976 niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
1977 else
1978 niucv->msglimit_peer = trans_hdr->window;
1979 memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
1980 memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
1981 memcpy(niucv->src_name, iucv->src_name, 8);
1982 memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1983 nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
1984 afiucv_swap_src_dest(skb);
1985 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
1986 trans_hdr->window = niucv->msglimit;
1987 /* if receiver acks the xmit connection is established */
1988 err = dev_queue_xmit(skb);
1989 if (!err) {
1990 iucv_accept_enqueue(sk, nsk);
1991 nsk->sk_state = IUCV_CONNECTED;
1992 sk->sk_data_ready(sk, 1);
1993 } else
1994 iucv_sock_kill(nsk);
1995 bh_unlock_sock(sk);
1996
1997out:
1998 return NET_RX_SUCCESS;
1999}
2000
2001/**
2002 * afiucv_hs_callback_synack() - react on received SYN-ACK
2003 **/
2004static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
2005{
2006 struct iucv_sock *iucv = iucv_sk(sk);
2007 struct af_iucv_trans_hdr *trans_hdr =
2008 (struct af_iucv_trans_hdr *)skb->data;
2009
2010 if (!iucv)
2011 goto out;
2012 if (sk->sk_state != IUCV_BOUND)
2013 goto out;
2014 bh_lock_sock(sk);
2015 iucv->msglimit_peer = trans_hdr->window;
2016 sk->sk_state = IUCV_CONNECTED;
2017 sk->sk_state_change(sk);
2018 bh_unlock_sock(sk);
2019out:
2020 kfree_skb(skb);
2021 return NET_RX_SUCCESS;
2022}
2023
2024/**
2025 * afiucv_hs_callback_synfin() - react on received SYN_FIN
2026 **/
2027static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
2028{
2029 struct iucv_sock *iucv = iucv_sk(sk);
2030
2031 if (!iucv)
2032 goto out;
2033 if (sk->sk_state != IUCV_BOUND)
2034 goto out;
2035 bh_lock_sock(sk);
2036 sk->sk_state = IUCV_DISCONN;
2037 sk->sk_state_change(sk);
2038 bh_unlock_sock(sk);
2039out:
2040 kfree_skb(skb);
2041 return NET_RX_SUCCESS;
2042}
2043
2044/**
2045 * afiucv_hs_callback_fin() - react on received FIN
2046 **/
2047static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
2048{
2049 struct iucv_sock *iucv = iucv_sk(sk);
2050
2051 /* other end of connection closed */
2052 if (iucv) {
2053 bh_lock_sock(sk);
2054 if (!list_empty(&iucv->accept_q))
2055 sk->sk_state = IUCV_SEVERED;
2056 else
2057 sk->sk_state = IUCV_DISCONN;
2058 sk->sk_state_change(sk);
2059 bh_unlock_sock(sk);
2060 }
2061 kfree_skb(skb);
2062 return NET_RX_SUCCESS;
2063}
2064
2065/**
2066 * afiucv_hs_callback_win() - react on received WIN
2067 **/
2068static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
2069{
2070 struct iucv_sock *iucv = iucv_sk(sk);
2071 struct af_iucv_trans_hdr *trans_hdr =
2072 (struct af_iucv_trans_hdr *)skb->data;
2073
2074 if (!iucv)
2075 return NET_RX_SUCCESS;
2076
2077 if (sk->sk_state != IUCV_CONNECTED)
2078 return NET_RX_SUCCESS;
2079
2080 atomic_sub(trans_hdr->window, &iucv->msg_sent);
2081 iucv_sock_wake_msglim(sk);
2082 return NET_RX_SUCCESS;
2083}
2084
2085/**
2086 * afiucv_hs_callback_rx() - react on received data
2087 **/
2088static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
2089{
2090 struct iucv_sock *iucv = iucv_sk(sk);
2091
2092 if (!iucv) {
2093 kfree_skb(skb);
2094 return NET_RX_SUCCESS;
2095 }
2096
2097 if (sk->sk_state != IUCV_CONNECTED) {
2098 kfree_skb(skb);
2099 return NET_RX_SUCCESS;
2100 }
2101
2102 /* write stuff from iucv_msg to skb cb */
2103 if (skb->len <= sizeof(struct af_iucv_trans_hdr)) {
2104 kfree_skb(skb);
2105 return NET_RX_SUCCESS;
2106 }
2107 skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
2108 skb_reset_transport_header(skb);
2109 skb_reset_network_header(skb);
2110 spin_lock(&iucv->message_q.lock);
2111 if (skb_queue_empty(&iucv->backlog_skb_q)) {
2112 if (sock_queue_rcv_skb(sk, skb)) {
2113 /* handle rcv queue full */
2114 skb_queue_tail(&iucv->backlog_skb_q, skb);
2115 }
2116 } else
2117 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
2118 spin_unlock(&iucv->message_q.lock);
2119 return NET_RX_SUCCESS;
2120}
2121
2122/**
2123 * afiucv_hs_rcv() - base function for arriving data through HiperSockets
2124 * transport
2125 * called from netif RX softirq
2126 **/
2127static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2128 struct packet_type *pt, struct net_device *orig_dev)
2129{
2130 struct hlist_node *node;
2131 struct sock *sk;
2132 struct iucv_sock *iucv;
2133 struct af_iucv_trans_hdr *trans_hdr;
2134 char nullstring[8];
2135 int err = 0;
2136
2137 skb_pull(skb, ETH_HLEN);
2138 trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
2139 EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
2140 EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
2141 EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
2142 EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
2143 memset(nullstring, 0, sizeof(nullstring));
2144 iucv = NULL;
2145 sk = NULL;
2146 read_lock(&iucv_sk_list.lock);
2147 sk_for_each(sk, node, &iucv_sk_list.head) {
2148 if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
2149 if ((!memcmp(&iucv_sk(sk)->src_name,
2150 trans_hdr->destAppName, 8)) &&
2151 (!memcmp(&iucv_sk(sk)->src_user_id,
2152 trans_hdr->destUserID, 8)) &&
2153 (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
2154 (!memcmp(&iucv_sk(sk)->dst_user_id,
2155 nullstring, 8))) {
2156 iucv = iucv_sk(sk);
2157 break;
2158 }
2159 } else {
2160 if ((!memcmp(&iucv_sk(sk)->src_name,
2161 trans_hdr->destAppName, 8)) &&
2162 (!memcmp(&iucv_sk(sk)->src_user_id,
2163 trans_hdr->destUserID, 8)) &&
2164 (!memcmp(&iucv_sk(sk)->dst_name,
2165 trans_hdr->srcAppName, 8)) &&
2166 (!memcmp(&iucv_sk(sk)->dst_user_id,
2167 trans_hdr->srcUserID, 8))) {
2168 iucv = iucv_sk(sk);
2169 break;
2170 }
2171 }
2172 }
2173 read_unlock(&iucv_sk_list.lock);
2174 if (!iucv)
2175 sk = NULL;
2176
2177 /* no sock
2178 how should we send with no sock
2179 1) send without sock no send rc checking?
2180 2) introduce default sock to handle this cases
2181
2182 SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
2183 data -> send FIN
2184 SYN|ACK, SYN|FIN, FIN -> no action? */
2185
2186 switch (trans_hdr->flags) {
2187 case AF_IUCV_FLAG_SYN:
2188 /* connect request */
2189 err = afiucv_hs_callback_syn(sk, skb);
2190 break;
2191 case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
2192 /* connect request confirmed */
2193 err = afiucv_hs_callback_synack(sk, skb);
2194 break;
2195 case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
2196 /* connect request refused */
2197 err = afiucv_hs_callback_synfin(sk, skb);
2198 break;
2199 case (AF_IUCV_FLAG_FIN):
2200 /* close request */
2201 err = afiucv_hs_callback_fin(sk, skb);
2202 break;
2203 case (AF_IUCV_FLAG_WIN):
2204 err = afiucv_hs_callback_win(sk, skb);
2205 if (skb->len > sizeof(struct af_iucv_trans_hdr))
2206 err = afiucv_hs_callback_rx(sk, skb);
2207 else
 2208				kfree_skb(skb);
2209 break;
2210 case 0:
2211 /* plain data frame */
2212 err = afiucv_hs_callback_rx(sk, skb);
2213 break;
2214 default:
2215 ;
2216 }
2217
2218 return err;
2219}
2220
2221/**
 2222 * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
2223 * transport
2224 **/
2225static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
2226 enum iucv_tx_notify n)
2227{
2228 struct sock *isk = skb->sk;
2229 struct sock *sk = NULL;
2230 struct iucv_sock *iucv = NULL;
2231 struct sk_buff_head *list;
2232 struct sk_buff *list_skb;
2233 struct sk_buff *this = NULL;
2234 unsigned long flags;
2235 struct hlist_node *node;
2236
2237 read_lock(&iucv_sk_list.lock);
2238 sk_for_each(sk, node, &iucv_sk_list.head)
2239 if (sk == isk) {
2240 iucv = iucv_sk(sk);
2241 break;
2242 }
2243 read_unlock(&iucv_sk_list.lock);
2244
2245 if (!iucv)
2246 return;
2247
2248 bh_lock_sock(sk);
2249 list = &iucv->send_skb_q;
2250 list_skb = list->next;
2251 if (skb_queue_empty(list))
2252 goto out_unlock;
2253
2254 spin_lock_irqsave(&list->lock, flags);
2255 while (list_skb != (struct sk_buff *)list) {
2256 if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
2257 this = list_skb;
2258 switch (n) {
2259 case TX_NOTIFY_OK:
2260 __skb_unlink(this, list);
2261 iucv_sock_wake_msglim(sk);
2262 kfree_skb(this);
2263 break;
2264 case TX_NOTIFY_PENDING:
2265 atomic_inc(&iucv->pendings);
2266 break;
2267 case TX_NOTIFY_DELAYED_OK:
2268 __skb_unlink(this, list);
2269 atomic_dec(&iucv->pendings);
2270 if (atomic_read(&iucv->pendings) <= 0)
2271 iucv_sock_wake_msglim(sk);
2272 kfree_skb(this);
2273 break;
2274 case TX_NOTIFY_UNREACHABLE:
2275 case TX_NOTIFY_DELAYED_UNREACHABLE:
2276 case TX_NOTIFY_TPQFULL: /* not yet used */
2277 case TX_NOTIFY_GENERALERROR:
2278 case TX_NOTIFY_DELAYED_GENERALERROR:
2279 __skb_unlink(this, list);
2280 kfree_skb(this);
2281 if (!list_empty(&iucv->accept_q))
2282 sk->sk_state = IUCV_SEVERED;
2283 else
2284 sk->sk_state = IUCV_DISCONN;
2285 sk->sk_state_change(sk);
2286 break;
2287 }
2288 break;
2289 }
2290 list_skb = list_skb->next;
2291 }
2292 spin_unlock_irqrestore(&list->lock, flags);
2293
2294out_unlock:
2295 bh_unlock_sock(sk);
2296}
1695static const struct proto_ops iucv_sock_ops = { 2297static const struct proto_ops iucv_sock_ops = {
1696 .family = PF_IUCV, 2298 .family = PF_IUCV,
1697 .owner = THIS_MODULE, 2299 .owner = THIS_MODULE,
@@ -1718,71 +2320,104 @@ static const struct net_proto_family iucv_sock_family_ops = {
1718 .create = iucv_sock_create, 2320 .create = iucv_sock_create,
1719}; 2321};
1720 2322
1721static int __init afiucv_init(void) 2323static struct packet_type iucv_packet_type = {
2324 .type = cpu_to_be16(ETH_P_AF_IUCV),
2325 .func = afiucv_hs_rcv,
2326};
2327
2328static int afiucv_iucv_init(void)
1722{ 2329{
1723 int err; 2330 int err;
1724 2331
1725 if (!MACHINE_IS_VM) { 2332 err = pr_iucv->iucv_register(&af_iucv_handler, 0);
1726 pr_err("The af_iucv module cannot be loaded"
1727 " without z/VM\n");
1728 err = -EPROTONOSUPPORT;
1729 goto out;
1730 }
1731 cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
1732 if (unlikely(err)) {
1733 WARN_ON(err);
1734 err = -EPROTONOSUPPORT;
1735 goto out;
1736 }
1737
1738 err = iucv_register(&af_iucv_handler, 0);
1739 if (err) 2333 if (err)
1740 goto out; 2334 goto out;
1741 err = proto_register(&iucv_proto, 0);
1742 if (err)
1743 goto out_iucv;
1744 err = sock_register(&iucv_sock_family_ops);
1745 if (err)
1746 goto out_proto;
1747 /* establish dummy device */ 2335 /* establish dummy device */
2336 af_iucv_driver.bus = pr_iucv->bus;
1748 err = driver_register(&af_iucv_driver); 2337 err = driver_register(&af_iucv_driver);
1749 if (err) 2338 if (err)
1750 goto out_sock; 2339 goto out_iucv;
1751 af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL); 2340 af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1752 if (!af_iucv_dev) { 2341 if (!af_iucv_dev) {
1753 err = -ENOMEM; 2342 err = -ENOMEM;
1754 goto out_driver; 2343 goto out_driver;
1755 } 2344 }
1756 dev_set_name(af_iucv_dev, "af_iucv"); 2345 dev_set_name(af_iucv_dev, "af_iucv");
1757 af_iucv_dev->bus = &iucv_bus; 2346 af_iucv_dev->bus = pr_iucv->bus;
1758 af_iucv_dev->parent = iucv_root; 2347 af_iucv_dev->parent = pr_iucv->root;
1759 af_iucv_dev->release = (void (*)(struct device *))kfree; 2348 af_iucv_dev->release = (void (*)(struct device *))kfree;
1760 af_iucv_dev->driver = &af_iucv_driver; 2349 af_iucv_dev->driver = &af_iucv_driver;
1761 err = device_register(af_iucv_dev); 2350 err = device_register(af_iucv_dev);
1762 if (err) 2351 if (err)
1763 goto out_driver; 2352 goto out_driver;
1764
1765 return 0; 2353 return 0;
1766 2354
1767out_driver: 2355out_driver:
1768 driver_unregister(&af_iucv_driver); 2356 driver_unregister(&af_iucv_driver);
2357out_iucv:
2358 pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2359out:
2360 return err;
2361}
2362
2363static int __init afiucv_init(void)
2364{
2365 int err;
2366
2367 if (MACHINE_IS_VM) {
2368 cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
2369 if (unlikely(err)) {
2370 WARN_ON(err);
2371 err = -EPROTONOSUPPORT;
2372 goto out;
2373 }
2374
2375 pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
2376 if (!pr_iucv) {
2377 printk(KERN_WARNING "iucv_if lookup failed\n");
2378 memset(&iucv_userid, 0, sizeof(iucv_userid));
2379 }
2380 } else {
2381 memset(&iucv_userid, 0, sizeof(iucv_userid));
2382 pr_iucv = NULL;
2383 }
2384
2385 err = proto_register(&iucv_proto, 0);
2386 if (err)
2387 goto out;
2388 err = sock_register(&iucv_sock_family_ops);
2389 if (err)
2390 goto out_proto;
2391
2392 if (pr_iucv) {
2393 err = afiucv_iucv_init();
2394 if (err)
2395 goto out_sock;
2396 }
2397 dev_add_pack(&iucv_packet_type);
2398 return 0;
2399
1769out_sock: 2400out_sock:
1770 sock_unregister(PF_IUCV); 2401 sock_unregister(PF_IUCV);
1771out_proto: 2402out_proto:
1772 proto_unregister(&iucv_proto); 2403 proto_unregister(&iucv_proto);
1773out_iucv:
1774 iucv_unregister(&af_iucv_handler, 0);
1775out: 2404out:
2405 if (pr_iucv)
2406 symbol_put(iucv_if);
1776 return err; 2407 return err;
1777} 2408}
1778 2409
1779static void __exit afiucv_exit(void) 2410static void __exit afiucv_exit(void)
1780{ 2411{
1781 device_unregister(af_iucv_dev); 2412 if (pr_iucv) {
1782 driver_unregister(&af_iucv_driver); 2413 device_unregister(af_iucv_dev);
2414 driver_unregister(&af_iucv_driver);
2415 pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2416 symbol_put(iucv_if);
2417 }
2418 dev_remove_pack(&iucv_packet_type);
1783 sock_unregister(PF_IUCV); 2419 sock_unregister(PF_IUCV);
1784 proto_unregister(&iucv_proto); 2420 proto_unregister(&iucv_proto);
1785 iucv_unregister(&af_iucv_handler, 0);
1786} 2421}
1787 2422
1788module_init(afiucv_init); 2423module_init(afiucv_init);
@@ -1793,3 +2428,4 @@ MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
1793MODULE_VERSION(VERSION); 2428MODULE_VERSION(VERSION);
1794MODULE_LICENSE("GPL"); 2429MODULE_LICENSE("GPL");
1795MODULE_ALIAS_NETPROTO(PF_IUCV); 2430MODULE_ALIAS_NETPROTO(PF_IUCV);
2431
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 075a3808aa40..403be43b793d 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1974,6 +1974,27 @@ out:
1974 return rc; 1974 return rc;
1975} 1975}
1976 1976
1977struct iucv_interface iucv_if = {
1978 .message_receive = iucv_message_receive,
1979 .__message_receive = __iucv_message_receive,
1980 .message_reply = iucv_message_reply,
1981 .message_reject = iucv_message_reject,
1982 .message_send = iucv_message_send,
1983 .__message_send = __iucv_message_send,
1984 .message_send2way = iucv_message_send2way,
1985 .message_purge = iucv_message_purge,
1986 .path_accept = iucv_path_accept,
1987 .path_connect = iucv_path_connect,
1988 .path_quiesce = iucv_path_quiesce,
1989 .path_resume = iucv_path_resume,
1990 .path_sever = iucv_path_sever,
1991 .iucv_register = iucv_register,
1992 .iucv_unregister = iucv_unregister,
1993 .bus = NULL,
1994 .root = NULL,
1995};
1996EXPORT_SYMBOL(iucv_if);
1997
1977/** 1998/**
1978 * iucv_init 1999 * iucv_init
1979 * 2000 *
@@ -2038,6 +2059,8 @@ static int __init iucv_init(void)
2038 rc = bus_register(&iucv_bus); 2059 rc = bus_register(&iucv_bus);
2039 if (rc) 2060 if (rc)
2040 goto out_reboot; 2061 goto out_reboot;
2062 iucv_if.root = iucv_root;
2063 iucv_if.bus = &iucv_bus;
2041 return 0; 2064 return 0;
2042 2065
2043out_reboot: 2066out_reboot: