Diffstat (limited to 'net/iucv')
 -rw-r--r--  net/iucv/af_iucv.c | 159
 1 file changed, 130 insertions(+), 29 deletions(-)

diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index e84c924a81ee..026704a47296 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -147,6 +147,7 @@ static void iucv_sock_close(struct sock *sk)
         unsigned char user_data[16];
         struct iucv_sock *iucv = iucv_sk(sk);
         int err;
+        unsigned long timeo;

         iucv_sock_clear_timer(sk);
         lock_sock(sk);
@@ -159,6 +160,21 @@ static void iucv_sock_close(struct sock *sk)
         case IUCV_CONNECTED:
         case IUCV_DISCONN:
                 err = 0;
+
+                sk->sk_state = IUCV_CLOSING;
+                sk->sk_state_change(sk);
+
+                if(!skb_queue_empty(&iucv->send_skb_q)) {
+                        if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
+                                timeo = sk->sk_lingertime;
+                        else
+                                timeo = IUCV_DISCONN_TIMEOUT;
+                        err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
+                }
+
+                sk->sk_state = IUCV_CLOSED;
+                sk->sk_state_change(sk);
+
                 if (iucv->path) {
                         low_nmcpy(user_data, iucv->src_name);
                         high_nmcpy(user_data, iucv->dst_name);
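With this hunk, closing a connected socket first enters the new IUCV_CLOSING state and waits for send_skb_q to drain before the socket is marked IUCV_CLOSED; the wait is bounded by the SO_LINGER time when the application set one, otherwise by IUCV_DISCONN_TIMEOUT. (The old linger wait in iucv_sock_release() is removed in a later hunk.) A minimal userspace model of the timeout selection, with invented types and a placeholder timeout value:

    #include <stdbool.h>
    #include <stdio.h>

    /* Invented stand-ins for the kernel socket fields. */
    struct toy_sock {
            bool linger_on;            /* SO_LINGER enabled */
            unsigned long lingertime;  /* linger time, in jiffies */
    };

    #define IUCV_DISCONN_TIMEOUT 900   /* placeholder, not the real value */

    static unsigned long close_timeout(const struct toy_sock *sk)
    {
            /* Same decision as the hunk above: honor SO_LINGER if set,
             * else fall back to the protocol default. */
            if (sk->linger_on && sk->lingertime)
                    return sk->lingertime;
            return IUCV_DISCONN_TIMEOUT;
    }

    int main(void)
    {
            struct toy_sock sk = { .linger_on = true, .lingertime = 250 };

            printf("timeout: %lu\n", close_timeout(&sk));
            return 0;
    }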
@@ -168,12 +184,11 @@ static void iucv_sock_close(struct sock *sk)
                         iucv->path = NULL;
                 }

-                sk->sk_state = IUCV_CLOSED;
-                sk->sk_state_change(sk);
                 sk->sk_err = ECONNRESET;
                 sk->sk_state_change(sk);

                 skb_queue_purge(&iucv->send_skb_q);
+                skb_queue_purge(&iucv->backlog_skb_q);

                 sock_set_flag(sk, SOCK_ZAPPED);
                 break;
@@ -204,6 +219,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
         sock_init_data(sock, sk);
         INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
         skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
+        skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
         iucv_sk(sk)->send_tag = 0;

         sk->sk_destruct = iucv_sock_destruct;
@@ -510,7 +526,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
         long timeo;
         int err = 0;

-        lock_sock(sk);
+        lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

         if (sk->sk_state != IUCV_LISTEN) {
                 err = -EBADFD;
@@ -530,7 +546,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,

                 release_sock(sk);
                 timeo = schedule_timeout(timeo);
-                lock_sock(sk);
+                lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

                 if (sk->sk_state != IUCV_LISTEN) {
                         err = -EBADFD;
@@ -606,7 +622,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                 if(!(skb = sock_alloc_send_skb(sk, len,
                                                msg->msg_flags & MSG_DONTWAIT,
                                                &err)))
-                        return err;
+                        goto out;

                 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)){
                         err = -EFAULT;
@@ -647,10 +663,16 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 {
         int noblock = flags & MSG_DONTWAIT;
         struct sock *sk = sock->sk;
+        struct iucv_sock *iucv = iucv_sk(sk);
         int target, copied = 0;
-        struct sk_buff *skb;
+        struct sk_buff *skb, *rskb, *cskb;
         int err = 0;

+        if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
+            skb_queue_empty(&iucv->backlog_skb_q) &&
+            skb_queue_empty(&sk->sk_receive_queue))
+                return 0;
+
         if (flags & (MSG_OOB))
                 return -EOPNOTSUPP;

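A note on the early return: once the peer has disconnected (IUCV_DISCONN) or the path was severed (IUCV_SEVERED) and both the receive queue and the new backlog queue are empty, recvmsg() now returns 0, the conventional end-of-stream indication; the iucv_sock_poll() hunk further down raises POLLIN in the same states so sleeping readers wake up to see it. From an application's point of view (plain sockets API, nothing IUCV-specific assumed):

    #include <stdio.h>
    #include <sys/socket.h>

    /* Read until end-of-stream: recv() returning 0 is exactly the
     * case the new early return produces after a disconnect. */
    static void drain_socket(int fd)
    {
            char buf[4096];
            ssize_t n;

            while ((n = recv(fd, buf, sizeof(buf), 0)) > 0)
                    fwrite(buf, 1, (size_t)n, stdout);
            if (n == 0)
                    fprintf(stderr, "peer gone, all queued data consumed\n");
            else
                    perror("recv");
    }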
@@ -665,10 +687,12 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,

         copied = min_t(unsigned int, skb->len, len);

-        if (memcpy_toiovec(msg->msg_iov, skb->data, copied)) {
+        cskb = skb;
+        if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
                 skb_queue_head(&sk->sk_receive_queue, skb);
                 if (copied == 0)
                         return -EFAULT;
+                goto done;
         }

         len -= copied;
@@ -683,6 +707,18 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                 }

                 kfree_skb(skb);
+
+                /* Queue backlog skbs */
+                rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
+                while(rskb) {
+                        if (sock_queue_rcv_skb(sk, rskb)) {
+                                skb_queue_head(&iucv_sk(sk)->backlog_skb_q,
+                                                rskb);
+                                break;
+                        } else {
+                                rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
+                        }
+                }
         } else
                 skb_queue_head(&sk->sk_receive_queue, skb);

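After each successful read, the backlog is drained back into sk_receive_queue until sock_queue_rcv_skb() refuses (receive buffer full); the refused skb is re-queued at the head so message order is preserved. The same pattern in a self-contained toy, with a bounded array standing in for the receive queue:

    #include <stdio.h>

    #define RCV_CAP 4

    static int rcvq[RCV_CAP], rcv_len;

    static int try_deliver(int v)          /* 0 on success, -1 if full */
    {
            if (rcv_len == RCV_CAP)
                    return -1;
            rcvq[rcv_len++] = v;
            return 0;
    }

    int main(void)
    {
            int backlog[8] = {1, 2, 3, 4, 5, 6, 7, 8};
            int head = 0, tail = 8;

            while (head < tail) {
                    if (try_deliver(backlog[head])) {
                            /* Receiver full: leave the rest queued,
                             * in order, for the next pass. */
                            break;
                    }
                    head++;
            }
            printf("delivered %d, still backlogged %d\n", rcv_len, tail - head);
            return 0;
    }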
@@ -732,6 +768,9 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
         if (sk->sk_state == IUCV_CLOSED)
                 mask |= POLLHUP;

+        if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
+                mask |= POLLIN;
+
         if (sock_writeable(sk))
                 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
         else
@@ -817,13 +856,6 @@ static int iucv_sock_release(struct socket *sock)
                 iucv_sk(sk)->path = NULL;
         }

-        if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime){
-                lock_sock(sk);
-                err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0,
-                                           sk->sk_lingertime);
-                release_sock(sk);
-        }
-
         sock_orphan(sk);
         iucv_sock_kill(sk);
         return err;
@@ -927,18 +959,52 @@ static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
         sk->sk_state_change(sk);
 }

+static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len,
+                             struct sk_buff_head *fragmented_skb_q)
+{
+        int dataleft, size, copied = 0;
+        struct sk_buff *nskb;
+
+        dataleft = len;
+        while(dataleft) {
+                if (dataleft >= sk->sk_rcvbuf / 4)
+                        size = sk->sk_rcvbuf / 4;
+                else
+                        size = dataleft;
+
+                nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
+                if (!nskb)
+                        return -ENOMEM;
+
+                memcpy(nskb->data, skb->data + copied, size);
+                copied += size;
+                dataleft -= size;
+
+                nskb->h.raw = nskb->data;
+                nskb->nh.raw = nskb->data;
+                nskb->len = size;
+
+                skb_queue_tail(fragmented_skb_q, nskb);
+        }
+
+        return 0;
+}
 static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
 {
         struct sock *sk = path->private;
-        struct sk_buff *skb;
+        struct iucv_sock *iucv = iucv_sk(sk);
+        struct sk_buff *skb, *fskb;
+        struct sk_buff_head fragmented_skb_q;
         int rc;

+        skb_queue_head_init(&fragmented_skb_q);
+
         if (sk->sk_shutdown & RCV_SHUTDOWN)
                 return;

         skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
         if (!skb) {
-                iucv_message_reject(path, msg);
+                iucv_path_sever(path, NULL);
                 return;
         }

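iucv_fragment_skb() copies one large IUCV message into a chain of skbs of at most sk_rcvbuf/4 bytes each, so a single message can no longer exceed the socket's receive budget in one allocation; note that receive-side failures now sever the path (iucv_path_sever()) instead of rejecting the message. The chunking arithmetic, reduced to plain integers with made-up values:

    #include <stdio.h>

    /* Sketch of the iucv_fragment_skb() loop above, minus the skb
     * allocation; rcvbuf and len are invented demo values. */
    int main(void)
    {
            int rcvbuf = 65536;            /* pretend sk->sk_rcvbuf */
            int len = 100000;              /* pretend msg->length */
            int chunk = rcvbuf / 4, dataleft = len, copied = 0;

            while (dataleft) {
                    int size = dataleft >= chunk ? chunk : dataleft;

                    /* the real code allocates an skb of 'size' bytes
                     * here and copies from offset 'copied' */
                    printf("fragment at %6d, %5d bytes\n", copied, size);
                    copied += size;
                    dataleft -= size;
            }
            return 0;
    }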
@@ -952,14 +1018,39 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
                         kfree_skb(skb);
                         return;
                 }
+                if (skb->truesize >= sk->sk_rcvbuf / 4) {
+                        rc = iucv_fragment_skb(sk, skb, msg->length,
+                                               &fragmented_skb_q);
+                        kfree_skb(skb);
+                        skb = NULL;
+                        if (rc) {
+                                iucv_path_sever(path, NULL);
+                                return;
+                        }
+                } else {
+                        skb_reset_transport_header(skb);
+                        skb_reset_network_header(skb);
+                        skb->len = msg->length;
+                }
+        }
+        /* Queue the fragmented skb */
+        fskb = skb_dequeue(&fragmented_skb_q);
+        while(fskb) {
+                if (!skb_queue_empty(&iucv->backlog_skb_q))
+                        skb_queue_tail(&iucv->backlog_skb_q, fskb);
+                else if (sock_queue_rcv_skb(sk, fskb))
+                        skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, fskb);
+                fskb = skb_dequeue(&fragmented_skb_q);
+        }

-                skb_reset_transport_header(skb);
-                skb_reset_network_header(skb);
-                skb->len = msg->length;
+        /* Queue the original skb if it exists (was not fragmented) */
+        if (skb) {
+                if (!skb_queue_empty(&iucv->backlog_skb_q))
+                        skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
+                else if (sock_queue_rcv_skb(sk, skb))
+                        skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
         }

-        if (sock_queue_rcv_skb(sk, skb))
-                kfree_skb(skb);
 }

 static void iucv_callback_txdone(struct iucv_path *path,
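Both queuing paths above follow one invariant: if backlog_skb_q already holds skbs, a new skb is appended to the backlog rather than to sk_receive_queue, so backlogged data can never be overtaken; otherwise direct delivery is attempted, falling back to the backlog when the receive buffer is full. A tiny self-contained model of that decision (toy counters, invented names):

    #include <stdbool.h>
    #include <stdio.h>

    static int rcvq_used, rcvq_cap = 2;
    static int backlog_used;

    static bool rcv_full(void) { return rcvq_used == rcvq_cap; }

    static void deliver_or_backlog(int msg_id)
    {
            /* Never let a new message overtake backlogged ones: if the
             * backlog is non-empty, append there unconditionally. */
            if (backlog_used || rcv_full()) {
                    backlog_used++;
                    printf("msg %d -> backlog\n", msg_id);
            } else {
                    rcvq_used++;
                    printf("msg %d -> receive queue\n", msg_id);
            }
    }

    int main(void)
    {
            for (int i = 1; i <= 5; i++)
                    deliver_or_backlog(i);
            return 0;
    }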
@@ -971,17 +1062,27 @@ static void iucv_callback_txdone(struct iucv_path *path,
         struct sk_buff *list_skb = list->next;
         unsigned long flags;

-        spin_lock_irqsave(&list->lock, flags);
+        if (list_skb) {
+                spin_lock_irqsave(&list->lock, flags);
+
+                do {
+                        this = list_skb;
+                        list_skb = list_skb->next;
+                } while (memcmp(&msg->tag, this->cb, 4) && list_skb);
+
+                spin_unlock_irqrestore(&list->lock, flags);

-        do {
-                this = list_skb;
-                list_skb = list_skb->next;
-        } while (memcmp(&msg->tag, this->cb, 4));
+                skb_unlink(this, &iucv_sk(sk)->send_skb_q);
+                kfree_skb(this);
+        }

-        spin_unlock_irqrestore(&list->lock, flags);
+        if (sk->sk_state == IUCV_CLOSING){
+                if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
+                        sk->sk_state = IUCV_CLOSED;
+                        sk->sk_state_change(sk);
+                }
+        }

-        skb_unlink(this, &iucv_sk(sk)->send_skb_q);
-        kfree_skb(this);
 }

 static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
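The transmit-done handler now tolerates an empty send queue (the close path may already have purged it) by doing the tag lookup only when list_skb is non-NULL, and the search loop gains an "&& list_skb" guard so it stops at the end of the list instead of walking past it; when the last in-flight skb completes while the socket sits in IUCV_CLOSING, the handler finishes the close by moving the socket to IUCV_CLOSED and waking the closer. A restructured sketch of a bounded tag search (toy list types, not the kernel's sk_buff chain):

    #include <stdio.h>
    #include <string.h>

    struct node {
            unsigned int tag;
            struct node *next;
    };

    /* Walk the list, stopping on a match or at the end, mirroring the
     * corrected termination condition in the hunk above. */
    static struct node *find_by_tag(struct node *head, unsigned int tag)
    {
            while (head) {
                    struct node *this = head;

                    head = head->next;
                    if (memcmp(&tag, &this->tag, 4) == 0)
                            return this;       /* tag matched */
            }
            return NULL;                       /* tag not on the list */
    }

    int main(void)
    {
            struct node c = {3, NULL}, b = {2, &c}, a = {1, &b};

            printf("found tag 2: %s\n", find_by_tag(&a, 2) ? "yes" : "no");
            printf("found tag 9: %s\n", find_by_tag(&a, 9) ? "yes" : "no");
            return 0;
    }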