Diffstat (limited to 'net/iucv/af_iucv.c')
-rw-r--r--	net/iucv/af_iucv.c	193
1 file changed, 148 insertions(+), 45 deletions(-)
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index e84c924a81ee..d9e9ddb8eac5 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -45,7 +45,8 @@ static struct proto iucv_proto = {
 static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
 static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
 static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
-static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]);
+static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
+				 u8 ipuser[16]);
 static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
 
 static struct iucv_sock_list iucv_sk_list = {
@@ -147,11 +148,12 @@ static void iucv_sock_close(struct sock *sk)
 	unsigned char user_data[16];
 	struct iucv_sock *iucv = iucv_sk(sk);
 	int err;
+	unsigned long timeo;
 
 	iucv_sock_clear_timer(sk);
 	lock_sock(sk);
 
-	switch(sk->sk_state) {
+	switch (sk->sk_state) {
 	case IUCV_LISTEN:
 		iucv_sock_cleanup_listen(sk);
 		break;
@@ -159,6 +161,21 @@ static void iucv_sock_close(struct sock *sk)
 	case IUCV_CONNECTED:
 	case IUCV_DISCONN:
 		err = 0;
+
+		sk->sk_state = IUCV_CLOSING;
+		sk->sk_state_change(sk);
+
+		if (!skb_queue_empty(&iucv->send_skb_q)) {
+			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
+				timeo = sk->sk_lingertime;
+			else
+				timeo = IUCV_DISCONN_TIMEOUT;
+			err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
+		}
+
+		sk->sk_state = IUCV_CLOSED;
+		sk->sk_state_change(sk);
+
 		if (iucv->path) {
 			low_nmcpy(user_data, iucv->src_name);
 			high_nmcpy(user_data, iucv->dst_name);
@@ -168,12 +185,11 @@ static void iucv_sock_close(struct sock *sk)
 			iucv->path = NULL;
 		}
 
-		sk->sk_state = IUCV_CLOSED;
-		sk->sk_state_change(sk);
 		sk->sk_err = ECONNRESET;
 		sk->sk_state_change(sk);
 
 		skb_queue_purge(&iucv->send_skb_q);
+		skb_queue_purge(&iucv->backlog_skb_q);
 
 		sock_set_flag(sk, SOCK_ZAPPED);
 		break;
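
The reworked close path above parks the socket in IUCV_CLOSING, then waits for send_skb_q to drain before severing the path; the wait is bounded by the SO_LINGER time when set, else by IUCV_DISCONN_TIMEOUT. A minimal user-space sketch of that timeout choice follows (the struct, the numeric timeout value, and main() are illustrative assumptions, not kernel code):

/*
 * Sketch only: models the timeout selection in the reworked
 * iucv_sock_close().  SOCK_LINGER, sk_lingertime and
 * IUCV_DISCONN_TIMEOUT are names from the patch; the struct,
 * the numeric timeout and main() are illustrative assumptions.
 */
#include <stdbool.h>
#include <stdio.h>

#define IUCV_DISCONN_TIMEOUT	(9 * 100)	/* assumed value */

struct sock_model {
	bool linger;			/* SOCK_LINGER set via SO_LINGER */
	unsigned long lingertime;	/* user-supplied linger time */
	int queued;			/* skbs still on send_skb_q */
};

/* How long the close path waits for the send queue to drain. */
static unsigned long close_timeo(const struct sock_model *sk)
{
	if (sk->linger && sk->lingertime)
		return sk->lingertime;
	return IUCV_DISCONN_TIMEOUT;
}

int main(void)
{
	struct sock_model sk = { true, 500, 3 };

	if (sk.queued)	/* wait only if unsent data remains */
		printf("wait up to %lu ticks for send_skb_q\n",
		       close_timeo(&sk));
	return 0;
}
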
@@ -204,6 +220,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
 	sock_init_data(sock, sk);
 	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
 	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
+	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
 	iucv_sk(sk)->send_tag = 0;
 
 	sk->sk_destruct = iucv_sock_destruct;
@@ -276,7 +293,7 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
 	struct iucv_sock *isk, *n;
 	struct sock *sk;
 
-	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q){
+	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
 		sk = (struct sock *) isk;
 		lock_sock(sk);
 
@@ -510,7 +527,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
 	long timeo;
 	int err = 0;
 
-	lock_sock(sk);
+	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 
 	if (sk->sk_state != IUCV_LISTEN) {
 		err = -EBADFD;
@@ -521,7 +538,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
 
 	/* Wait for an incoming connection */
 	add_wait_queue_exclusive(sk->sk_sleep, &wait);
-	while (!(nsk = iucv_accept_dequeue(sk, newsock))){
+	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (!timeo) {
 			err = -EAGAIN;
@@ -530,7 +547,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
 
 		release_sock(sk);
 		timeo = schedule_timeout(timeo);
-		lock_sock(sk);
+		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 
 		if (sk->sk_state != IUCV_LISTEN) {
 			err = -EBADFD;
@@ -602,13 +619,13 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 		goto out;
 	}
 
-	if (sk->sk_state == IUCV_CONNECTED){
-		if(!(skb = sock_alloc_send_skb(sk, len,
-					       msg->msg_flags & MSG_DONTWAIT,
-					       &err)))
-			return err;
+	if (sk->sk_state == IUCV_CONNECTED) {
+		if (!(skb = sock_alloc_send_skb(sk, len,
+						msg->msg_flags & MSG_DONTWAIT,
+						&err)))
+			goto out;
 
-		if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)){
+		if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
 			err = -EFAULT;
 			goto fail;
 		}
@@ -647,10 +664,16 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 {
 	int noblock = flags & MSG_DONTWAIT;
 	struct sock *sk = sock->sk;
+	struct iucv_sock *iucv = iucv_sk(sk);
 	int target, copied = 0;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *rskb, *cskb;
 	int err = 0;
 
+	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
+	    skb_queue_empty(&iucv->backlog_skb_q) &&
+	    skb_queue_empty(&sk->sk_receive_queue))
+		return 0;
+
 	if (flags & (MSG_OOB))
 		return -EOPNOTSUPP;
 
@@ -665,10 +688,12 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 
 	copied = min_t(unsigned int, skb->len, len);
 
-	if (memcpy_toiovec(msg->msg_iov, skb->data, copied)) {
+	cskb = skb;
+	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
 		skb_queue_head(&sk->sk_receive_queue, skb);
 		if (copied == 0)
 			return -EFAULT;
+		goto done;
 	}
 
 	len -= copied;
@@ -683,6 +708,18 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 		}
 
 		kfree_skb(skb);
+
+		/* Queue backlog skbs */
+		rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
+		while (rskb) {
+			if (sock_queue_rcv_skb(sk, rskb)) {
+				skb_queue_head(&iucv_sk(sk)->backlog_skb_q,
+					       rskb);
+				break;
+			} else {
+				rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
+			}
+		}
 	} else
 		skb_queue_head(&sk->sk_receive_queue, skb);
 
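
After the reader consumes an skb, the new loop above refills sk_receive_queue from backlog_skb_q, stopping at the first skb that sock_queue_rcv_skb() refuses and putting it back at the head so ordering is preserved. A hedged stand-alone model of that drain (queue_rcv(), the arrays, and the slot count are assumptions):

/*
 * Sketch only: the backlog drain added to iucv_sock_recvmsg(),
 * modelled with plain arrays.  queue_rcv() stands in for
 * sock_queue_rcv_skb(), which fails once sk_rcvbuf is exhausted;
 * the slot count and skb numbers are made-up examples.
 */
#include <stdio.h>

#define RCVQ_SLOTS	2

static int backlog[] = { 1, 2, 3, 4 };	/* parked by iucv_callback_rx() */
static int rcvq[RCVQ_SLOTS], nrcvq;

/* Accept an skb into the receive queue, or refuse when full. */
static int queue_rcv(int skb)
{
	if (nrcvq >= RCVQ_SLOTS)
		return -1;
	rcvq[nrcvq++] = skb;
	return 0;
}

int main(void)
{
	int i = 0, nbacklog = sizeof(backlog) / sizeof(backlog[0]);

	/* Stop at the first refusal; the patch re-heads that skb so
	 * the remaining backlog keeps its order. */
	while (i < nbacklog && queue_rcv(backlog[i]) == 0)
		i++;

	printf("moved %d skb(s), %d still parked\n", i, nbacklog - i);
	return 0;
}
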
@@ -695,7 +732,7 @@ static inline unsigned int iucv_accept_poll(struct sock *parent)
 	struct iucv_sock *isk, *n;
 	struct sock *sk;
 
-	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q){
+	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
 		sk = (struct sock *) isk;
 
 		if (sk->sk_state == IUCV_CONNECTED)
@@ -726,12 +763,15 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
 		mask |= POLLHUP;
 
 	if (!skb_queue_empty(&sk->sk_receive_queue) ||
-		(sk->sk_shutdown & RCV_SHUTDOWN))
+	    (sk->sk_shutdown & RCV_SHUTDOWN))
 		mask |= POLLIN | POLLRDNORM;
 
 	if (sk->sk_state == IUCV_CLOSED)
 		mask |= POLLHUP;
 
+	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
+		mask |= POLLIN;
+
 	if (sock_writeable(sk))
 		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
 	else
@@ -754,7 +794,7 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
 		return -EINVAL;
 
 	lock_sock(sk);
-	switch(sk->sk_state) {
+	switch (sk->sk_state) {
 	case IUCV_CLOSED:
 		err = -ENOTCONN;
 		goto fail;
@@ -770,7 +810,7 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
 		err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
 					(void *) prmmsg, 8);
 		if (err) {
-			switch(err) {
+			switch (err) {
 			case 1:
 				err = -ENOTCONN;
 				break;
@@ -817,13 +857,6 @@ static int iucv_sock_release(struct socket *sock)
 		iucv_sk(sk)->path = NULL;
 	}
 
-	if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime){
-		lock_sock(sk);
-		err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0,
-					   sk->sk_lingertime);
-		release_sock(sk);
-	}
-
 	sock_orphan(sk);
 	iucv_sock_kill(sk);
 	return err;
@@ -880,7 +913,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
 
 	/* Create the new socket */
 	nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
-	if (!nsk){
+	if (!nsk) {
 		err = iucv_path_sever(path, user_data);
 		goto fail;
 	}
@@ -903,7 +936,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
 
 	path->msglim = IUCV_QUEUELEN_DEFAULT;
 	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
-	if (err){
+	if (err) {
 		err = iucv_path_sever(path, user_data);
 		goto fail;
 	}
@@ -927,18 +960,53 @@ static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
 	sk->sk_state_change(sk);
 }
 
+static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len,
+			     struct sk_buff_head *fragmented_skb_q)
+{
+	int dataleft, size, copied = 0;
+	struct sk_buff *nskb;
+
+	dataleft = len;
+	while (dataleft) {
+		if (dataleft >= sk->sk_rcvbuf / 4)
+			size = sk->sk_rcvbuf / 4;
+		else
+			size = dataleft;
+
+		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
+		if (!nskb)
+			return -ENOMEM;
+
+		memcpy(nskb->data, skb->data + copied, size);
+		copied += size;
+		dataleft -= size;
+
+		skb_reset_transport_header(nskb);
+		skb_reset_network_header(nskb);
+		nskb->len = size;
+
+		skb_queue_tail(fragmented_skb_q, nskb);
+	}
+
+	return 0;
+}
+
 static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
 {
 	struct sock *sk = path->private;
-	struct sk_buff *skb;
+	struct iucv_sock *iucv = iucv_sk(sk);
+	struct sk_buff *skb, *fskb;
+	struct sk_buff_head fragmented_skb_q;
 	int rc;
 
+	skb_queue_head_init(&fragmented_skb_q);
+
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
 		return;
 
 	skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
 	if (!skb) {
-		iucv_message_reject(path, msg);
+		iucv_path_sever(path, NULL);
 		return;
 	}
 
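
The new iucv_fragment_skb() above slices one large IUCV message into skbs of at most sk->sk_rcvbuf / 4 bytes, so a single message cannot monopolize the receive buffer. The arithmetic, runnable in user space (the buffer and message sizes below are made-up examples):

/*
 * Sketch only: the chunking arithmetic of iucv_fragment_skb().
 * The kernel caps each fragment at sk->sk_rcvbuf / 4; the sizes
 * here are illustrative assumptions.
 */
#include <stdio.h>

int main(void)
{
	int rcvbuf = 65536;		/* assumed sk->sk_rcvbuf */
	int limit = rcvbuf / 4;		/* per-fragment cap: 16384 */
	int dataleft = 40000;		/* incoming message length */

	while (dataleft) {
		int size = dataleft >= limit ? limit : dataleft;

		printf("fragment of %d bytes\n", size);	/* 16384, 16384, 7232 */
		dataleft -= size;
	}
	return 0;
}
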
@@ -952,14 +1020,39 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
 			kfree_skb(skb);
 			return;
 		}
+		if (skb->truesize >= sk->sk_rcvbuf / 4) {
+			rc = iucv_fragment_skb(sk, skb, msg->length,
+					       &fragmented_skb_q);
+			kfree_skb(skb);
+			skb = NULL;
+			if (rc) {
+				iucv_path_sever(path, NULL);
+				return;
+			}
+		} else {
+			skb_reset_transport_header(skb);
+			skb_reset_network_header(skb);
+			skb->len = msg->length;
+		}
+	}
+	/* Queue the fragmented skb */
+	fskb = skb_dequeue(&fragmented_skb_q);
+	while (fskb) {
+		if (!skb_queue_empty(&iucv->backlog_skb_q))
+			skb_queue_tail(&iucv->backlog_skb_q, fskb);
+		else if (sock_queue_rcv_skb(sk, fskb))
+			skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, fskb);
+		fskb = skb_dequeue(&fragmented_skb_q);
+	}
 
-		skb_reset_transport_header(skb);
-		skb_reset_network_header(skb);
-		skb->len = msg->length;
+	/* Queue the original skb if it exists (was not fragmented) */
+	if (skb) {
+		if (!skb_queue_empty(&iucv->backlog_skb_q))
+			skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
+		else if (sock_queue_rcv_skb(sk, skb))
+			skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
 	}
 
-	if (sock_queue_rcv_skb(sk, skb))
-		kfree_skb(skb);
 }
 
 static void iucv_callback_txdone(struct iucv_path *path,
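
The enqueue rule in the reworked iucv_callback_rx(): a new skb may go straight to sk_receive_queue only while backlog_skb_q is empty; otherwise, or when sock_queue_rcv_skb() fails, it is appended to the backlog so it cannot overtake parked data. A small sketch of just that branch logic (deliver() and the two flags are illustrative):

/*
 * Sketch only: the delivery rule of iucv_callback_rx().  The queue
 * names and the sock_queue_rcv_skb() failure case come from the
 * patch; everything else is a stand-alone model.
 */
#include <stdio.h>

static int backlog_len;	/* skbs already parked on backlog_skb_q */
static int rcvq_full;	/* would sock_queue_rcv_skb() refuse? */

/* Where the next incoming skb ends up. */
static const char *deliver(void)
{
	if (backlog_len)	/* never overtake parked data */
		return "backlog_skb_q";
	if (rcvq_full)		/* receive queue refused the skb */
		return "backlog_skb_q";
	return "sk_receive_queue";
}

int main(void)
{
	printf("empty backlog, room -> %s\n", deliver());
	rcvq_full = 1;
	printf("empty backlog, full -> %s\n", deliver());
	backlog_len = 2;
	printf("backlog present     -> %s\n", deliver());
	return 0;
}
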
@@ -971,17 +1064,27 @@ static void iucv_callback_txdone(struct iucv_path *path,
 	struct sk_buff *list_skb = list->next;
 	unsigned long flags;
 
-	spin_lock_irqsave(&list->lock, flags);
+	if (list_skb) {
+		spin_lock_irqsave(&list->lock, flags);
+
+		do {
+			this = list_skb;
+			list_skb = list_skb->next;
+		} while (memcmp(&msg->tag, this->cb, 4) && list_skb);
+
+		spin_unlock_irqrestore(&list->lock, flags);
 
-	do {
-		this = list_skb;
-		list_skb = list_skb->next;
-	} while (memcmp(&msg->tag, this->cb, 4));
+		skb_unlink(this, &iucv_sk(sk)->send_skb_q);
+		kfree_skb(this);
+	}
 
-	spin_unlock_irqrestore(&list->lock, flags);
+	if (sk->sk_state == IUCV_CLOSING) {
+		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
+			sk->sk_state = IUCV_CLOSED;
+			sk->sk_state_change(sk);
+		}
+	}
 
-	skb_unlink(this, &iucv_sk(sk)->send_skb_q);
-	kfree_skb(this);
 }
 
 static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
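
iucv_callback_txdone() now completes the close handshake: when the last confirmed skb leaves send_skb_q while the socket is in IUCV_CLOSING, it flips the state to IUCV_CLOSED, which wakes the iucv_sock_wait_state() sleeper in iucv_sock_close(). A stand-alone model of that handoff (struct sock_model and the event loop are assumptions):

/*
 * Sketch only: the close handshake finished by iucv_callback_txdone().
 * The states and field names echo the patch; the rest is a model.
 */
#include <stdio.h>

enum state { IUCV_CONNECTED, IUCV_CLOSING, IUCV_CLOSED };

struct sock_model {
	enum state state;
	int send_q;	/* skbs outstanding on send_skb_q */
};

/* One transmit-complete event, as in iucv_callback_txdone(). */
static void txdone(struct sock_model *sk)
{
	if (sk->send_q)
		sk->send_q--;			/* skb_unlink() + kfree_skb() */
	if (sk->state == IUCV_CLOSING && sk->send_q == 0)
		sk->state = IUCV_CLOSED;	/* wakes the closer */
}

int main(void)
{
	struct sock_model sk = { IUCV_CLOSING, 2 };

	while (sk.state != IUCV_CLOSED)
		txdone(&sk);
	printf("send queue drained, socket closed\n");
	return 0;
}
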
@@ -1022,7 +1125,7 @@ static struct net_proto_family iucv_sock_family_ops = {
 	.create	= iucv_sock_create,
 };
 
-static int afiucv_init(void)
+static int __init afiucv_init(void)
 {
 	int err;
 