Diffstat (limited to 'net/iucv/af_iucv.c')
 net/iucv/af_iucv.c | 44 +++++++++++++++++++++++---------------------
 1 file changed, 23 insertions(+), 21 deletions(-)
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 026704a47296..2f1373855a8b 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -45,7 +45,8 @@ static struct proto iucv_proto = {
 static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
 static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
 static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
-static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]);
+static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
+                                 u8 ipuser[16]);
 static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
 
 static struct iucv_sock_list iucv_sk_list = {
@@ -152,7 +153,7 @@ static void iucv_sock_close(struct sock *sk)
         iucv_sock_clear_timer(sk);
         lock_sock(sk);
 
-        switch(sk->sk_state) {
+        switch (sk->sk_state) {
         case IUCV_LISTEN:
                 iucv_sock_cleanup_listen(sk);
                 break;
@@ -164,7 +165,7 @@ static void iucv_sock_close(struct sock *sk)
                 sk->sk_state = IUCV_CLOSING;
                 sk->sk_state_change(sk);
 
-                if(!skb_queue_empty(&iucv->send_skb_q)) {
+                if (!skb_queue_empty(&iucv->send_skb_q)) {
                         if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
                                 timeo = sk->sk_lingertime;
                         else
@@ -292,7 +293,7 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
         struct iucv_sock *isk, *n;
         struct sock *sk;
 
-        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q){
+        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                 sk = (struct sock *) isk;
                 lock_sock(sk);
 
@@ -537,7 +538,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
 
         /* Wait for an incoming connection */
         add_wait_queue_exclusive(sk->sk_sleep, &wait);
-        while (!(nsk = iucv_accept_dequeue(sk, newsock))){
+        while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
                 set_current_state(TASK_INTERRUPTIBLE);
                 if (!timeo) {
                         err = -EAGAIN;
@@ -618,13 +619,13 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                 goto out;
         }
 
-        if (sk->sk_state == IUCV_CONNECTED){
-                if(!(skb = sock_alloc_send_skb(sk, len,
-                               msg->msg_flags & MSG_DONTWAIT,
-                               &err)))
+        if (sk->sk_state == IUCV_CONNECTED) {
+                if (!(skb = sock_alloc_send_skb(sk, len,
+                                        msg->msg_flags & MSG_DONTWAIT,
+                                        &err)))
                         goto out;
 
-                if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)){
+                if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                         err = -EFAULT;
                         goto fail;
                 }
@@ -710,7 +711,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 
         /* Queue backlog skbs */
         rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
-        while(rskb) {
+        while (rskb) {
                 if (sock_queue_rcv_skb(sk, rskb)) {
                         skb_queue_head(&iucv_sk(sk)->backlog_skb_q,
                                        rskb);
@@ -731,7 +732,7 @@ static inline unsigned int iucv_accept_poll(struct sock *parent)
         struct iucv_sock *isk, *n;
         struct sock *sk;
 
-        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q){
+        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                 sk = (struct sock *) isk;
 
                 if (sk->sk_state == IUCV_CONNECTED)
@@ -762,7 +763,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
                 mask |= POLLHUP;
 
         if (!skb_queue_empty(&sk->sk_receive_queue) ||
-                (sk->sk_shutdown & RCV_SHUTDOWN))
+            (sk->sk_shutdown & RCV_SHUTDOWN))
                 mask |= POLLIN | POLLRDNORM;
 
         if (sk->sk_state == IUCV_CLOSED)
@@ -793,7 +794,7 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
                 return -EINVAL;
 
         lock_sock(sk);
-        switch(sk->sk_state) {
+        switch (sk->sk_state) {
         case IUCV_CLOSED:
                 err = -ENOTCONN;
                 goto fail;
@@ -809,7 +810,7 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
                 err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
                                         (void *) prmmsg, 8);
                 if (err) {
-                        switch(err) {
+                        switch (err) {
                         case 1:
                                 err = -ENOTCONN;
                                 break;
@@ -912,7 +913,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
 
         /* Create the new socket */
         nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
-        if (!nsk){
+        if (!nsk) {
                 err = iucv_path_sever(path, user_data);
                 goto fail;
         }
@@ -935,7 +936,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
 
         path->msglim = IUCV_QUEUELEN_DEFAULT;
         err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
-        if (err){
+        if (err) {
                 err = iucv_path_sever(path, user_data);
                 goto fail;
         }
@@ -966,7 +967,7 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len,
         struct sk_buff *nskb;
 
         dataleft = len;
-        while(dataleft) {
+        while (dataleft) {
                 if (dataleft >= sk->sk_rcvbuf / 4)
                         size = sk->sk_rcvbuf / 4;
                 else
@@ -989,6 +990,7 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len,
 
         return 0;
 }
+
 static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
 {
         struct sock *sk = path->private;
@@ -1035,7 +1037,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
                 }
                 /* Queue the fragmented skb */
                 fskb = skb_dequeue(&fragmented_skb_q);
-                while(fskb) {
+                while (fskb) {
                         if (!skb_queue_empty(&iucv->backlog_skb_q))
                                 skb_queue_tail(&iucv->backlog_skb_q, fskb);
                         else if (sock_queue_rcv_skb(sk, fskb))
@@ -1076,7 +1078,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
                 kfree_skb(this);
         }
 
-        if (sk->sk_state == IUCV_CLOSING){
+        if (sk->sk_state == IUCV_CLOSING) {
                 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
                         sk->sk_state = IUCV_CLOSED;
                         sk->sk_state_change(sk);
@@ -1123,7 +1125,7 @@ static struct net_proto_family iucv_sock_family_ops = {
         .create = iucv_sock_create,
 };
 
-static int afiucv_init(void)
+static int __init afiucv_init(void)
 {
         int err;
 
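
For context: most hunks above apply standard kernel coding style (a space after if/switch/while before the parenthesis, and "} {" on the same line as the closing condition), while the final hunk additionally marks afiucv_init() as __init, placing the function in the .init.text section so its memory can be reclaimed once initialization completes. A minimal sketch of that annotation pattern, as a hypothetical standalone module (the example_* names are illustrative and not part of this patch):

#include <linux/init.h>
#include <linux/module.h>

/* __init places the function in the .init.text section; the kernel
 * discards that section after boot (or after the module finishes
 * loading), reclaiming the memory the init code occupied. */
static int __init example_init(void)
{
        pr_info("example: initialized\n");
        return 0;
}

/* The __exit counterpart is dropped entirely when the code is built in,
 * since built-in code is never unloaded. */
static void __exit example_exit(void)
{
        pr_info("example: exiting\n");
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");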