author     Linus Torvalds <torvalds@linux-foundation.org>  2009-06-22 14:57:09 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-06-22 14:57:09 -0400
commit     5165aece0efac6574fc3e32b6f1c2a964820d1c6 (patch)
tree       73131c06a021578a47526a95bad391dbd9d3b932 /net/iucv/af_iucv.c
parent     e38be994b9cad09b0d8d78a1875d7e8a2e115d29 (diff)
parent     f6b24caaf933a466397915a08e30e885a32f905a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (43 commits)
via-velocity: Fix velocity driver unmapping incorrect size.
mlx4_en: Remove redundant refill code on RX
mlx4_en: Removed redundant check on lso header size
mlx4_en: Cancel port_up check in transmit function
mlx4_en: using stop/start_all_queues
mlx4_en: Removed redundant skb->len check
mlx4_en: Counting all the dropped packets on the TX side
usbnet cdc_subset: fix issues talking to PXA gadgets
Net: qla3xxx, remove sleeping in atomic
ipv4: fix NULL pointer + success return in route lookup path
isdn: clean up documentation index
cfg80211: validate station settings
cfg80211: allow setting station parameters in mesh
cfg80211: allow adding/deleting stations on mesh
ath5k: fix beacon_int handling
MAINTAINERS: Fix Atheros pattern paths
ath9k: restore PS mode, before we put the chip into FULL SLEEP state.
ath9k: wait for beacon frame along with CAB
acer-wmi: fix rfkill conversion
ath5k: avoid PCI FATAL interrupts by restoring RETRY_TIMEOUT disabling
...
Diffstat (limited to 'net/iucv/af_iucv.c')
-rw-r--r--  net/iucv/af_iucv.c  297
1 file changed, 179 insertions, 118 deletions
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 656cbd195825..6be5f92d1094 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -54,6 +54,38 @@ static const u8 iprm_shutdown[8] =
 #define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
 #define CB_TRGCLS_LEN  (TRGCLS_SIZE)

+#define __iucv_sock_wait(sk, condition, timeo, ret) \
+do { \
+        DEFINE_WAIT(__wait); \
+        long __timeo = timeo; \
+        ret = 0; \
+        while (!(condition)) { \
+                prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE); \
+                if (!__timeo) { \
+                        ret = -EAGAIN; \
+                        break; \
+                } \
+                if (signal_pending(current)) { \
+                        ret = sock_intr_errno(__timeo); \
+                        break; \
+                } \
+                release_sock(sk); \
+                __timeo = schedule_timeout(__timeo); \
+                lock_sock(sk); \
+                ret = sock_error(sk); \
+                if (ret) \
+                        break; \
+        } \
+        finish_wait(sk->sk_sleep, &__wait); \
+} while (0)
+
+#define iucv_sock_wait(sk, condition, timeo) \
+({ \
+        int __ret = 0; \
+        if (!(condition)) \
+                __iucv_sock_wait(sk, condition, timeo, __ret); \
+        __ret; \
+})

 static void iucv_sock_kill(struct sock *sk);
 static void iucv_sock_close(struct sock *sk);
@@ -238,6 +270,48 @@ static inline size_t iucv_msg_length(struct iucv_message *msg)
         return msg->length;
 }

+/**
+ * iucv_sock_in_state() - check for specific states
+ * @sk: sock structure
+ * @state: first iucv sk state
+ * @state2: second iucv sk state
+ *
+ * Returns true if the socket is in either the first or the second state.
+ */
+static int iucv_sock_in_state(struct sock *sk, int state, int state2)
+{
+        return (sk->sk_state == state || sk->sk_state == state2);
+}
+
+/**
+ * iucv_below_msglim() - function to check if messages can be sent
+ * @sk: sock structure
+ *
+ * Returns true if the send queue length is lower than the message limit.
+ * Always returns true if the socket is not connected (no iucv path for
+ * checking the message limit).
+ */
+static inline int iucv_below_msglim(struct sock *sk)
+{
+        struct iucv_sock *iucv = iucv_sk(sk);
+
+        if (sk->sk_state != IUCV_CONNECTED)
+                return 1;
+        return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+}
+
+/**
+ * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
+ */
+static void iucv_sock_wake_msglim(struct sock *sk)
+{
+        read_lock(&sk->sk_callback_lock);
+        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+                wake_up_interruptible_all(sk->sk_sleep);
+        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+        read_unlock(&sk->sk_callback_lock);
+}
+
 /* Timers */
 static void iucv_sock_timeout(unsigned long arg)
 {
@@ -329,7 +403,9 @@ static void iucv_sock_close(struct sock *sk)
                                 timeo = sk->sk_lingertime;
                         else
                                 timeo = IUCV_DISCONN_TIMEOUT;
-                        err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
+                        err = iucv_sock_wait(sk,
+                                        iucv_sock_in_state(sk, IUCV_CLOSED, 0),
+                                        timeo);
                 }

         case IUCV_CLOSING:   /* fall through */
@@ -510,39 +586,6 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
         return NULL;
 }

-int iucv_sock_wait_state(struct sock *sk, int state, int state2,
-                         unsigned long timeo)
-{
-        DECLARE_WAITQUEUE(wait, current);
-        int err = 0;
-
-        add_wait_queue(sk->sk_sleep, &wait);
-        while (sk->sk_state != state && sk->sk_state != state2) {
-                set_current_state(TASK_INTERRUPTIBLE);
-
-                if (!timeo) {
-                        err = -EAGAIN;
-                        break;
-                }
-
-                if (signal_pending(current)) {
-                        err = sock_intr_errno(timeo);
-                        break;
-                }
-
-                release_sock(sk);
-                timeo = schedule_timeout(timeo);
-                lock_sock(sk);
-
-                err = sock_error(sk);
-                if (err)
-                        break;
-        }
-        set_current_state(TASK_RUNNING);
-        remove_wait_queue(sk->sk_sleep, &wait);
-        return err;
-}
-
 /* Bind an unbound socket */
 static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
                           int addr_len)
@@ -687,8 +730,9 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
         }

         if (sk->sk_state != IUCV_CONNECTED) {
-                err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
-                                sock_sndtimeo(sk, flags & O_NONBLOCK));
+                err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
+                                     IUCV_DISCONN),
+                                     sock_sndtimeo(sk, flags & O_NONBLOCK));
         }

         if (sk->sk_state == IUCV_DISCONN) {
@@ -842,9 +886,11 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
         struct iucv_message txmsg;
         struct cmsghdr *cmsg;
         int cmsg_done;
+        long timeo;
         char user_id[9];
         char appl_id[9];
         int err;
+        int noblock = msg->msg_flags & MSG_DONTWAIT;

         err = sock_error(sk);
         if (err)
@@ -864,108 +910,119 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                 goto out;
         }

-        if (sk->sk_state == IUCV_CONNECTED) {
-                /* initialize defaults */
-                cmsg_done = 0;  /* check for duplicate headers */
-                txmsg.class = 0;
+        /* Return if the socket is not in connected state */
+        if (sk->sk_state != IUCV_CONNECTED) {
+                err = -ENOTCONN;
+                goto out;
+        }

-                /* iterate over control messages */
-                for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
-                        cmsg = CMSG_NXTHDR(msg, cmsg)) {
+        /* initialize defaults */
+        cmsg_done = 0;  /* check for duplicate headers */
+        txmsg.class = 0;

-                        if (!CMSG_OK(msg, cmsg)) {
-                                err = -EINVAL;
-                                goto out;
-                        }
+        /* iterate over control messages */
+        for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
+                cmsg = CMSG_NXTHDR(msg, cmsg)) {
+
+                if (!CMSG_OK(msg, cmsg)) {
+                        err = -EINVAL;
+                        goto out;
+                }
+
+                if (cmsg->cmsg_level != SOL_IUCV)
+                        continue;

-                        if (cmsg->cmsg_level != SOL_IUCV)
-                                continue;
+                if (cmsg->cmsg_type & cmsg_done) {
+                        err = -EINVAL;
+                        goto out;
+                }
+                cmsg_done |= cmsg->cmsg_type;

-                        if (cmsg->cmsg_type & cmsg_done) {
+                switch (cmsg->cmsg_type) {
+                case SCM_IUCV_TRGCLS:
+                        if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
                                 err = -EINVAL;
                                 goto out;
                         }
-                        cmsg_done |= cmsg->cmsg_type;
-
-                        switch (cmsg->cmsg_type) {
-                        case SCM_IUCV_TRGCLS:
-                                if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
-                                        err = -EINVAL;
-                                        goto out;
-                                }

                         /* set iucv message target class */
                         memcpy(&txmsg.class,
                                 (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

                         break;

                 default:
                         err = -EINVAL;
                         goto out;
                         break;
-                        }
                 }
+        }

         /* allocate one skb for each iucv message:
          * this is fine for SOCK_SEQPACKET (unless we want to support
          * segmented records using the MSG_EOR flag), but
          * for SOCK_STREAM we might want to improve it in future */
-        if (!(skb = sock_alloc_send_skb(sk, len,
-                                msg->msg_flags & MSG_DONTWAIT,
-                                &err)))
-                goto out;
+        skb = sock_alloc_send_skb(sk, len, noblock, &err);
+        if (!skb)
+                goto out;
+        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+                err = -EFAULT;
+                goto fail;
+        }

-        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
-                err = -EFAULT;
-                goto fail;
-        }
+        /* wait if outstanding messages for the iucv path have reached the msg limit */
+        timeo = sock_sndtimeo(sk, noblock);
+        err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
+        if (err)
+                goto fail;

-        /* increment and save iucv message tag for msg_completion cbk */
-        txmsg.tag = iucv->send_tag++;
-        memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
-        skb_queue_tail(&iucv->send_skb_q, skb);
+        /* return -ECONNRESET if the socket is no longer connected */
+        if (sk->sk_state != IUCV_CONNECTED) {
+                err = -ECONNRESET;
+                goto fail;
+        }

-        if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
-                        && skb->len <= 7) {
-                err = iucv_send_iprm(iucv->path, &txmsg, skb);
+        /* increment and save iucv message tag for msg_completion cbk */
+        txmsg.tag = iucv->send_tag++;
+        memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
+        skb_queue_tail(&iucv->send_skb_q, skb);

-                /* on success: there is no message_complete callback
-                 * for an IPRMDATA msg; remove skb from send queue */
-                if (err == 0) {
-                        skb_unlink(skb, &iucv->send_skb_q);
-                        kfree_skb(skb);
-                }
+        if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
+            && skb->len <= 7) {
+                err = iucv_send_iprm(iucv->path, &txmsg, skb);

-                /* this error should never happen since the
-                 * IUCV_IPRMDATA path flag is set... sever path */
-                if (err == 0x15) {
-                        iucv_path_sever(iucv->path, NULL);
-                        skb_unlink(skb, &iucv->send_skb_q);
-                        err = -EPIPE;
-                        goto fail;
-                }
-        } else
-                err = iucv_message_send(iucv->path, &txmsg, 0, 0,
-                                        (void *) skb->data, skb->len);
-        if (err) {
-                if (err == 3) {
-                        user_id[8] = 0;
-                        memcpy(user_id, iucv->dst_user_id, 8);
-                        appl_id[8] = 0;
-                        memcpy(appl_id, iucv->dst_name, 8);
-                        pr_err("Application %s on z/VM guest %s"
-                                " exceeds message limit\n",
-                                user_id, appl_id);
-                }
+                /* on success: there is no message_complete callback
+                 * for an IPRMDATA msg; remove skb from send queue */
+                if (err == 0) {
+                        skb_unlink(skb, &iucv->send_skb_q);
+                        kfree_skb(skb);
+                }
+
+                /* this error should never happen since the
+                 * IUCV_IPRMDATA path flag is set... sever path */
+                if (err == 0x15) {
+                        iucv_path_sever(iucv->path, NULL);
                         skb_unlink(skb, &iucv->send_skb_q);
                         err = -EPIPE;
                         goto fail;
                 }
-
-        } else {
-                err = -ENOTCONN;
-                goto out;
+        } else
+                err = iucv_message_send(iucv->path, &txmsg, 0, 0,
+                                        (void *) skb->data, skb->len);
+        if (err) {
+                if (err == 3) {
+                        user_id[8] = 0;
+                        memcpy(user_id, iucv->dst_user_id, 8);
+                        appl_id[8] = 0;
+                        memcpy(appl_id, iucv->dst_name, 8);
+                        pr_err("Application %s on z/VM guest %s"
+                               " exceeds message limit\n",
+                               appl_id, user_id);
+                        err = -EAGAIN;
+                } else
+                        err = -EPIPE;
+                skb_unlink(skb, &iucv->send_skb_q);
+                goto fail;
         }

         release_sock(sk);
@@ -1581,7 +1638,11 @@ static void iucv_callback_txdone(struct iucv_path *path,

                 spin_unlock_irqrestore(&list->lock, flags);

-                kfree_skb(this);
+                if (this) {
+                        kfree_skb(this);
+                        /* wake up any process waiting for sending */
+                        iucv_sock_wake_msglim(sk);
+                }
         }
         BUG_ON(!this);

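The af_iucv changes above all serve one pattern: the sender blocks while the number of queued skbs has reached the IUCV path's message limit (iucv_sock_wait() with iucv_below_msglim() as the condition), and the transmit-completion callback wakes the waiters via iucv_sock_wake_msglim(). The sketch below is only a userspace analogue of that pattern for illustration, assuming a made-up limit of 8 and pthreads in place of the kernel's wait queues; it is not code from this merge.

/* Userspace analogue of the msglim throttling introduced above: senders
 * block while the in-flight count has reached the limit, and the completion
 * path wakes them - loosely mirroring iucv_sock_wait(sk,
 * iucv_below_msglim(sk), timeo) and iucv_sock_wake_msglim(sk).
 * All names here are hypothetical, not kernel API.
 * Build: cc -pthread example.c
 */
#include <pthread.h>
#include <stdio.h>

#define MSGLIM 8                /* assumed message limit (iucv->path->msglim) */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t below_msglim = PTHREAD_COND_INITIALIZER;
static int in_flight;           /* analogue of skb_queue_len(&iucv->send_skb_q) */

/* analogue of iucv_sock_wait(sk, iucv_below_msglim(sk), timeo) in sendmsg */
static void send_one(int tag)
{
        pthread_mutex_lock(&lock);
        while (in_flight >= MSGLIM)     /* iucv_below_msglim() would be false */
                pthread_cond_wait(&below_msglim, &lock);
        in_flight++;                    /* skb_queue_tail(&send_skb_q, skb) */
        pthread_mutex_unlock(&lock);
        printf("queued message %d\n", tag);
}

/* analogue of iucv_callback_txdone() calling iucv_sock_wake_msglim() */
static void tx_done(void)
{
        pthread_mutex_lock(&lock);
        in_flight--;                            /* skb removed from send queue */
        pthread_cond_broadcast(&below_msglim);  /* wake_up_interruptible_all() */
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        for (int i = 0; i < 4; i++)
                send_one(i);
        tx_done();
        return 0;
}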