aboutsummaryrefslogtreecommitdiffstats
path: root/net/iucv/af_iucv.c
diff options
context:
space:
mode:
authorHendrik Brueckner <brueckner@linux.vnet.ibm.com>2009-06-17 17:54:48 -0400
committerDavid S. Miller <davem@davemloft.net>2009-06-19 03:10:40 -0400
commit0ea920d211e0a870871965418923b08da2025b4a (patch)
tree5dc3451a2011f0d32839b5aef20d9aaec1da78cf /net/iucv/af_iucv.c
parentbb664f49f8be17d7b8bf9821144e8a53d7fcfe8a (diff)
af_iucv: Return -EAGAIN if iucv msg limit is exceeded
If the iucv message limit for a communication path is exceeded, sendmsg() returns -EAGAIN instead of -EPIPE. The calling application can then handle this error situation, e.g. to try again after waiting some time. For blocking sockets, sendmsg() waits up to the socket timeout before returning -EAGAIN. For the new wait condition, a macro has been introduced and the iucv_sock_wait_state() has been refactored to this macro. Signed-off-by: Hendrik Brueckner <brueckner@linux.vnet.ibm.com> Signed-off-by: Ursula Braun <ursula.braun@de.ibm.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/iucv/af_iucv.c')
-rw-r--r--net/iucv/af_iucv.c144
1 files changed, 103 insertions, 41 deletions
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 42b7198a6883..abadb4a846cf 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -53,6 +53,38 @@ static const u8 iprm_shutdown[8] =
53#define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */ 53#define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
54#define CB_TRGCLS_LEN (TRGCLS_SIZE) 54#define CB_TRGCLS_LEN (TRGCLS_SIZE)
55 55
56#define __iucv_sock_wait(sk, condition, timeo, ret) \
57do { \
58 DEFINE_WAIT(__wait); \
59 long __timeo = timeo; \
60 ret = 0; \
61 while (!(condition)) { \
62 prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE); \
63 if (!__timeo) { \
64 ret = -EAGAIN; \
65 break; \
66 } \
67 if (signal_pending(current)) { \
68 ret = sock_intr_errno(__timeo); \
69 break; \
70 } \
71 release_sock(sk); \
72 __timeo = schedule_timeout(__timeo); \
73 lock_sock(sk); \
74 ret = sock_error(sk); \
75 if (ret) \
76 break; \
77 } \
78 finish_wait(sk->sk_sleep, &__wait); \
79} while (0)
80
81#define iucv_sock_wait(sk, condition, timeo) \
82({ \
83 int __ret = 0; \
84 if (!(condition)) \
85 __iucv_sock_wait(sk, condition, timeo, __ret); \
86 __ret; \
87})
56 88
57static void iucv_sock_kill(struct sock *sk); 89static void iucv_sock_kill(struct sock *sk);
58static void iucv_sock_close(struct sock *sk); 90static void iucv_sock_close(struct sock *sk);
@@ -121,6 +153,48 @@ static inline size_t iucv_msg_length(struct iucv_message *msg)
121 return msg->length; 153 return msg->length;
122} 154}
123 155
156/**
157 * iucv_sock_in_state() - check for specific states
158 * @sk: sock structure
159 * @state: first iucv sk state
160 * @state2: second iucv sk state
161 *
162 * Returns true if the socket is either in the first or second state.
163 */
164static int iucv_sock_in_state(struct sock *sk, int state, int state2)
165{
166 return (sk->sk_state == state || sk->sk_state == state2);
167}
168
169/**
170 * iucv_below_msglim() - function to check if messages can be sent
171 * @sk: sock structure
172 *
173 * Returns true if the send queue length is lower than the message limit.
174 * Always returns true if the socket is not connected (no iucv path for
175 * checking the message limit).
176 */
177static inline int iucv_below_msglim(struct sock *sk)
178{
179 struct iucv_sock *iucv = iucv_sk(sk);
180
181 if (sk->sk_state != IUCV_CONNECTED)
182 return 1;
183 return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
184}
185
186/**
187 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
188 */
189static void iucv_sock_wake_msglim(struct sock *sk)
190{
191 read_lock(&sk->sk_callback_lock);
192 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
193 wake_up_interruptible_all(sk->sk_sleep);
194 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
195 read_unlock(&sk->sk_callback_lock);
196}
197
124/* Timers */ 198/* Timers */
125static void iucv_sock_timeout(unsigned long arg) 199static void iucv_sock_timeout(unsigned long arg)
126{ 200{
@@ -212,7 +286,9 @@ static void iucv_sock_close(struct sock *sk)
212 timeo = sk->sk_lingertime; 286 timeo = sk->sk_lingertime;
213 else 287 else
214 timeo = IUCV_DISCONN_TIMEOUT; 288 timeo = IUCV_DISCONN_TIMEOUT;
215 err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo); 289 err = iucv_sock_wait(sk,
290 iucv_sock_in_state(sk, IUCV_CLOSED, 0),
291 timeo);
216 } 292 }
217 293
218 case IUCV_CLOSING: /* fall through */ 294 case IUCV_CLOSING: /* fall through */
@@ -393,39 +469,6 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
393 return NULL; 469 return NULL;
394} 470}
395 471
396int iucv_sock_wait_state(struct sock *sk, int state, int state2,
397 unsigned long timeo)
398{
399 DECLARE_WAITQUEUE(wait, current);
400 int err = 0;
401
402 add_wait_queue(sk->sk_sleep, &wait);
403 while (sk->sk_state != state && sk->sk_state != state2) {
404 set_current_state(TASK_INTERRUPTIBLE);
405
406 if (!timeo) {
407 err = -EAGAIN;
408 break;
409 }
410
411 if (signal_pending(current)) {
412 err = sock_intr_errno(timeo);
413 break;
414 }
415
416 release_sock(sk);
417 timeo = schedule_timeout(timeo);
418 lock_sock(sk);
419
420 err = sock_error(sk);
421 if (err)
422 break;
423 }
424 set_current_state(TASK_RUNNING);
425 remove_wait_queue(sk->sk_sleep, &wait);
426 return err;
427}
428
429/* Bind an unbound socket */ 472/* Bind an unbound socket */
430static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr, 473static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
431 int addr_len) 474 int addr_len)
@@ -570,8 +613,9 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
570 } 613 }
571 614
572 if (sk->sk_state != IUCV_CONNECTED) { 615 if (sk->sk_state != IUCV_CONNECTED) {
573 err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN, 616 err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
574 sock_sndtimeo(sk, flags & O_NONBLOCK)); 617 IUCV_DISCONN),
618 sock_sndtimeo(sk, flags & O_NONBLOCK));
575 } 619 }
576 620
577 if (sk->sk_state == IUCV_DISCONN) { 621 if (sk->sk_state == IUCV_DISCONN) {
@@ -725,9 +769,11 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
725 struct iucv_message txmsg; 769 struct iucv_message txmsg;
726 struct cmsghdr *cmsg; 770 struct cmsghdr *cmsg;
727 int cmsg_done; 771 int cmsg_done;
772 long timeo;
728 char user_id[9]; 773 char user_id[9];
729 char appl_id[9]; 774 char appl_id[9];
730 int err; 775 int err;
776 int noblock = msg->msg_flags & MSG_DONTWAIT;
731 777
732 err = sock_error(sk); 778 err = sock_error(sk);
733 if (err) 779 if (err)
@@ -799,8 +845,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
799 * this is fine for SOCK_SEQPACKET (unless we want to support 845 * this is fine for SOCK_SEQPACKET (unless we want to support
800 * segmented records using the MSG_EOR flag), but 846 * segmented records using the MSG_EOR flag), but
801 * for SOCK_STREAM we might want to improve it in future */ 847 * for SOCK_STREAM we might want to improve it in future */
802 skb = sock_alloc_send_skb(sk, len, msg->msg_flags & MSG_DONTWAIT, 848 skb = sock_alloc_send_skb(sk, len, noblock, &err);
803 &err);
804 if (!skb) 849 if (!skb)
805 goto out; 850 goto out;
806 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { 851 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
@@ -808,6 +853,18 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
808 goto fail; 853 goto fail;
809 } 854 }
810 855
856 /* wait if the count of outstanding messages for the iucv path has reached the message limit */
857 timeo = sock_sndtimeo(sk, noblock);
858 err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
859 if (err)
860 goto fail;
861
862 /* return -ECONNRESET if the socket is no longer connected */
863 if (sk->sk_state != IUCV_CONNECTED) {
864 err = -ECONNRESET;
865 goto fail;
866 }
867
811 /* increment and save iucv message tag for msg_completion cbk */ 868 /* increment and save iucv message tag for msg_completion cbk */
812 txmsg.tag = iucv->send_tag++; 869 txmsg.tag = iucv->send_tag++;
813 memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN); 870 memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
@@ -844,9 +901,10 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
844 pr_err("Application %s on z/VM guest %s" 901 pr_err("Application %s on z/VM guest %s"
845 " exceeds message limit\n", 902 " exceeds message limit\n",
846 appl_id, user_id); 903 appl_id, user_id);
847 } 904 err = -EAGAIN;
905 } else
906 err = -EPIPE;
848 skb_unlink(skb, &iucv->send_skb_q); 907 skb_unlink(skb, &iucv->send_skb_q);
849 err = -EPIPE;
850 goto fail; 908 goto fail;
851 } 909 }
852 910
@@ -1463,7 +1521,11 @@ static void iucv_callback_txdone(struct iucv_path *path,
1463 1521
1464 spin_unlock_irqrestore(&list->lock, flags); 1522 spin_unlock_irqrestore(&list->lock, flags);
1465 1523
1466 kfree_skb(this); 1524 if (this) {
1525 kfree_skb(this);
1526 /* wake up any process waiting for sending */
1527 iucv_sock_wake_msglim(sk);
1528 }
1467 } 1529 }
1468 BUG_ON(!this); 1530 BUG_ON(!this);
1469 1531