author    Sabrina Dubroca <sd@queasysnail.net>  2015-07-24 12:19:25 -0400
committer David S. Miller <davem@davemloft.net> 2015-07-27 04:06:53 -0400
commit    dfbafc995304ebb9a9b03f65083e6e9cea143b20 (patch)
tree      db3c54339548ef9e9e9b053e3c5d2f77c1dc560f /net/core/sock.c
parent    3d3af8859284f982ad62980b3787f05259f1dd10 (diff)
tcp: fix recv with flags MSG_WAITALL | MSG_PEEK
Currently, tcp_recvmsg enters a busy loop in sk_wait_data if called with flags = MSG_WAITALL | MSG_PEEK. sk_wait_data waits for sk_receive_queue to become non-empty, but in this case the receive queue is already non-empty; it just does not contain any skb that we can use. Add a "last skb seen on receive queue" argument to sk_wait_data, so that it sleeps until the receive queue has new skbs.

Link: https://bugzilla.kernel.org/show_bug.cgi?id=99461
Link: https://sourceware.org/bugzilla/show_bug.cgi?id=18493
Link: https://bugzilla.redhat.com/show_bug.cgi?id=1205258
Reported-by: Enrico Scholz <rh-bugzilla@ensc.de>
Reported-by: Dan Searle <dan@censornet.com>
Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
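This view is limited to net/core/sock.c, so the tcp_recvmsg side of the patch is not shown here. As a rough, hypothetical sketch of the caller pattern the new argument enables (the wrapper function and its name below are illustrative, not part of the patch), a receive path records the last skb it has already examined and hands it to sk_wait_data(), so the wait only completes once something new is queued behind it:

#include <net/sock.h>
#include <linux/skbuff.h>

/* Illustrative only: a tcp_recvmsg-style caller of the new interface.
 * With MSG_PEEK the peeked skb stays on sk_receive_queue, so waiting for
 * "queue not empty" returns immediately and the caller spins; waiting for
 * the queue tail to move past the skb we have already seen actually
 * sleeps until new data arrives.  Assumes the socket lock is held, as it
 * is in tcp_recvmsg().
 */
static int wait_for_more_data(struct sock *sk, long *timeo)
{
	const struct sk_buff *last;

	/* Last skb this (possibly peeking) reader has already looked at. */
	last = skb_peek_tail(&sk->sk_receive_queue);

	/* Sleeps until skb_peek_tail(&sk->sk_receive_queue) != last. */
	return sk_wait_data(sk, timeo, last);
}

In the tcp.c half of the patch (not shown in this diffstat-limited view), the last-seen skb is tracked while tcp_recvmsg walks the receive queue; the skb_peek_tail() shortcut above is just the simplest way to illustrate the new wait condition.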
Diffstat (limited to 'net/core/sock.c')
-rw-r--r--  net/core/sock.c  5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index 08f16db46070..8a14f1285fc4 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1967,20 +1967,21 @@ static void __release_sock(struct sock *sk)
  * sk_wait_data - wait for data to arrive at sk_receive_queue
  * @sk:    sock to wait on
  * @timeo: for how long
+ * @skb:   last skb seen on sk_receive_queue
  *
  * Now socket state including sk->sk_err is changed only under lock,
  * hence we may omit checks after joining wait queue.
  * We check receive queue before schedule() only as optimization;
  * it is very likely that release_sock() added new data.
  */
-int sk_wait_data(struct sock *sk, long *timeo)
+int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
 {
 	int rc;
 	DEFINE_WAIT(wait);
 
 	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
-	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
+	rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb);
 	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
 	finish_wait(sk_sleep(sk), &wait);
 	return rc;