path: root/net/ipv4/tcp_input.c
author	Eric Dumazet <edumazet@google.com>	2015-05-15 15:39:29 -0400
committer	David S. Miller <davem@davemloft.net>	2015-05-17 22:45:49 -0400
commit	76dfa6082032b5c179864816fa508879421678eb (patch)
tree	e710efd16263290f153b9f565449bdef7a48f464 /net/ipv4/tcp_input.c
parent	8e4d980ac21596a9b91d8e720c77ad081975a0a8 (diff)
tcp: allow one skb to be received per socket under memory pressure
While testing tight tcp_mem settings, I found tcp sessions could be stuck
because we do not allow even one skb to be received on them.

By allowing one skb to be received, we introduce fairness and eventually
force memory hogs to release their allocation.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 093779f7e893..40c435997e54 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4507,10 +4507,12 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 
 	if (eaten <= 0) {
 queue_and_out:
-		if (eaten < 0 &&
-		    tcp_try_rmem_schedule(sk, skb, skb->truesize))
-			goto drop;
-
+		if (eaten < 0) {
+			if (skb_queue_len(&sk->sk_receive_queue) == 0)
+				sk_forced_mem_schedule(sk, skb->truesize);
+			else if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
+				goto drop;
+		}
 		eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
 	}
 	tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
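For context, a minimal sketch of the admission policy this hunk introduces. This is an illustrative paraphrase, not kernel code: the wrapper tcp_admit_skb() is a hypothetical name, while skb_queue_len(), sk_forced_mem_schedule() and tcp_try_rmem_schedule() are the helpers used in the diff above.

/*
 * Sketch of the new policy in tcp_data_queue(): if the socket's receive
 * queue is empty, charge the skb's memory unconditionally so every session
 * can make forward progress even under tcp_mem pressure; otherwise use the
 * normal, pressure-aware accounting and drop the skb when it fails.
 */
static bool tcp_admit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (skb_queue_len(&sk->sk_receive_queue) == 0) {
		/* Guarantee at least one queued skb per socket. */
		sk_forced_mem_schedule(sk, skb->truesize);
		return true;
	}
	/* Under memory pressure this may fail; the caller then drops the skb. */
	return !tcp_try_rmem_schedule(sk, skb, skb->truesize);
}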