Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
 net/ipv4/tcp_ipv4.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 510f7a3c758b..87b173b563b0 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1506,16 +1506,16 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 
 	__skb_queue_tail(&tp->ucopy.prequeue, skb);
 	tp->ucopy.memory += skb->truesize;
-	if (tp->ucopy.memory > sk->sk_rcvbuf) {
+	if (skb_queue_len(&tp->ucopy.prequeue) >= 32 ||
+	    tp->ucopy.memory + atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) {
 		struct sk_buff *skb1;
 
 		BUG_ON(sock_owned_by_user(sk));
+		__NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED,
+				skb_queue_len(&tp->ucopy.prequeue));
 
-		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
+		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
 			sk_backlog_rcv(sk, skb1);
-			__NET_INC_STATS(sock_net(sk),
-					LINUX_MIB_TCPPREQUEUEDROPPED);
-		}
 
 		tp->ucopy.memory = 0;
 	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
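As a standalone illustration of the flush policy this hunk introduces, below is a minimal userland C sketch, not kernel code; the toy_prequeue struct and should_flush() helper are invented for the example. It mirrors the new guard: the prequeue is flushed to the backlog once it holds 32 or more skbs, or once its charged memory plus the already-accounted receive memory exceeds the receive buffer limit.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's prequeue accounting fields. */
struct toy_prequeue {
	unsigned int len;	/* number of queued skbs */
	unsigned int memory;	/* bytes charged to the prequeue */
};

/* Mirrors the patched condition in tcp_prequeue(): flush once the queue
 * holds 32 skbs, or once prequeue memory plus already-charged receive
 * memory would exceed the receive buffer limit (sk_rcvbuf in the kernel).
 */
static int should_flush(const struct toy_prequeue *q,
			unsigned int rmem_alloc, unsigned int rcvbuf)
{
	return q->len >= 32 || q->memory + rmem_alloc > rcvbuf;
}

int main(void)
{
	struct toy_prequeue q = { .len = 5, .memory = 60000 };

	/* 60000 + 50000 > 100000, so the memory check triggers a flush. */
	printf("flush: %d\n", should_flush(&q, 50000, 100000));
	return 0;
}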