author    Eric Dumazet <eric.dumazet@gmail.com>  2011-12-21 02:11:44 -0500
committer David S. Miller <davem@davemloft.net>  2011-12-23 02:15:14 -0500
commit    0fd7bac6b6157eed6cf0cb86a1e88ba29e57c033 (patch)
tree      bcc24e9c63587bc1e8e15ad60654de9c6f72883e /include/net/sock.h
parent    a0a129f8b6cff54ab479324a54aefdab5db4f240 (diff)
net: relax rcvbuf limits
skb->truesize might be big even for a small packet. It is even bigger after
commit 87fb4b7b533 (net: more accurate skb truesize) and with a big MTU.
We should allow queueing at least one packet per receiver, even with a low
RCVBUF setting.

Reported-by: Michal Simek <monstr@monstr.eu>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
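For illustration, a minimal user-space sketch (hypothetical values, not kernel code) of why the old check could reject even the first packet: with a small RCVBUF and a large skb->truesize, qsize + skb->truesize exceeds sk_rcvbuf while the queues are still empty, whereas the relaxed check only counts what is already queued.

    #include <stdio.h>

    int main(void)
    {
    	/* Hypothetical numbers: a small RCVBUF and an skb whose truesize
    	 * (data plus overhead) is ~2 KB even for a short packet. */
    	unsigned int sk_rcvbuf    = 256;
    	unsigned int qsize        = 0;     /* receive + backlog queues empty */
    	unsigned int skb_truesize = 2048;

    	/* Old check: counts the incoming skb, so the first packet is dropped. */
    	printf("old check drops first packet: %d\n",
    	       qsize + skb_truesize > sk_rcvbuf);

    	/* New check: only what is already queued, so one packet always fits. */
    	printf("new check drops first packet: %d\n",
    	       qsize > sk_rcvbuf);
    	return 0;
    }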
Diffstat (limited to 'include/net/sock.h')
-rw-r--r--  include/net/sock.h  4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index abb6e0f0c3c3..32e39371fba6 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -637,12 +637,14 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 
 /*
  * Take into account size of receive queue and backlog queue
+ * Do not take into account this skb truesize,
+ * to allow even a single big packet to come.
  */
 static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
 {
 	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
-	return qsize + skb->truesize > sk->sk_rcvbuf;
+	return qsize > sk->sk_rcvbuf;
 }
 
 /* The per-socket spinlock must be held here. */
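As a further hedged sketch (the caller below is hypothetical and not part of this patch; only sk_rcvqueues_full and __sk_add_backlog appear in the context above), this is the kind of receive-path pattern such a check serves:

    /* Hypothetical protocol backlog path: drop when the receive and backlog
     * queues already exceed sk_rcvbuf, otherwise queue the skb on the backlog.
     * The per-socket spinlock is assumed to be held, as the last context line
     * above requires. */
    static int example_backlog_rcv(struct sock *sk, struct sk_buff *skb)
    {
    	if (sk_rcvqueues_full(sk, skb))
    		return -ENOBUFS;	/* queues already over sk_rcvbuf: drop */

    	__sk_add_backlog(sk, skb);
    	return 0;
    }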