author    Eric Dumazet <eric.dumazet@gmail.com>    2011-12-21 02:11:44 -0500
committer David S. Miller <davem@davemloft.net>    2011-12-23 02:15:14 -0500
commit    0fd7bac6b6157eed6cf0cb86a1e88ba29e57c033
tree      bcc24e9c63587bc1e8e15ad60654de9c6f72883e
parent    a0a129f8b6cff54ab479324a54aefdab5db4f240
net: relax rcvbuf limits
skb->truesize might be big even for a small packet. It's even bigger
after commit 87fb4b7b533 ("net: more accurate skb truesize") and with a
big MTU. We should allow queueing of at least one packet per receiver,
even with a low RCVBUF setting.

Reported-by: Michal Simek <monstr@monstr.eu>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
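The patch replaces the old admission test, which charged the incoming
skb's truesize against sk_rcvbuf before queueing, with one that only
looks at memory already queued. A minimal standalone sketch of the two
predicates (illustrative values; the helper names are made up for the
example, and the real code uses atomic accounting on sk->sk_rmem_alloc):

/* Sketch only: contrasts the old and new rcvbuf admission checks.
 * old_check_drops()/new_check_drops() are illustrative names, not
 * kernel functions. */
#include <stdio.h>
#include <stdbool.h>

/* Old check: drop when queued memory plus the incoming skb's truesize
 * would reach sk_rcvbuf. A single skb whose truesize exceeds sk_rcvbuf
 * is therefore dropped even when the receive queue is empty. */
static bool old_check_drops(unsigned int rmem_alloc,
			    unsigned int truesize, unsigned int rcvbuf)
{
	return rmem_alloc + truesize >= rcvbuf;
}

/* New check: drop only once queued memory already meets the limit, so
 * at least one packet can always be queued per receiver. */
static bool new_check_drops(unsigned int rmem_alloc, unsigned int rcvbuf)
{
	return rmem_alloc >= rcvbuf;
}

int main(void)
{
	/* Empty queue, small rcvbuf, one skb with a large truesize. */
	unsigned int rmem_alloc = 0, rcvbuf = 2048, truesize = 4096;

	printf("old check drops first packet: %d\n",
	       old_check_drops(rmem_alloc, truesize, rcvbuf)); /* 1 */
	printf("new check drops first packet: %d\n",
	       new_check_drops(rmem_alloc, rcvbuf));           /* 0 */
	return 0;
}

The trade-off is that sk_rmem_alloc may now overshoot sk_rcvbuf by up
to one skb's truesize, which is accepted so that a receiver with a low
RCVBUF is never starved outright.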
Diffstat (limited to 'net')

 net/core/sock.c        | 6 +-----
 net/packet/af_packet.c | 6 ++----
 2 files changed, 3 insertions(+), 9 deletions(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index 4ed7b1d12f5e..b23f174ab84c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -288,11 +288,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	unsigned long flags;
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 
-	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
-	   number of warnings when compiling with -W --ANK
-	 */
-	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-	    (unsigned)sk->sk_rcvbuf) {
+	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
 		atomic_inc(&sk->sk_drops);
 		trace_sock_rcvqueue_full(sk, skb);
 		return -ENOMEM;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 82a6f34d39d0..3891702b81df 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1630,8 +1630,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 	if (snaplen > res)
 		snaplen = res;
 
-	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-	    (unsigned)sk->sk_rcvbuf)
+	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		goto drop_n_acct;
 
 	if (skb_shared(skb)) {
@@ -1762,8 +1761,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 	if (po->tp_version <= TPACKET_V2) {
 		if (macoff + snaplen > po->rx_ring.frame_size) {
 			if (po->copy_thresh &&
-			    atomic_read(&sk->sk_rmem_alloc) + skb->truesize
-			    < (unsigned)sk->sk_rcvbuf) {
+			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
 				if (skb_shared(skb)) {
 					copy_skb = skb_clone(skb, GFP_ATOMIC);
 				} else {
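For context, a hypothetical userspace reproducer (not taken from the
original report; the port, buffer sizes, and sleep are illustrative)
showing the user-visible effect of the relaxed check on a UDP socket
with a tiny SO_RCVBUF:

/* Hypothetical demo, not from the original report: with a tiny
 * SO_RCVBUF, a kernel using the old check could drop a datagram whose
 * skb->truesize exceeds sk_rcvbuf even though the queue is empty; with
 * the relaxed check, at least one packet always queues. */
#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int rcvbuf = 256;	/* the kernel doubles this and applies a floor */
	struct sockaddr_in addr = {
		.sin_family      = AF_INET,
		.sin_port        = htons(9999),	/* arbitrary test port */
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	char buf[2048] = { 0 };

	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, sizeof(rcvbuf));
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));

	/* Send one datagram to ourselves and give loopback delivery a
	 * moment to complete before polling the queue. */
	sendto(fd, buf, 1024, 0, (struct sockaddr *)&addr, sizeof(addr));
	usleep(10000);

	/* Old check: likely -1/EAGAIN (packet dropped at enqueue).
	 * Relaxed check: 1024 bytes received. */
	ssize_t n = recv(fd, buf, sizeof(buf), MSG_DONTWAIT);
	printf("recv returned %zd\n", n);
	close(fd);
	return 0;
}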