about | summary | refs | log | tree | commit | diff | stats
path: root/net/ipv4
diff options
context:
space:
mode:
authorEric Dumazet <edumazet@google.com>2012-04-22 19:34:26 -0400
committerDavid S. Miller <davem@davemloft.net>2012-04-23 22:28:28 -0400
commitf545a38f74584cc7424cb74f792a00c6d2589485 (patch)
treeb272cbfed3267a7750f55f23989e1b070ae6ac3e /net/ipv4
parentb98985073bc5403ef1320866e4ef8bbc5d587ceb (diff)
net: add a limit parameter to sk_add_backlog()
sk_add_backlog() & sk_rcvqueues_full() hard coded sk_rcvbuf as the memory limit. We need to make this limit a parameter for TCP use. No functional change expected in this patch, all callers still using the old sk_rcvbuf limit. Signed-off-by: Eric Dumazet <edumazet@google.com> Cc: Neal Cardwell <ncardwell@google.com> Cc: Tom Herbert <therbert@google.com> Cc: Maciej Żenczykowski <maze@google.com> Cc: Yuchung Cheng <ycheng@google.com> Cc: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi> Cc: Rick Jones <rick.jones2@hp.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/tcp_ipv4.c | 2
-rw-r--r--  net/ipv4/udp.c      | 4
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0883921b20c1..917607e9bd5b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1752,7 +1752,7 @@ process:
 			if (!tcp_prequeue(sk, skb))
 				ret = tcp_v4_do_rcv(sk, skb);
 		}
-	} else if (unlikely(sk_add_backlog(sk, skb))) {
+	} else if (unlikely(sk_add_backlog(sk, skb, sk->sk_rcvbuf))) {
 		bh_unlock_sock(sk);
 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
 		goto discard_and_relse;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 3430e8fc18de..279fd0846302 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1479,7 +1479,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		goto drop;


-	if (sk_rcvqueues_full(sk, skb))
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
 		goto drop;

 	rc = 0;
@@ -1488,7 +1488,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		rc = __udp_queue_rcv_skb(sk, skb);
-	else if (sk_add_backlog(sk, skb)) {
+	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 		bh_unlock_sock(sk);
 		goto drop;
 	}