author	Eric Dumazet <edumazet@google.com>	2012-04-22 19:34:26 -0400
committer	David S. Miller <davem@davemloft.net>	2012-04-23 22:28:28 -0400
commit	f545a38f74584cc7424cb74f792a00c6d2589485 (patch)
tree	b272cbfed3267a7750f55f23989e1b070ae6ac3e
parent	b98985073bc5403ef1320866e4ef8bbc5d587ceb (diff)
net: add a limit parameter to sk_add_backlog()
sk_add_backlog() & sk_rcvqueues_full() hard coded sk_rcvbuf as the memory limit. We need to make this limit a parameter for TCP use. No functional change expected in this patch, all callers still using the old sk_rcvbuf limit. Signed-off-by: Eric Dumazet <edumazet@google.com> Cc: Neal Cardwell <ncardwell@google.com> Cc: Tom Herbert <therbert@google.com> Cc: Maciej Żenczykowski <maze@google.com> Cc: Yuchung Cheng <ycheng@google.com> Cc: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi> Cc: Rick Jones <rick.jones2@hp.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	include/net/sock.h	10
-rw-r--r--	net/core/sock.c	4
-rw-r--r--	net/ipv4/tcp_ipv4.c	2
-rw-r--r--	net/ipv4/udp.c	4
-rw-r--r--	net/ipv6/tcp_ipv6.c	2
-rw-r--r--	net/ipv6/udp.c	8
-rw-r--r--	net/llc/llc_conn.c	2
-rw-r--r--	net/sctp/input.c	4
-rw-r--r--	net/tipc/socket.c	2
-rw-r--r--	net/x25/x25_dev.c	2
10 files changed, 21 insertions, 19 deletions
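The change is mechanical: sk_rcvqueues_full() and sk_add_backlog() gain an explicit limit argument, and every existing caller passes sk->sk_rcvbuf so behaviour is unchanged. As a rough illustration of the idea, here is a minimal user-space C sketch (simplified stand-in types, not the kernel API) of the check the patch generalizes; the full diff follows.

/*
 * Minimal user-space sketch (not kernel code) of the check this patch
 * generalizes: the caller now supplies the memory limit instead of the
 * helpers hard-coding sk->sk_rcvbuf. 'struct fake_sock' and its fields
 * are simplified stand-ins for struct sock.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_sock {
	unsigned int rmem_alloc;	/* bytes already in the receive queue */
	unsigned int backlog_len;	/* bytes sitting in the backlog */
	unsigned int rcvbuf;		/* SO_RCVBUF-style limit */
};

/* Mirrors sk_rcvqueues_full(): full once queued memory exceeds 'limit'. */
static bool rcvqueues_full(const struct fake_sock *sk, unsigned int limit)
{
	unsigned int qsize = sk->backlog_len + sk->rmem_alloc;

	return qsize > limit;
}

/* Mirrors sk_add_backlog(): refuse with -ENOBUFS when over 'limit'. */
static int add_backlog(struct fake_sock *sk, unsigned int skb_len,
		       unsigned int limit)
{
	if (rcvqueues_full(sk, limit))
		return -ENOBUFS;

	sk->backlog_len += skb_len;
	return 0;
}

int main(void)
{
	struct fake_sock sk = { .rmem_alloc = 70000, .backlog_len = 0,
				.rcvbuf = 65536 };

	/* Existing callers keep the old behaviour by passing the rcvbuf. */
	printf("limit = rcvbuf:     %d\n", add_backlog(&sk, 4000, sk.rcvbuf));

	/* A caller such as TCP could pass a larger limit instead. */
	printf("limit = 2 * rcvbuf: %d\n", add_backlog(&sk, 4000, 2 * sk.rcvbuf));
	return 0;
}

In this sketch the first call is rejected because the queued memory already exceeds the receive buffer, while the second succeeds because the caller chose a larger limit; that per-call choice is exactly the flexibility the new parameter provides.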
diff --git a/include/net/sock.h b/include/net/sock.h
index 4cdb9b3050f4..4e9d01e491d5 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -709,17 +709,19 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
  * Do not take into account this skb truesize,
  * to allow even a single big packet to come.
  */
-static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
+static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb,
+				     unsigned int limit)
 {
 	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
-	return qsize > sk->sk_rcvbuf;
+	return qsize > limit;
 }
 
 /* The per-socket spinlock must be held here. */
-static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
+					      unsigned int limit)
 {
-	if (sk_rcvqueues_full(sk, skb))
+	if (sk_rcvqueues_full(sk, skb, limit))
 		return -ENOBUFS;
 
 	__sk_add_backlog(sk, skb);
diff --git a/net/core/sock.c b/net/core/sock.c
index 679c5bbe2bed..0431aaf7473a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -389,7 +389,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 
 	skb->dev = NULL;
 
-	if (sk_rcvqueues_full(sk, skb)) {
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
 		atomic_inc(&sk->sk_drops);
 		goto discard_and_relse;
 	}
@@ -406,7 +406,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 		rc = sk_backlog_rcv(sk, skb);
 
 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-	} else if (sk_add_backlog(sk, skb)) {
+	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 		bh_unlock_sock(sk);
 		atomic_inc(&sk->sk_drops);
 		goto discard_and_relse;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0883921b20c1..917607e9bd5b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1752,7 +1752,7 @@ process:
 			if (!tcp_prequeue(sk, skb))
 				ret = tcp_v4_do_rcv(sk, skb);
 		}
-	} else if (unlikely(sk_add_backlog(sk, skb))) {
+	} else if (unlikely(sk_add_backlog(sk, skb, sk->sk_rcvbuf))) {
 		bh_unlock_sock(sk);
 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
 		goto discard_and_relse;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 3430e8fc18de..279fd0846302 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1479,7 +1479,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		goto drop;
 
 
-	if (sk_rcvqueues_full(sk, skb))
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
 		goto drop;
 
 	rc = 0;
@@ -1488,7 +1488,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		rc = __udp_queue_rcv_skb(sk, skb);
-	else if (sk_add_backlog(sk, skb)) {
+	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 		bh_unlock_sock(sk);
 		goto drop;
 	}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 8044f6ac1301..b04e6d8a8371 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1654,7 +1654,7 @@ process:
 			if (!tcp_prequeue(sk, skb))
 				ret = tcp_v6_do_rcv(sk, skb);
 		}
-	} else if (unlikely(sk_add_backlog(sk, skb))) {
+	} else if (unlikely(sk_add_backlog(sk, skb, sk->sk_rcvbuf))) {
 		bh_unlock_sock(sk);
 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
 		goto discard_and_relse;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 37b0699e95e5..d39bbc9e0622 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -611,14 +611,14 @@ static void flush_stack(struct sock **stack, unsigned int count,
 
 		sk = stack[i];
 		if (skb1) {
-			if (sk_rcvqueues_full(sk, skb1)) {
+			if (sk_rcvqueues_full(sk, skb1, sk->sk_rcvbuf)) {
 				kfree_skb(skb1);
 				goto drop;
 			}
 			bh_lock_sock(sk);
 			if (!sock_owned_by_user(sk))
 				udpv6_queue_rcv_skb(sk, skb1);
-			else if (sk_add_backlog(sk, skb1)) {
+			else if (sk_add_backlog(sk, skb1, sk->sk_rcvbuf)) {
 				kfree_skb(skb1);
 				bh_unlock_sock(sk);
 				goto drop;
@@ -790,14 +790,14 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 
 	/* deliver */
 
-	if (sk_rcvqueues_full(sk, skb)) {
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
 		sock_put(sk);
 		goto discard;
 	}
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		udpv6_queue_rcv_skb(sk, skb);
-	else if (sk_add_backlog(sk, skb)) {
+	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 		atomic_inc(&sk->sk_drops);
 		bh_unlock_sock(sk);
 		sock_put(sk);
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index ba137a6a224d..0d0d416dfab6 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -828,7 +828,7 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
 	else {
 		dprintk("%s: adding to backlog...\n", __func__);
 		llc_set_backlog_type(skb, LLC_PACKET);
-		if (sk_add_backlog(sk, skb))
+		if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
 			goto drop_unlock;
 	}
 out:
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 80f71af71384..80564fe03024 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -342,7 +342,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 		sctp_bh_lock_sock(sk);
 
 		if (sock_owned_by_user(sk)) {
-			if (sk_add_backlog(sk, skb))
+			if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
 				sctp_chunk_free(chunk);
 			else
 				backloged = 1;
@@ -376,7 +376,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
 	struct sctp_ep_common *rcvr = chunk->rcvr;
 	int ret;
 
-	ret = sk_add_backlog(sk, skb);
+	ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
 	if (!ret) {
 		/* Hold the assoc/ep while hanging on the backlog queue.
 		 * This way, we know structures we need will not disappear
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index c19fc4a228a8..6d4991e8f670 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1330,7 +1330,7 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
 	if (!sock_owned_by_user(sk)) {
 		res = filter_rcv(sk, buf);
 	} else {
-		if (sk_add_backlog(sk, buf))
+		if (sk_add_backlog(sk, buf, sk->sk_rcvbuf))
 			res = TIPC_ERR_OVERLOAD;
 		else
 			res = TIPC_OK;
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index f0ce862d1f46..a8a236338e61 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -58,7 +58,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
 	if (!sock_owned_by_user(sk)) {
 		queued = x25_process_rx_frame(sk, skb);
 	} else {
-		queued = !sk_add_backlog(sk, skb);
+		queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf);
 	}
 	bh_unlock_sock(sk);
 	sock_put(sk);