author		Eric Dumazet <edumazet@google.com>	2012-04-22 19:34:26 -0400
committer	David S. Miller <davem@davemloft.net>	2012-04-23 22:28:28 -0400
commit		f545a38f74584cc7424cb74f792a00c6d2589485 (patch)
tree		b272cbfed3267a7750f55f23989e1b070ae6ac3e /net/ipv6/udp.c
parent		b98985073bc5403ef1320866e4ef8bbc5d587ceb (diff)
net: add a limit parameter to sk_add_backlog()
sk_add_backlog() and sk_rcvqueues_full() hard-coded sk_rcvbuf as the
memory limit. We need to make this limit a parameter for TCP use.

No functional change is expected in this patch; all callers still use the
old sk_rcvbuf limit.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Tom Herbert <therbert@google.com>
Cc: Maciej Żenczykowski <maze@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Cc: Rick Jones <rick.jones2@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
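
For context, a minimal sketch of what the two helpers in include/net/sock.h
might look like after this change. Only the new "limit" parameter and the
sk->sk_rcvbuf arguments are confirmed by the diff below; the helper bodies
(backlog accounting via sk->sk_backlog.len and sk->sk_rmem_alloc, and the
call into __sk_add_backlog()) are assumptions about the surrounding kernel
code, not part of this patch:

/* Sketch only: the limit that used to be read from sk->sk_rcvbuf inside
 * the helpers is now supplied by the caller.
 */
static inline bool sk_rcvqueues_full(const struct sock *sk,
				     const struct sk_buff *skb,
				     unsigned int limit)
{
	/* Assumed accounting: bytes queued in the backlog plus bytes
	 * already charged to the receive queue.
	 */
	unsigned int qsize = sk->sk_backlog.len +
			     atomic_read(&sk->sk_rmem_alloc);

	return qsize > limit;
}

static inline int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
				 unsigned int limit)
{
	if (sk_rcvqueues_full(sk, skb, limit))
		return -ENOBUFS;

	__sk_add_backlog(sk, skb);	/* assumed existing unchecked helper */
	skb_set_owner_r(skb, sk);
	return 0;
}

Callers such as the UDPv6 paths below keep the old behaviour by passing
sk->sk_rcvbuf, while TCP can later pass a larger limit.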
Diffstat (limited to 'net/ipv6/udp.c')
-rw-r--r--	net/ipv6/udp.c	| 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 37b0699e95e5..d39bbc9e0622 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -611,14 +611,14 @@ static void flush_stack(struct sock **stack, unsigned int count,
 
 		sk = stack[i];
 		if (skb1) {
-			if (sk_rcvqueues_full(sk, skb1)) {
+			if (sk_rcvqueues_full(sk, skb1, sk->sk_rcvbuf)) {
 				kfree_skb(skb1);
 				goto drop;
 			}
 			bh_lock_sock(sk);
 			if (!sock_owned_by_user(sk))
 				udpv6_queue_rcv_skb(sk, skb1);
-			else if (sk_add_backlog(sk, skb1)) {
+			else if (sk_add_backlog(sk, skb1, sk->sk_rcvbuf)) {
 				kfree_skb(skb1);
 				bh_unlock_sock(sk);
 				goto drop;
@@ -790,14 +790,14 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 
 	/* deliver */
 
-	if (sk_rcvqueues_full(sk, skb)) {
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
 		sock_put(sk);
 		goto discard;
 	}
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		udpv6_queue_rcv_skb(sk, skb);
-	else if (sk_add_backlog(sk, skb)) {
+	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 		atomic_inc(&sk->sk_drops);
 		bh_unlock_sock(sk);
 		sock_put(sk);