about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
authorZhu Yi <yi.zhu@intel.com>2010-03-04 13:01:42 -0500
committerGreg Kroah-Hartman <gregkh@suse.de>2010-04-01 19:02:02 -0400
commit230401ee7e13ca89e15b64ddd1e8514a016a627e (patch)
tree0b1e9baf8e73f0fc357487a430d439cd2187165c /net
parent20a92ecc4bfd6c5c0b5a71296a7b89aa53cf49e3 (diff)
udp: use limited socket backlog
[ Upstream commit 55349790d7cbf0d381873a7ece1dcafcffd4aaa9 ] Make udp adapt to the limited socket backlog change. Cc: "David S. Miller" <davem@davemloft.net> Cc: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> Cc: "Pekka Savola (ipv6)" <pekkas@netcore.fi> Cc: Patrick McHardy <kaber@trash.net> Signed-off-by: Zhu Yi <yi.zhu@intel.com> Acked-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'net')
-rw-r--r--net/ipv4/udp.c6
-rw-r--r--net/ipv6/udp.c28
2 files changed, 22 insertions(+), 12 deletions(-)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f0126fdd7e04..7bb45686f3c5 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1372,8 +1372,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		rc = __udp_queue_rcv_skb(sk, skb);
-	else
-		sk_add_backlog(sk, skb);
+	else if (sk_add_backlog_limited(sk, skb)) {
+		bh_unlock_sock(sk);
+		goto drop;
+	}
 	bh_unlock_sock(sk);
 
 	return rc;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 69ebdbe78c47..bf88ce073d26 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -584,16 +584,20 @@ static void flush_stack(struct sock **stack, unsigned int count,
 			bh_lock_sock(sk);
 			if (!sock_owned_by_user(sk))
 				udpv6_queue_rcv_skb(sk, skb1);
-			else
-				sk_add_backlog(sk, skb1);
+			else if (sk_add_backlog_limited(sk, skb1)) {
+				kfree_skb(skb1);
+				bh_unlock_sock(sk);
+				goto drop;
+			}
 			bh_unlock_sock(sk);
-		} else {
-			atomic_inc(&sk->sk_drops);
-			UDP6_INC_STATS_BH(sock_net(sk),
-					UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
-			UDP6_INC_STATS_BH(sock_net(sk),
-					UDP_MIB_INERRORS, IS_UDPLITE(sk));
+			continue;
 		}
+drop:
+		atomic_inc(&sk->sk_drops);
+		UDP6_INC_STATS_BH(sock_net(sk),
+				UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
+		UDP6_INC_STATS_BH(sock_net(sk),
+				UDP_MIB_INERRORS, IS_UDPLITE(sk));
 	}
 }
 /*
@@ -756,8 +760,12 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		udpv6_queue_rcv_skb(sk, skb);
-	else
-		sk_add_backlog(sk, skb);
+	else if (sk_add_backlog_limited(sk, skb)) {
+		atomic_inc(&sk->sk_drops);
+		bh_unlock_sock(sk);
+		sock_put(sk);
+		goto discard;
+	}
 	bh_unlock_sock(sk);
 	sock_put(sk);
 	return 0;