author		Zhu Yi <yi.zhu@intel.com>	2010-03-04 13:01:42 -0500
committer	David S. Miller <davem@davemloft.net>	2010-03-05 16:34:00 -0500
commit		55349790d7cbf0d381873a7ece1dcafcffd4aaa9 (patch)
tree		2f5cc194b8d7b2fd559c24fe2dfd946535923daa /net
parent		6b03a53a5ab7ccf2d5d69f96cf1c739c4d2a8fb9 (diff)
udp: use limited socket backlog
Make udp adapt to the limited socket backlog change.

Cc: "David S. Miller" <davem@davemloft.net>
Cc: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
Cc: "Pekka Savola (ipv6)" <pekkas@netcore.fi>
Cc: Patrick McHardy <kaber@trash.net>
Signed-off-by: Zhu Yi <yi.zhu@intel.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
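The helper this patch switches to, sk_add_backlog_limited(), was added earlier in the same series (see the parent commit chain). A minimal sketch of its contract, assuming the backlog-accounting fields from that earlier change; the exact limit expression is an assumption and is not part of this diff:

	/* Sketch only: queue to the backlog unless it is already over
	 * budget; the precise bound used upstream is an assumption. */
	static inline int sk_add_backlog_limited(struct sock *sk, struct sk_buff *skb)
	{
		if (sk->sk_backlog.len >= max(sk->sk_backlog.limit,
					      (unsigned int)sk->sk_rcvbuf << 1))
			return -ENOBUFS;	/* caller must drop the skb */

		sk_add_backlog(sk, skb);	/* the old unconditional queueing */
		sk->sk_backlog.len += skb->truesize;
		return 0;
	}

A zero return means the skb was queued; nonzero means the backlog is full and the caller owns the drop (freeing the skb and bumping drop statistics), which is exactly what each hunk below adds.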
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/udp.c |  6
-rw-r--r--	net/ipv6/udp.c | 28
2 files changed, 22 insertions(+), 12 deletions(-)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 608a5446d05b..e7eb47f338d4 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1371,8 +1371,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		rc = __udp_queue_rcv_skb(sk, skb);
-	else
-		sk_add_backlog(sk, skb);
+	else if (sk_add_backlog_limited(sk, skb)) {
+		bh_unlock_sock(sk);
+		goto drop;
+	}
 	bh_unlock_sock(sk);
 
 	return rc;
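The new goto drop branches to udp_queue_rcv_skb()'s pre-existing error path rather than freeing the skb inline. Roughly, from the unchanged remainder of the function (paraphrased from that era of the code, not part of this diff):

	drop:
		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
		return -1;

So on backlog overflow the packet is counted as an input error, the per-socket drop counter is bumped, and the skb is freed at the label; only bh_unlock_sock() has to happen before the jump, because the label sits outside the locked region.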
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 52b8347ae3b2..64804912b093 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -583,16 +583,20 @@ static void flush_stack(struct sock **stack, unsigned int count,
 			bh_lock_sock(sk);
 			if (!sock_owned_by_user(sk))
 				udpv6_queue_rcv_skb(sk, skb1);
-			else
-				sk_add_backlog(sk, skb1);
+			else if (sk_add_backlog_limited(sk, skb1)) {
+				kfree_skb(skb1);
+				bh_unlock_sock(sk);
+				goto drop;
+			}
 			bh_unlock_sock(sk);
-		} else {
-			atomic_inc(&sk->sk_drops);
-			UDP6_INC_STATS_BH(sock_net(sk),
-					UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
-			UDP6_INC_STATS_BH(sock_net(sk),
-					UDP_MIB_INERRORS, IS_UDPLITE(sk));
+			continue;
 		}
+drop:
+		atomic_inc(&sk->sk_drops);
+		UDP6_INC_STATS_BH(sock_net(sk),
+				UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
+		UDP6_INC_STATS_BH(sock_net(sk),
+				UDP_MIB_INERRORS, IS_UDPLITE(sk));
 	}
 }
 /*
@@ -754,8 +758,12 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		udpv6_queue_rcv_skb(sk, skb);
-	else
-		sk_add_backlog(sk, skb);
+	else if (sk_add_backlog_limited(sk, skb)) {
+		atomic_inc(&sk->sk_drops);
+		bh_unlock_sock(sk);
+		sock_put(sk);
+		goto discard;
+	}
 	bh_unlock_sock(sk);
 	sock_put(sk);
 	return 0;
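As in the ipv4 hunk, overflow reuses an existing error path, here __udp6_lib_rcv()'s discard label further down the function. A sketch of what that label did in this era of the code (paraphrased from memory of the surrounding function, an assumption rather than part of this diff):

	discard:
		UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
		kfree_skb(skb);
		return 0;

Because discard sits past the point where the normal flow has already released the socket, the new branch has to bump sk->sk_drops, unlock, and sock_put() the lookup reference itself before jumping; the label then only counts the error and frees the skb.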