author    Eric Dumazet <eric.dumazet@gmail.com>  2009-11-08 05:20:19 -0500
committer David S. Miller <davem@davemloft.net>  2009-11-08 23:53:10 -0500
commit    f6b8f32ca71406de718391369490f6b1e81fe0bb
tree      6a9d65cc246c74a78d7274d1911baf38ffc8eaf0 /net/ipv4
parent    a1ab77f97ed03f5dae66ae4c64375beffab83772
udp: multicast RX should increment SNMP/sk_drops counter in allocation failures
When skb_clone() fails, we should increment sk_drops and SNMP counters.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/udp.c  12
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 9d9072c6cce7..d73e9170536b 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1335,12 +1335,22 @@ static void flush_stack(struct sock **stack, unsigned int count,
 {
         unsigned int i;
         struct sk_buff *skb1 = NULL;
+        struct sock *sk;
 
         for (i = 0; i < count; i++) {
+                sk = stack[i];
                 if (likely(skb1 == NULL))
                         skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
 
-                if (skb1 && udp_queue_rcv_skb(stack[i], skb1) <= 0)
+                if (!skb1) {
+                        atomic_inc(&sk->sk_drops);
+                        UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+                                         IS_UDPLITE(sk));
+                        UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
+                                         IS_UDPLITE(sk));
+                }
+
+                if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0)
                         skb1 = NULL;
         }
         if (unlikely(skb1))
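
For reference, here is roughly how flush_stack() reads once the patch is applied, assembled from the hunk above. The second line of the function signature and the closing kfree_skb() fall outside the hunk and are reconstructed, so treat those parts as assumed context rather than quoted source.

static void flush_stack(struct sock **stack, unsigned int count,
                        struct sk_buff *skb, unsigned int final)
{
        unsigned int i;
        struct sk_buff *skb1 = NULL;
        struct sock *sk;

        for (i = 0; i < count; i++) {
                sk = stack[i];

                /* skb1 may still hold a clone the previous receiver did not
                 * take; otherwise clone the datagram, or hand the original
                 * skb to the last receiver. */
                if (likely(skb1 == NULL))
                        skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);

                /* skb_clone() failed under GFP_ATOMIC: this receiver loses
                 * the datagram, so account it per socket (sk_drops) and in
                 * the UDP SNMP counters, as the commit message describes. */
                if (!skb1) {
                        atomic_inc(&sk->sk_drops);
                        UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
                                         IS_UDPLITE(sk));
                        UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
                                         IS_UDPLITE(sk));
                }

                if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0)
                        skb1 = NULL;
        }

        /* Assumed tail (outside the hunk): free a clone nobody queued. */
        if (unlikely(skb1))
                kfree_skb(skb1);
}

These MIB fields are the ones exported to user space through /proc/net/snmp, where they appear as the Udp InErrors and RcvbufErrors columns.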