diff options
author:    Eric Dumazet <eric.dumazet@gmail.com>  2009-11-08 05:20:19 -0500
committer: David S. Miller <davem@davemloft.net>  2009-11-08 23:53:10 -0500
commit:    f6b8f32ca71406de718391369490f6b1e81fe0bb (patch)
tree:      6a9d65cc246c74a78d7274d1911baf38ffc8eaf0
parent:    a1ab77f97ed03f5dae66ae4c64375beffab83772 (diff)
udp: multicast RX should increment SNMP/sk_drops counter in allocation failures
When skb_clone() fails, we should increment sk_drops and SNMP counters.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 net/ipv4/udp.c | 12 ++++++++++--
 net/ipv6/udp.c |  8 ++++++--
 2 files changed, 18 insertions(+), 2 deletions(-)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 9d9072c6cce7..d73e9170536b 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -1335,12 +1335,22 @@ static void flush_stack(struct sock **stack, unsigned int count, | |||
1335 | { | 1335 | { |
1336 | unsigned int i; | 1336 | unsigned int i; |
1337 | struct sk_buff *skb1 = NULL; | 1337 | struct sk_buff *skb1 = NULL; |
1338 | struct sock *sk; | ||
1338 | 1339 | ||
1339 | for (i = 0; i < count; i++) { | 1340 | for (i = 0; i < count; i++) { |
1341 | sk = stack[i]; | ||
1340 | if (likely(skb1 == NULL)) | 1342 | if (likely(skb1 == NULL)) |
1341 | skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); | 1343 | skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); |
1342 | 1344 | ||
1343 | if (skb1 && udp_queue_rcv_skb(stack[i], skb1) <= 0) | 1345 | if (!skb1) { |
1346 | atomic_inc(&sk->sk_drops); | ||
1347 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, | ||
1348 | IS_UDPLITE(sk)); | ||
1349 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, | ||
1350 | IS_UDPLITE(sk)); | ||
1351 | } | ||
1352 | |||
1353 | if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0) | ||
1344 | skb1 = NULL; | 1354 | skb1 = NULL; |
1345 | } | 1355 | } |
1346 | if (unlikely(skb1)) | 1356 | if (unlikely(skb1)) |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 948e823d70c2..2915e1dad726 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -579,14 +579,20 @@ static void flush_stack(struct sock **stack, unsigned int count, | |||
579 | for (i = 0; i < count; i++) { | 579 | for (i = 0; i < count; i++) { |
580 | skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); | 580 | skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); |
581 | 581 | ||
582 | sk = stack[i]; | ||
582 | if (skb1) { | 583 | if (skb1) { |
583 | sk = stack[i]; | ||
584 | bh_lock_sock(sk); | 584 | bh_lock_sock(sk); |
585 | if (!sock_owned_by_user(sk)) | 585 | if (!sock_owned_by_user(sk)) |
586 | udpv6_queue_rcv_skb(sk, skb1); | 586 | udpv6_queue_rcv_skb(sk, skb1); |
587 | else | 587 | else |
588 | sk_add_backlog(sk, skb1); | 588 | sk_add_backlog(sk, skb1); |
589 | bh_unlock_sock(sk); | 589 | bh_unlock_sock(sk); |
590 | } else { | ||
591 | atomic_inc(&sk->sk_drops); | ||
592 | UDP6_INC_STATS_BH(sock_net(sk), | ||
593 | UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk)); | ||
594 | UDP6_INC_STATS_BH(sock_net(sk), | ||
595 | UDP_MIB_INERRORS, IS_UDPLITE(sk)); | ||
590 | } | 596 | } |
591 | } | 597 | } |
592 | } | 598 | } |