author     Eric Dumazet <eric.dumazet@gmail.com>    2009-10-14 23:40:11 -0400
committer  David S. Miller <davem@davemloft.net>    2009-10-14 23:40:11 -0400
commit     766e9037cc139ee25ed93ee5ad11e1450c4b99f6 (patch)
tree       062702b8edf203a6e91d1e6853ab24989617d758 /net/ipv4
parent     48bccd25df71f4f8177cb800f4b288222eb57761 (diff)
net: sk_drops consolidation
sock_queue_rcv_skb() can update sk_drops itself, removing the need for
callers to take care of it. This is more consistent, since
sock_queue_rcv_skb() also reads sk_drops when queueing a skb.
This adds sk_drops management to many protocols that did not care
about it yet.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
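
[Editor's note: the counterpart change that does the actual counting lives in
sock_queue_rcv_skb() in net/core/sock.c and is outside the net/ipv4-limited
diffstat shown below. As a rough illustration of the pattern only, here is a
hypothetical, self-contained userspace model (model_sock, model_queue_rcv and
the byte budget are invented for this sketch and are not kernel code): the
queueing helper charges the drop counter itself, so a caller written in the
style of the patched raw_rcv_skb() only frees the buffer.]

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for struct sk_buff and struct sock. */
struct model_skb {
	size_t truesize;
};

struct model_sock {
	atomic_size_t rmem_alloc;	/* bytes currently queued          */
	size_t rcvbuf;			/* receive buffer budget           */
	atomic_long drops;		/* plays the role of sk->sk_drops  */
};

/*
 * Queueing helper in the spirit of the patched sock_queue_rcv_skb():
 * every failure path charges the drop counter here, so callers no
 * longer have to.
 */
static int model_queue_rcv(struct model_sock *sk, struct model_skb *skb)
{
	if (atomic_load(&sk->rmem_alloc) + skb->truesize >= sk->rcvbuf) {
		atomic_fetch_add(&sk->drops, 1);	/* accounted centrally */
		return -1;
	}
	atomic_fetch_add(&sk->rmem_alloc, skb->truesize);
	return 0;
}

/* Caller in the style of the patched raw_rcv_skb(): no drop bookkeeping. */
static int model_rcv(struct model_sock *sk, struct model_skb *skb)
{
	if (model_queue_rcv(sk, skb) < 0) {
		free(skb);		/* stands in for kfree_skb() */
		return -1;		/* stands in for NET_RX_DROP */
	}
	return 0;
}

int main(void)
{
	struct model_sock sk = { .rcvbuf = 64 };
	int i;

	/* Queue four 32-byte buffers into a 64-byte budget: the first fits,
	 * the rest are dropped and counted by the helper alone.
	 * (Successfully queued buffers are leaked; irrelevant to the sketch.) */
	for (i = 0; i < 4; i++) {
		struct model_skb *skb = malloc(sizeof(*skb));

		if (!skb)
			break;
		skb->truesize = 32;
		model_rcv(&sk, skb);
	}
	printf("drops = %ld\n", atomic_load(&sk.drops));
	return 0;
}

[Centralizing the counter in the helper is what lets the raw.c and udp.c hunks
below simply delete their atomic_inc(&sk->sk_drops) calls.]
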
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/raw.c |  1 -
-rw-r--r--  net/ipv4/udp.c | 19 ++++++++-----------
2 files changed, 8 insertions(+), 12 deletions(-)
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index f18172b07611..39e2a6b8752c 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -292,7 +292,6 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
 	/* Charge it to the socket. */
 
 	if (sock_queue_rcv_skb(sk, skb) < 0) {
-		atomic_inc(&sk->sk_drops);
 		kfree_skb(skb);
 		return NET_RX_DROP;
 	}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ee61b3fc4cae..45a8a7e374d8 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1063,25 +1063,22 @@ EXPORT_SYMBOL(udp_lib_unhash);
 
 static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
-	int is_udplite = IS_UDPLITE(sk);
-	int rc;
+	int rc = sock_queue_rcv_skb(sk, skb);
+
+	if (rc < 0) {
+		int is_udplite = IS_UDPLITE(sk);
 
-	if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
 		/* Note that an ENOMEM error is charged twice */
-		if (rc == -ENOMEM) {
+		if (rc == -ENOMEM)
 			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
 					 is_udplite);
-			atomic_inc(&sk->sk_drops);
-		}
-		goto drop;
+		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+		kfree_skb(skb);
+		return -1;
 	}
 
 	return 0;
 
-drop:
-	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-	kfree_skb(skb);
-	return -1;
 }
 
 /* returns: