author     Eric Dumazet <eric.dumazet@gmail.com>    2009-10-14 23:40:11 -0400
committer  David S. Miller <davem@davemloft.net>    2009-10-14 23:40:11 -0400
commit     766e9037cc139ee25ed93ee5ad11e1450c4b99f6 (patch)
tree       062702b8edf203a6e91d1e6853ab24989617d758 /net/core/sock.c
parent     48bccd25df71f4f8177cb800f4b288222eb57761 (diff)
net: sk_drops consolidation
sock_queue_rcv_skb() can update sk_drops itself, removing the need for
callers to take care of it. This is more consistent, since
sock_queue_rcv_skb() also reads sk_drops when queueing a skb.

This adds sk_drops management to many protocols that did not yet
care about it.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
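For illustration, a minimal sketch of the caller-side effect; the
proto_rcv() handler below is hypothetical and not part of this patch:

/*
 * Illustrative only -- a hypothetical caller, not code from this patch.
 * Before this change, a protocol that wanted accurate drop accounting
 * had to bump sk_drops itself whenever sock_queue_rcv_skb() failed:
 *
 *	if (sock_queue_rcv_skb(sk, skb) < 0) {
 *		atomic_inc(&sk->sk_drops);
 *		kfree_skb(skb);
 *		return NET_RX_DROP;
 *	}
 *
 * After it, sock_queue_rcv_skb() updates sk_drops internally on its
 * -ENOMEM and -ENOBUFS paths, so the caller only handles cleanup:
 */
static int proto_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	return NET_RX_SUCCESS;
}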
Diffstat (limited to 'net/core/sock.c')
-rw-r--r--  net/core/sock.c | 15 +++++++--------
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index 43ca2c995393..38713aa3faf2 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -274,7 +274,7 @@ static void sock_disable_timestamp(struct sock *sk, int flag)
 
 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
-	int err = 0;
+	int err;
 	int skb_len;
 	unsigned long flags;
 	struct sk_buff_head *list = &sk->sk_receive_queue;
@@ -284,17 +284,17 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	 */
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
 	    (unsigned)sk->sk_rcvbuf) {
-		err = -ENOMEM;
-		goto out;
+		atomic_inc(&sk->sk_drops);
+		return -ENOMEM;
 	}
 
 	err = sk_filter(sk, skb);
 	if (err)
-		goto out;
+		return err;
 
 	if (!sk_rmem_schedule(sk, skb->truesize)) {
-		err = -ENOBUFS;
-		goto out;
+		atomic_inc(&sk->sk_drops);
+		return -ENOBUFS;
 	}
 
 	skb->dev = NULL;
@@ -314,8 +314,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
 	if (!sock_flag(sk, SOCK_DEAD))
 		sk->sk_data_ready(sk, skb_len);
-out:
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL(sock_queue_rcv_skb);
 