author     Herbert Xu <herbert@gondor.apana.org.au>        2005-12-14 02:16:37 -0500
committer  David S. Miller <davem@sunset.davemloft.net>    2006-01-03 16:10:41 -0500
commit     3305b80c214c642b89cd5c21af83bc91ec13f8bd (patch)
tree       909ed75c500d0ac422738781f84a819c933703c5 /net/ipv6/raw.c
parent     57cca05af1e20fdc65b55be52c042c234f86c866 (diff)
[IP]: Simplify and consolidate MSG_PEEK error handling
When a packet is obtained from skb_recv_datagram with MSG_PEEK enabled,
it is left on the socket receive queue. This means that when we detect
a checksum error we have to be careful when freeing the packet, because
someone else could have dequeued it in the meantime.
Currently this delicate logic is duplicated three times, across UDPv4,
UDPv6 and RAWv6. This patch moves it into one place and simplifies
the code somewhat.
This is based on a suggestion by Eric Dumazet.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
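
For context, the shared helper that replaces the duplicated block in the diff below is skb_kill_datagram(), added elsewhere in this series in net/core/datagram.c. The following is a minimal sketch of its logic, reconstructed from the MSG_PEEK handling removed from rawv6_recvmsg(); the reference-count handling shown here is an assumption, and the in-tree helper may differ in detail.

/*
 * Sketch of the consolidated helper, reconstructed from the duplicated
 * MSG_PEEK handling removed below -- not a verbatim copy of the kernel code.
 */
void skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	if (flags & MSG_PEEK) {
		/*
		 * With MSG_PEEK the skb was left on the receive queue, and a
		 * concurrent reader may already have dequeued it.  Only drop
		 * the queue's reference if the skb is still at the head.
		 */
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (skb == skb_peek(&sk->sk_receive_queue)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
			atomic_dec(&skb->users);	/* assumed: drop queue's ref */
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}

	/* Release the reference obtained via skb_recv_datagram(). */
	kfree_skb(skb);
}

With this helper in place, the csum_copy_err path in rawv6_recvmsg() reduces to a single skb_kill_datagram() call followed by a plain goto out, as the diff below shows.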
Diffstat (limited to 'net/ipv6/raw.c')
-rw-r--r--    net/ipv6/raw.c    16
1 file changed, 3 insertions(+), 13 deletions(-)
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index a66900cda2af..66f1d12ea578 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -32,6 +32,7 @@
 #include <linux/icmpv6.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv6.h>
+#include <linux/skbuff.h>
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
 #include <asm/bug.h>
@@ -433,25 +434,14 @@ out:
 	return err;
 
 csum_copy_err:
-	/* Clear queue. */
-	if (flags&MSG_PEEK) {
-		int clear = 0;
-		spin_lock_bh(&sk->sk_receive_queue.lock);
-		if (skb == skb_peek(&sk->sk_receive_queue)) {
-			__skb_unlink(skb, &sk->sk_receive_queue);
-			clear = 1;
-		}
-		spin_unlock_bh(&sk->sk_receive_queue.lock);
-		if (clear)
-			kfree_skb(skb);
-	}
+	skb_kill_datagram(sk, skb, flags);
 
 	/* Error for blocking case is chosen to masquerade
 	   as some normal condition.
 	 */
 	err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
 	/* FIXME: increment a raw6 drops counter here */
-	goto out_free;
+	goto out;
 }
 
 static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl,