| author | Herbert Xu <herbert@gondor.apana.org.au> | 2007-12-05 04:51:58 -0500 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2008-01-28 17:56:32 -0500 |
| commit | 27ab2568649d5ba6c5a20212079b7c4f6da4ca0d (patch) | |
| tree | 19bb85e73d7deb0adf40386af3117c2f397b653d /net/core | |
| parent | c8fecf2242a0ab7230210665986b8ef915e1ae9e (diff) | |
[UDP]: Avoid repeated counting of checksum errors due to peeking
Currently it is possible for two processes to peek on the same socket
and end up incrementing the error counter twice for the same packet.
This patch fixes it by making skb_kill_datagram return whether it
succeeded in unlinking the packet and only incrementing the counter
if it did.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
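For context, the point of the new return value is that a caller increments its error counter only when it was the process that actually unlinked the bad packet. The sketch below is illustrative kernel-style C, not part of this diff; the surrounding recvmsg() context and the names udp_lib_checksum_complete, UDP_INC_STATS_USER, UDP_MIB_INERRORS, is_udplite and the try_again label are assumptions drawn from the UDP receive path of that era, not code taken from this commit.

```c
/*
 * Illustrative caller sketch (kernel-style C, not part of this diff).
 * Assumed context: a UDP recvmsg() checksum-error path where sk, skb,
 * flags and is_udplite are already in scope and a try_again label
 * restarts the receive.
 */
if (udp_lib_checksum_complete(skb)) {
	/*
	 * skb_kill_datagram() now reports whether we unlinked the packet
	 * (0) or another peeker got there first (-ENOENT), so the error
	 * counter is bumped exactly once per bad packet.
	 */
	if (!skb_kill_datagram(sk, skb, flags))
		UDP_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite);
	goto try_again;	/* the bad packet is gone; retry the receive */
}
```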
Diffstat (limited to 'net/core')
-rw-r--r--   net/core/datagram.c | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 029b93e246b4..fbd6c76436d0 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -217,20 +217,27 @@ void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
  * This function currently only disables BH when acquiring the
  * sk_receive_queue lock. Therefore it must not be used in a
  * context where that lock is acquired in an IRQ context.
+ *
+ * It returns 0 if the packet was removed by us.
  */
 
-void skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
+int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
 {
+	int err = 0;
+
 	if (flags & MSG_PEEK) {
+		err = -ENOENT;
 		spin_lock_bh(&sk->sk_receive_queue.lock);
 		if (skb == skb_peek(&sk->sk_receive_queue)) {
 			__skb_unlink(skb, &sk->sk_receive_queue);
 			atomic_dec(&skb->users);
+			err = 0;
 		}
 		spin_unlock_bh(&sk->sk_receive_queue.lock);
 	}
 
 	kfree_skb(skb);
+	return err;
 }
 
 EXPORT_SYMBOL(skb_kill_datagram);