aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4
diff options
context:
space:
mode:
authorWillem de Bruijn <willemb@google.com>2014-08-31 21:30:27 -0400
committerDavid S. Miller <davem@davemloft.net>2014-09-02 00:49:08 -0400
commit364a9e93243d1785f310c0964af0e24bf1adac03 (patch)
treed94daf2c5c0cf6492708d28c56160f96d9917201 /net/ipv4
parent8fe2f761cae9da9f9031162f104164a812ce78ab (diff)
sock: deduplicate errqueue dequeue
sk->sk_error_queue is dequeued in four locations. All share the exact same logic. Deduplicate. Also collapse the two critical sections for dequeue (at the top of the recv handler) and signal (at the bottom). This moves signal generation for the next packet forward, which should be harmless. It also changes the behavior if the recv handler exits early with an error. Previously, a signal for follow-up packets on the errqueue would then not be scheduled. The new behavior, to always signal, is arguably a bug fix. For rxrpc, the change causes the same function to be called repeatedly for each queued packet (because the recv handler == sk_error_report). It is likely that all packets will fail for the same reason (e.g., memory exhaustion). This code runs without sk_lock held, so it is not safe to trust that sk->sk_err is immutable inbetween releasing q->lock and the subsequent test. Introduce int err just to avoid this potential race. Signed-off-by: Willem de Bruijn <willemb@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--net/ipv4/ip_sockglue.c15
1 file changed, 2 insertions, 13 deletions
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 5cb830c78990..455e75bcb167 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -405,7 +405,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
405int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) 405int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
406{ 406{
407 struct sock_exterr_skb *serr; 407 struct sock_exterr_skb *serr;
408 struct sk_buff *skb, *skb2; 408 struct sk_buff *skb;
409 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name); 409 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
410 struct { 410 struct {
411 struct sock_extended_err ee; 411 struct sock_extended_err ee;
@@ -415,7 +415,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
415 int copied; 415 int copied;
416 416
417 err = -EAGAIN; 417 err = -EAGAIN;
418 skb = skb_dequeue(&sk->sk_error_queue); 418 skb = sock_dequeue_err_skb(sk);
419 if (skb == NULL) 419 if (skb == NULL)
420 goto out; 420 goto out;
421 421
@@ -462,17 +462,6 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
462 msg->msg_flags |= MSG_ERRQUEUE; 462 msg->msg_flags |= MSG_ERRQUEUE;
463 err = copied; 463 err = copied;
464 464
465 /* Reset and regenerate socket error */
466 spin_lock_bh(&sk->sk_error_queue.lock);
467 sk->sk_err = 0;
468 skb2 = skb_peek(&sk->sk_error_queue);
469 if (skb2 != NULL) {
470 sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
471 spin_unlock_bh(&sk->sk_error_queue.lock);
472 sk->sk_error_report(sk);
473 } else
474 spin_unlock_bh(&sk->sk_error_queue.lock);
475
476out_free_skb: 465out_free_skb:
477 kfree_skb(skb); 466 kfree_skb(skb);
478out: 467out: