author     Willem de Bruijn <willemb@google.com>    2014-08-31 21:30:27 -0400
committer  David S. Miller <davem@davemloft.net>    2014-09-02 00:49:08 -0400
commit     364a9e93243d1785f310c0964af0e24bf1adac03 (patch)
tree       d94daf2c5c0cf6492708d28c56160f96d9917201 /net/core
parent     8fe2f761cae9da9f9031162f104164a812ce78ab (diff)
sock: deduplicate errqueue dequeue
sk->sk_error_queue is dequeued in four locations. All share the exact same logic. Deduplicate.

Also collapse the two critical sections for dequeue (at the top of the recv handler) and signal (at the bottom).

This moves signal generation for the next packet forward, which should be harmless. It also changes the behavior if the recv handler exits early with an error: previously, a signal for follow-up packets on the errqueue would then not be scheduled. The new behavior, to always signal, is arguably a bug fix.

For rxrpc, the change causes the same function to be called repeatedly for each queued packet (because the recv handler == sk_error_report). It is likely that all packets will fail for the same reason (e.g., memory exhaustion).

This code runs without sk_lock held, so it is not safe to assume that sk->sk_err is immutable in between releasing q->lock and the subsequent test. Introduce a local int err to avoid this potential race.

Signed-off-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
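For context, the recv handlers this patch deduplicates are the kernel side of the path userspace exercises with recvmsg(..., MSG_ERRQUEUE). A minimal, hedged userspace sketch of draining an IPv4 socket's error queue, assuming IP_RECVERR has been enabled with setsockopt(); the helper name drain_errqueue() and the buffer sizes are illustrative, not part of this patch:

#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/errqueue.h>

/* Illustrative helper: pop error-queue entries until the queue is empty. */
static void drain_errqueue(int fd)
{
	char data[256], ctrl[512];

	for (;;) {
		struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
		struct msghdr msg = { 0 };
		struct cmsghdr *cm;

		msg.msg_iov = &iov;
		msg.msg_iovlen = 1;
		msg.msg_control = ctrl;
		msg.msg_controllen = sizeof(ctrl);

		/* MSG_ERRQUEUE reads never block; an empty queue returns
		 * -1 with errno set to EAGAIN. Each successful call pops
		 * one skb, and the kernel re-arms sk->sk_err for the next. */
		if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0) {
			if (errno != EAGAIN)
				perror("recvmsg");
			break;
		}

		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
			if (cm->cmsg_level == SOL_IP &&
			    cm->cmsg_type == IP_RECVERR) {
				struct sock_extended_err *ee =
					(void *)CMSG_DATA(cm);
				printf("errqueue: ee_errno=%u ee_origin=%u\n",
				       ee->ee_errno, ee->ee_origin);
			}
		}
	}
}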
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/skbuff.c  20
-rw-r--r--  net/core/sock.c    14
2 files changed, 22 insertions, 12 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 163b673f9e62..53ce536e3d6e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3491,6 +3491,26 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(sock_queue_err_skb);
 
+struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
+{
+	struct sk_buff_head *q = &sk->sk_error_queue;
+	struct sk_buff *skb, *skb_next;
+	int err = 0;
+
+	spin_lock_bh(&q->lock);
+	skb = __skb_dequeue(q);
+	if (skb && (skb_next = skb_peek(q)))
+		err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
+	spin_unlock_bh(&q->lock);
+
+	sk->sk_err = err;
+	if (err)
+		sk->sk_error_report(sk);
+
+	return skb;
+}
+EXPORT_SYMBOL(sock_dequeue_err_skb);
+
 void __skb_tstamp_tx(struct sk_buff *orig_skb,
 		     struct skb_shared_hwtstamps *hwtstamps,
 		     struct sock *sk, int tstype)
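A note on the local 'err' in sock_dequeue_err_skb() above: the decision to signal is made on the value snapshotted while q->lock was held, not on a re-read of sk->sk_err. A hedged sketch of the racy variant the commit message warns against (illustrative, not code from this patch):

	/* Racy variant: this path runs without sk_lock, so another
	 * context may rewrite sk->sk_err once q->lock is dropped. */
	spin_unlock_bh(&q->lock);
	if (sk->sk_err)			/* re-reads shared state */
		sk->sk_error_report(sk);

	/* Patched version: test the snapshot taken under q->lock. */
	spin_unlock_bh(&q->lock);
	sk->sk_err = err;
	if (err)			/* local snapshot, stable */
		sk->sk_error_report(sk);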
diff --git a/net/core/sock.c b/net/core/sock.c
index f7f2352200ad..f1a638ee93d9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2488,11 +2488,11 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
 		       int level, int type)
 {
 	struct sock_exterr_skb *serr;
-	struct sk_buff *skb, *skb2;
+	struct sk_buff *skb;
 	int copied, err;
 
 	err = -EAGAIN;
-	skb = skb_dequeue(&sk->sk_error_queue);
+	skb = sock_dequeue_err_skb(sk);
 	if (skb == NULL)
 		goto out;
 
@@ -2513,16 +2513,6 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
 	msg->msg_flags |= MSG_ERRQUEUE;
 	err = copied;
 
-	/* Reset and regenerate socket error */
-	spin_lock_bh(&sk->sk_error_queue.lock);
-	sk->sk_err = 0;
-	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
-		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
-		spin_unlock_bh(&sk->sk_error_queue.lock);
-		sk->sk_error_report(sk);
-	} else
-		spin_unlock_bh(&sk->sk_error_queue.lock);
-
 out_free_skb:
 	kfree_skb(skb);
 out:
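For completeness, a hedged sketch of the producer side that the new helper pairs with: sock_queue_err_skb() (visible in the skbuff.c hunk header above) appends to sk->sk_error_queue and signals the socket. The helper name queue_local_error() and the zero-size skb are illustrative assumptions, not code from this patch:

#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/errqueue.h>
#include <net/sock.h>

/* Hypothetical producer: report a locally detected error on sk. */
static int queue_local_error(struct sock *sk, int errno_val)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;

	skb = alloc_skb(0, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	/* Error details ride in the skb control block via SKB_EXT_ERR(). */
	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = errno_val;
	serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;

	/* sock_queue_err_skb() enqueues and signals the socket;
	 * sock_dequeue_err_skb() above is its consumer-side inverse. */
	return sock_queue_err_skb(sk, skb);
}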