path: root/net/ipv4/tcp_ipv4.c
author	Eric Dumazet <edumazet@google.com>	2016-02-18 08:39:18 -0500
committer	David S. Miller <davem@davemloft.net>	2016-02-18 11:35:51 -0500
commit	7716682cc58e305e22207d5bb315f26af6b1e243 (patch)
tree	bd3b7c897a192192bf0312cca7f4cd0e5f39da5d	/net/ipv4/tcp_ipv4.c
parent	deed49df7390d5239024199e249190328f1651e7 (diff)
tcp/dccp: fix another race at listener dismantle
Ilya reported the following lockdep splat:

kernel: =========================
kernel: [ BUG: held lock freed! ]
kernel: 4.5.0-rc1-ceph-00026-g5e0a311 #1 Not tainted
kernel: -------------------------
kernel: swapper/5/0 is freeing memory ffff880035c9d200-ffff880035c9dbff, with a lock still held there!
kernel: (&(&queue->rskq_lock)->rlock){+.-...}, at: [<ffffffff816f6a88>] inet_csk_reqsk_queue_add+0x28/0xa0
kernel: 4 locks held by swapper/5/0:
kernel: #0: (rcu_read_lock){......}, at: [<ffffffff8169ef6b>] netif_receive_skb_internal+0x4b/0x1f0
kernel: #1: (rcu_read_lock){......}, at: [<ffffffff816e977f>] ip_local_deliver_finish+0x3f/0x380
kernel: #2: (slock-AF_INET){+.-...}, at: [<ffffffff81685ffb>] sk_clone_lock+0x19b/0x440
kernel: #3: (&(&queue->rskq_lock)->rlock){+.-...}, at: [<ffffffff816f6a88>] inet_csk_reqsk_queue_add+0x28/0xa0

To properly fix this issue, inet_csk_reqsk_queue_add() needs to tell its callers whether the child has been queued into the accept queue.

We also need to make sure the listener is still there before calling sk->sk_data_ready(), by holding a reference on it, since the reference carried by the child can disappear as soon as the child is put on the accept queue.

Reported-by: Ilya Dryomov <idryomov@gmail.com>
Fixes: ebb516af60e1 ("tcp/dccp: fix race at listener dismantle phase")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
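To make the refcounting argument above concrete, here is a minimal userspace C sketch (not kernel code) of the pattern the patch applies in tcp_v4_rcv(): the listener/child types and the listener_hold()/listener_put()/accept_queue_add()/data_ready() helpers are simplified stand-ins for struct sock, struct request_sock, sock_hold()/sock_put(), inet_csk_reqsk_queue_add() and sk->sk_data_ready(), not actual kernel APIs.

/*
 * Sketch only: the caller takes its own reference on the listener before
 * queueing the child, because the child's reference on the listener can
 * be dropped by another thread as soon as the child sits on the accept
 * queue.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct listener {
	atomic_int refcnt;
};

struct child {
	struct listener *parent;	/* like req->rsk_listener */
};

static void listener_hold(struct listener *l)
{
	atomic_fetch_add(&l->refcnt, 1);	/* like sock_hold(sk) */
}

static void listener_put(struct listener *l)
{
	if (atomic_fetch_sub(&l->refcnt, 1) == 1) {	/* like sock_put(sk) */
		printf("listener freed\n");
		free(l);
	}
}

/*
 * Queueing the child hands the child's listener reference over to the
 * accept queue; once this returns, another thread may accept() the child
 * and drop that reference at any moment.
 */
static void accept_queue_add(struct child *c)
{
	(void)c;	/* enqueue elided in this sketch */
}

static void data_ready(struct listener *l)
{
	printf("waking up listener %p\n", (void *)l);
}

static void handle_new_child(struct listener *l, struct child *c)
{
	listener_hold(l);	/* own reference taken before queueing      */
	accept_queue_add(c);	/* child's reference may now vanish         */
	data_ready(l);		/* safe: we still hold our own reference    */
	listener_put(l);	/* mirrors the new discard_and_relse path   */
}

int main(void)
{
	struct listener *l = malloc(sizeof(*l));

	if (!l)
		return 1;
	atomic_init(&l->refcnt, 1);	/* the reference carried by the child */

	struct child c = { .parent = l };

	handle_new_child(l, &c);
	listener_put(l);	/* accept() eventually drops the child's reference */
	return 0;
}

Without the listener_hold()/listener_put() pair around the queueing step, data_ready() could run on freed memory as soon as another thread consumed the child from the accept queue, which is exactly the window the lockdep splat above points at.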
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--	net/ipv4/tcp_ipv4.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index c84477949d3a..487ac67059e2 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1597,30 +1597,30 @@ process:
 
 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
 		struct request_sock *req = inet_reqsk(sk);
-		struct sock *nsk = NULL;
+		struct sock *nsk;
 
 		sk = req->rsk_listener;
 		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
 			reqsk_put(req);
 			goto discard_it;
 		}
-		if (likely(sk->sk_state == TCP_LISTEN)) {
-			nsk = tcp_check_req(sk, skb, req, false);
-		} else {
+		if (unlikely(sk->sk_state != TCP_LISTEN)) {
 			inet_csk_reqsk_queue_drop_and_put(sk, req);
 			goto lookup;
 		}
+		sock_hold(sk);
+		nsk = tcp_check_req(sk, skb, req, false);
 		if (!nsk) {
 			reqsk_put(req);
-			goto discard_it;
+			goto discard_and_relse;
 		}
 		if (nsk == sk) {
-			sock_hold(sk);
 			reqsk_put(req);
 		} else if (tcp_child_process(sk, nsk, skb)) {
 			tcp_v4_send_reset(nsk, skb);
-			goto discard_it;
+			goto discard_and_relse;
 		} else {
+			sock_put(sk);
 			return 0;
 		}
 	}