Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--	net/ipv4/tcp_ipv4.c	| 31
1 file changed, 19 insertions(+), 12 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3f872a6bc274..4c8d58dfac9b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -311,7 +311,7 @@ static void do_redirect(struct sk_buff *skb, struct sock *sk)
 
 
 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
-void tcp_req_err(struct sock *sk, u32 seq)
+void tcp_req_err(struct sock *sk, u32 seq, bool abort)
 {
 	struct request_sock *req = inet_reqsk(sk);
 	struct net *net = sock_net(sk);
@@ -323,7 +323,7 @@ void tcp_req_err(struct sock *sk, u32 seq)
 
 	if (seq != tcp_rsk(req)->snt_isn) {
 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
-	} else {
+	} else if (abort) {
 		/*
 		 * Still in SYN_RECV, just remove it silently.
 		 * There is no good way to pass the error to the newly
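Note on the two hunks above: tcp_req_err() now takes an abort flag, so an ICMP report with a mismatched sequence number is still only counted as out-of-window, and a matching report tears the request down only when the caller has classified it as fatal; a non-fatal report leaves the SYN_RECV request in place. A minimal userspace sketch of that branch structure, assuming stand-in helpers (count_out_of_window() and drop_request() are illustrative names, not kernel APIs):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel statistics/drop helpers. */
static void count_out_of_window(void) { puts("out-of-window ICMP: ignored"); }
static void drop_request(void)        { puts("fatal ICMP: request dropped silently"); }

/* Mirrors the branch structure of tcp_req_err() after this change. */
static void req_err_sketch(uint32_t seq, uint32_t snt_isn, bool abort)
{
	if (seq != snt_isn)
		count_out_of_window();
	else if (abort)
		drop_request();
	/* else: non-fatal report, the request socket is left alone */
}

int main(void)
{
	req_err_sketch(1, 2, true);   /* wrong seq: counted, request kept */
	req_err_sketch(5, 5, false);  /* matching seq, non-fatal: request kept */
	req_err_sketch(5, 5, true);   /* matching seq, fatal: request dropped */
	return 0;
}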
@@ -383,7 +383,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 	}
 	seq = ntohl(th->seq);
 	if (sk->sk_state == TCP_NEW_SYN_RECV)
-		return tcp_req_err(sk, seq);
+		return tcp_req_err(sk, seq,
+				  type == ICMP_PARAMETERPROB ||
+				  type == ICMP_TIME_EXCEEDED ||
+				  (type == ICMP_DEST_UNREACH &&
+				   (code == ICMP_NET_UNREACH ||
+				    code == ICMP_HOST_UNREACH)));
 
 	bh_lock_sock(sk);
 	/* If too many ICMPs get dropped on busy
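The boolean passed to tcp_req_err() above encodes which ICMP reports count as fatal for a request socket: parameter problem, time exceeded, and destination unreachable with the network- or host-unreachable codes. The same predicate written as a self-contained function for reference (icmp_fatal_for_req is a hypothetical name; the constants come from the uapi <linux/icmp.h> header):

#include <linux/icmp.h>
#include <stdbool.h>
#include <stdio.h>

/* True for the ICMP reports the call site above treats as fatal for a
 * TCP_NEW_SYN_RECV request socket; everything else leaves the request alone. */
static bool icmp_fatal_for_req(int type, int code)
{
	return type == ICMP_PARAMETERPROB ||
	       type == ICMP_TIME_EXCEEDED ||
	       (type == ICMP_DEST_UNREACH &&
		(code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH));
}

int main(void)
{
	/* Host unreachable aborts the request; port unreachable does not. */
	printf("%d %d\n",
	       icmp_fatal_for_req(ICMP_DEST_UNREACH, ICMP_HOST_UNREACH),
	       icmp_fatal_for_req(ICMP_DEST_UNREACH, ICMP_PORT_UNREACH));
	return 0;
}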
@@ -1592,28 +1597,30 @@ process:
 
 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
 		struct request_sock *req = inet_reqsk(sk);
-		struct sock *nsk = NULL;
+		struct sock *nsk;
 
 		sk = req->rsk_listener;
-		if (tcp_v4_inbound_md5_hash(sk, skb))
-			goto discard_and_relse;
-		if (likely(sk->sk_state == TCP_LISTEN)) {
-			nsk = tcp_check_req(sk, skb, req, false);
-		} else {
+		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
+			reqsk_put(req);
+			goto discard_it;
+		}
+		if (unlikely(sk->sk_state != TCP_LISTEN)) {
 			inet_csk_reqsk_queue_drop_and_put(sk, req);
 			goto lookup;
 		}
+		sock_hold(sk);
+		nsk = tcp_check_req(sk, skb, req, false);
 		if (!nsk) {
 			reqsk_put(req);
-			goto discard_it;
+			goto discard_and_relse;
 		}
 		if (nsk == sk) {
-			sock_hold(sk);
 			reqsk_put(req);
 		} else if (tcp_child_process(sk, nsk, skb)) {
 			tcp_v4_send_reset(nsk, skb);
-			goto discard_it;
+			goto discard_and_relse;
 		} else {
+			sock_put(sk);
 			return 0;
 		}
 	}
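Note on the last hunk: the listener refcount discipline changes. An MD5 mismatch now drops only the request reference, since nothing else has been taken yet; a reference on the listener is taken once with sock_hold() before tcp_check_req(); and every later path releases it again, either through discard_and_relse or the explicit sock_put() once a child socket has consumed the packet. A toy sketch of that hold/release balance, assuming invented names (struct ref, hold(), put() and process_sketch() are illustrative only, not kernel code):

#include <assert.h>

/* Toy reference counter standing in for the listener socket refcount. */
struct ref { int count; };
static void hold(struct ref *r) { r->count++; }	/* plays the role of sock_hold() */
static void put(struct ref *r)  { r->count--; }	/* plays the role of sock_put()  */

enum outcome { NO_CHILD, CHILD_RESET, CHILD_OK };

/* Mirrors the new control flow: one hold before the request check, and
 * exactly one release on every outcome afterwards. */
static void process_sketch(struct ref *listener, enum outcome o)
{
	hold(listener);			/* sock_hold(sk) before tcp_check_req() */
	switch (o) {
	case NO_CHILD:			/* !nsk: goto discard_and_relse */
	case CHILD_RESET:		/* tcp_child_process() failed: goto discard_and_relse */
		put(listener);
		break;
	case CHILD_OK:			/* packet consumed by the child: sock_put(sk) */
		put(listener);
		break;
	}
}

int main(void)
{
	struct ref listener = { 1 };

	process_sketch(&listener, NO_CHILD);
	process_sketch(&listener, CHILD_RESET);
	process_sketch(&listener, CHILD_OK);
	assert(listener.count == 1);	/* reference count balanced on every path */
	return 0;
}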