Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--  net/ipv4/tcp_ipv4.c  39
1 file changed, 27 insertions, 12 deletions
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 65b8ebfd078a..f4df5f931f36 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -370,6 +370,11 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 	if (sk->sk_state == TCP_CLOSE)
 		goto out;
 
+	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
+		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+		goto out;
+	}
+
 	icsk = inet_csk(sk);
 	tp = tcp_sk(sk);
 	seq = ntohl(th->seq);
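
The hunk above adds the per-socket minimum-TTL filter to the ICMP error handler: segments quoted with a TTL below inet_sk(sk)->min_ttl are dropped and counted under LINUX_MIB_TCPMINTTLDROP. The minimum is configured from userspace with the IP_MINTTL socket option. Below is a minimal, hypothetical userspace sketch of enabling it on a listener; the port number is illustrative, and the fallback define is only needed where libc headers predate the option.

/* Hypothetical sketch: configure the minimum accepted TTL on a listening
 * socket so that the kernel checks added in this patch (here and in
 * tcp_v4_rcv()) drop segments whose TTL is below the minimum. */
#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IP_MINTTL
#define IP_MINTTL 21		/* value from linux/in.h, fallback if libc lacks it */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int minttl = 255;	/* GTSM-style: peers must send with TTL 255 */
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(12345),	/* illustrative port */
		.sin_addr = { .s_addr = htonl(INADDR_ANY) },
	};

	if (fd < 0)
		return 1;
	if (setsockopt(fd, IPPROTO_IP, IP_MINTTL, &minttl, sizeof(minttl)) < 0)
		perror("setsockopt(IP_MINTTL)");
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 16) < 0)
		perror("bind/listen");

	/* accept() loop elided */
	close(fd);
	return 0;
}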
@@ -742,9 +747,9 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  * This still operates on a request_sock only, not on a big
  * socket.
  */
-static int __tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
+static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 				struct request_sock *req,
 				struct request_values *rvp)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	int err = -1;
@@ -775,10 +780,11 @@ static int __tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 	return err;
 }
 
-static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
+static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
 			      struct request_values *rvp)
 {
-	return __tcp_v4_send_synack(sk, NULL, req, rvp);
+	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
+	return tcp_v4_send_synack(sk, NULL, req, rvp);
 }
 
 /*
@@ -1192,10 +1198,11 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
 	.family = PF_INET,
 	.obj_size = sizeof(struct tcp_request_sock),
-	.rtx_syn_ack = tcp_v4_send_synack,
+	.rtx_syn_ack = tcp_v4_rtx_synack,
 	.send_ack = tcp_v4_reqsk_send_ack,
 	.destructor = tcp_v4_reqsk_destructor,
 	.send_reset = tcp_v4_send_reset,
+	.syn_ack_timeout = tcp_syn_ack_timeout,
 };
 
 #ifdef CONFIG_TCP_MD5SIG
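
For context, the renamed .rtx_syn_ack handler and the new .syn_ack_timeout hook are both driven from the listener's SYN-ACK retransmit timer. The fragment below is only an illustrative sketch of that calling pattern in kernel context, not the actual caller in the connection-request code; the helper name and the max_retries parameter are made up.

/* Illustrative sketch only (hypothetical helper, not the real kernel
 * caller): how the two request_sock_ops callbacks wired up above are
 * meant to be used when a pending connection's SYN-ACK timer fires. */
#include <net/request_sock.h>
#include <net/sock.h>

static void example_synack_timer_fire(struct sock *sk,
				      struct request_sock *req,
				      int max_retries)
{
	if (req->retrans >= max_retries) {
		/* Give the protocol a chance to account the timed-out
		 * request before it is dropped. */
		if (req->rsk_ops->syn_ack_timeout)
			req->rsk_ops->syn_ack_timeout(sk, req);
		return;
	}

	/* Retransmit; for TCP/IPv4 this now resolves to tcp_v4_rtx_synack(),
	 * which bumps TCP_MIB_RETRANSSEGS before sending the SYN-ACK. */
	if (req->rsk_ops->rtx_syn_ack(sk, req, NULL) == 0)
		req->retrans++;
}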
@@ -1373,8 +1380,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	}
 	tcp_rsk(req)->snt_isn = isn;
 
-	if (__tcp_v4_send_synack(sk, dst, req,
+	if (tcp_v4_send_synack(sk, dst, req,
 				 (struct request_values *)&tmp_ext) ||
 	    want_cookie)
 		goto drop_and_free;
 
@@ -1653,6 +1660,11 @@ process:
 	if (sk->sk_state == TCP_TIME_WAIT)
 		goto do_time_wait;
 
+	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
+		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+		goto discard_and_relse;
+	}
+
 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_and_relse;
 	nf_reset(skb);
@@ -1677,8 +1689,11 @@ process:
 			if (!tcp_prequeue(sk, skb))
 				ret = tcp_v4_do_rcv(sk, skb);
 		}
-	} else
-		sk_add_backlog(sk, skb);
+	} else if (unlikely(sk_add_backlog(sk, skb))) {
+		bh_unlock_sock(sk);
+		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
+		goto discard_and_relse;
+	}
 	bh_unlock_sock(sk);
 
 	sock_put(sk);
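
With this change sk_add_backlog() can refuse the packet once the socket's backlog is over its limit; the segment is then dropped and accounted under LINUX_MIB_TCPBACKLOGDROP, which is exported as the TCPBacklogDrop counter in /proc/net/netstat. A small, hypothetical userspace sketch for reading that counter, assuming the usual name-line/value-line pairing of that file:

/* Hypothetical sketch: print the TCPBacklogDrop counter incremented by the
 * hunk above. Assumes /proc/net/netstat pairs a "TcpExt:" line of counter
 * names with a "TcpExt:" line of values in the same order. */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>

int main(void)
{
	char names[4096], values[4096];
	FILE *f = fopen("/proc/net/netstat", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}

	while (fgets(names, sizeof(names), f) &&
	       fgets(values, sizeof(values), f)) {
		if (strncmp(names, "TcpExt:", 7))
			continue;

		/* Walk the name line and the value line in lock step. */
		char *np, *vp;
		char *n = strtok_r(names, " \n", &np);
		char *v = strtok_r(values, " \n", &vp);

		while (n && v) {
			if (!strcmp(n, "TCPBacklogDrop"))
				printf("TCPBacklogDrop: %s\n", v);
			n = strtok_r(NULL, " \n", &np);
			v = strtok_r(NULL, " \n", &vp);
		}
		break;
	}

	fclose(f);
	return 0;
}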
@@ -2425,12 +2440,12 @@ static struct tcp_seq_afinfo tcp4_seq_afinfo = {
 	},
 };
 
-static int tcp4_proc_init_net(struct net *net)
+static int __net_init tcp4_proc_init_net(struct net *net)
 {
 	return tcp_proc_register(net, &tcp4_seq_afinfo);
 }
 
-static void tcp4_proc_exit_net(struct net *net)
+static void __net_exit tcp4_proc_exit_net(struct net *net)
 {
 	tcp_proc_unregister(net, &tcp4_seq_afinfo);
 }
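
The __net_init/__net_exit annotations on the per-namespace proc init and exit handlers let those functions be treated as ordinary __init/__exit code, and thus discarded after boot, on kernels built without CONFIG_NET_NS. Below is a minimal, hypothetical module sketch using the same annotations with register_pernet_subsys(); all example_* names are made up.

/* Hypothetical sketch: a per-netns init/exit pair marked __net_init and
 * __net_exit, registered through the pernet_operations interface, in the
 * same style as the tcp4_proc handlers annotated above. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <net/net_namespace.h>

static int __net_init example_net_init(struct net *net)
{
	pr_info("example: initialising for netns %p\n", net);
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	pr_info("example: tearing down netns %p\n", net);
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};

static int __init example_init(void)
{
	return register_pernet_subsys(&example_net_ops);
}

static void __exit example_exit(void)
{
	unregister_pernet_subsys(&example_net_ops);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");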
