about | summary | refs | log | tree | commit | diff | stats
path: root/net/ipv4/tcp_ipv4.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--  net/ipv4/tcp_ipv4.c  42
1 files changed, 24 insertions, 18 deletions
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1ed230716d51..145d3bf8df86 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -369,11 +369,10 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 	 * We do take care of PMTU discovery (RFC1191) special case :
 	 * we can receive locally generated ICMP messages while socket is held.
 	 */
-	if (sock_owned_by_user(sk) &&
-	    type != ICMP_DEST_UNREACH &&
-	    code != ICMP_FRAG_NEEDED)
-		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
-
+	if (sock_owned_by_user(sk)) {
+		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
+			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+	}
 	if (sk->sk_state == TCP_CLOSE)
 		goto out;
 
@@ -497,6 +496,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		 * errors returned from accept().
 		 */
 		inet_csk_reqsk_queue_drop(sk, req, prev);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 		goto out;
 
 	case TCP_SYN_SENT:
@@ -657,7 +657,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 		 * no RST generated if md5 hash doesn't match.
 		 */
 		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
-					     &tcp_hashinfo, ip_hdr(skb)->daddr,
+					     &tcp_hashinfo, ip_hdr(skb)->saddr,
+					     th->source, ip_hdr(skb)->daddr,
 					     ntohs(th->source), inet_iif(skb));
 		/* don't send rst if it can't find key */
 		if (!sk1)
@@ -725,7 +726,7 @@ release_sk1:
  */
 
 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
-			    u32 win, u32 ts, int oif,
+			    u32 win, u32 tsval, u32 tsecr, int oif,
 			    struct tcp_md5sig_key *key,
 			    int reply_flags, u8 tos)
 {
@@ -746,12 +747,12 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 
 	arg.iov[0].iov_base = (unsigned char *)&rep;
 	arg.iov[0].iov_len  = sizeof(rep.th);
-	if (ts) {
+	if (tsecr) {
 		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 				   (TCPOPT_TIMESTAMP << 8) |
 				   TCPOLEN_TIMESTAMP);
-		rep.opt[1] = htonl(tcp_time_stamp);
-		rep.opt[2] = htonl(ts);
+		rep.opt[1] = htonl(tsval);
+		rep.opt[2] = htonl(tsecr);
 		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
 	}
 
@@ -766,7 +767,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 
 #ifdef CONFIG_TCP_MD5SIG
 	if (key) {
-		int offset = (ts) ? 3 : 0;
+		int offset = (tsecr) ? 3 : 0;
 
 		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
 					  (TCPOPT_NOP << 16) |
@@ -801,6 +802,7 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 
 	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
+			tcp_time_stamp + tcptw->tw_ts_offset,
 			tcptw->tw_ts_recent,
 			tw->tw_bound_dev_if,
 			tcp_twsk_md5_key(tcptw),
@@ -820,6 +822,7 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
 			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
+			tcp_time_stamp,
 			req->ts_recent,
 			0,
 			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
@@ -1501,8 +1504,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	 * clogging syn queue with openreqs with exponentially increasing
 	 * timeout.
 	 */
-	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 		goto drop;
+	}
 
 	req = inet_reqsk_alloc(&tcp_request_sock_ops);
 	if (!req)
@@ -1568,7 +1573,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 		goto drop_and_free;
 
 	if (!want_cookie || tmp_opt.tstamp_ok)
-		TCP_ECN_create_request(req, skb);
+		TCP_ECN_create_request(req, skb, sock_net(sk));
 
 	if (want_cookie) {
 		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
@@ -1667,6 +1672,7 @@ drop_and_release:
 drop_and_free:
 	reqsk_free(req);
 drop:
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	return 0;
 }
 EXPORT_SYMBOL(tcp_v4_conn_request);
@@ -1767,10 +1773,8 @@ exit:
 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	return NULL;
 put_and_exit:
-	tcp_clear_xmit_timers(newsk);
-	tcp_cleanup_congestion_control(newsk);
-	bh_unlock_sock(newsk);
-	sock_put(newsk);
+	inet_csk_prepare_forced_close(newsk);
+	tcp_done(newsk);
 	goto exit;
 }
 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
@@ -2076,6 +2080,7 @@ do_time_wait:
 	case TCP_TW_SYN: {
 		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
 							&tcp_hashinfo,
+							iph->saddr, th->source,
 							iph->daddr, th->dest,
 							inet_iif(skb));
 		if (sk2) {
@@ -2611,7 +2616,7 @@ EXPORT_SYMBOL(tcp_proc_register);
 
 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
 {
-	proc_net_remove(net, afinfo->name);
+	remove_proc_entry(afinfo->name, net->proc_net);
 }
 EXPORT_SYMBOL(tcp_proc_unregister);
 
@@ -2890,6 +2895,7 @@ EXPORT_SYMBOL(tcp_prot);
 
 static int __net_init tcp_sk_init(struct net *net)
 {
+	net->ipv4.sysctl_tcp_ecn = 2;
 	return 0;
 }
 