author     Patrick McHardy <kaber@trash.net>    2011-01-14 08:12:37 -0500
committer  Patrick McHardy <kaber@trash.net>    2011-01-14 08:12:37 -0500
commit     0134e89c7bcc9fde1da962c82a120691e185619f
tree       3e03335cf001019a2687d161e956de4f73379984  /net/ipv4/tcp_ipv4.c
parent     c7066f70d9610df0b9406cc635fc09e86136e714
parent     6faee60a4e82075853a437831768cc9e2e563e4e
Merge branch 'master' of git://1984.lsi.us.es/net-next-2.6
Conflicts:
	net/ipv4/route.c
Signed-off-by: Patrick McHardy <kaber@trash.net>
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
 -rw-r--r--  net/ipv4/tcp_ipv4.c | 90
 1 file changed, 30 insertions(+), 60 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 8f8527d41682..856f68466d49 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -415,6 +415,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		    !icsk->icsk_backoff)
 			break;
 
+		if (sock_owned_by_user(sk))
+			break;
+
 		icsk->icsk_backoff--;
 		inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
 					 icsk->icsk_backoff;
@@ -429,11 +432,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		if (remaining) {
 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 						  remaining, TCP_RTO_MAX);
-		} else if (sock_owned_by_user(sk)) {
-			/* RTO revert clocked out retransmission,
-			 * but socket is locked. Will defer. */
-			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-						  HZ/20, TCP_RTO_MAX);
 		} else {
 			/* RTO revert clocked out retransmission.
 			 * Will retransmit now */
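Taken together, the two tcp_v4_err() hunks move the sock_owned_by_user() test ahead of any state change. Previously the handler decremented icsk_backoff and rewrote icsk_rto even while a process held the socket lock, then papered over the locked case with an HZ/20 deferral timer; now it simply refuses to touch a user-locked socket. Reassembled from the context lines above (the lines between the two hunks, which derive 'remaining', stay elided), the post-merge flow of this branch is roughly:

	if (sock_owned_by_user(sk))
		break;	/* user context owns the socket: leave its state alone */

	icsk->icsk_backoff--;
	inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) << icsk->icsk_backoff;

	/* ... elided lines compute 'remaining' from the reverted RTO ... */

	if (remaining) {
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  remaining, TCP_RTO_MAX);
	} else {
		/* RTO revert clocked out retransmission.
		 * Will retransmit now */
		/* ... retransmission call elided ... */
	}

An ICMP unreachable that arrives while the socket is locked is now ignored for RTO-revert purposes instead of mutating state the lock holder may be recomputing.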
@@ -1212,12 +1210,6 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
 };
 #endif
 
-static struct timewait_sock_ops tcp_timewait_sock_ops = {
-	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
-	.twsk_unique	= tcp_twsk_unique,
-	.twsk_destructor= tcp_twsk_destructor,
-};
-
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_extend_values tmp_ext;
@@ -1349,7 +1341,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 		    tcp_death_row.sysctl_tw_recycle &&
 		    (dst = inet_csk_route_req(sk, req)) != NULL &&
 		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
-		    peer->v4daddr == saddr) {
+		    peer->daddr.a4 == saddr) {
 			inet_peer_refcheck(peer);
 			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
 			    (s32)(peer->tcp_ts - req->ts_recent) >
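The peer->v4daddr to peer->daddr.a4 rename comes from the net-next side making the inetpeer cache address-family agnostic: the bare IPv4 key becomes a tagged union so the same storage can also hold IPv6 peers. As I recall that series (include/net/inetpeer.h is authoritative; treat this as a sketch), the key and the v4 lookup helper look roughly like:

struct inetpeer_addr {
	union {
		__be32	a4;	/* IPv4 destination address */
		__be32	a6[4];	/* IPv6 destination address */
	};
	__u16	family;		/* AF_INET or AF_INET6 */
};

static inline struct inet_peer *inet_getpeer_v4(__be32 v4daddr, int create)
{
	struct inetpeer_addr daddr;

	daddr.a4 = v4daddr;
	daddr.family = AF_INET;
	return inet_getpeer(&daddr, create);
}

This is also why calls later in this diff switch from inet_getpeer() to inet_getpeer_v4().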
@@ -1444,7 +1436,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
 	tcp_mtup_init(newsk);
 	tcp_sync_mss(newsk, dst_mtu(dst));
-	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
+	newtp->advmss = dst_metric_advmss(dst);
 	if (tcp_sk(sk)->rx_opt.user_mss &&
 	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
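dst_metric_advmss() replaces the raw dst_metric(dst, RTAX_ADVMSS) read because routes on the net-next side no longer pre-populate a default advertised MSS into their metrics array; an accessor now falls back to a per-family default when the metric is unset. Paraphrasing include/net/dst.h from that era (verify against the tree):

static inline u32 dst_metric_advmss(const struct dst_entry *dst)
{
	u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);

	if (!advmss)
		advmss = dst->ops->default_advmss(dst);

	return advmss;
}

For IPv4 the default_advmss op derives the value from the route's MTU, so tcp_v4_syn_recv_sock() behaves as before when no explicit advmss metric is configured.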
@@ -1765,64 +1757,40 @@ do_time_wait:
 	goto discard_it;
 }
 
-/* VJ's idea. Save last timestamp seen from this destination
- * and hold it at least for normal timewait interval to use for duplicate
- * segment detection in subsequent connections, before they enter synchronized
- * state.
- */
-
-int tcp_v4_remember_stamp(struct sock *sk)
+struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
 {
+	struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
 	struct inet_sock *inet = inet_sk(sk);
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
-	struct inet_peer *peer = NULL;
-	int release_it = 0;
+	struct inet_peer *peer;
 
 	if (!rt || rt->rt_dst != inet->inet_daddr) {
-		peer = inet_getpeer(inet->inet_daddr, 1);
-		release_it = 1;
+		peer = inet_getpeer_v4(inet->inet_daddr, 1);
+		*release_it = true;
 	} else {
 		if (!rt->peer)
 			rt_bind_peer(rt, 1);
 		peer = rt->peer;
+		*release_it = false;
 	}
 
-	if (peer) {
-		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
-		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
-		     peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
-			peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
-			peer->tcp_ts = tp->rx_opt.ts_recent;
-		}
-		if (release_it)
-			inet_putpeer(peer);
-		return 1;
-	}
-
-	return 0;
+	return peer;
 }
-EXPORT_SYMBOL(tcp_v4_remember_stamp);
+EXPORT_SYMBOL(tcp_v4_get_peer);
 
-int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
+void *tcp_v4_tw_get_peer(struct sock *sk)
 {
-	struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);
-
-	if (peer) {
-		const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
-
-		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
-		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
-		     peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
-			peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
-			peer->tcp_ts = tcptw->tw_ts_recent;
-		}
-		inet_putpeer(peer);
-		return 1;
-	}
+	struct inet_timewait_sock *tw = inet_twsk(sk);
 
-	return 0;
+	return inet_getpeer_v4(tw->tw_daddr, 1);
 }
+EXPORT_SYMBOL(tcp_v4_tw_get_peer);
+
+static struct timewait_sock_ops tcp_timewait_sock_ops = {
+	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
+	.twsk_unique	= tcp_twsk_unique,
+	.twsk_destructor= tcp_twsk_destructor,
+	.twsk_getpeer	= tcp_v4_tw_get_peer,
+};
 
 const struct inet_connection_sock_af_ops ipv4_specific = {
 	.queue_xmit	   = ip_queue_xmit,
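Two things worth noting about this large hunk. First, hunk three above did not really delete tcp_timewait_sock_ops; it reappears here, below tcp_v4_tw_get_peer(), so that the new .twsk_getpeer member can reference a function that is already defined. Second, the VJ timestamp-remembering logic stripped out of tcp_v4_remember_stamp() and tcp_v4_tw_remember_stamp() is not lost: on the net-next side it moves into protocol-independent TCP code, which asks the address family only for the peer. My reconstruction of the generic caller (net/ipv4/tcp_minisocks.c in that series, so treat the details as approximate):

static bool tcp_remember_stamp(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_peer *peer;
	bool release_it;

	peer = icsk->icsk_af_ops->get_peer(sk, &release_it);
	if (peer) {
		/* VJ's idea: hold the last timestamp seen from this
		 * destination for at least a timewait interval, for PAWS
		 * duplicate-segment checks on later connections. */
		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
		     peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
			peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
			peer->tcp_ts = tp->rx_opt.ts_recent;
		}
		if (release_it)
			inet_putpeer(peer);
		return true;
	}
	return false;
}

The AF-specific half now answers only "which inet_peer backs this socket, and do I own a reference to it?", which is exactly what tcp_v4_get_peer() reports through its release_it out-parameter.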
@@ -1830,7 +1798,7 @@ const struct inet_connection_sock_af_ops ipv4_specific = {
 	.rebuild_header	   = inet_sk_rebuild_header,
 	.conn_request	   = tcp_v4_conn_request,
 	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
-	.remember_stamp	   = tcp_v4_remember_stamp,
+	.get_peer	   = tcp_v4_get_peer,
 	.net_header_len	   = sizeof(struct iphdr),
 	.setsockopt	   = ip_setsockopt,
 	.getsockopt	   = ip_getsockopt,
@@ -2032,7 +2000,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
 get_req:
 			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
 		}
-		sk	  = sk_next(st->syn_wait_sk);
+		sk	  = sk_nulls_next(st->syn_wait_sk);
 		st->state = TCP_SEQ_STATE_LISTENING;
 		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 	} else {
@@ -2041,11 +2009,13 @@ get_req:
 			if (reqsk_queue_len(&icsk->icsk_accept_queue))
 				goto start_req;
 			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
-			sk = sk_next(sk);
+			sk = sk_nulls_next(sk);
 		}
 get_sk:
 		sk_nulls_for_each_from(sk, node) {
-			if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
+			if (!net_eq(sock_net(sk), net))
+				continue;
+			if (sk->sk_family == st->family) {
 				cur = sk;
 				goto out;
 			}
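The sk_next() to sk_nulls_next() substitutions in the last two hunks matter because these sockets are chained on RCU 'nulls' hash lists, whose chains terminate in a tagged nulls marker rather than a plain NULL pointer; sk_next() walks the ordinary sk_node member and so follows the wrong field entirely. From include/net/sock.h as I remember it (a sketch, not a quote), the helper is essentially:

static inline struct sock *sk_nulls_next(const struct sock *sk)
{
	return !is_a_nulls(sk->sk_nulls_node.next) ?
		hlist_nulls_entry(sk->sk_nulls_node.next,
				  struct sock, sk_nulls_node) :
		NULL;
}

The final hunk also hoists the network-namespace check into an early continue; that is a behaviour-preserving restructure of the /proc/net/tcp walk that makes the per-namespace filter explicit.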