diff options
author | Arnaldo Carvalho de Melo <acme@ghostprotocols.net> | 2005-08-09 23:10:42 -0400 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2005-08-29 18:43:19 -0400 |
commit | 463c84b97f24010a67cd871746d6a7e4c925a5f9 (patch) | |
tree | 48df67ede4ebb5d12b3c0ae55d72531574bd51a6 /net/ipv4/tcp.c | |
parent | 87d11ceb9deb7a3f13fdee6e89d9bb6be7d27a71 (diff) |
[NET]: Introduce inet_connection_sock
This creates struct inet_connection_sock, moving members out of struct
tcp_sock that are shareable with other INET connection-oriented
protocols, such as DCCP, which in my private tree already uses most of
these members.
The functions that operate on these members were renamed, using an
inet_csk_ prefix while not yet being moved to a new file, so as to
ease the review of these changes.
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r-- | net/ipv4/tcp.c | 90 |
1 file changed, 47 insertions, 43 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f1a708bf7a97..8177b86570db 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -313,7 +313,7 @@ EXPORT_SYMBOL(tcp_enter_memory_pressure); | |||
313 | static __inline__ unsigned int tcp_listen_poll(struct sock *sk, | 313 | static __inline__ unsigned int tcp_listen_poll(struct sock *sk, |
314 | poll_table *wait) | 314 | poll_table *wait) |
315 | { | 315 | { |
316 | return !reqsk_queue_empty(&tcp_sk(sk)->accept_queue) ? (POLLIN | POLLRDNORM) : 0; | 316 | return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ? (POLLIN | POLLRDNORM) : 0; |
317 | } | 317 | } |
318 | 318 | ||
319 | /* | 319 | /* |
@@ -458,15 +458,15 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) | |||
458 | int tcp_listen_start(struct sock *sk) | 458 | int tcp_listen_start(struct sock *sk) |
459 | { | 459 | { |
460 | struct inet_sock *inet = inet_sk(sk); | 460 | struct inet_sock *inet = inet_sk(sk); |
461 | struct tcp_sock *tp = tcp_sk(sk); | 461 | struct inet_connection_sock *icsk = inet_csk(sk); |
462 | int rc = reqsk_queue_alloc(&tp->accept_queue, TCP_SYNQ_HSIZE); | 462 | int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, TCP_SYNQ_HSIZE); |
463 | 463 | ||
464 | if (rc != 0) | 464 | if (rc != 0) |
465 | return rc; | 465 | return rc; |
466 | 466 | ||
467 | sk->sk_max_ack_backlog = 0; | 467 | sk->sk_max_ack_backlog = 0; |
468 | sk->sk_ack_backlog = 0; | 468 | sk->sk_ack_backlog = 0; |
469 | tcp_delack_init(tp); | 469 | inet_csk_delack_init(sk); |
470 | 470 | ||
471 | /* There is race window here: we announce ourselves listening, | 471 | /* There is race window here: we announce ourselves listening, |
472 | * but this transition is still not validated by get_port(). | 472 | * but this transition is still not validated by get_port(). |
@@ -484,7 +484,7 @@ int tcp_listen_start(struct sock *sk) | |||
484 | } | 484 | } |
485 | 485 | ||
486 | sk->sk_state = TCP_CLOSE; | 486 | sk->sk_state = TCP_CLOSE; |
487 | __reqsk_queue_destroy(&tp->accept_queue); | 487 | __reqsk_queue_destroy(&icsk->icsk_accept_queue); |
488 | return -EADDRINUSE; | 488 | return -EADDRINUSE; |
489 | } | 489 | } |
490 | 490 | ||
@@ -495,14 +495,14 @@ int tcp_listen_start(struct sock *sk) | |||
495 | 495 | ||
496 | static void tcp_listen_stop (struct sock *sk) | 496 | static void tcp_listen_stop (struct sock *sk) |
497 | { | 497 | { |
498 | struct tcp_sock *tp = tcp_sk(sk); | 498 | struct inet_connection_sock *icsk = inet_csk(sk); |
499 | struct request_sock *acc_req; | 499 | struct request_sock *acc_req; |
500 | struct request_sock *req; | 500 | struct request_sock *req; |
501 | 501 | ||
502 | tcp_delete_keepalive_timer(sk); | 502 | inet_csk_delete_keepalive_timer(sk); |
503 | 503 | ||
504 | /* make all the listen_opt local to us */ | 504 | /* make all the listen_opt local to us */ |
505 | acc_req = reqsk_queue_yank_acceptq(&tp->accept_queue); | 505 | acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue); |
506 | 506 | ||
507 | /* Following specs, it would be better either to send FIN | 507 | /* Following specs, it would be better either to send FIN |
508 | * (and enter FIN-WAIT-1, it is normal close) | 508 | * (and enter FIN-WAIT-1, it is normal close) |
@@ -512,7 +512,7 @@ static void tcp_listen_stop (struct sock *sk) | |||
512 | * To be honest, we are not able to make either | 512 | * To be honest, we are not able to make either |
513 | * of the variants now. --ANK | 513 | * of the variants now. --ANK |
514 | */ | 514 | */ |
515 | reqsk_queue_destroy(&tp->accept_queue); | 515 | reqsk_queue_destroy(&icsk->icsk_accept_queue); |
516 | 516 | ||
517 | while ((req = acc_req) != NULL) { | 517 | while ((req = acc_req) != NULL) { |
518 | struct sock *child = req->sk; | 518 | struct sock *child = req->sk; |
@@ -1039,20 +1039,21 @@ static void cleanup_rbuf(struct sock *sk, int copied) | |||
1039 | BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)); | 1039 | BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)); |
1040 | #endif | 1040 | #endif |
1041 | 1041 | ||
1042 | if (tcp_ack_scheduled(tp)) { | 1042 | if (inet_csk_ack_scheduled(sk)) { |
1043 | const struct inet_connection_sock *icsk = inet_csk(sk); | ||
1043 | /* Delayed ACKs frequently hit locked sockets during bulk | 1044 | /* Delayed ACKs frequently hit locked sockets during bulk |
1044 | * receive. */ | 1045 | * receive. */ |
1045 | if (tp->ack.blocked || | 1046 | if (icsk->icsk_ack.blocked || |
1046 | /* Once-per-two-segments ACK was not sent by tcp_input.c */ | 1047 | /* Once-per-two-segments ACK was not sent by tcp_input.c */ |
1047 | tp->rcv_nxt - tp->rcv_wup > tp->ack.rcv_mss || | 1048 | tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || |
1048 | /* | 1049 | /* |
1049 | * If this read emptied read buffer, we send ACK, if | 1050 | * If this read emptied read buffer, we send ACK, if |
1050 | * connection is not bidirectional, user drained | 1051 | * connection is not bidirectional, user drained |
1051 | * receive buffer and there was a small segment | 1052 | * receive buffer and there was a small segment |
1052 | * in queue. | 1053 | * in queue. |
1053 | */ | 1054 | */ |
1054 | (copied > 0 && (tp->ack.pending & TCP_ACK_PUSHED) && | 1055 | (copied > 0 && (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && |
1055 | !tp->ack.pingpong && !atomic_read(&sk->sk_rmem_alloc))) | 1056 | !icsk->icsk_ack.pingpong && !atomic_read(&sk->sk_rmem_alloc))) |
1056 | time_to_ack = 1; | 1057 | time_to_ack = 1; |
1057 | } | 1058 | } |
1058 | 1059 | ||
@@ -1569,7 +1570,7 @@ void tcp_destroy_sock(struct sock *sk) | |||
1569 | BUG_TRAP(sk_unhashed(sk)); | 1570 | BUG_TRAP(sk_unhashed(sk)); |
1570 | 1571 | ||
1571 | /* If it has not 0 inet_sk(sk)->num, it must be bound */ | 1572 | /* If it has not 0 inet_sk(sk)->num, it must be bound */ |
1572 | BUG_TRAP(!inet_sk(sk)->num || inet_sk(sk)->bind_hash); | 1573 | BUG_TRAP(!inet_sk(sk)->num || inet_csk(sk)->icsk_bind_hash); |
1573 | 1574 | ||
1574 | sk->sk_prot->destroy(sk); | 1575 | sk->sk_prot->destroy(sk); |
1575 | 1576 | ||
@@ -1698,10 +1699,10 @@ adjudge_to_death: | |||
1698 | tcp_send_active_reset(sk, GFP_ATOMIC); | 1699 | tcp_send_active_reset(sk, GFP_ATOMIC); |
1699 | NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER); | 1700 | NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER); |
1700 | } else { | 1701 | } else { |
1701 | int tmo = tcp_fin_time(tp); | 1702 | const int tmo = tcp_fin_time(sk); |
1702 | 1703 | ||
1703 | if (tmo > TCP_TIMEWAIT_LEN) { | 1704 | if (tmo > TCP_TIMEWAIT_LEN) { |
1704 | tcp_reset_keepalive_timer(sk, tcp_fin_time(tp)); | 1705 | inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk)); |
1705 | } else { | 1706 | } else { |
1706 | atomic_inc(&tcp_orphan_count); | 1707 | atomic_inc(&tcp_orphan_count); |
1707 | tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); | 1708 | tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); |
@@ -1746,6 +1747,7 @@ static inline int tcp_need_reset(int state) | |||
1746 | int tcp_disconnect(struct sock *sk, int flags) | 1747 | int tcp_disconnect(struct sock *sk, int flags) |
1747 | { | 1748 | { |
1748 | struct inet_sock *inet = inet_sk(sk); | 1749 | struct inet_sock *inet = inet_sk(sk); |
1750 | struct inet_connection_sock *icsk = inet_csk(sk); | ||
1749 | struct tcp_sock *tp = tcp_sk(sk); | 1751 | struct tcp_sock *tp = tcp_sk(sk); |
1750 | int err = 0; | 1752 | int err = 0; |
1751 | int old_state = sk->sk_state; | 1753 | int old_state = sk->sk_state; |
@@ -1782,7 +1784,7 @@ int tcp_disconnect(struct sock *sk, int flags) | |||
1782 | tp->srtt = 0; | 1784 | tp->srtt = 0; |
1783 | if ((tp->write_seq += tp->max_window + 2) == 0) | 1785 | if ((tp->write_seq += tp->max_window + 2) == 0) |
1784 | tp->write_seq = 1; | 1786 | tp->write_seq = 1; |
1785 | tp->backoff = 0; | 1787 | icsk->icsk_backoff = 0; |
1786 | tp->snd_cwnd = 2; | 1788 | tp->snd_cwnd = 2; |
1787 | tp->probes_out = 0; | 1789 | tp->probes_out = 0; |
1788 | tp->packets_out = 0; | 1790 | tp->packets_out = 0; |
@@ -1790,13 +1792,13 @@ int tcp_disconnect(struct sock *sk, int flags) | |||
1790 | tp->snd_cwnd_cnt = 0; | 1792 | tp->snd_cwnd_cnt = 0; |
1791 | tcp_set_ca_state(tp, TCP_CA_Open); | 1793 | tcp_set_ca_state(tp, TCP_CA_Open); |
1792 | tcp_clear_retrans(tp); | 1794 | tcp_clear_retrans(tp); |
1793 | tcp_delack_init(tp); | 1795 | inet_csk_delack_init(sk); |
1794 | sk->sk_send_head = NULL; | 1796 | sk->sk_send_head = NULL; |
1795 | tp->rx_opt.saw_tstamp = 0; | 1797 | tp->rx_opt.saw_tstamp = 0; |
1796 | tcp_sack_reset(&tp->rx_opt); | 1798 | tcp_sack_reset(&tp->rx_opt); |
1797 | __sk_dst_reset(sk); | 1799 | __sk_dst_reset(sk); |
1798 | 1800 | ||
1799 | BUG_TRAP(!inet->num || inet->bind_hash); | 1801 | BUG_TRAP(!inet->num || icsk->icsk_bind_hash); |
1800 | 1802 | ||
1801 | sk->sk_error_report(sk); | 1803 | sk->sk_error_report(sk); |
1802 | return err; | 1804 | return err; |
@@ -1808,7 +1810,7 @@ int tcp_disconnect(struct sock *sk, int flags) | |||
1808 | */ | 1810 | */ |
1809 | static int wait_for_connect(struct sock *sk, long timeo) | 1811 | static int wait_for_connect(struct sock *sk, long timeo) |
1810 | { | 1812 | { |
1811 | struct tcp_sock *tp = tcp_sk(sk); | 1813 | struct inet_connection_sock *icsk = inet_csk(sk); |
1812 | DEFINE_WAIT(wait); | 1814 | DEFINE_WAIT(wait); |
1813 | int err; | 1815 | int err; |
1814 | 1816 | ||
@@ -1830,11 +1832,11 @@ static int wait_for_connect(struct sock *sk, long timeo) | |||
1830 | prepare_to_wait_exclusive(sk->sk_sleep, &wait, | 1832 | prepare_to_wait_exclusive(sk->sk_sleep, &wait, |
1831 | TASK_INTERRUPTIBLE); | 1833 | TASK_INTERRUPTIBLE); |
1832 | release_sock(sk); | 1834 | release_sock(sk); |
1833 | if (reqsk_queue_empty(&tp->accept_queue)) | 1835 | if (reqsk_queue_empty(&icsk->icsk_accept_queue)) |
1834 | timeo = schedule_timeout(timeo); | 1836 | timeo = schedule_timeout(timeo); |
1835 | lock_sock(sk); | 1837 | lock_sock(sk); |
1836 | err = 0; | 1838 | err = 0; |
1837 | if (!reqsk_queue_empty(&tp->accept_queue)) | 1839 | if (!reqsk_queue_empty(&icsk->icsk_accept_queue)) |
1838 | break; | 1840 | break; |
1839 | err = -EINVAL; | 1841 | err = -EINVAL; |
1840 | if (sk->sk_state != TCP_LISTEN) | 1842 | if (sk->sk_state != TCP_LISTEN) |
@@ -1854,9 +1856,9 @@ static int wait_for_connect(struct sock *sk, long timeo) | |||
1854 | * This will accept the next outstanding connection. | 1856 | * This will accept the next outstanding connection. |
1855 | */ | 1857 | */ |
1856 | 1858 | ||
1857 | struct sock *tcp_accept(struct sock *sk, int flags, int *err) | 1859 | struct sock *inet_csk_accept(struct sock *sk, int flags, int *err) |
1858 | { | 1860 | { |
1859 | struct tcp_sock *tp = tcp_sk(sk); | 1861 | struct inet_connection_sock *icsk = inet_csk(sk); |
1860 | struct sock *newsk; | 1862 | struct sock *newsk; |
1861 | int error; | 1863 | int error; |
1862 | 1864 | ||
@@ -1870,7 +1872,7 @@ struct sock *tcp_accept(struct sock *sk, int flags, int *err) | |||
1870 | goto out_err; | 1872 | goto out_err; |
1871 | 1873 | ||
1872 | /* Find already established connection */ | 1874 | /* Find already established connection */ |
1873 | if (reqsk_queue_empty(&tp->accept_queue)) { | 1875 | if (reqsk_queue_empty(&icsk->icsk_accept_queue)) { |
1874 | long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); | 1876 | long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); |
1875 | 1877 | ||
1876 | /* If this is a non blocking socket don't sleep */ | 1878 | /* If this is a non blocking socket don't sleep */ |
@@ -1883,7 +1885,7 @@ struct sock *tcp_accept(struct sock *sk, int flags, int *err) | |||
1883 | goto out_err; | 1885 | goto out_err; |
1884 | } | 1886 | } |
1885 | 1887 | ||
1886 | newsk = reqsk_queue_get_child(&tp->accept_queue, sk); | 1888 | newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk); |
1887 | BUG_TRAP(newsk->sk_state != TCP_SYN_RECV); | 1889 | BUG_TRAP(newsk->sk_state != TCP_SYN_RECV); |
1888 | out: | 1890 | out: |
1889 | release_sock(sk); | 1891 | release_sock(sk); |
@@ -1901,6 +1903,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, | |||
1901 | int optlen) | 1903 | int optlen) |
1902 | { | 1904 | { |
1903 | struct tcp_sock *tp = tcp_sk(sk); | 1905 | struct tcp_sock *tp = tcp_sk(sk); |
1906 | struct inet_connection_sock *icsk = inet_csk(sk); | ||
1904 | int val; | 1907 | int val; |
1905 | int err = 0; | 1908 | int err = 0; |
1906 | 1909 | ||
@@ -1999,7 +2002,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, | |||
1999 | elapsed = tp->keepalive_time - elapsed; | 2002 | elapsed = tp->keepalive_time - elapsed; |
2000 | else | 2003 | else |
2001 | elapsed = 0; | 2004 | elapsed = 0; |
2002 | tcp_reset_keepalive_timer(sk, elapsed); | 2005 | inet_csk_reset_keepalive_timer(sk, elapsed); |
2003 | } | 2006 | } |
2004 | } | 2007 | } |
2005 | break; | 2008 | break; |
@@ -2019,7 +2022,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, | |||
2019 | if (val < 1 || val > MAX_TCP_SYNCNT) | 2022 | if (val < 1 || val > MAX_TCP_SYNCNT) |
2020 | err = -EINVAL; | 2023 | err = -EINVAL; |
2021 | else | 2024 | else |
2022 | tp->syn_retries = val; | 2025 | icsk->icsk_syn_retries = val; |
2023 | break; | 2026 | break; |
2024 | 2027 | ||
2025 | case TCP_LINGER2: | 2028 | case TCP_LINGER2: |
@@ -2058,16 +2061,16 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, | |||
2058 | 2061 | ||
2059 | case TCP_QUICKACK: | 2062 | case TCP_QUICKACK: |
2060 | if (!val) { | 2063 | if (!val) { |
2061 | tp->ack.pingpong = 1; | 2064 | icsk->icsk_ack.pingpong = 1; |
2062 | } else { | 2065 | } else { |
2063 | tp->ack.pingpong = 0; | 2066 | icsk->icsk_ack.pingpong = 0; |
2064 | if ((1 << sk->sk_state) & | 2067 | if ((1 << sk->sk_state) & |
2065 | (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && | 2068 | (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && |
2066 | tcp_ack_scheduled(tp)) { | 2069 | inet_csk_ack_scheduled(sk)) { |
2067 | tp->ack.pending |= TCP_ACK_PUSHED; | 2070 | icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; |
2068 | cleanup_rbuf(sk, 1); | 2071 | cleanup_rbuf(sk, 1); |
2069 | if (!(val & 1)) | 2072 | if (!(val & 1)) |
2070 | tp->ack.pingpong = 1; | 2073 | icsk->icsk_ack.pingpong = 1; |
2071 | } | 2074 | } |
2072 | } | 2075 | } |
2073 | break; | 2076 | break; |
@@ -2084,15 +2087,16 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, | |||
2084 | void tcp_get_info(struct sock *sk, struct tcp_info *info) | 2087 | void tcp_get_info(struct sock *sk, struct tcp_info *info) |
2085 | { | 2088 | { |
2086 | struct tcp_sock *tp = tcp_sk(sk); | 2089 | struct tcp_sock *tp = tcp_sk(sk); |
2090 | const struct inet_connection_sock *icsk = inet_csk(sk); | ||
2087 | u32 now = tcp_time_stamp; | 2091 | u32 now = tcp_time_stamp; |
2088 | 2092 | ||
2089 | memset(info, 0, sizeof(*info)); | 2093 | memset(info, 0, sizeof(*info)); |
2090 | 2094 | ||
2091 | info->tcpi_state = sk->sk_state; | 2095 | info->tcpi_state = sk->sk_state; |
2092 | info->tcpi_ca_state = tp->ca_state; | 2096 | info->tcpi_ca_state = tp->ca_state; |
2093 | info->tcpi_retransmits = tp->retransmits; | 2097 | info->tcpi_retransmits = icsk->icsk_retransmits; |
2094 | info->tcpi_probes = tp->probes_out; | 2098 | info->tcpi_probes = tp->probes_out; |
2095 | info->tcpi_backoff = tp->backoff; | 2099 | info->tcpi_backoff = icsk->icsk_backoff; |
2096 | 2100 | ||
2097 | if (tp->rx_opt.tstamp_ok) | 2101 | if (tp->rx_opt.tstamp_ok) |
2098 | info->tcpi_options |= TCPI_OPT_TIMESTAMPS; | 2102 | info->tcpi_options |= TCPI_OPT_TIMESTAMPS; |
@@ -2107,10 +2111,10 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) | |||
2107 | if (tp->ecn_flags&TCP_ECN_OK) | 2111 | if (tp->ecn_flags&TCP_ECN_OK) |
2108 | info->tcpi_options |= TCPI_OPT_ECN; | 2112 | info->tcpi_options |= TCPI_OPT_ECN; |
2109 | 2113 | ||
2110 | info->tcpi_rto = jiffies_to_usecs(tp->rto); | 2114 | info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); |
2111 | info->tcpi_ato = jiffies_to_usecs(tp->ack.ato); | 2115 | info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); |
2112 | info->tcpi_snd_mss = tp->mss_cache; | 2116 | info->tcpi_snd_mss = tp->mss_cache; |
2113 | info->tcpi_rcv_mss = tp->ack.rcv_mss; | 2117 | info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; |
2114 | 2118 | ||
2115 | info->tcpi_unacked = tp->packets_out; | 2119 | info->tcpi_unacked = tp->packets_out; |
2116 | info->tcpi_sacked = tp->sacked_out; | 2120 | info->tcpi_sacked = tp->sacked_out; |
@@ -2119,7 +2123,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) | |||
2119 | info->tcpi_fackets = tp->fackets_out; | 2123 | info->tcpi_fackets = tp->fackets_out; |
2120 | 2124 | ||
2121 | info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); | 2125 | info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); |
2122 | info->tcpi_last_data_recv = jiffies_to_msecs(now - tp->ack.lrcvtime); | 2126 | info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); |
2123 | info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); | 2127 | info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); |
2124 | 2128 | ||
2125 | info->tcpi_pmtu = tp->pmtu_cookie; | 2129 | info->tcpi_pmtu = tp->pmtu_cookie; |
@@ -2179,7 +2183,7 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, | |||
2179 | val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes; | 2183 | val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes; |
2180 | break; | 2184 | break; |
2181 | case TCP_SYNCNT: | 2185 | case TCP_SYNCNT: |
2182 | val = tp->syn_retries ? : sysctl_tcp_syn_retries; | 2186 | val = inet_csk(sk)->icsk_syn_retries ? : sysctl_tcp_syn_retries; |
2183 | break; | 2187 | break; |
2184 | case TCP_LINGER2: | 2188 | case TCP_LINGER2: |
2185 | val = tp->linger2; | 2189 | val = tp->linger2; |
@@ -2209,7 +2213,7 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, | |||
2209 | return 0; | 2213 | return 0; |
2210 | } | 2214 | } |
2211 | case TCP_QUICKACK: | 2215 | case TCP_QUICKACK: |
2212 | val = !tp->ack.pingpong; | 2216 | val = !inet_csk(sk)->icsk_ack.pingpong; |
2213 | break; | 2217 | break; |
2214 | 2218 | ||
2215 | case TCP_CONGESTION: | 2219 | case TCP_CONGESTION: |
@@ -2340,7 +2344,7 @@ void __init tcp_init(void) | |||
2340 | tcp_register_congestion_control(&tcp_reno); | 2344 | tcp_register_congestion_control(&tcp_reno); |
2341 | } | 2345 | } |
2342 | 2346 | ||
2343 | EXPORT_SYMBOL(tcp_accept); | 2347 | EXPORT_SYMBOL(inet_csk_accept); |
2344 | EXPORT_SYMBOL(tcp_close); | 2348 | EXPORT_SYMBOL(tcp_close); |
2345 | EXPORT_SYMBOL(tcp_destroy_sock); | 2349 | EXPORT_SYMBOL(tcp_destroy_sock); |
2346 | EXPORT_SYMBOL(tcp_disconnect); | 2350 | EXPORT_SYMBOL(tcp_disconnect); |