Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--  net/ipv4/tcp_output.c  86
1 file changed, 46 insertions(+), 40 deletions(-)

diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a4d1eb9a0926..6f0a7e30ceac 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -105,8 +105,9 @@ static __u16 tcp_advertise_mss(struct sock *sk)
 
 /* RFC2861. Reset CWND after idle period longer RTO to "restart window".
  * This is the first part of cwnd validation mechanism. */
-static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst)
+static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
 {
+        struct tcp_sock *tp = tcp_sk(sk);
         s32 delta = tcp_time_stamp - tp->lsndtime;
         u32 restart_cwnd = tcp_init_cwnd(tp, dst);
         u32 cwnd = tp->snd_cwnd;
@@ -116,7 +117,7 @@ static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst)
         tp->snd_ssthresh = tcp_current_ssthresh(tp);
         restart_cwnd = min(restart_cwnd, cwnd);
 
-        while ((delta -= tp->rto) > 0 && cwnd > restart_cwnd)
+        while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
                 cwnd >>= 1;
         tp->snd_cwnd = max(cwnd, restart_cwnd);
         tp->snd_cwnd_stamp = tcp_time_stamp;
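The restart-window logic these two hunks touch is behaviorally unchanged; only the home of the RTO moves (tp->rto becomes inet_csk(sk)->icsk_rto). For reference, a minimal userspace model of the RFC 2861 computation, with the kernel types, tcp_time_stamp, and tcp_init_cwnd() replaced by plain parameters; values are illustrative, not kernel code:

#include <stdio.h>

/* Toy model of tcp_cwnd_restart(): halve cwnd once per RTO of idle
 * time, but never let it fall below the restart window. */
static unsigned int cwnd_after_idle(unsigned int cwnd,
                                    unsigned int restart_cwnd,
                                    int idle_ms, int rto_ms)
{
        int delta = idle_ms;

        if (restart_cwnd > cwnd)        /* min(restart_cwnd, cwnd) */
                restart_cwnd = cwnd;

        while ((delta -= rto_ms) > 0 && cwnd > restart_cwnd)
                cwnd >>= 1;

        return cwnd > restart_cwnd ? cwnd : restart_cwnd;  /* max() */
}

int main(void)
{
        /* 10 segments, restart window 4, 1s idle, 200ms RTO: cwnd is
         * halved twice (10 -> 5 -> 2), then clamped back up to 4. */
        printf("%u\n", cwnd_after_idle(10, 4, 1000, 200));  /* prints 4 */
        return 0;
}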
@@ -126,26 +127,25 @@ static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst)
 static inline void tcp_event_data_sent(struct tcp_sock *tp,
                                        struct sk_buff *skb, struct sock *sk)
 {
-        u32 now = tcp_time_stamp;
+        struct inet_connection_sock *icsk = inet_csk(sk);
+        const u32 now = tcp_time_stamp;
 
-        if (!tp->packets_out && (s32)(now - tp->lsndtime) > tp->rto)
-                tcp_cwnd_restart(tp, __sk_dst_get(sk));
+        if (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto)
+                tcp_cwnd_restart(sk, __sk_dst_get(sk));
 
         tp->lsndtime = now;
 
         /* If it is a reply for ato after last received
          * packet, enter pingpong mode.
          */
-        if ((u32)(now - tp->ack.lrcvtime) < tp->ack.ato)
-                tp->ack.pingpong = 1;
+        if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
+                icsk->icsk_ack.pingpong = 1;
 }
 
 static __inline__ void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
 {
-        struct tcp_sock *tp = tcp_sk(sk);
-
-        tcp_dec_quickack_mode(tp, pkts);
-        tcp_clear_xmit_timer(sk, TCP_TIME_DACK);
+        tcp_dec_quickack_mode(sk, pkts);
+        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 }
 
 /* Determine a window scaling and initial window to offer.
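Passing struct sock * instead of struct tcp_sock * works because the protocol structs nest, so one pointer reaches every layer. A simplified sketch of that layout trick follows; the real chain has struct inet_sock between sock and inet_connection_sock (collapsed here for brevity), and the accessor functions are stand-ins for the kernel's inline casts:

#include <stdio.h>

/* Simplified model of the struct nesting behind inet_csk()/tcp_sk():
 * each layer embeds the one below it as its first member, so a single
 * struct sock pointer can be cast to any layer. Stand-in fields only. */
struct sock                 { int dummy; };
struct inet_connection_sock { struct sock sk; unsigned int icsk_rto; };
struct tcp_sock             { struct inet_connection_sock icsk;
                              unsigned int snd_cwnd; };

static struct inet_connection_sock *inet_csk(struct sock *sk)
{
        return (struct inet_connection_sock *)sk;
}

static struct tcp_sock *tcp_sk(struct sock *sk)
{
        return (struct tcp_sock *)sk;
}

int main(void)
{
        struct tcp_sock tp = { .icsk.icsk_rto = 200, .snd_cwnd = 10 };
        struct sock *sk = &tp.icsk.sk;

        /* Both accessors recover their layer from the same pointer. */
        printf("rto=%u cwnd=%u\n",
               inet_csk(sk)->icsk_rto, tcp_sk(sk)->snd_cwnd);
        return 0;
}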
@@ -696,7 +696,7 @@ static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
                 if (tp->packets_out > tp->snd_cwnd_used)
                         tp->snd_cwnd_used = tp->packets_out;
 
-                if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto)
+                if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
                         tcp_cwnd_application_limited(sk);
         }
 }
@@ -1147,6 +1147,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
  */
 u32 __tcp_select_window(struct sock *sk)
 {
+        struct inet_connection_sock *icsk = inet_csk(sk);
         struct tcp_sock *tp = tcp_sk(sk);
         /* MSS for the peer's data. Previous verions used mss_clamp
          * here. I don't know if the value based on our guesses
@@ -1154,7 +1155,7 @@ u32 __tcp_select_window(struct sock *sk)
          * but may be worse for the performance because of rcv_mss
          * fluctuations. --SAW 1998/11/1
          */
-        int mss = tp->ack.rcv_mss;
+        int mss = icsk->icsk_ack.rcv_mss;
         int free_space = tcp_space(sk);
         int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
         int window;
@@ -1163,7 +1164,7 @@ u32 __tcp_select_window(struct sock *sk)
                 mss = full_space;
 
         if (free_space < full_space/2) {
-                tp->ack.quick = 0;
+                icsk->icsk_ack.quick = 0;
 
                 if (tcp_memory_pressure)
                         tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);
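Only the field homes change in __tcp_select_window(); the policy in the visible fragment stays as-is. A rough standalone rendering of just that fragment, assuming the kernel state is passed in as plain parameters; the values in main() are made up:

#include <stdio.h>

/* Sketch of the shrinking-receive-space branch shown above: with less
 * than half the receive space free, quick acks are switched off, and
 * under global memory pressure the receive-window growth target
 * (rcv_ssthresh) is clamped to four advertised MSS. */
static unsigned int low_space_clamp(int free_space, int full_space,
                                    int memory_pressure,
                                    unsigned int rcv_ssthresh,
                                    unsigned int advmss, int *quick)
{
        if (free_space < full_space / 2) {
                *quick = 0;     /* stop granting quick-ack credit */
                if (memory_pressure && rcv_ssthresh > 4 * advmss)
                        rcv_ssthresh = 4 * advmss;  /* min() in the kernel */
        }
        return rcv_ssthresh;
}

int main(void)
{
        int quick = 4;
        /* 16KB free of 64KB under pressure, 1460-byte MSS: clamps to 5840. */
        printf("%u quick=%d\n",
               low_space_clamp(16384, 65536, 1, 32768, 1460, &quick), quick);
        return 0;
}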
@@ -1491,7 +1492,8 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 
                         if (skb ==
                             skb_peek(&sk->sk_write_queue))
-                                tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
+                                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+                                                          inet_csk(sk)->icsk_rto);
                 }
 
                 packet_cnt -= tcp_skb_pcount(skb);
@@ -1544,7 +1546,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                         break;
 
                 if (skb == skb_peek(&sk->sk_write_queue))
-                        tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
+                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto);
 
                 NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
         }
@@ -1780,8 +1782,8 @@ static inline void tcp_connect_init(struct sock *sk)
         tp->rcv_wup = 0;
         tp->copied_seq = 0;
 
-        tp->rto = TCP_TIMEOUT_INIT;
-        tp->retransmits = 0;
+        inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
+        inet_csk(sk)->icsk_retransmits = 0;
         tcp_clear_retrans(tp);
 }
 
@@ -1824,7 +1826,7 @@ int tcp_connect(struct sock *sk)
         TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
 
         /* Timer for repeating the SYN until an answer. */
-        tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
+        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto);
         return 0;
 }
 
@@ -1834,20 +1836,21 @@ int tcp_connect(struct sock *sk)
  */
 void tcp_send_delayed_ack(struct sock *sk)
 {
-        struct tcp_sock *tp = tcp_sk(sk);
-        int ato = tp->ack.ato;
+        struct inet_connection_sock *icsk = inet_csk(sk);
+        int ato = icsk->icsk_ack.ato;
         unsigned long timeout;
 
         if (ato > TCP_DELACK_MIN) {
+                const struct tcp_sock *tp = tcp_sk(sk);
                 int max_ato = HZ/2;
 
-                if (tp->ack.pingpong || (tp->ack.pending&TCP_ACK_PUSHED))
+                if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
                         max_ato = TCP_DELACK_MAX;
 
                 /* Slow path, intersegment interval is "high". */
 
                 /* If some rtt estimate is known, use it to bound delayed ack.
-                 * Do not use tp->rto here, use results of rtt measurements
+                 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
                  * directly.
                  */
                 if (tp->srtt) {
@@ -1864,21 +1867,22 @@ void tcp_send_delayed_ack(struct sock *sk)
         timeout = jiffies + ato;
 
         /* Use new timeout only if there wasn't a older one earlier. */
-        if (tp->ack.pending&TCP_ACK_TIMER) {
+        if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
                 /* If delack timer was blocked or is about to expire,
                  * send ACK now.
                  */
-                if (tp->ack.blocked || time_before_eq(tp->ack.timeout, jiffies+(ato>>2))) {
+                if (icsk->icsk_ack.blocked ||
+                    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
                         tcp_send_ack(sk);
                         return;
                 }
 
-                if (!time_before(timeout, tp->ack.timeout))
-                        timeout = tp->ack.timeout;
+                if (!time_before(timeout, icsk->icsk_ack.timeout))
+                        timeout = icsk->icsk_ack.timeout;
         }
-        tp->ack.pending |= TCP_ACK_SCHED|TCP_ACK_TIMER;
-        tp->ack.timeout = timeout;
-        sk_reset_timer(sk, &tp->delack_timer, timeout);
+        icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
+        icsk->icsk_ack.timeout = timeout;
+        sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
 }
 
 /* This routine sends an ack and also updates the window. */
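The arming policy in tcp_send_delayed_ack() is untouched by the rename: a new deadline never postpones a pending one, and if the pending timer is blocked or within ato/4 of firing, the ACK goes out immediately. A compact userspace model of that decision, with jiffies and the timer state supplied as plain arguments rather than read from the socket:

#include <stdio.h>

/* time_before_eq() for wrapping tick counters, as in the kernel. */
#define time_before_eq(a, b) ((long)((a) - (b)) <= 0)

enum delack_action { SEND_ACK_NOW, ARM_TIMER };

/* Model of the tail of tcp_send_delayed_ack(): given a candidate
 * timeout and an already pending one, decide whether to ACK now or
 * (re)arm, never pushing an existing deadline later. */
static enum delack_action delack_decide(unsigned long jiffies,
                                        unsigned long ato,
                                        int timer_pending, int timer_blocked,
                                        unsigned long old_timeout,
                                        unsigned long *new_timeout)
{
        unsigned long timeout = jiffies + ato;

        if (timer_pending) {
                /* Blocked or about to expire: don't wait, ACK now. */
                if (timer_blocked ||
                    time_before_eq(old_timeout, jiffies + (ato >> 2)))
                        return SEND_ACK_NOW;
                /* Keep the earlier of the two deadlines. */
                if (time_before_eq(old_timeout, timeout))
                        timeout = old_timeout;
        }
        *new_timeout = timeout;
        return ARM_TIMER;
}

int main(void)
{
        unsigned long t;
        /* Pending timer expiring at tick 105, now=100, ato=40:
         * 105 <= 100 + 10, so the ACK is sent immediately. */
        printf("%d\n", delack_decide(100, 40, 1, 0, 105, &t) == SEND_ACK_NOW);
        return 0;
}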
@@ -1895,9 +1899,9 @@ void tcp_send_ack(struct sock *sk)
          */
         buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
         if (buff == NULL) {
-                tcp_schedule_ack(tp);
-                tp->ack.ato = TCP_ATO_MIN;
-                tcp_reset_xmit_timer(sk, TCP_TIME_DACK, TCP_DELACK_MAX);
+                inet_csk_schedule_ack(sk);
+                inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
+                inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX);
                 return;
         }
 
@@ -2011,6 +2015,7 @@ int tcp_write_wakeup(struct sock *sk)
  */
 void tcp_send_probe0(struct sock *sk)
 {
+        struct inet_connection_sock *icsk = inet_csk(sk);
         struct tcp_sock *tp = tcp_sk(sk);
         int err;
 
@@ -2019,16 +2024,16 @@ void tcp_send_probe0(struct sock *sk)
         if (tp->packets_out || !sk->sk_send_head) {
                 /* Cancel probe timer, if it is not required. */
                 tp->probes_out = 0;
-                tp->backoff = 0;
+                icsk->icsk_backoff = 0;
                 return;
         }
 
         if (err <= 0) {
-                if (tp->backoff < sysctl_tcp_retries2)
-                        tp->backoff++;
+                if (icsk->icsk_backoff < sysctl_tcp_retries2)
+                        icsk->icsk_backoff++;
                 tp->probes_out++;
-                tcp_reset_xmit_timer (sk, TCP_TIME_PROBE0,
-                                      min(tp->rto << tp->backoff, TCP_RTO_MAX));
+                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
+                                          min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX));
         } else {
                 /* If packet was not sent due to local congestion,
                  * do not backoff and do not remember probes_out.
@@ -2038,8 +2043,9 @@ void tcp_send_probe0(struct sock *sk)
                  */
                 if (!tp->probes_out)
                         tp->probes_out=1;
-                tcp_reset_xmit_timer (sk, TCP_TIME_PROBE0,
-                                      min(tp->rto << tp->backoff, TCP_RESOURCE_PROBE_INTERVAL));
+                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
+                                          min(icsk->icsk_rto << icsk->icsk_backoff,
+                                              TCP_RESOURCE_PROBE_INTERVAL));
         }
 }
 
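The probe0 re-arm interval these final hunks rename is exponential backoff with a cap: min(rto << backoff, TCP_RTO_MAX) on genuine failures, and the short resource-probe interval (without advancing the backoff) when the send failed only due to local congestion. A minimal model of that computation; the millisecond constants are illustrative stand-ins for the kernel's HZ-based TCP_RTO_MAX and TCP_RESOURCE_PROBE_INTERVAL:

#include <stdio.h>

#define TCP_RTO_MAX                 120000  /* ms; stand-in for 120*HZ */
#define TCP_RESOURCE_PROBE_INTERVAL    500  /* ms; stand-in for HZ/2 */

/* Model of the zero-window probe re-arm interval: exponential backoff
 * capped at TCP_RTO_MAX on real failures; short fixed cap when the
 * send failed only due to local congestion. */
static unsigned int probe0_interval(unsigned int rto, unsigned int backoff,
                                    int local_congestion)
{
        unsigned int cap = local_congestion ? TCP_RESOURCE_PROBE_INTERVAL
                                            : TCP_RTO_MAX;
        unsigned int ival = rto << backoff;

        return ival < cap ? ival : cap;   /* min(rto << backoff, cap) */
}

int main(void)
{
        /* 200ms RTO: successive failures give 200, 400, 800, ... then cap. */
        for (unsigned int b = 0; b < 12; b++)
                printf("backoff=%u -> %ums\n", b, probe0_interval(200, b, 0));
        return 0;
}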