Diffstat (limited to 'net/ipv4/tcp_timer.c')
-rw-r--r--	net/ipv4/tcp_timer.c	27
1 file changed, 14 insertions(+), 13 deletions(-)
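In short: this commit drops the stale CVS $Id$ tag from the file header and adds an explicit struct net argument to every NET_INC_STATS_BH() call in the TCP timer code, resolved through sock_net(sk); judging by the surrounding netns work, the point is to account the SNMP MIB counters per network namespace rather than in one global table. As a side cleanup, tcp_retransmit_timer() now selects the counter into a local mib_idx and bumps it at a single call site instead of invoking the macro in every branch.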
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 63ed9d6830e7..328e0cf42b3c 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -5,8 +5,6 @@
  *
  *		Implementation of the Transmission Control Protocol(TCP).
  *
- * Version:	$Id: tcp_timer.c,v 1.88 2002/02/01 22:01:04 davem Exp $
- *
  * Authors:	Ross Biro
  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
@@ -50,7 +48,7 @@ static void tcp_write_err(struct sock *sk)
 	sk->sk_error_report(sk);
 
 	tcp_done(sk);
-	NET_INC_STATS_BH(LINUX_MIB_TCPABORTONTIMEOUT);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
 }
 
 /* Do not allow orphaned sockets to eat all our resources.
@@ -91,7 +89,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset)
 		if (do_reset)
 			tcp_send_active_reset(sk, GFP_ATOMIC);
 		tcp_done(sk);
-		NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
 		return 1;
 	}
 	return 0;
@@ -181,7 +179,7 @@ static void tcp_delack_timer(unsigned long data)
 	if (sock_owned_by_user(sk)) {
 		/* Try again later. */
 		icsk->icsk_ack.blocked = 1;
-		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
 		sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN);
 		goto out_unlock;
 	}
@@ -200,7 +198,7 @@ static void tcp_delack_timer(unsigned long data)
 	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
 		struct sk_buff *skb;
 
-		NET_INC_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
 
 		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
 			sk->sk_backlog_rcv(sk, skb);
@@ -220,7 +218,7 @@ static void tcp_delack_timer(unsigned long data)
 			icsk->icsk_ack.ato = TCP_ATO_MIN;
 		}
 		tcp_send_ack(sk);
-		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
 	}
 	TCP_CHECK_TIMER(sk);
 
@@ -328,24 +326,27 @@ static void tcp_retransmit_timer(struct sock *sk)
 		goto out;
 
 	if (icsk->icsk_retransmits == 0) {
+		int mib_idx;
+
 		if (icsk->icsk_ca_state == TCP_CA_Disorder ||
 		    icsk->icsk_ca_state == TCP_CA_Recovery) {
 			if (tcp_is_sack(tp)) {
 				if (icsk->icsk_ca_state == TCP_CA_Recovery)
-					NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL);
+					mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
 				else
-					NET_INC_STATS_BH(LINUX_MIB_TCPSACKFAILURES);
+					mib_idx = LINUX_MIB_TCPSACKFAILURES;
 			} else {
 				if (icsk->icsk_ca_state == TCP_CA_Recovery)
-					NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERYFAIL);
+					mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
 				else
-					NET_INC_STATS_BH(LINUX_MIB_TCPRENOFAILURES);
+					mib_idx = LINUX_MIB_TCPRENOFAILURES;
			}
 		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
-			NET_INC_STATS_BH(LINUX_MIB_TCPLOSSFAILURES);
+			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
 		} else {
-			NET_INC_STATS_BH(LINUX_MIB_TCPTIMEOUTS);
 		}
+			mib_idx = LINUX_MIB_TCPTIMEOUTS;
+		}
+		NET_INC_STATS_BH(sock_net(sk), mib_idx);
 	}
 
 	if (tcp_use_frto(sk)) {
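
To make the refactoring pattern concrete, below is a minimal user-space C sketch; it is not kernel code, and the names netns_stats, mib_inc() and the MIB_* indices are invented here to mirror the shape of the patch, not the kernel's actual API. It shows the two ideas at work: counters keyed by an explicit per-namespace context instead of one global table, and a mib_idx selected in the branches but incremented at a single site.

/*
 * Illustrative sketch only -- invented names, not the kernel API.
 */
#include <stdio.h>

enum mib_index {
	MIB_TIMEOUTS,
	MIB_SACK_FAILURES,
	MIB_RENO_FAILURES,
	MIB_MAX,
};

/* Counters live in a per-"namespace" object, so two contexts never
 * share a counter -- the effect of adding sock_net(sk) above. */
struct netns_stats {
	unsigned long mibs[MIB_MAX];
};

/* The owning context is now an explicit first argument, as with
 * NET_INC_STATS_BH(sock_net(sk), field) in the patch. */
static void mib_inc(struct netns_stats *net, enum mib_index field)
{
	net->mibs[field]++;
}

/* Pick the counter in the branches, bump it once at the end -- the
 * mib_idx rewrite applied to tcp_retransmit_timer() above. */
static void account_timeout(struct netns_stats *net, int in_recovery,
			    int sack_enabled)
{
	enum mib_index mib_idx;

	if (!in_recovery)
		mib_idx = MIB_TIMEOUTS;
	else if (sack_enabled)
		mib_idx = MIB_SACK_FAILURES;
	else
		mib_idx = MIB_RENO_FAILURES;

	mib_inc(net, mib_idx);	/* single increment site */
}

int main(void)
{
	struct netns_stats init_ns = { { 0 } };
	struct netns_stats other_ns = { { 0 } };

	account_timeout(&init_ns, 0, 0);
	account_timeout(&other_ns, 1, 1);

	printf("init_ns timeouts:       %lu\n", init_ns.mibs[MIB_TIMEOUTS]);
	printf("other_ns sack failures: %lu\n", other_ns.mibs[MIB_SACK_FAILURES]);
	return 0;
}

One note on the single increment site: the context lookup (sock_net(sk) in the real code) happens exactly once, and a new branch only has to assign an index, which is why the retransmit-timer hunk reads as a simplification even though it adds a net line.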