author     Pavel Emelyanov <xemul@openvz.org>      2008-07-16 23:31:16 -0400
committer  David S. Miller <davem@davemloft.net>   2008-07-16 23:31:16 -0400
commit     de0744af1fe2d0a3d428f6af0f2fe1f6179b1a9c (patch)
tree       68d02820b1aa13e8fa9743c0ece5930a13d5a205 /net/ipv4/tcp_timer.c
parent     4e6734447dbc7a0a85e09616821c0782d9fb1141 (diff)
mib: add net to NET_INC_STATS_BH
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
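
The conversion in this file is mechanical: every NET_INC_STATS_BH() call site gains the socket's network namespace, obtained with sock_net(sk), as a new first argument. As a rough, illustrative sketch only (the exact macro bodies in this series differ, and the per-namespace MIB tables are wired up by follow-up patches), the signature change looks like this:

/* Old form: one global linux_mib table, shared by every namespace. */
#define NET_INC_STATS_BH(field) \
	SNMP_INC_STATS_BH(net_statistics, field)

/* New form: the caller names the struct net the event belongs to, so
 * the counter can eventually be kept per network namespace.  TCP call
 * sites derive it from the socket, e.g.
 *	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
 */
#define NET_INC_STATS_BH(net, field) \
	SNMP_INC_STATS_BH((net)->mib.net_statistics, field)

Once the per-namespace tables are in place, counters such as DelayedACKs, TCPTimeouts and TCPAbortOnMemory surface per netns in /proc/net/netstat instead of in a single global view.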
Diffstat (limited to 'net/ipv4/tcp_timer.c')
-rw-r--r--   net/ipv4/tcp_timer.c   12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 6a480d1fd8f6..328e0cf42b3c 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -48,7 +48,7 @@ static void tcp_write_err(struct sock *sk)
 	sk->sk_error_report(sk);
 
 	tcp_done(sk);
-	NET_INC_STATS_BH(LINUX_MIB_TCPABORTONTIMEOUT);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
 }
 
 /* Do not allow orphaned sockets to eat all our resources.
@@ -89,7 +89,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset)
 		if (do_reset)
 			tcp_send_active_reset(sk, GFP_ATOMIC);
 		tcp_done(sk);
-		NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
 		return 1;
 	}
 	return 0;
@@ -179,7 +179,7 @@ static void tcp_delack_timer(unsigned long data)
 	if (sock_owned_by_user(sk)) {
 		/* Try again later. */
 		icsk->icsk_ack.blocked = 1;
-		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
 		sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN);
 		goto out_unlock;
 	}
@@ -198,7 +198,7 @@ static void tcp_delack_timer(unsigned long data)
 	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
 		struct sk_buff *skb;
 
-		NET_INC_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
 
 		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
 			sk->sk_backlog_rcv(sk, skb);
@@ -218,7 +218,7 @@ static void tcp_delack_timer(unsigned long data)
 			icsk->icsk_ack.ato = TCP_ATO_MIN;
 		}
 		tcp_send_ack(sk);
-		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
 	}
 	TCP_CHECK_TIMER(sk);
 
@@ -346,7 +346,7 @@ static void tcp_retransmit_timer(struct sock *sk)
 		} else {
 			mib_idx = LINUX_MIB_TCPTIMEOUTS;
 		}
-		NET_INC_STATS_BH(mib_idx);
+		NET_INC_STATS_BH(sock_net(sk), mib_idx);
 	}
 
 	if (tcp_use_frto(sk)) {