author	Pavel Emelyanov <xemul@openvz.org>	2008-07-16 23:31:16 -0400
committer	David S. Miller <davem@davemloft.net>	2008-07-16 23:31:16 -0400
commit	de0744af1fe2d0a3d428f6af0f2fe1f6179b1a9c (patch)
tree	68d02820b1aa13e8fa9743c0ece5930a13d5a205 /net
parent	4e6734447dbc7a0a85e09616821c0782d9fb1141 (diff)
mib: add net to NET_INC_STATS_BH
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
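The declaration-side change these call sites depend on lives outside net/ and is not shown in this diff. As a minimal sketch, assuming the include/net/ip.h definition of this era (the new argument is accepted, but the counters are not yet per-namespace):

/*
 * Hedged sketch, not taken from this commit: NET_INC_STATS_BH() gains a
 * struct net argument so every caller names the namespace whose counter
 * it bumps; the argument can sit unused until a follow-up patch moves
 * the mibs into struct net.
 */
#define NET_INC_STATS_BH(net, field)					\
	do {								\
		(void)(net);	/* consumed once mibs are per-net */	\
		SNMP_INC_STATS_BH(net_statistics, field);		\
	} while (0)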
Diffstat (limited to 'net')
-rw-r--r--	net/dccp/ipv4.c	10
-rw-r--r--	net/dccp/ipv6.c	8
-rw-r--r--	net/dccp/timer.c	4
-rw-r--r--	net/ipv4/arp.c	2
-rw-r--r--	net/ipv4/inet_hashtables.c	4
-rw-r--r--	net/ipv4/syncookies.c	6
-rw-r--r--	net/ipv4/tcp.c	6
-rw-r--r--	net/ipv4/tcp_input.c	65
-rw-r--r--	net/ipv4/tcp_ipv4.c	12
-rw-r--r--	net/ipv4/tcp_minisocks.c	6
-rw-r--r--	net/ipv4/tcp_output.c	4
-rw-r--r--	net/ipv4/tcp_timer.c	12
-rw-r--r--	net/ipv6/inet6_hashtables.c	4
-rw-r--r--	net/ipv6/syncookies.c	6
-rw-r--r--	net/ipv6/tcp_ipv6.c	10
-rw-r--r--	net/sctp/input.c	2
16 files changed, 82 insertions, 79 deletions
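The call sites converted below obtain the struct net in one of four ways depending on context; a hedged summary for orientation (accessor names as used in kernels of this era):

/* 1. ICMP error handlers already hold a local derived from the device: */
struct net *net = dev_net(skb->dev);
NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

/* 2. Paths holding a full socket go through sock_net(sk):              */
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);

/* 3. TIME-WAIT minisockets go through twsk_net(tw):                    */
NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED);

/* 4. SCTP is not yet namespace-aware and pins the initial namespace:   */
NET_INC_STATS_BH(&init_net, LINUX_MIB_LOCKDROPPEDICMPS);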
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 9f760a1e312..2622ace17c4 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -230,7 +230,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 	 * servers this needs to be solved differently.
 	 */
 	if (sock_owned_by_user(sk))
-		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
+		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
 	if (sk->sk_state == DCCP_CLOSED)
 		goto out;
@@ -239,7 +239,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 	seq = dccp_hdr_seq(dh);
 	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
 	    !between48(seq, dp->dccps_swl, dp->dccps_swh)) {
-		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
 	}
 
@@ -286,7 +286,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 		BUG_TRAP(!req->sk);
 
 		if (seq != dccp_rsk(req)->dreq_iss) {
-			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 			goto out;
 		}
 		/*
@@ -409,9 +409,9 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
 	return newsk;
 
 exit_overflow:
-	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 exit:
-	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	dst_release(dst);
 	return NULL;
 }
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 83cc9bbc620..b74e8b2cbe5 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -111,7 +111,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk))
-		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
+		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
 	if (sk->sk_state == DCCP_CLOSED)
 		goto out;
@@ -189,7 +189,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		BUG_TRAP(req->sk == NULL);
 
 		if (seq != dccp_rsk(req)->dreq_iss) {
-			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 			goto out;
 		}
 
@@ -630,9 +630,9 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 	return newsk;
 
 out_overflow:
-	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 out:
-	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	if (opt != NULL && opt != np->opt)
 		sock_kfree_s(sk, opt, opt->tot_len);
 	dst_release(dst);
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 8703a792b56..3608d5342ca 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -224,7 +224,7 @@ static void dccp_delack_timer(unsigned long data)
 	if (sock_owned_by_user(sk)) {
 		/* Try again later. */
 		icsk->icsk_ack.blocked = 1;
-		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
 		sk_reset_timer(sk, &icsk->icsk_delack_timer,
 			       jiffies + TCP_DELACK_MIN);
 		goto out;
@@ -254,7 +254,7 @@ static void dccp_delack_timer(unsigned long data)
 			icsk->icsk_ack.ato = TCP_ATO_MIN;
 		}
 		dccp_send_ack(sk);
-		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
 	}
 out:
 	bh_unlock_sock(sk);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index aab98b8a994..b043eda60b0 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -426,7 +426,7 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
 	if (ip_route_output_key(net, &rt, &fl) < 0)
 		return 1;
 	if (rt->u.dst.dev != dev) {
-		NET_INC_STATS_BH(LINUX_MIB_ARPFILTER);
+		NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER);
 		flag = 1;
 	}
 	ip_rt_put(rt);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index eca5899729e..115f53722d2 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -312,11 +312,11 @@ unique:
 
 	if (twp) {
 		*twp = tw;
-		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
+		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
 	} else if (tw) {
 		/* Silly. Should hash-dance instead... */
 		inet_twsk_deschedule(tw, death_row);
-		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
+		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
 
 		inet_twsk_put(tw);
 	}
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index fdde2ae07e2..51bc24d3b8a 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -173,7 +173,7 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
 		;
 	*mssp = msstab[mssind] + 1;
 
-	NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESSENT);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
 
 	return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
 				     th->source, th->dest, ntohl(th->seq),
@@ -269,11 +269,11 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 
 	if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) ||
 	    (mss = cookie_check(skb, cookie)) == 0) {
-		NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESFAILED);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
 		goto out;
 	}
 
-	NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESRECV);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
 
 	/* check for timestamp cookie support */
 	memset(&tcp_opt, 0, sizeof(tcp_opt));
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 85f08291e92..9e0e45c3780 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1871,7 +1871,8 @@ adjudge_to_death:
 	if (tp->linger2 < 0) {
 		tcp_set_state(sk, TCP_CLOSE);
 		tcp_send_active_reset(sk, GFP_ATOMIC);
-		NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
+		NET_INC_STATS_BH(sock_net(sk),
+				 LINUX_MIB_TCPABORTONLINGER);
 	} else {
 		const int tmo = tcp_fin_time(sk);
 
@@ -1893,7 +1894,8 @@ adjudge_to_death:
 				       "sockets\n");
 			tcp_set_state(sk, TCP_CLOSE);
 			tcp_send_active_reset(sk, GFP_ATOMIC);
-			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
+			NET_INC_STATS_BH(sock_net(sk),
+					 LINUX_MIB_TCPABORTONMEMORY);
 		}
 	}
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index f50d8433f04..fac49a6e161 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -961,7 +961,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 	else
 		mib_idx = LINUX_MIB_TCPSACKREORDER;
 
-	NET_INC_STATS_BH(mib_idx);
+	NET_INC_STATS_BH(sock_net(sk), mib_idx);
 #if FASTRETRANS_DEBUG > 1
 	printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
 	       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -1157,7 +1157,7 @@ static void tcp_mark_lost_retrans(struct sock *sk)
 				tp->lost_out += tcp_skb_pcount(skb);
 				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 			}
-			NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
 		} else {
 			if (before(ack_seq, new_low_seq))
 				new_low_seq = ack_seq;
@@ -1181,7 +1181,7 @@ static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
 	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
 		dup_sack = 1;
 		tcp_dsack_seen(tp);
-		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
 	} else if (num_sacks > 1) {
 		u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
 		u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
@@ -1190,7 +1190,8 @@ static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
 		    !before(start_seq_0, start_seq_1)) {
 			dup_sack = 1;
 			tcp_dsack_seen(tp);
-			NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
+			NET_INC_STATS_BH(sock_net(sk),
+					 LINUX_MIB_TCPDSACKOFORECV);
 		}
 	}
 
@@ -1476,7 +1477,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
 				mib_idx = LINUX_MIB_TCPSACKDISCARD;
 			}
 
-			NET_INC_STATS_BH(mib_idx);
+			NET_INC_STATS_BH(sock_net(sk), mib_idx);
 			if (i == 0)
 				first_sack_index = -1;
 			continue;
@@ -1969,7 +1970,7 @@ static int tcp_check_sack_reneging(struct sock *sk, int flag)
 {
 	if (flag & FLAG_SACK_RENEGING) {
 		struct inet_connection_sock *icsk = inet_csk(sk);
-		NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
 
 		tcp_enter_loss(sk, 1);
 		icsk->icsk_retransmits++;
@@ -2401,7 +2402,7 @@ static int tcp_try_undo_recovery(struct sock *sk)
 		else
 			mib_idx = LINUX_MIB_TCPFULLUNDO;
 
-		NET_INC_STATS_BH(mib_idx);
+		NET_INC_STATS_BH(sock_net(sk), mib_idx);
 		tp->undo_marker = 0;
 	}
 	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
@@ -2424,7 +2425,7 @@ static void tcp_try_undo_dsack(struct sock *sk)
 		DBGUNDO(sk, "D-SACK");
 		tcp_undo_cwr(sk, 1);
 		tp->undo_marker = 0;
-		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
 	}
 }
 
@@ -2447,7 +2448,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
 
 		DBGUNDO(sk, "Hoe");
 		tcp_undo_cwr(sk, 0);
-		NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
 
 		/* So... Do not make Hoe's retransmit yet.
 		 * If the first packet was delayed, the rest
@@ -2476,7 +2477,7 @@ static int tcp_try_undo_loss(struct sock *sk)
 		DBGUNDO(sk, "partial loss");
 		tp->lost_out = 0;
 		tcp_undo_cwr(sk, 1);
-		NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
 		inet_csk(sk)->icsk_retransmits = 0;
 		tp->undo_marker = 0;
 		if (tcp_is_sack(tp))
@@ -2595,7 +2596,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 	    icsk->icsk_ca_state != TCP_CA_Open &&
 	    tp->fackets_out > tp->reordering) {
 		tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering);
-		NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS);
 	}
 
 	/* D. Check consistency of the current state. */
@@ -2700,7 +2701,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 			else
 				mib_idx = LINUX_MIB_TCPSACKRECOVERY;
 
-			NET_INC_STATS_BH(mib_idx);
+			NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
 			tp->high_seq = tp->snd_nxt;
 			tp->prior_ssthresh = 0;
@@ -3211,7 +3212,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
 		}
 		tp->frto_counter = 0;
 		tp->undo_marker = 0;
-		NET_INC_STATS_BH(LINUX_MIB_TCPSPURIOUSRTOS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS);
 	}
 	return 0;
 }
@@ -3264,12 +3265,12 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 
 		tcp_ca_event(sk, CA_EVENT_FAST_ACK);
 
-		NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS);
 	} else {
 		if (ack_seq != TCP_SKB_CB(skb)->end_seq)
 			flag |= FLAG_DATA;
 		else
-			NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS);
 
 		flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
 
@@ -3724,7 +3725,7 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 	else
 		mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
 
-	NET_INC_STATS_BH(mib_idx);
+	NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
 	tp->rx_opt.dsack = 1;
 	tp->duplicate_sack[0].start_seq = seq;
@@ -3750,7 +3751,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
 
 	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 		tcp_enter_quickack_mode(sk);
 
 		if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
@@ -4039,7 +4040,7 @@ queue_and_out:
 
 	if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
 		/* A retransmit, 2nd most common case.  Force an immediate ack. */
-		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
 out_of_window:
@@ -4181,7 +4182,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 			struct sk_buff *next = skb->next;
 			__skb_unlink(skb, list);
 			__kfree_skb(skb);
-			NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
 			skb = next;
 			continue;
 		}
@@ -4249,7 +4250,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 			struct sk_buff *next = skb->next;
 			__skb_unlink(skb, list);
 			__kfree_skb(skb);
-			NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
 			skb = next;
 			if (skb == tail ||
 			    tcp_hdr(skb)->syn ||
@@ -4312,7 +4313,7 @@ static int tcp_prune_ofo_queue(struct sock *sk)
 	int res = 0;
 
 	if (!skb_queue_empty(&tp->out_of_order_queue)) {
-		NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
 		__skb_queue_purge(&tp->out_of_order_queue);
 
 		/* Reset SACK state.  A conforming SACK implementation will
@@ -4341,7 +4342,7 @@ static int tcp_prune_queue(struct sock *sk)
 
 	SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
 
-	NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED);
 
 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		tcp_clamp_window(sk);
@@ -4370,7 +4371,7 @@ static int tcp_prune_queue(struct sock *sk)
 	 * drop receive data on the floor.  It will get retransmitted
 	 * and hopefully then we'll have sufficient space.
 	 */
-	NET_INC_STATS_BH(LINUX_MIB_RCVPRUNED);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED);
 
 	/* Massive buffer overcommit. */
 	tp->pred_flags = 0;
@@ -4837,7 +4838,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
 				__skb_pull(skb, tcp_header_len);
 				tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-				NET_INC_STATS_BH(LINUX_MIB_TCPHPHITSTOUSER);
+				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
 			}
 			if (copied_early)
 				tcp_cleanup_rbuf(sk, skb->len);
@@ -4860,7 +4861,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			if ((int)skb->truesize > sk->sk_forward_alloc)
 				goto step5;
 
-			NET_INC_STATS_BH(LINUX_MIB_TCPHPHITS);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
 			/* Bulk data transfer: receiver */
 			__skb_pull(skb, tcp_header_len);
@@ -4904,7 +4905,7 @@ slow_path:
 	if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
 	    tcp_paws_discard(sk, skb)) {
 		if (!th->rst) {
-			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
 			tcp_send_dupack(sk, skb);
 			goto discard;
 		}
@@ -4940,7 +4941,7 @@ slow_path:
 
 	if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
 		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
-		NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
 		tcp_reset(sk);
 		return 1;
 	}
@@ -4996,7 +4997,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
 		    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
 			     tcp_time_stamp)) {
-			NET_INC_STATS_BH(LINUX_MIB_PAWSACTIVEREJECTED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED);
 			goto reset_and_undo;
 		}
 
@@ -5280,7 +5281,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
 	    tcp_paws_discard(sk, skb)) {
 		if (!th->rst) {
-			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
 			tcp_send_dupack(sk, skb);
 			goto discard;
 		}
@@ -5309,7 +5310,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	 *	Check for a SYN in window.
 	 */
 	if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-		NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
 		tcp_reset(sk);
 		return 1;
 	}
@@ -5391,7 +5392,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 		     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
 			tcp_done(sk);
-			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 			return 1;
 		}
 
@@ -5451,7 +5452,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		if (sk->sk_shutdown & RCV_SHUTDOWN) {
 			if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 			    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
-				NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
+				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 				tcp_reset(sk);
 				return 1;
 			}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index e876312b950..29adc668ad5 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -366,7 +366,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 	 * servers this needs to be solved differently.
 	 */
 	if (sock_owned_by_user(sk))
-		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
+		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
 	if (sk->sk_state == TCP_CLOSE)
 		goto out;
@@ -375,7 +375,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 	seq = ntohl(th->seq);
 	if (sk->sk_state != TCP_LISTEN &&
 	    !between(seq, tp->snd_una, tp->snd_nxt)) {
-		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
 	}
 
@@ -422,7 +422,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 		BUG_TRAP(!req->sk);
 
 		if (seq != tcp_rsk(req)->snt_isn) {
-			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 			goto out;
 		}
 
@@ -1251,7 +1251,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 		if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
 		    (s32)(peer->tcp_ts - req->ts_recent) >
 							TCP_PAWS_WINDOW) {
-			NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
 			goto drop_and_release;
 		}
 	}
@@ -1365,9 +1365,9 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	return newsk;
 
 exit_overflow:
-	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 exit:
-	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	dst_release(dst);
 	return NULL;
 }
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 8b02b103996..204c4216266 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -244,7 +244,7 @@ kill:
 	}
 
 	if (paws_reject)
-		NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
 
 	if (!th->rst) {
 		/* In this case we must reset the TIMEWAIT timer.
@@ -611,7 +611,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 		if (!(flg & TCP_FLAG_RST))
 			req->rsk_ops->send_ack(skb, req);
 		if (paws_reject)
-			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
 		return NULL;
 	}
 
@@ -695,7 +695,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 	}
 
 embryonic_reset:
-	NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
 	if (!(flg & TCP_FLAG_RST))
 		req->rsk_ops->send_reset(sk, skb);
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 176f0702b8a..36a19707f67 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1995,7 +1995,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 				mib_idx = LINUX_MIB_TCPFASTRETRANS;
 			else
 				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
-			NET_INC_STATS_BH(mib_idx);
+			NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
 			if (skb == tcp_write_queue_head(sk))
 				inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
@@ -2065,7 +2065,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 						  inet_csk(sk)->icsk_rto,
 						  TCP_RTO_MAX);
 
-		NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFORWARDRETRANS);
 	}
 }
 
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 6a480d1fd8f..328e0cf42b3 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -48,7 +48,7 @@ static void tcp_write_err(struct sock *sk)
 	sk->sk_error_report(sk);
 
 	tcp_done(sk);
-	NET_INC_STATS_BH(LINUX_MIB_TCPABORTONTIMEOUT);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
 }
 
 /* Do not allow orphaned sockets to eat all our resources.
@@ -89,7 +89,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset)
 		if (do_reset)
 			tcp_send_active_reset(sk, GFP_ATOMIC);
 		tcp_done(sk);
-		NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
 		return 1;
 	}
 	return 0;
@@ -179,7 +179,7 @@ static void tcp_delack_timer(unsigned long data)
 	if (sock_owned_by_user(sk)) {
 		/* Try again later. */
 		icsk->icsk_ack.blocked = 1;
-		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
 		sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN);
 		goto out_unlock;
 	}
@@ -198,7 +198,7 @@ static void tcp_delack_timer(unsigned long data)
 	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
 		struct sk_buff *skb;
 
-		NET_INC_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
 
 		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
 			sk->sk_backlog_rcv(sk, skb);
@@ -218,7 +218,7 @@ static void tcp_delack_timer(unsigned long data)
 			icsk->icsk_ack.ato = TCP_ATO_MIN;
 		}
 		tcp_send_ack(sk);
-		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
 	}
 	TCP_CHECK_TIMER(sk);
 
@@ -346,7 +346,7 @@ static void tcp_retransmit_timer(struct sock *sk)
 		} else {
 			mib_idx = LINUX_MIB_TCPTIMEOUTS;
 		}
-		NET_INC_STATS_BH(mib_idx);
+		NET_INC_STATS_BH(sock_net(sk), mib_idx);
 	}
 
 	if (tcp_use_frto(sk)) {
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index a9cc8ab33a4..00a8a5f9380 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -210,11 +210,11 @@ unique:
 
 	if (twp != NULL) {
 		*twp = tw;
-		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
+		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED);
 	} else if (tw != NULL) {
 		/* Silly. Should hash-dance instead... */
 		inet_twsk_deschedule(tw, death_row);
-		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
+		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED);
 
 		inet_twsk_put(tw);
 	}
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 3ecc1157994..6a68eeb7bbf 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -137,7 +137,7 @@ __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
 		;
 	*mssp = msstab[mssind] + 1;
 
-	NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESSENT);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
 
 	return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source,
 				     th->dest, ntohl(th->seq),
@@ -177,11 +177,11 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 
 	if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) ||
 	    (mss = cookie_check(skb, cookie)) == 0) {
-		NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESFAILED);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
 		goto out;
 	}
 
-	NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESRECV);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
 
 	/* check for timestamp cookie support */
 	memset(&tcp_opt, 0, sizeof(tcp_opt));
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d58b83ac06f..ca5b93a5c02 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -340,7 +340,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk))
-		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
+		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
 	if (sk->sk_state == TCP_CLOSE)
 		goto out;
@@ -349,7 +349,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	seq = ntohl(th->seq);
 	if (sk->sk_state != TCP_LISTEN &&
 	    !between(seq, tp->snd_una, tp->snd_nxt)) {
-		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
 	}
 
@@ -424,7 +424,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		BUG_TRAP(req->sk == NULL);
 
 		if (seq != tcp_rsk(req)->snt_isn) {
-			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 			goto out;
 		}
 
@@ -1449,9 +1449,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	return newsk;
 
 out_overflow:
-	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 out:
-	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	if (opt && opt != np->opt)
 		sock_kfree_s(sk, opt, opt->tot_len);
 	dst_release(dst);
diff --git a/net/sctp/input.c b/net/sctp/input.c
index ed8834e7f14..5ed93c05c23 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -486,7 +486,7 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
 	 * servers this needs to be solved differently.
 	 */
 	if (sock_owned_by_user(sk))
-		NET_INC_STATS_BH(&init_net, LINUX_MIB_LOCKDROPPEDICMPS);
+		NET_INC_STATS_BH(&init_net, LINUX_MIB_LOCKDROPPEDICMPS);
 
 	*app = asoc;
 	*tpp = transport;