author	Pavel Emelyanov <xemul@openvz.org>	2008-07-16 23:31:16 -0400
committer	David S. Miller <davem@davemloft.net>	2008-07-16 23:31:16 -0400
commit	de0744af1fe2d0a3d428f6af0f2fe1f6179b1a9c (patch)
tree	68d02820b1aa13e8fa9743c0ece5930a13d5a205 /net/ipv4
parent	4e6734447dbc7a0a85e09616821c0782d9fb1141 (diff)
mib: add net to NET_INC_STATS_BH
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
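Note for readers skimming the diff below: the conversion is mechanical. Every NET_INC_STATS_BH() call site now passes the relevant struct net as a new first argument, derived from whatever context the caller already has (sock_net(sk) for sockets, twsk_net(tw) for timewait sockets, or the net pointer already in scope in arp.c and tcp_v4_err()). The macro definition itself presumably changes outside net/ipv4 (this view is limited to net/ipv4), so only the call sites appear here. A minimal illustrative sketch of the new call-site pattern follows; the function name example_count_timeout_abort and the exact header choices are assumptions for illustration, not code from this patch:

    /* Sketch only -- shows the call-site pattern this patch converts to. */
    #include <net/sock.h>     /* sock_net() */
    #include <net/ip.h>       /* NET_INC_STATS_BH() (assumed location) */
    #include <linux/snmp.h>   /* LINUX_MIB_* counter indices */

    static void example_count_timeout_abort(struct sock *sk)
    {
            /* before this patch:
             *     NET_INC_STATS_BH(LINUX_MIB_TCPABORTONTIMEOUT);
             * after it, the namespace is passed explicitly:
             */
            NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
    }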
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/arp.c	2
-rw-r--r--	net/ipv4/inet_hashtables.c	4
-rw-r--r--	net/ipv4/syncookies.c	6
-rw-r--r--	net/ipv4/tcp.c	6
-rw-r--r--	net/ipv4/tcp_input.c	65
-rw-r--r--	net/ipv4/tcp_ipv4.c	12
-rw-r--r--	net/ipv4/tcp_minisocks.c	6
-rw-r--r--	net/ipv4/tcp_output.c	4
-rw-r--r--	net/ipv4/tcp_timer.c	12
9 files changed, 60 insertions(+), 57 deletions(-)
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index aab98b8a9945..b043eda60b04 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -426,7 +426,7 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
 	if (ip_route_output_key(net, &rt, &fl) < 0)
 		return 1;
 	if (rt->u.dst.dev != dev) {
-		NET_INC_STATS_BH(LINUX_MIB_ARPFILTER);
+		NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER);
 		flag = 1;
 	}
 	ip_rt_put(rt);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index eca5899729e3..115f53722d20 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -312,11 +312,11 @@ unique:
 
 	if (twp) {
 		*twp = tw;
-		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
+		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
 	} else if (tw) {
 		/* Silly. Should hash-dance instead... */
 		inet_twsk_deschedule(tw, death_row);
-		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
+		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
 
 		inet_twsk_put(tw);
 	}
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index fdde2ae07e24..51bc24d3b8a7 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -173,7 +173,7 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
 		;
 	*mssp = msstab[mssind] + 1;
 
-	NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESSENT);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
 
 	return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
 				     th->source, th->dest, ntohl(th->seq),
@@ -269,11 +269,11 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 
 	if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) ||
 	    (mss = cookie_check(skb, cookie)) == 0) {
-		NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESFAILED);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
 		goto out;
 	}
 
-	NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESRECV);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
 
 	/* check for timestamp cookie support */
 	memset(&tcp_opt, 0, sizeof(tcp_opt));
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 85f08291e928..9e0e45c37806 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1871,7 +1871,8 @@ adjudge_to_death:
 		if (tp->linger2 < 0) {
 			tcp_set_state(sk, TCP_CLOSE);
 			tcp_send_active_reset(sk, GFP_ATOMIC);
-			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
+			NET_INC_STATS_BH(sock_net(sk),
+					LINUX_MIB_TCPABORTONLINGER);
 		} else {
 			const int tmo = tcp_fin_time(sk);
 
@@ -1893,7 +1894,8 @@ adjudge_to_death:
1893 "sockets\n"); 1894 "sockets\n");
1894 tcp_set_state(sk, TCP_CLOSE); 1895 tcp_set_state(sk, TCP_CLOSE);
1895 tcp_send_active_reset(sk, GFP_ATOMIC); 1896 tcp_send_active_reset(sk, GFP_ATOMIC);
1896 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY); 1897 NET_INC_STATS_BH(sock_net(sk),
1898 LINUX_MIB_TCPABORTONMEMORY);
1897 } 1899 }
1898 } 1900 }
1899 1901
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index f50d8433f042..fac49a6e1611 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -961,7 +961,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 		else
 			mib_idx = LINUX_MIB_TCPSACKREORDER;
 
-		NET_INC_STATS_BH(mib_idx);
+		NET_INC_STATS_BH(sock_net(sk), mib_idx);
#if FASTRETRANS_DEBUG > 1
 		printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
 		       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -1157,7 +1157,7 @@ static void tcp_mark_lost_retrans(struct sock *sk)
 				tp->lost_out += tcp_skb_pcount(skb);
 				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 			}
-			NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
 		} else {
 			if (before(ack_seq, new_low_seq))
 				new_low_seq = ack_seq;
@@ -1181,7 +1181,7 @@ static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
 	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
 		dup_sack = 1;
 		tcp_dsack_seen(tp);
-		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
 	} else if (num_sacks > 1) {
 		u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
 		u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
@@ -1190,7 +1190,8 @@ static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
 		    !before(start_seq_0, start_seq_1)) {
 			dup_sack = 1;
 			tcp_dsack_seen(tp);
-			NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
+			NET_INC_STATS_BH(sock_net(sk),
+					LINUX_MIB_TCPDSACKOFORECV);
 		}
 	}
 
@@ -1476,7 +1477,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
 				mib_idx = LINUX_MIB_TCPSACKDISCARD;
 			}
 
-			NET_INC_STATS_BH(mib_idx);
+			NET_INC_STATS_BH(sock_net(sk), mib_idx);
 			if (i == 0)
 				first_sack_index = -1;
 			continue;
@@ -1969,7 +1970,7 @@ static int tcp_check_sack_reneging(struct sock *sk, int flag)
{
 	if (flag & FLAG_SACK_RENEGING) {
 		struct inet_connection_sock *icsk = inet_csk(sk);
-		NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
 
 		tcp_enter_loss(sk, 1);
 		icsk->icsk_retransmits++;
@@ -2401,7 +2402,7 @@ static int tcp_try_undo_recovery(struct sock *sk)
 		else
 			mib_idx = LINUX_MIB_TCPFULLUNDO;
 
-		NET_INC_STATS_BH(mib_idx);
+		NET_INC_STATS_BH(sock_net(sk), mib_idx);
 		tp->undo_marker = 0;
 	}
 	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
@@ -2424,7 +2425,7 @@ static void tcp_try_undo_dsack(struct sock *sk)
 		DBGUNDO(sk, "D-SACK");
 		tcp_undo_cwr(sk, 1);
 		tp->undo_marker = 0;
-		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
 	}
}
 
@@ -2447,7 +2448,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
 
 		DBGUNDO(sk, "Hoe");
 		tcp_undo_cwr(sk, 0);
-		NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
 
 		/* So... Do not make Hoe's retransmit yet.
 		 * If the first packet was delayed, the rest
@@ -2476,7 +2477,7 @@ static int tcp_try_undo_loss(struct sock *sk)
 		DBGUNDO(sk, "partial loss");
 		tp->lost_out = 0;
 		tcp_undo_cwr(sk, 1);
-		NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
 		inet_csk(sk)->icsk_retransmits = 0;
 		tp->undo_marker = 0;
 		if (tcp_is_sack(tp))
@@ -2595,7 +2596,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 	    icsk->icsk_ca_state != TCP_CA_Open &&
 	    tp->fackets_out > tp->reordering) {
 		tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering);
-		NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS);
 	}
 
 	/* D. Check consistency of the current state. */
@@ -2700,7 +2701,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 		else
 			mib_idx = LINUX_MIB_TCPSACKRECOVERY;
 
-		NET_INC_STATS_BH(mib_idx);
+		NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
 		tp->high_seq = tp->snd_nxt;
 		tp->prior_ssthresh = 0;
@@ -3211,7 +3212,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
 		}
 		tp->frto_counter = 0;
 		tp->undo_marker = 0;
-		NET_INC_STATS_BH(LINUX_MIB_TCPSPURIOUSRTOS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS);
 	}
 	return 0;
}
@@ -3264,12 +3265,12 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 
 		tcp_ca_event(sk, CA_EVENT_FAST_ACK);
 
-		NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS);
 	} else {
 		if (ack_seq != TCP_SKB_CB(skb)->end_seq)
 			flag |= FLAG_DATA;
 		else
-			NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS);
 
 		flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
 
@@ -3724,7 +3725,7 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 		else
 			mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
 
-		NET_INC_STATS_BH(mib_idx);
+		NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
 		tp->rx_opt.dsack = 1;
 		tp->duplicate_sack[0].start_seq = seq;
@@ -3750,7 +3751,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
 
 	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 		tcp_enter_quickack_mode(sk);
 
 		if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
@@ -4039,7 +4040,7 @@ queue_and_out:
 
 	if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
 		/* A retransmit, 2nd most common case. Force an immediate ack. */
-		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
out_of_window:
@@ -4181,7 +4182,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 			struct sk_buff *next = skb->next;
 			__skb_unlink(skb, list);
 			__kfree_skb(skb);
-			NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
 			skb = next;
 			continue;
 		}
@@ -4249,7 +4250,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 			struct sk_buff *next = skb->next;
 			__skb_unlink(skb, list);
 			__kfree_skb(skb);
-			NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
 			skb = next;
 			if (skb == tail ||
 			    tcp_hdr(skb)->syn ||
@@ -4312,7 +4313,7 @@ static int tcp_prune_ofo_queue(struct sock *sk)
 	int res = 0;
 
 	if (!skb_queue_empty(&tp->out_of_order_queue)) {
-		NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
 		__skb_queue_purge(&tp->out_of_order_queue);
 
 		/* Reset SACK state. A conforming SACK implementation will
@@ -4341,7 +4342,7 @@ static int tcp_prune_queue(struct sock *sk)
 
 	SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
 
-	NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED);
 
 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		tcp_clamp_window(sk);
@@ -4370,7 +4371,7 @@ static int tcp_prune_queue(struct sock *sk)
 	 * drop receive data on the floor. It will get retransmitted
 	 * and hopefully then we'll have sufficient space.
 	 */
-	NET_INC_STATS_BH(LINUX_MIB_RCVPRUNED);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED);
 
 	/* Massive buffer overcommit. */
 	tp->pred_flags = 0;
@@ -4837,7 +4838,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
 				__skb_pull(skb, tcp_header_len);
 				tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-				NET_INC_STATS_BH(LINUX_MIB_TCPHPHITSTOUSER);
+				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
 			}
 			if (copied_early)
 				tcp_cleanup_rbuf(sk, skb->len);
@@ -4860,7 +4861,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			if ((int)skb->truesize > sk->sk_forward_alloc)
 				goto step5;
 
-			NET_INC_STATS_BH(LINUX_MIB_TCPHPHITS);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
 			/* Bulk data transfer: receiver */
 			__skb_pull(skb, tcp_header_len);
@@ -4904,7 +4905,7 @@ slow_path:
 	if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
 	    tcp_paws_discard(sk, skb)) {
 		if (!th->rst) {
-			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
 			tcp_send_dupack(sk, skb);
 			goto discard;
 		}
@@ -4940,7 +4941,7 @@ slow_path:
 
 	if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
 		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
-		NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
 		tcp_reset(sk);
 		return 1;
 	}
@@ -4996,7 +4997,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
 		    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
 			     tcp_time_stamp)) {
-			NET_INC_STATS_BH(LINUX_MIB_PAWSACTIVEREJECTED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED);
 			goto reset_and_undo;
 		}
 
@@ -5280,7 +5281,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
 	    tcp_paws_discard(sk, skb)) {
 		if (!th->rst) {
-			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
 			tcp_send_dupack(sk, skb);
 			goto discard;
 		}
@@ -5309,7 +5310,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	 *	Check for a SYN in window.
 	 */
 	if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-		NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
 		tcp_reset(sk);
 		return 1;
 	}
@@ -5391,7 +5392,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 			    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 			     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
 				tcp_done(sk);
-				NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
+				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 				return 1;
 			}
 
@@ -5451,7 +5452,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		if (sk->sk_shutdown & RCV_SHUTDOWN) {
 			if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 			    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
-				NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
+				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 				tcp_reset(sk);
 				return 1;
 			}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index e876312b950a..29adc668ad51 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -366,7 +366,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 	 * servers this needs to be solved differently.
 	 */
 	if (sock_owned_by_user(sk))
-		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
+		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
 	if (sk->sk_state == TCP_CLOSE)
 		goto out;
@@ -375,7 +375,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 	seq = ntohl(th->seq);
 	if (sk->sk_state != TCP_LISTEN &&
 	    !between(seq, tp->snd_una, tp->snd_nxt)) {
-		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
 	}
 
@@ -422,7 +422,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 		BUG_TRAP(!req->sk);
 
 		if (seq != tcp_rsk(req)->snt_isn) {
-			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 			goto out;
 		}
 
@@ -1251,7 +1251,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 			if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
 			    (s32)(peer->tcp_ts - req->ts_recent) >
 							TCP_PAWS_WINDOW) {
-				NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
+				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
 				goto drop_and_release;
 			}
 		}
@@ -1365,9 +1365,9 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	return newsk;
 
exit_overflow:
-	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit:
-	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	dst_release(dst);
 	return NULL;
}
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 8b02b1039968..204c42162660 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -244,7 +244,7 @@ kill:
 	}
 
 	if (paws_reject)
-		NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
 
 	if (!th->rst) {
 		/* In this case we must reset the TIMEWAIT timer.
@@ -611,7 +611,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 		if (!(flg & TCP_FLAG_RST))
 			req->rsk_ops->send_ack(skb, req);
 		if (paws_reject)
-			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
 		return NULL;
 	}
 
@@ -695,7 +695,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 	}
 
 	embryonic_reset:
-		NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
 		if (!(flg & TCP_FLAG_RST))
 			req->rsk_ops->send_reset(sk, skb);
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 176f0702b8ac..36a19707f67f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1995,7 +1995,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 				mib_idx = LINUX_MIB_TCPFASTRETRANS;
 			else
 				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
-			NET_INC_STATS_BH(mib_idx);
+			NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
 			if (skb == tcp_write_queue_head(sk))
 				inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
@@ -2065,7 +2065,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 						  inet_csk(sk)->icsk_rto,
 						  TCP_RTO_MAX);
 
-		NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFORWARDRETRANS);
 	}
}
 
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 6a480d1fd8f6..328e0cf42b3c 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -48,7 +48,7 @@ static void tcp_write_err(struct sock *sk)
 	sk->sk_error_report(sk);
 
 	tcp_done(sk);
-	NET_INC_STATS_BH(LINUX_MIB_TCPABORTONTIMEOUT);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}
 
/* Do not allow orphaned sockets to eat all our resources.
@@ -89,7 +89,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset)
 		if (do_reset)
 			tcp_send_active_reset(sk, GFP_ATOMIC);
 		tcp_done(sk);
-		NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
 		return 1;
 	}
 	return 0;
@@ -179,7 +179,7 @@ static void tcp_delack_timer(unsigned long data)
 	if (sock_owned_by_user(sk)) {
 		/* Try again later. */
 		icsk->icsk_ack.blocked = 1;
-		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
 		sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN);
 		goto out_unlock;
 	}
@@ -198,7 +198,7 @@ static void tcp_delack_timer(unsigned long data)
 	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
 		struct sk_buff *skb;
 
-		NET_INC_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
 
 		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
 			sk->sk_backlog_rcv(sk, skb);
@@ -218,7 +218,7 @@ static void tcp_delack_timer(unsigned long data)
 			icsk->icsk_ack.ato = TCP_ATO_MIN;
 		}
 		tcp_send_ack(sk);
-		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
 	}
 	TCP_CHECK_TIMER(sk);
 
@@ -346,7 +346,7 @@ static void tcp_retransmit_timer(struct sock *sk)
 		} else {
 			mib_idx = LINUX_MIB_TCPTIMEOUTS;
 		}
-		NET_INC_STATS_BH(mib_idx);
+		NET_INC_STATS_BH(sock_net(sk), mib_idx);
 	}
 
 	if (tcp_use_frto(sk)) {